summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xCONTRIBUTORS14
-rwxr-xr-xVERSION4
-rwxr-xr-xlinux/ws_main.py1
-rwxr-xr-xlinux_dpdk/ws_main.py8
-rwxr-xr-xscripts/automation/regression/CProgressDisp.py16
-rwxr-xr-xscripts/automation/regression/functional_tests/pkt_bld_general_test.py2
-rwxr-xr-xscripts/automation/regression/functional_tests/platform_cmd_cache_test.py6
-rwxr-xr-xscripts/automation/regression/functional_tests/platform_dual_if_obj_test.py2
-rwxr-xr-xscripts/automation/regression/functional_tests/platform_if_manager_test.py10
-rw-r--r--scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py26
-rw-r--r--scripts/automation/regression/functional_tests/stl_basic_tests.py127
-rwxr-xr-xscripts/automation/regression/misc_methods.py37
-rwxr-xr-xscripts/automation/regression/outer_packages.py56
-rwxr-xr-xscripts/automation/regression/platform_cmd_link.py31
-rw-r--r--scripts/automation/regression/setups/trex17/benchmark.yaml62
-rw-r--r--scripts/automation/regression/setups/trex17/config.yaml39
-rw-r--r--scripts/automation/regression/trex.py28
-rwxr-xr-xscripts/automation/regression/trex_unit_test.py15
-rwxr-xr-xscripts/automation/trex_control_plane/client/trex_client.py9
-rwxr-xr-xscripts/automation/trex_control_plane/server/outer_packages.py2
-rwxr-xr-xscripts/automation/trex_control_plane/stl/console/trex_console.py81
-rw-r--r--scripts/automation/trex_control_plane/stl/console/trex_tui.py32
-rw-r--r--scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows.py16
-rw-r--r--scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows1.py16
-rw-r--r--scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py26
-rw-r--r--scripts/automation/trex_control_plane/stl/examples/stl_imix.py18
-rw-r--r--scripts/automation/trex_control_plane/stl/examples/stl_pcap.py6
-rw-r--r--scripts/automation/trex_control_plane/stl/examples/stl_profile.py12
-rw-r--r--scripts/automation/trex_control_plane/stl/examples/stl_run_udp_simple.py18
-rw-r--r--scripts/automation/trex_control_plane/stl/examples/stl_simple_burst.py10
-rw-r--r--scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py16
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/__init__.py6
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/api.py12
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py16
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py96
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_exceptions.py2
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py92
-rwxr-xr-xscripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_hltapi.py49
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py17
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py64
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py41
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py73
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py54
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py4
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py47
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py16
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py4
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/pcap.py29
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py4
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py7
-rw-r--r--scripts/external_libs/dpkt-1.8.6/AUTHORS60
-rw-r--r--scripts/external_libs/dpkt-1.8.6/CHANGES71
-rw-r--r--scripts/external_libs/dpkt-1.8.6/LICENSE28
-rw-r--r--scripts/external_libs/dpkt-1.8.6/MANIFEST.in2
-rw-r--r--scripts/external_libs/dpkt-1.8.6/PKG-INFO122
-rw-r--r--scripts/external_libs/dpkt-1.8.6/README.rst104
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/PKG-INFO122
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/SOURCES.txt80
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/dependency_links.txt1
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/not-zip-safe1
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/top_level.txt1
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/__init__.py70
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/ah.py31
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/aim.py47
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/aoe.py70
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/aoeata.py34
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/aoecfg.py24
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/arp.py31
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/asn1.py119
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/bgp.py760
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/cdp.py95
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/crc32c.py80
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/dhcp.py168
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/diameter.py181
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/dns.py342
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/dot1q.py1110
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/dpkt.py168
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/dtp.py23
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/esp.py11
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/ethernet.py140
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/gre.py103
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/gzip.py117
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/h225.py217
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/hsrp.py32
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/http.py237
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/icmp.py122
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/icmp6.py72
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/ieee80211.py706
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/igmp.py17
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/ip.py301
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/ip6.py307
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/ipx.py17
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/llc.py55
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/loopback.py20
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/mrt.py92
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/netbios.py154
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/netflow.py214
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/ntp.py83
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/ospf.py25
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/pcap.py164
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/pim.py24
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/pmap.py17
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/ppp.py63
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/pppoe.py38
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/qq.py224
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/radiotap.py292
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/radius.py88
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/rfb.py81
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/rip.py84
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/rpc.py146
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/rtp.py70
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/rx.py44
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/sccp.py196
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/sctp.py90
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/sip.py32
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/sll.py23
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/smb.py19
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/snoop.py118
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/ssl.py560
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/ssl_ciphersuites.py76
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/stp.py21
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/stun.py45
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/tcp.py98
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/telnet.py77
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/tftp.py55
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/tns.py24
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/tpkt.py15
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/udp.py15
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/vrrp.py48
-rw-r--r--scripts/external_libs/dpkt-1.8.6/dpkt/yahoo.py29
-rw-r--r--scripts/external_libs/dpkt-1.8.6/setup.cfg20
-rw-r--r--scripts/external_libs/dpkt-1.8.6/setup.py40
-rwxr-xr-xscripts/external_libs/nose-1.3.4/AUTHORS27
-rwxr-xr-xscripts/external_libs/nose-1.3.4/PKG-INFO38
-rwxr-xr-xscripts/external_libs/nose-1.3.4/lgpl.txt504
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/__init__.py (renamed from scripts/external_libs/nose-1.3.4/nose/__init__.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/__main__.py (renamed from scripts/external_libs/nose-1.3.4/nose/__main__.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/case.py (renamed from scripts/external_libs/nose-1.3.4/nose/case.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/commands.py (renamed from scripts/external_libs/nose-1.3.4/nose/commands.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/config.py (renamed from scripts/external_libs/nose-1.3.4/nose/config.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/core.py (renamed from scripts/external_libs/nose-1.3.4/nose/core.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/exc.py (renamed from scripts/external_libs/nose-1.3.4/nose/exc.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/ext/__init__.py (renamed from scripts/external_libs/nose-1.3.4/nose/ext/__init__.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/ext/dtcompat.py (renamed from scripts/external_libs/nose-1.3.4/nose/ext/dtcompat.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/failure.py (renamed from scripts/external_libs/nose-1.3.4/nose/failure.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/importer.py (renamed from scripts/external_libs/nose-1.3.4/nose/importer.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/inspector.py (renamed from scripts/external_libs/nose-1.3.4/nose/inspector.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/loader.py (renamed from scripts/external_libs/nose-1.3.4/nose/loader.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/__init__.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/__init__.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/allmodules.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/allmodules.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/attrib.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/attrib.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/base.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/base.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/builtin.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/builtin.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/capture.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/capture.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/collect.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/collect.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/cover.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/cover.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/debug.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/debug.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/deprecated.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/deprecated.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/doctests.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/doctests.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/errorclass.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/errorclass.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/failuredetail.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/failuredetail.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/isolate.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/isolate.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/logcapture.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/logcapture.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/manager.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/manager.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/multiprocess.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/multiprocess.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/plugintest.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/plugintest.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/prof.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/prof.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/skip.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/skip.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/testid.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/testid.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/xunit.py (renamed from scripts/external_libs/nose-1.3.4/nose/plugins/xunit.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/proxy.py (renamed from scripts/external_libs/nose-1.3.4/nose/proxy.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/pyversion.py (renamed from scripts/external_libs/nose-1.3.4/nose/pyversion.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/result.py (renamed from scripts/external_libs/nose-1.3.4/nose/result.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/selector.py (renamed from scripts/external_libs/nose-1.3.4/nose/selector.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/sphinx/__init__.py (renamed from scripts/external_libs/nose-1.3.4/nose/sphinx/__init__.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/sphinx/pluginopts.py (renamed from scripts/external_libs/nose-1.3.4/nose/sphinx/pluginopts.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/suite.py (renamed from scripts/external_libs/nose-1.3.4/nose/suite.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/tools/__init__.py (renamed from scripts/external_libs/nose-1.3.4/nose/tools/__init__.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/tools/nontrivial.py (renamed from scripts/external_libs/nose-1.3.4/nose/tools/nontrivial.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/tools/trivial.py (renamed from scripts/external_libs/nose-1.3.4/nose/tools/trivial.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/twistedtools.py (renamed from scripts/external_libs/nose-1.3.4/nose/twistedtools.py)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/usage.txt (renamed from scripts/external_libs/nose-1.3.4/nose/usage.txt)0
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/util.py (renamed from scripts/external_libs/nose-1.3.4/nose/util.py)0
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/__init__.py15
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/__main__.py8
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/case.py398
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/commands.py172
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/config.py661
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/core.py341
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/exc.py9
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/ext/__init__.py3
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/ext/dtcompat.py2272
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/failure.py42
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/importer.py167
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/inspector.py208
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/loader.py619
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/__init__.py190
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/allmodules.py45
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/attrib.py287
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/base.py725
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/builtin.py34
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/capture.py115
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/collect.py95
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/cover.py253
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/debug.py67
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/deprecated.py45
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/doctests.py452
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/errorclass.py210
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/failuredetail.py49
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/isolate.py103
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/logcapture.py245
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/manager.py460
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/multiprocess.py835
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/plugintest.py417
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/prof.py154
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/skip.py63
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/testid.py306
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/plugins/xunit.py329
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/proxy.py188
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/pyversion.py214
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/result.py200
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/selector.py247
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/sphinx/__init__.py1
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/sphinx/pluginopts.py189
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/suite.py610
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/tools/__init__.py15
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/tools/nontrivial.py151
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/tools/trivial.py54
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/twistedtools.py173
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/usage.txt115
-rw-r--r--scripts/external_libs/nose-1.3.4/python3/nose/util.py660
-rw-r--r--scripts/external_libs/pyyaml-3.11/python2/yaml/__init__.py315
-rw-r--r--scripts/external_libs/pyyaml-3.11/python2/yaml/composer.py139
-rw-r--r--scripts/external_libs/pyyaml-3.11/python2/yaml/constructor.py675
-rw-r--r--scripts/external_libs/pyyaml-3.11/python2/yaml/cyaml.py85
-rw-r--r--scripts/external_libs/pyyaml-3.11/python2/yaml/dumper.py62
-rw-r--r--scripts/external_libs/pyyaml-3.11/python2/yaml/emitter.py1140
-rw-r--r--scripts/external_libs/pyyaml-3.11/python2/yaml/error.py75
-rw-r--r--scripts/external_libs/pyyaml-3.11/python2/yaml/events.py86
-rw-r--r--scripts/external_libs/pyyaml-3.11/python2/yaml/loader.py40
-rw-r--r--scripts/external_libs/pyyaml-3.11/python2/yaml/nodes.py49
-rw-r--r--scripts/external_libs/pyyaml-3.11/python2/yaml/parser.py589
-rw-r--r--scripts/external_libs/pyyaml-3.11/python2/yaml/reader.py190
-rw-r--r--scripts/external_libs/pyyaml-3.11/python2/yaml/representer.py484
-rw-r--r--scripts/external_libs/pyyaml-3.11/python2/yaml/resolver.py224
-rw-r--r--scripts/external_libs/pyyaml-3.11/python2/yaml/scanner.py1457
-rw-r--r--scripts/external_libs/pyyaml-3.11/python2/yaml/serializer.py111
-rw-r--r--scripts/external_libs/pyyaml-3.11/python2/yaml/tokens.py104
-rw-r--r--scripts/external_libs/pyyaml-3.11/python3/yaml/__init__.py312
-rw-r--r--scripts/external_libs/pyyaml-3.11/python3/yaml/composer.py139
-rw-r--r--scripts/external_libs/pyyaml-3.11/python3/yaml/constructor.py686
-rw-r--r--scripts/external_libs/pyyaml-3.11/python3/yaml/cyaml.py85
-rw-r--r--scripts/external_libs/pyyaml-3.11/python3/yaml/dumper.py62
-rw-r--r--scripts/external_libs/pyyaml-3.11/python3/yaml/emitter.py1137
-rw-r--r--scripts/external_libs/pyyaml-3.11/python3/yaml/error.py75
-rw-r--r--scripts/external_libs/pyyaml-3.11/python3/yaml/events.py86
-rw-r--r--scripts/external_libs/pyyaml-3.11/python3/yaml/loader.py40
-rw-r--r--scripts/external_libs/pyyaml-3.11/python3/yaml/nodes.py49
-rw-r--r--scripts/external_libs/pyyaml-3.11/python3/yaml/parser.py589
-rw-r--r--scripts/external_libs/pyyaml-3.11/python3/yaml/reader.py192
-rw-r--r--scripts/external_libs/pyyaml-3.11/python3/yaml/representer.py374
-rw-r--r--scripts/external_libs/pyyaml-3.11/python3/yaml/resolver.py224
-rw-r--r--scripts/external_libs/pyyaml-3.11/python3/yaml/scanner.py1448
-rw-r--r--scripts/external_libs/pyyaml-3.11/python3/yaml/serializer.py111
-rw-r--r--scripts/external_libs/pyyaml-3.11/python3/yaml/tokens.py104
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/__init__.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/auth/__init__.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/auth/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/auth/base.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/auth/base.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/auth/certs.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/auth/certs.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/auth/ioloop.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/auth/ioloop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/auth/thread.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/auth/thread.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/__init__.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/__init__.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/_cdefs.h (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/_cdefs.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/_cffi.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/_cffi.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/_poll.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/_poll.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/_verify.c (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/_verify.c)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/constants.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/constants.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/context.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/context.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/devices.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/devices.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/error.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/error.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/message.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/message.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/socket.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/socket.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/utils.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/utils.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/__init__.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/_device.so (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/_device.so)bin95085 -> 95085 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/_poll.so (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/_poll.so)bin134770 -> 134770 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/_version.so (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/_version.so)bin37898 -> 37898 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/checkrc.pxd (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/checkrc.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/constants.so (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/constants.so)bin138758 -> 138758 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/context.pxd (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/context.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/context.so (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/context.so)bin143066 -> 143066 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/error.so (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/error.so)bin50334 -> 50334 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/libzmq.pxd (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/libzmq.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/message.pxd (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/message.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/message.so (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/message.so)bin213231 -> 213231 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/socket.pxd (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/socket.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/socket.so (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/socket.so)bin354893 -> 354893 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/utils.pxd (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/utils.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/utils.so (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/utils.so)bin90506 -> 90506 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/select.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/backend/select.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/__init__.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/devices/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/basedevice.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/devices/basedevice.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/monitoredqueue.pxd (renamed from scripts/external_libs/platform/cel59/32bit/zmq/devices/monitoredqueue.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/monitoredqueue.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/devices/monitoredqueue.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/monitoredqueue.so (renamed from scripts/external_libs/platform/cel59/32bit/zmq/devices/monitoredqueue.so)bin135659 -> 135659 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/monitoredqueuedevice.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/devices/monitoredqueuedevice.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/proxydevice.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/devices/proxydevice.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/error.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/error.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/__init__.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/eventloop/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/ioloop.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/eventloop/ioloop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/__init__.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/concurrent.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/concurrent.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/ioloop.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/ioloop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/log.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/log.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/__init__.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/auto.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/auto.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/common.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/common.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/interface.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/interface.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/posix.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/posix.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/windows.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/windows.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/stack_context.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/stack_context.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/util.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/util.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/zmqstream.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/eventloop/zmqstream.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/__init__.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/green/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/core.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/green/core.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/device.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/green/device.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/eventloop/__init__.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/green/eventloop/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/eventloop/ioloop.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/green/eventloop/ioloop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/eventloop/zmqstream.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/green/eventloop/zmqstream.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/poll.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/green/poll.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/libzmq.so.3 (renamed from scripts/external_libs/platform/cel59/32bit/zmq/libzmq.so.3)bin604489 -> 604489 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/log/__init__.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/log/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/log/handlers.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/log/handlers.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/ssh/__init__.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/ssh/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/ssh/forward.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/ssh/forward.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/ssh/tunnel.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/ssh/tunnel.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/__init__.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/sugar/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/attrsettr.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/sugar/attrsettr.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/constants.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/sugar/constants.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/context.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/sugar/context.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/frame.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/sugar/frame.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/poll.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/sugar/poll.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/socket.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/sugar/socket.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/tracker.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/sugar/tracker.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/version.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/sugar/version.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/__init__.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_auth.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_auth.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_cffi_backend.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_cffi_backend.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_constants.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_constants.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_context.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_context.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_device.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_device.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_error.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_error.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_etc.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_etc.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_imports.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_imports.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_ioloop.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_ioloop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_log.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_log.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_message.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_message.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_monitor.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_monitor.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_monqueue.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_monqueue.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_multipart.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_multipart.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_pair.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_pair.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_poll.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_poll.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_pubsub.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_pubsub.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_reqrep.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_reqrep.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_security.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_security.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_socket.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_socket.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_stopwatch.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_stopwatch.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_version.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_version.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_win32_shim.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_win32_shim.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_z85.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_z85.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_zmqstream.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/tests/test_zmqstream.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/__init__.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/buffers.pxd (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/buffers.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/compiler.json (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/compiler.json)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/config.json (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/config.json)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/constant_names.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/constant_names.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/garbage.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/garbage.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/getpid_compat.h (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/getpid_compat.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/interop.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/interop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/ipcmaxlen.h (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/ipcmaxlen.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/jsonapi.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/jsonapi.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/monitor.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/monitor.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/pyversion_compat.h (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/pyversion_compat.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/sixcerpt.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/sixcerpt.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/strtypes.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/strtypes.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/win32.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/win32.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/z85.py (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/z85.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/zmq_compat.h (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/zmq_compat.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/zmq_constants.h (renamed from scripts/external_libs/platform/cel59/32bit/zmq/utils/zmq_constants.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/__init__.py (renamed from scripts/external_libs/platform/cel59/zmq/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/auth/__init__.py (renamed from scripts/external_libs/platform/cel59/zmq/auth/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/auth/base.py (renamed from scripts/external_libs/platform/cel59/zmq/auth/base.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/auth/certs.py (renamed from scripts/external_libs/platform/cel59/zmq/auth/certs.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/auth/ioloop.py (renamed from scripts/external_libs/platform/cel59/zmq/auth/ioloop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/auth/thread.py (renamed from scripts/external_libs/platform/cel59/zmq/auth/thread.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/__init__.py (renamed from scripts/external_libs/platform/cel59/zmq/backend/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/__init__.py (renamed from scripts/external_libs/platform/cel59/zmq/backend/cffi/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/_cdefs.h (renamed from scripts/external_libs/platform/cel59/zmq/backend/cffi/_cdefs.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/_cffi.py (renamed from scripts/external_libs/platform/cel59/zmq/backend/cffi/_cffi.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/_poll.py (renamed from scripts/external_libs/platform/cel59/zmq/backend/cffi/_poll.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/_verify.c (renamed from scripts/external_libs/platform/cel59/zmq/backend/cffi/_verify.c)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/constants.py (renamed from scripts/external_libs/platform/cel59/zmq/backend/cffi/constants.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/context.py (renamed from scripts/external_libs/platform/cel59/zmq/backend/cffi/context.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/devices.py (renamed from scripts/external_libs/platform/cel59/zmq/backend/cffi/devices.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/error.py (renamed from scripts/external_libs/platform/cel59/zmq/backend/cffi/error.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/message.py (renamed from scripts/external_libs/platform/cel59/zmq/backend/cffi/message.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/socket.py (renamed from scripts/external_libs/platform/cel59/zmq/backend/cffi/socket.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/utils.py (renamed from scripts/external_libs/platform/cel59/zmq/backend/cffi/utils.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/__init__.py (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_device.c (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/_device.c)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_device.pyx (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/_device.pyx)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_device.so (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/_device.so)bin109915 -> 109915 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_poll.c (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/_poll.c)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_poll.pyx (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/_poll.pyx)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_poll.so (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/_poll.so)bin178363 -> 178363 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_version.c (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/_version.c)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_version.pyx (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/_version.pyx)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_version.so (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/_version.so)bin38976 -> 38976 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/checkrc.pxd (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/checkrc.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/constant_enums.pxi (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/constant_enums.pxi)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/constants.c (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/constants.c)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/constants.pxi (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/constants.pxi)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/constants.pyx (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/constants.pyx)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/constants.so (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/constants.so)bin125944 -> 125944 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/context.c (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/context.c)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/context.pxd (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/context.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/context.pyx (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/context.pyx)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/context.so (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/context.so)bin182970 -> 182970 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/error.c (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/error.c)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/error.pyx (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/error.pyx)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/error.so (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/error.so)bin54791 -> 54791 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/libzmq.pxd (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/libzmq.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/message.c (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/message.c)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/message.pxd (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/message.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/message.pyx (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/message.pyx)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/message.so (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/message.so)bin268368 -> 268368 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/rebuffer.pyx (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/rebuffer.pyx)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/socket.c (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/socket.c)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/socket.pxd (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/socket.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/socket.pyx (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/socket.pyx)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/socket.so (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/socket.so)bin432735 -> 432735 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/utils.c (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/utils.c)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/utils.pxd (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/utils.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/utils.pyx (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/utils.pyx)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/utils.so (renamed from scripts/external_libs/platform/cel59/zmq/backend/cython/utils.so)bin98904 -> 98904 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/select.py (renamed from scripts/external_libs/platform/cel59/zmq/backend/select.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/__init__.py (renamed from scripts/external_libs/platform/cel59/zmq/devices/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/basedevice.py (renamed from scripts/external_libs/platform/cel59/zmq/devices/basedevice.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueue.c (renamed from scripts/external_libs/platform/cel59/zmq/devices/monitoredqueue.c)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueue.pxd (renamed from scripts/external_libs/platform/cel59/zmq/devices/monitoredqueue.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueue.py (renamed from scripts/external_libs/platform/cel59/zmq/devices/monitoredqueue.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueue.pyx (renamed from scripts/external_libs/platform/cel59/zmq/devices/monitoredqueue.pyx)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueue.so (renamed from scripts/external_libs/platform/cel59/zmq/devices/monitoredqueue.so)bin147488 -> 147488 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueuedevice.py (renamed from scripts/external_libs/platform/cel59/zmq/devices/monitoredqueuedevice.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/proxydevice.py (renamed from scripts/external_libs/platform/cel59/zmq/devices/proxydevice.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/error.py (renamed from scripts/external_libs/platform/cel59/zmq/error.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/__init__.py (renamed from scripts/external_libs/platform/cel59/zmq/eventloop/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/ioloop.py (renamed from scripts/external_libs/platform/cel59/zmq/eventloop/ioloop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/__init__.py (renamed from scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/concurrent.py (renamed from scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/concurrent.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/ioloop.py (renamed from scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/ioloop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/log.py (renamed from scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/log.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/__init__.py (renamed from scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/auto.py (renamed from scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/auto.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/common.py (renamed from scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/common.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/interface.py (renamed from scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/interface.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/posix.py (renamed from scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/posix.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/windows.py (renamed from scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/windows.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/stack_context.py (renamed from scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/stack_context.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/util.py (renamed from scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/util.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/zmqstream.py (renamed from scripts/external_libs/platform/cel59/zmq/eventloop/zmqstream.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/__init__.py (renamed from scripts/external_libs/platform/cel59/zmq/green/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/core.py (renamed from scripts/external_libs/platform/cel59/zmq/green/core.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/device.py (renamed from scripts/external_libs/platform/cel59/zmq/green/device.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/eventloop/__init__.py (renamed from scripts/external_libs/platform/cel59/zmq/green/eventloop/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/eventloop/ioloop.py (renamed from scripts/external_libs/platform/cel59/zmq/green/eventloop/ioloop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/eventloop/zmqstream.py (renamed from scripts/external_libs/platform/cel59/zmq/green/eventloop/zmqstream.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/poll.py (renamed from scripts/external_libs/platform/cel59/zmq/green/poll.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/libzmq.so.3bin0 -> 3284330 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/log/__init__.py (renamed from scripts/external_libs/platform/cel59/zmq/log/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/log/handlers.py (renamed from scripts/external_libs/platform/cel59/zmq/log/handlers.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/ssh/__init__.py (renamed from scripts/external_libs/platform/cel59/zmq/ssh/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/ssh/forward.py (renamed from scripts/external_libs/platform/cel59/zmq/ssh/forward.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/ssh/tunnel.py (renamed from scripts/external_libs/platform/cel59/zmq/ssh/tunnel.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/__init__.py (renamed from scripts/external_libs/platform/cel59/zmq/sugar/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/attrsettr.py (renamed from scripts/external_libs/platform/cel59/zmq/sugar/attrsettr.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/constants.py (renamed from scripts/external_libs/platform/cel59/zmq/sugar/constants.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/context.py (renamed from scripts/external_libs/platform/cel59/zmq/sugar/context.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/frame.py (renamed from scripts/external_libs/platform/cel59/zmq/sugar/frame.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/poll.py (renamed from scripts/external_libs/platform/cel59/zmq/sugar/poll.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/socket.py (renamed from scripts/external_libs/platform/cel59/zmq/sugar/socket.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/tracker.py (renamed from scripts/external_libs/platform/cel59/zmq/sugar/tracker.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/version.py (renamed from scripts/external_libs/platform/cel59/zmq/sugar/version.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/__init__.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_auth.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_auth.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_cffi_backend.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_cffi_backend.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_constants.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_constants.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_context.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_context.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_device.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_device.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_error.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_error.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_etc.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_etc.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_imports.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_imports.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_ioloop.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_ioloop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_log.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_log.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_message.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_message.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_monitor.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_monitor.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_monqueue.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_monqueue.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_multipart.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_multipart.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_pair.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_pair.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_poll.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_poll.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_pubsub.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_pubsub.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_reqrep.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_reqrep.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_security.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_security.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_socket.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_socket.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_stopwatch.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_stopwatch.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_version.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_version.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_win32_shim.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_win32_shim.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_z85.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_z85.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_zmqstream.py (renamed from scripts/external_libs/platform/cel59/zmq/tests/test_zmqstream.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/__init__.py (renamed from scripts/external_libs/platform/cel59/zmq/utils/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/buffers.pxd (renamed from scripts/external_libs/platform/cel59/zmq/utils/buffers.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/compiler.json (renamed from scripts/external_libs/platform/cel59/zmq/utils/compiler.json)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/config.json (renamed from scripts/external_libs/platform/cel59/zmq/utils/config.json)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/constant_names.py (renamed from scripts/external_libs/platform/cel59/zmq/utils/constant_names.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/garbage.py (renamed from scripts/external_libs/platform/cel59/zmq/utils/garbage.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/getpid_compat.h (renamed from scripts/external_libs/platform/cel59/zmq/utils/getpid_compat.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/interop.py (renamed from scripts/external_libs/platform/cel59/zmq/utils/interop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/ipcmaxlen.h (renamed from scripts/external_libs/platform/cel59/zmq/utils/ipcmaxlen.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/jsonapi.py (renamed from scripts/external_libs/platform/cel59/zmq/utils/jsonapi.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/monitor.py (renamed from scripts/external_libs/platform/cel59/zmq/utils/monitor.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/pyversion_compat.h (renamed from scripts/external_libs/platform/cel59/zmq/utils/pyversion_compat.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/sixcerpt.py (renamed from scripts/external_libs/platform/cel59/zmq/utils/sixcerpt.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/strtypes.py (renamed from scripts/external_libs/platform/cel59/zmq/utils/strtypes.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/win32.py (renamed from scripts/external_libs/platform/cel59/zmq/utils/win32.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/z85.py (renamed from scripts/external_libs/platform/cel59/zmq/utils/z85.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/zmq_compat.h (renamed from scripts/external_libs/platform/cel59/zmq/utils/zmq_compat.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/zmq_constants.h (renamed from scripts/external_libs/platform/cel59/zmq/utils/zmq_constants.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/__init__.py (renamed from scripts/external_libs/platform/fedora18/zmq/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/auth/__init__.py (renamed from scripts/external_libs/platform/fedora18/zmq/auth/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/auth/base.py (renamed from scripts/external_libs/platform/fedora18/zmq/auth/base.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/auth/certs.py (renamed from scripts/external_libs/platform/fedora18/zmq/auth/certs.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/auth/ioloop.py (renamed from scripts/external_libs/platform/fedora18/zmq/auth/ioloop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/auth/thread.py (renamed from scripts/external_libs/platform/fedora18/zmq/auth/thread.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/__init__.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/__init__.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cffi/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/_cdefs.h (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cffi/_cdefs.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/_cffi.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cffi/_cffi.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/_poll.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cffi/_poll.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/_verify.c (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cffi/_verify.c)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/constants.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cffi/constants.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/context.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cffi/context.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/devices.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cffi/devices.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/error.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cffi/error.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/message.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cffi/message.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/socket.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cffi/socket.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/utils.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cffi/utils.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/__init__.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_device.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/_device.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_device.so (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/_device.so)bin116272 -> 116272 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_poll.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/_poll.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_poll.so (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/_poll.so)bin164229 -> 164229 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_version.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/_version.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_version.so (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/_version.so)bin45734 -> 45734 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/checkrc.pxd (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/checkrc.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/constants.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/constants.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/constants.so (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/constants.so)bin189238 -> 189238 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/context.pxd (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/context.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/context.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/context.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/context.so (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/context.so)bin172716 -> 172716 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/error.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/error.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/error.so (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/error.so)bin63000 -> 63000 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/libzmq.pxd (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/libzmq.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/message.pxd (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/message.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/message.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/message.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/message.so (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/message.so)bin256078 -> 256078 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/socket.pxd (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/socket.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/socket.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/socket.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/socket.so (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/socket.so)bin472585 -> 472585 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/utils.pxd (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/utils.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/utils.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/utils.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/utils.so (renamed from scripts/external_libs/platform/fedora18/zmq/backend/cython/utils.so)bin106936 -> 106936 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/select.py (renamed from scripts/external_libs/platform/fedora18/zmq/backend/select.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/__init__.py (renamed from scripts/external_libs/platform/fedora18/zmq/devices/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/basedevice.py (renamed from scripts/external_libs/platform/fedora18/zmq/devices/basedevice.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/monitoredqueue.pxd (renamed from scripts/external_libs/platform/fedora18/zmq/devices/monitoredqueue.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/monitoredqueue.py (renamed from scripts/external_libs/platform/fedora18/zmq/devices/monitoredqueue.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/monitoredqueue.so (renamed from scripts/external_libs/platform/fedora18/zmq/devices/monitoredqueue.so)bin157950 -> 157950 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/monitoredqueuedevice.py (renamed from scripts/external_libs/platform/fedora18/zmq/devices/monitoredqueuedevice.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/proxydevice.py (renamed from scripts/external_libs/platform/fedora18/zmq/devices/proxydevice.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/error.py (renamed from scripts/external_libs/platform/fedora18/zmq/error.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/__init__.py (renamed from scripts/external_libs/platform/fedora18/zmq/eventloop/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/ioloop.py (renamed from scripts/external_libs/platform/fedora18/zmq/eventloop/ioloop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/__init__.py (renamed from scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/concurrent.py (renamed from scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/concurrent.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/ioloop.py (renamed from scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/ioloop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/log.py (renamed from scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/log.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/__init__.py (renamed from scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/auto.py (renamed from scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/auto.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/common.py (renamed from scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/common.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/interface.py (renamed from scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/interface.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/posix.py (renamed from scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/posix.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/windows.py (renamed from scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/windows.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/stack_context.py (renamed from scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/stack_context.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/util.py (renamed from scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/util.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/zmqstream.py (renamed from scripts/external_libs/platform/fedora18/zmq/eventloop/zmqstream.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/__init__.py (renamed from scripts/external_libs/platform/fedora18/zmq/green/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/core.py (renamed from scripts/external_libs/platform/fedora18/zmq/green/core.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/device.py (renamed from scripts/external_libs/platform/fedora18/zmq/green/device.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/eventloop/__init__.py (renamed from scripts/external_libs/platform/fedora18/zmq/green/eventloop/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/eventloop/ioloop.py (renamed from scripts/external_libs/platform/fedora18/zmq/green/eventloop/ioloop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/eventloop/zmqstream.py (renamed from scripts/external_libs/platform/fedora18/zmq/green/eventloop/zmqstream.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/poll.py (renamed from scripts/external_libs/platform/fedora18/zmq/green/poll.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/libzmq.so.3 (renamed from scripts/external_libs/platform/fedora18/zmq/libzmq.so.3)bin3150071 -> 3150071 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/log/__init__.py (renamed from scripts/external_libs/platform/fedora18/zmq/log/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/log/handlers.py (renamed from scripts/external_libs/platform/fedora18/zmq/log/handlers.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/ssh/__init__.py (renamed from scripts/external_libs/platform/fedora18/zmq/ssh/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/ssh/forward.py (renamed from scripts/external_libs/platform/fedora18/zmq/ssh/forward.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/ssh/tunnel.py (renamed from scripts/external_libs/platform/fedora18/zmq/ssh/tunnel.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/.version.py.swp (renamed from scripts/external_libs/platform/fedora18/zmq/sugar/.version.py.swp)bin12288 -> 12288 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/__init__.py (renamed from scripts/external_libs/platform/fedora18/zmq/sugar/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/attrsettr.py (renamed from scripts/external_libs/platform/fedora18/zmq/sugar/attrsettr.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/constants.py (renamed from scripts/external_libs/platform/fedora18/zmq/sugar/constants.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/context.py (renamed from scripts/external_libs/platform/fedora18/zmq/sugar/context.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/frame.py (renamed from scripts/external_libs/platform/fedora18/zmq/sugar/frame.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/poll.py (renamed from scripts/external_libs/platform/fedora18/zmq/sugar/poll.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/socket.py (renamed from scripts/external_libs/platform/fedora18/zmq/sugar/socket.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/tracker.py (renamed from scripts/external_libs/platform/fedora18/zmq/sugar/tracker.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/version.py (renamed from scripts/external_libs/platform/fedora18/zmq/sugar/version.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/__init__.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_auth.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_auth.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_cffi_backend.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_cffi_backend.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_constants.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_constants.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_context.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_context.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_device.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_device.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_error.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_error.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_etc.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_etc.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_imports.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_imports.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_ioloop.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_ioloop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_log.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_log.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_message.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_message.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_monitor.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_monitor.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_monqueue.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_monqueue.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_multipart.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_multipart.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_pair.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_pair.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_poll.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_poll.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_pubsub.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_pubsub.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_reqrep.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_reqrep.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_security.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_security.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_socket.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_socket.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_stopwatch.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_stopwatch.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_version.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_version.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_win32_shim.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_win32_shim.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_z85.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_z85.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_zmqstream.py (renamed from scripts/external_libs/platform/fedora18/zmq/tests/test_zmqstream.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/__init__.py (renamed from scripts/external_libs/platform/fedora18/zmq/utils/__init__.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/buffers.pxd (renamed from scripts/external_libs/platform/fedora18/zmq/utils/buffers.pxd)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/compiler.json (renamed from scripts/external_libs/platform/fedora18/zmq/utils/compiler.json)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/config.json (renamed from scripts/external_libs/platform/fedora18/zmq/utils/config.json)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/constant_names.py (renamed from scripts/external_libs/platform/fedora18/zmq/utils/constant_names.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/garbage.py (renamed from scripts/external_libs/platform/fedora18/zmq/utils/garbage.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/getpid_compat.h (renamed from scripts/external_libs/platform/fedora18/zmq/utils/getpid_compat.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/interop.py (renamed from scripts/external_libs/platform/fedora18/zmq/utils/interop.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/ipcmaxlen.h (renamed from scripts/external_libs/platform/fedora18/zmq/utils/ipcmaxlen.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/jsonapi.py (renamed from scripts/external_libs/platform/fedora18/zmq/utils/jsonapi.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/monitor.py (renamed from scripts/external_libs/platform/fedora18/zmq/utils/monitor.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/pyversion_compat.h (renamed from scripts/external_libs/platform/fedora18/zmq/utils/pyversion_compat.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/sixcerpt.py (renamed from scripts/external_libs/platform/fedora18/zmq/utils/sixcerpt.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/strtypes.py (renamed from scripts/external_libs/platform/fedora18/zmq/utils/strtypes.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/win32.py (renamed from scripts/external_libs/platform/fedora18/zmq/utils/win32.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/z85.py (renamed from scripts/external_libs/platform/fedora18/zmq/utils/z85.py)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/zmq_compat.h (renamed from scripts/external_libs/platform/fedora18/zmq/utils/zmq_compat.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/zmq_constants.h (renamed from scripts/external_libs/platform/fedora18/zmq/utils/zmq_constants.h)0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/__init__.py64
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/__init__.py10
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/base.py272
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/certs.py119
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/ioloop.py34
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/thread.py184
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/__init__.py45
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/__init__.py22
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/_cdefs.h68
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/_cffi.py127
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/_poll.py56
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/_verify.c12
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/constants.py15
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/context.py100
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/devices.py24
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/error.py13
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/message.py69
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/socket.py244
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/utils.py62
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/__init__.py23
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/_device.cpython-34m.sobin0 -> 97498 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/_poll.cpython-34m.sobin0 -> 139944 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/_version.cpython-34m.sobin0 -> 39265 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/checkrc.pxd23
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/constants.cpython-34m.sobin0 -> 177989 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/context.cpython-34m.sobin0 -> 138159 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/context.pxd41
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/error.cpython-34m.sobin0 -> 49568 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/libzmq.pxd110
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/message.cpython-34m.sobin0 -> 210926 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/message.pxd63
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/socket.cpython-34m.sobin0 -> 377409 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/socket.pxd47
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/utils.cpython-34m.sobin0 -> 93525 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/utils.pxd29
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/select.py39
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/__init__.py16
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/basedevice.py229
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/monitoredqueue.cpython-34m.sobin0 -> 131122 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/monitoredqueue.pxd177
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/monitoredqueue.py37
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/monitoredqueuedevice.py66
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/proxydevice.py90
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/error.py164
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/__init__.py5
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/ioloop.py193
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/__init__.py0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/concurrent.py11
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/ioloop.py829
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/log.py6
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/__init__.py0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/auto.py45
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/common.py91
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/interface.py63
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/posix.py70
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/windows.py20
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/stack_context.py376
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/util.py184
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/zmqstream.py529
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/__init__.py40
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/core.py287
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/device.py32
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/eventloop/__init__.py3
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/eventloop/ioloop.py33
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/eventloop/zmqstream.py11
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/poll.py95
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/libzmq.so.3bin0 -> 604489 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/log/__init__.py0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/log/handlers.py146
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/ssh/__init__.py1
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/ssh/forward.py91
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/ssh/tunnel.py376
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/__init__.py27
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/attrsettr.py52
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/constants.py98
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/context.py192
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/frame.py19
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/poll.py161
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/socket.py495
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/tracker.py120
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/version.py48
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/__init__.py211
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_auth.py431
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_cffi_backend.py310
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_constants.py104
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_context.py257
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_device.py146
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_error.py43
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_etc.py15
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_imports.py62
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_ioloop.py113
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_log.py116
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_message.py362
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_monitor.py71
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_monqueue.py227
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_multipart.py35
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_pair.py53
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_poll.py229
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_pubsub.py41
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_reqrep.py62
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_security.py212
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_socket.py450
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_stopwatch.py42
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_version.py44
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_win32_shim.py56
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_z85.py63
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_zmqstream.py34
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/__init__.py0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/buffers.pxd313
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/compiler.json19
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/config.json13
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/constant_names.py365
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/garbage.py180
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/getpid_compat.h6
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/interop.py33
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/ipcmaxlen.h21
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/jsonapi.py59
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/monitor.py68
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/pyversion_compat.h25
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/sixcerpt.py52
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/strtypes.py45
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/win32.py132
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/z85.py56
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/zmq_compat.h80
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/zmq_constants.h622
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/__init__.py64
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/__init__.py10
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/base.py272
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/certs.py119
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/ioloop.py34
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/thread.py184
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/__init__.py45
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/__init__.py22
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/_cdefs.h68
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/_cffi.py127
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/_poll.py56
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/_verify.c12
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/constants.py15
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/context.py100
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/devices.py24
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/error.py13
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/message.py69
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/socket.py244
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/utils.py62
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/__init__.py23
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/_device.cpython-34m.sobin0 -> 129115 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/_poll.cpython-34m.sobin0 -> 185685 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/_version.cpython-34m.sobin0 -> 47906 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/checkrc.pxd23
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/constants.cpython-34m.sobin0 -> 226114 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/context.cpython-34m.sobin0 -> 183749 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/context.pxd41
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/error.cpython-34m.sobin0 -> 62151 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/libzmq.pxd110
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/message.cpython-34m.sobin0 -> 287478 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/message.pxd63
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/socket.cpython-34m.sobin0 -> 522913 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/socket.pxd47
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/utils.cpython-34m.sobin0 -> 121602 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/utils.pxd29
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/select.py39
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/__init__.py16
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/basedevice.py229
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/monitoredqueue.cpython-34m.sobin0 -> 166197 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/monitoredqueue.pxd177
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/monitoredqueue.py37
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/monitoredqueuedevice.py66
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/proxydevice.py90
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/error.py164
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/__init__.py5
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/ioloop.py193
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/__init__.py0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/concurrent.py11
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/ioloop.py829
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/log.py6
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/__init__.py0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/auto.py45
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/common.py91
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/interface.py63
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/posix.py70
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/windows.py20
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/stack_context.py376
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/util.py184
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/zmqstream.py529
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/__init__.py40
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/core.py287
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/device.py32
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/eventloop/__init__.py3
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/eventloop/ioloop.py33
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/eventloop/zmqstream.py11
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/poll.py95
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/libzmq.so.3bin0 -> 3284330 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/log/__init__.py0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/log/handlers.py146
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/ssh/__init__.py1
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/ssh/forward.py91
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/ssh/tunnel.py376
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/__init__.py27
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/attrsettr.py52
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/constants.py98
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/context.py192
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/frame.py19
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/poll.py161
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/socket.py495
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/tracker.py120
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/version.py48
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/__init__.py211
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_auth.py431
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_cffi_backend.py310
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_constants.py104
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_context.py257
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_device.py146
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_error.py43
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_etc.py15
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_imports.py62
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_ioloop.py113
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_log.py116
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_message.py362
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_monitor.py71
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_monqueue.py227
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_multipart.py35
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_pair.py53
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_poll.py229
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_pubsub.py41
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_reqrep.py62
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_security.py212
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_socket.py450
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_stopwatch.py42
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_version.py44
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_win32_shim.py56
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_z85.py63
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_zmqstream.py34
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/__init__.py0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/buffers.pxd313
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/compiler.json19
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/config.json13
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/constant_names.py365
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/garbage.py180
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/getpid_compat.h6
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/interop.py33
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/ipcmaxlen.h21
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/jsonapi.py59
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/monitor.py68
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/pyversion_compat.h25
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/sixcerpt.py52
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/strtypes.py45
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/win32.py132
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/z85.py56
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/zmq_compat.h80
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/zmq_constants.h622
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/__init__.py64
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/__init__.py10
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/base.py272
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/certs.py119
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/ioloop.py34
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/thread.py184
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/__init__.py45
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/__init__.py22
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/_cdefs.h68
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/_cffi.py127
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/_poll.py56
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/_verify.c12
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/constants.py15
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/context.py100
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/devices.py24
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/error.py13
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/message.py69
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/socket.py244
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/utils.py62
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/__init__.py23
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/_device.cpython-34m.sobin0 -> 132310 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/_poll.cpython-34m.sobin0 -> 190430 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/_version.cpython-34m.sobin0 -> 49496 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/checkrc.pxd23
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/constants.cpython-34m.sobin0 -> 207992 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/context.cpython-34m.sobin0 -> 194763 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/context.pxd41
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/error.cpython-34m.sobin0 -> 65496 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/libzmq.pxd110
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/message.cpython-34m.sobin0 -> 291652 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/message.pxd63
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/socket.cpython-34m.sobin0 -> 534130 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/socket.pxd47
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/utils.cpython-34m.sobin0 -> 121076 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/utils.pxd29
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/select.py39
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/__init__.py16
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/basedevice.py229
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/monitoredqueue.cpython-34m.sobin0 -> 174423 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/monitoredqueue.pxd177
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/monitoredqueue.py37
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/monitoredqueuedevice.py66
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/proxydevice.py90
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/error.py164
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/__init__.py5
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/ioloop.py193
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/__init__.py0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/concurrent.py11
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/ioloop.py829
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/log.py6
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/__init__.py0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/auto.py45
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/common.py91
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/interface.py63
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/posix.py70
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/windows.py20
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/stack_context.py376
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/util.py184
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/zmqstream.py529
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/__init__.py40
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/core.py287
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/device.py32
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/eventloop/__init__.py3
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/eventloop/ioloop.py33
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/eventloop/zmqstream.py11
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/poll.py95
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/libzmq.so.3bin0 -> 3150071 bytes
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/log/__init__.py0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/log/handlers.py146
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/ssh/__init__.py1
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/ssh/forward.py91
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/ssh/tunnel.py376
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/__init__.py27
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/attrsettr.py52
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/constants.py98
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/context.py192
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/frame.py19
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/poll.py161
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/socket.py495
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/tracker.py120
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/version.py48
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/__init__.py211
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_auth.py431
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_cffi_backend.py310
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_constants.py104
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_context.py257
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_device.py146
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_error.py43
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_etc.py15
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_imports.py62
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_ioloop.py113
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_log.py116
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_message.py362
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_monitor.py71
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_monqueue.py227
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_multipart.py35
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_pair.py53
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_poll.py229
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_pubsub.py41
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_reqrep.py62
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_security.py212
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_socket.py450
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_stopwatch.py42
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_version.py44
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_win32_shim.py56
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_z85.py63
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_zmqstream.py34
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/__init__.py0
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/buffers.pxd313
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/compiler.json19
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/config.json9
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/constant_names.py365
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/garbage.py180
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/getpid_compat.h6
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/interop.py33
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/ipcmaxlen.h21
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/jsonapi.py59
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/monitor.py68
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/pyversion_compat.h25
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/sixcerpt.py52
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/strtypes.py45
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/win32.py132
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/z85.py56
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/zmq_compat.h80
-rw-r--r--scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/zmq_constants.h622
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/__init__.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/__init__.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/all.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/all.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/ansmachine.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/ansmachine.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/arch/__init__.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/arch/__init__.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/arch/bsd.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/arch/bsd.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/arch/linux.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/arch/linux.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/arch/pcapdnet.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/arch/pcapdnet.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/arch/solaris.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/arch/solaris.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/arch/unix.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/arch/unix.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/arch/windows/__init__.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/arch/windows/__init__.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/as_resolvers.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/as_resolvers.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/asn1/__init__.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/asn1/__init__.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/asn1/asn1.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/asn1/asn1.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/asn1/ber.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/asn1/ber.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/asn1/mib.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/asn1/mib.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/asn1fields.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/asn1fields.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/asn1packet.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/asn1packet.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/automaton.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/automaton.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/autorun.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/autorun.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/base_classes.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/base_classes.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/config.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/config.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/__init__.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/__init__.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/avs.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/avs.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/bgp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/bgp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/carp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/carp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/cdp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/cdp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/chdlc.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/chdlc.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/dtp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/dtp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/eigrp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/eigrp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/etherip.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/etherip.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/gsm_um.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/gsm_um.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/igmp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/igmp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/igmpv3.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/igmpv3.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ikev2.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/ikev2.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ldp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/ldp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/mpls.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/mpls.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ospf.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/ospf.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ppi.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/ppi.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ppi_cace.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/ppi_cace.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ppi_geotag.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/ppi_geotag.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ripng.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/ripng.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/rsvp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/rsvp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/skinny.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/skinny.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ubberlogger.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/ubberlogger.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/vqp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/vqp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/vtp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/vtp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/wpa_eapol.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/contrib/wpa_eapol.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/crypto/__init__.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/crypto/__init__.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/crypto/cert.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/crypto/cert.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/dadict.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/dadict.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/data.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/data.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/error.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/error.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/fields.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/fields.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/__init__.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/__init__.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/all.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/all.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/bluetooth.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/bluetooth.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/dhcp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/dhcp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/dhcp6.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/dhcp6.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/dns.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/dns.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/dot11.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/dot11.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/gprs.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/gprs.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/hsrp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/hsrp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/inet.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/inet.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/inet6.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/inet6.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/ipsec.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/ipsec.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/ir.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/ir.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/isakmp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/isakmp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/l2.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/l2.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/l2tp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/l2tp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/llmnr.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/llmnr.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/mgcp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/mgcp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/mobileip.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/mobileip.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/netbios.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/netbios.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/netflow.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/netflow.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/ntp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/ntp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/pflog.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/pflog.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/ppp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/ppp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/radius.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/radius.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/rip.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/rip.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/rtp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/rtp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/sctp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/sctp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/sebek.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/sebek.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/skinny.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/skinny.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/smb.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/smb.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/snmp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/snmp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/tftp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/tftp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/vrrp.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/vrrp.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/layers/x509.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/layers/x509.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/main.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/main.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/modules/__init__.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/modules/__init__.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/modules/geoip.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/modules/geoip.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/modules/nmap.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/modules/nmap.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/modules/p0f.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/modules/p0f.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/modules/queso.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/modules/queso.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/modules/voip.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/modules/voip.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/packet.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/packet.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/pipetool.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/pipetool.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/plist.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/plist.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/pton_ntop.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/pton_ntop.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/route.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/route.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/route6.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/route6.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/scapypipes.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/scapypipes.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/sendrecv.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/sendrecv.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/supersocket.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/supersocket.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/themes.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/themes.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/tools/UTscapy.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/tools/UTscapy.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/tools/__init__.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/tools/__init__.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/tools/check_asdis.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/tools/check_asdis.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/utils.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/utils.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/utils6.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/utils6.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python2/scapy/volatile.py (renamed from scripts/external_libs/scapy-2.3.1/scapy/volatile.py)0
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/__init__.py15
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/abc.py1
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/all.py49
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/ansmachine.py130
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/arch/__init__.py108
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/arch/bsd.py12
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/arch/cdnet.py229
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/arch/linux.py530
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/arch/pcapdnet.py565
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/arch/solaris.py16
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/arch/unix.py168
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/arch/windows/__init__.py501
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/arch/winpcapy.py739
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/as_resolvers.py115
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/asn1/__init__.py12
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/asn1/asn1.py321
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/asn1/ber.py370
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/asn1/mib.py149
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/asn1fields.py341
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/asn1packet.py26
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/automaton.py753
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/autorun.py142
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/base_classes.py237
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/config.py394
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/__init__.py8
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/avs.py57
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/bgp.py168
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/carp.py65
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/cdp.py306
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/chdlc.py42
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/dtp.py115
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/eigrp.py488
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/etherip.py19
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/gsm_um.py13119
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/gtp.py546
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/igmp.py171
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/igmpv3.py270
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ikev2.py362
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ldp.py475
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/mpls.py17
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ospf.py833
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ppi.py86
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ppi_cace.py87
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ppi_geotag.py464
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ripng.py41
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/rsvp.py188
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/skinny.py499
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ubberlogger.py101
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/vqp.py58
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/vtp.py171
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/wpa_eapol.py35
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/crypto/__init__.py17
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/crypto/cert.py2486
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/dadict.py91
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/data.py215
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/error.py60
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/fields.py935
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/__init__.py8
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/all.py45
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/bluetooth.py213
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/dhcp.py381
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/dhcp6.py1718
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/dns.py712
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/dot11.py560
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/gprs.py21
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/hsrp.py79
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/inet.py1569
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/inet6.py3047
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/ipsec.py995
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/ir.py44
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/isakmp.py355
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/l2.py543
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/l2tp.py36
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/llmnr.py65
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/mgcp.py45
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/mobileip.py47
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/netbios.py222
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/netflow.py48
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/ntp.py77
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/pflog.py59
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/ppp.py349
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/radius.py65
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/rip.py74
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/rtp.py40
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/sctp.py439
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/sebek.py109
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/skinny.py161
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/smb.py354
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/snmp.py255
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/tftp.py477
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/vrrp.py39
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/layers/x509.py108
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/main.py380
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/modules/__init__.py8
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/modules/geoip.py77
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/modules/nmap.py215
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/modules/p0f.py549
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/modules/queso.py113
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/modules/voip.py149
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/packet.py1360
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/pipetool.py566
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/plist.py517
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/pton_ntop.py90
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/route.py175
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/route6.py288
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/scapypipes.py123
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/sendrecv.py678
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/supersocket.py141
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/themes.py277
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/tools/UTscapy.py677
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/tools/__init__.py8
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/tools/check_asdis.py103
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/utils.py1054
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/utils6.py823
-rw-r--r--scripts/external_libs/scapy-2.3.1/python3/scapy/volatile.py685
-rw-r--r--scripts/external_libs/scapy3-0.18-origin.rarbin0 -> 367282 bytes
-rw-r--r--scripts/external_libs/texttable-0.8.4/texttable.py4
-rwxr-xr-xscripts/find_python.sh7
-rwxr-xr-xscripts/run_functional_tests23
-rw-r--r--scripts/stl/udp_1pkt_simple_test.py6
-rw-r--r--scripts/stl/udp_1pkt_simple_test2.py6
-rw-r--r--scripts/stl/udp_1pkt_tuple_gen_split.py2
-rwxr-xr-xscripts/trex-console3
-rwxr-xr-xsrc/bp_sim.cpp7
-rwxr-xr-xsrc/bp_sim.h5
-rw-r--r--src/debug.cpp12
-rw-r--r--src/flow_stat.cpp234
-rw-r--r--src/flow_stat.h27
-rw-r--r--src/flow_stat_parser.cpp52
-rw-r--r--src/flow_stat_parser.h36
-rw-r--r--src/gtest/trex_stateless_gtest.cpp2
-rw-r--r--src/internal_api/trex_platform_api.h13
-rw-r--r--src/latency.cpp11
-rw-r--r--src/latency.h4
-rw-r--r--src/main_dpdk.cpp377
-rw-r--r--src/main_dpdk.h10
-rwxr-xr-xsrc/msg_manager.cpp17
-rwxr-xr-xsrc/msg_manager.h26
-rw-r--r--src/stateless/cp/trex_stateless.cpp1
-rw-r--r--src/stateless/cp/trex_stateless_port.cpp7
-rw-r--r--src/stateless/cp/trex_stateless_port.h100
-rw-r--r--src/stateless/cp/trex_streams_compiler.cpp3
-rw-r--r--src/stateless/dp/trex_stateless_dp_core.cpp16
-rw-r--r--src/stateless/messaging/trex_stateless_messaging.cpp46
-rw-r--r--src/stateless/messaging/trex_stateless_messaging.h80
-rw-r--r--src/stateless/rx/trex_stateless_rx_core.cpp217
-rw-r--r--src/stateless/rx/trex_stateless_rx_core.h80
1306 files changed, 117477 insertions, 11469 deletions
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
index 21f87879..ba4f03a5 100755
--- a/CONTRIBUTORS
+++ b/CONTRIBUTORS
@@ -1,8 +1,12 @@
Hanoch haim hhaim@cisco.com
-Dave Johnson
-Wenxian Li
+Dave Johnson @cisco.com
+Wenxian Li @cisco.com
Dan Klein
-Itay Marom
-Ido Barnea
-Yaroslav Brustinov
+Itay Marom @cisco.com
+Ido Barnea @cisco.com
+Yaroslav Brustinov @cisco.com
+David Shen @cisco.com
+Bill Eubanks @cisco.com
+Tina Chen @cisco.com
+
diff --git a/VERSION b/VERSION
index ea5b4f56..8b606e76 100755
--- a/VERSION
+++ b/VERSION
@@ -1,4 +1,6 @@
-v1.96
+v1.97
+
+
diff --git a/linux/ws_main.py b/linux/ws_main.py
index 9422a8ff..58f5b661 100755
--- a/linux/ws_main.py
+++ b/linux/ws_main.py
@@ -258,6 +258,7 @@ includes_path =''' ../src/pal/linux/
../src/rpc-server/
../src/stateless/cp/
../src/stateless/dp/
+ ../src/stateless/rx/
../src/stateless/messaging/
../external_libs/json/
../external_libs/zmq/include/
diff --git a/linux_dpdk/ws_main.py b/linux_dpdk/ws_main.py
index ad8d259d..2aa06e3b 100755
--- a/linux_dpdk/ws_main.py
+++ b/linux_dpdk/ws_main.py
@@ -173,7 +173,8 @@ stateless_src = SrcGroup(dir='src/stateless/',
'cp/trex_vm_splitter.cpp',
'cp/trex_dp_port_events.cpp',
'dp/trex_stateless_dp_core.cpp',
- 'messaging/trex_stateless_messaging.cpp'
+ 'messaging/trex_stateless_messaging.cpp',
+ 'rx/trex_stateless_rx_core.cpp'
])
# JSON package
json_src = SrcGroup(dir='external_libs/json',
@@ -423,6 +424,7 @@ includes_path =''' ../src/pal/linux_dpdk/
../src/rpc-server/
../src/stateless/cp/
../src/stateless/dp/
+ ../src/stateless/rx/
../src/stateless/messaging/
../external_libs/yaml-cpp/include/
@@ -984,7 +986,9 @@ def publish_ext(bld, custom_source = None):
from_ = custom_source
else:
from_ = exec_p+'/'+release_name;
- os.system('rsync -avz -e "ssh -i %s" --rsync-path=/usr/bin/rsync %s %s@%s:%s/release/%s' % (Env().get_trex_ex_web_key(),from_, Env().get_trex_ex_web_user(),Env().get_trex_ex_web_srv(),Env().get_trex_ex_web_path() ,release_name) )
+ cmd='rsync -avz --progress -e "ssh -i %s" --rsync-path=/usr/bin/rsync %s %s@%s:%s/release/%s' % (Env().get_trex_ex_web_key(),from_, Env().get_trex_ex_web_user(),Env().get_trex_ex_web_srv(),Env().get_trex_ex_web_path() ,release_name)
+ print cmd
+ os.system( cmd )
os.system("ssh -i %s -l %s %s 'cd %s/release/;rm be_latest; ln -P %s be_latest' " %(Env().get_trex_ex_web_key(),Env().get_trex_ex_web_user(),Env().get_trex_ex_web_srv(),Env().get_trex_ex_web_path(),release_name))
#os.system("ssh -i %s -l %s %s 'cd %s/release/;rm latest; ln -P %s latest' " %(Env().get_trex_ex_web_key(),Env().get_trex_ex_web_user(),Env().get_trex_ex_web_srv(),Env().get_trex_ex_web_path(),release_name))
diff --git a/scripts/automation/regression/CProgressDisp.py b/scripts/automation/regression/CProgressDisp.py
index ec7920c3..b911c527 100755
--- a/scripts/automation/regression/CProgressDisp.py
+++ b/scripts/automation/regression/CProgressDisp.py
@@ -1,5 +1,5 @@
#!/router/bin/python
-
+from __future__ import print_function
import threading
import sys
import time
@@ -16,16 +16,16 @@ class ProgressThread(threading.Thread):
def run(self):
if self.notifyMessage is not None:
- print(self.notifyMessage),
+ print(self.notifyMessage, end=' ')
while not self.stoprequest.is_set():
- print "\b.",
+ print("\b.", end=' ')
sys.stdout.flush()
time.sleep(5)
def join(self, timeout=None):
if self.notifyMessage is not None:
- print termstyle.green("Done!\n"),
+ print(termstyle.green("Done!\n"), end=' ')
self.stoprequest.set()
super(ProgressThread, self).join(timeout)
@@ -44,7 +44,7 @@ class TimedProgressBar(threading.Thread):
def run (self):
# global g_stop
- print
+ print()
self.pbar.start()
try:
@@ -57,10 +57,10 @@ class TimedProgressBar(threading.Thread):
except KeyboardInterrupt:
# self.pbar.finish()
- print "\nInterrupted by user!!"
+ print("\nInterrupted by user!!")
self.join()
finally:
- print
+ print()
def join(self, isPlannedStop = True, timeout=None):
if isPlannedStop:
@@ -82,6 +82,6 @@ def timedProgressBar(time_in_secs):
time.sleep(0.5)
pbar.update(i)
pbar.finish()
- print
+ print()
diff --git a/scripts/automation/regression/functional_tests/pkt_bld_general_test.py b/scripts/automation/regression/functional_tests/pkt_bld_general_test.py
index 5f89eaff..9a1b708a 100755
--- a/scripts/automation/regression/functional_tests/pkt_bld_general_test.py
+++ b/scripts/automation/regression/functional_tests/pkt_bld_general_test.py
@@ -14,7 +14,7 @@ class CGeneralPktBld_Test(object):
@staticmethod
def print_packet(pkt_obj):
- print "\nGenerated packet:\n{}".format(repr(pkt_obj))
+ print("\nGenerated packet:\n{}".format(repr(pkt_obj)))
def setUp(self):
diff --git a/scripts/automation/regression/functional_tests/platform_cmd_cache_test.py b/scripts/automation/regression/functional_tests/platform_cmd_cache_test.py
index 24ccf7a5..0be21280 100755
--- a/scripts/automation/regression/functional_tests/platform_cmd_cache_test.py
+++ b/scripts/automation/regression/functional_tests/platform_cmd_cache_test.py
@@ -9,7 +9,7 @@ from nose.tools import assert_not_equal
class CCommandCache_Test(functional_general_test.CGeneralFunctional_Test):
def setUp(self):
- self.cache = CCommandCache()
+ self.cache = CCommandCache()
self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/1')
self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/2')
self.cache.add('conf', "arp 1.1.1.1 0000.0001.0000 arpa")
@@ -30,10 +30,10 @@ class CCommandCache_Test(functional_general_test.CGeneralFunctional_Test):
def test_dump_config (self):
import sys
- from StringIO import StringIO
+ from io import StringIO, BytesIO
saved_stdout = sys.stdout
try:
- out = StringIO()
+ out = BytesIO() if sys.version_info < (3,0) else StringIO()
sys.stdout = out
self.cache.dump_config()
output = out.getvalue().strip()
diff --git a/scripts/automation/regression/functional_tests/platform_dual_if_obj_test.py b/scripts/automation/regression/functional_tests/platform_dual_if_obj_test.py
index ff54b9ee..d848b466 100755
--- a/scripts/automation/regression/functional_tests/platform_dual_if_obj_test.py
+++ b/scripts/automation/regression/functional_tests/platform_dual_if_obj_test.py
@@ -9,7 +9,7 @@ from nose.tools import assert_not_equal
class CDualIfObj_Test(functional_general_test.CGeneralFunctional_Test):
def setUp(self):
- self.if_1 = CIfObj('gig0/0/1', '1.1.1.1', '2001:DB8:0:2222:0:0:0:1', '0000.0001.0000', '0000.0001.0000', IFType.Client)
+ self.if_1 = CIfObj('gig0/0/1', '1.1.1.1', '2001:DB8:0:2222:0:0:0:1', '0000.0001.0000', '0000.0001.0000', IFType.Client)
self.if_2 = CIfObj('gig0/0/2', '1.1.2.1', '2001:DB8:1:2222:0:0:0:1', '0000.0002.0000', '0000.0002.0000', IFType.Server)
self.if_3 = CIfObj('gig0/0/3', '1.1.3.1', '2001:DB8:2:2222:0:0:0:1', '0000.0003.0000', '0000.0003.0000', IFType.Client)
self.if_4 = CIfObj('gig0/0/4', '1.1.4.1', '2001:DB8:3:2222:0:0:0:1', '0000.0004.0000', '0000.0004.0000', IFType.Server)
diff --git a/scripts/automation/regression/functional_tests/platform_if_manager_test.py b/scripts/automation/regression/functional_tests/platform_if_manager_test.py
index b09e8d75..72015f55 100755
--- a/scripts/automation/regression/functional_tests/platform_if_manager_test.py
+++ b/scripts/automation/regression/functional_tests/platform_if_manager_test.py
@@ -28,13 +28,13 @@ class CIfManager_Test(functional_general_test.CGeneralFunctional_Test):
assert_equal( len(self.if_mng.get_dual_if_list()), 2 )
# check the classification with intf name
- assert_equal( map(CIfObj.get_name, self.if_mng.get_if_list() ), ['GigabitEthernet0/0/1','GigabitEthernet0/0/2','GigabitEthernet0/0/3','GigabitEthernet0/0/4'] )
- assert_equal( map(CIfObj.get_name, self.if_mng.get_if_list(is_duplicated = True) ), ['GigabitEthernet0/0/3','GigabitEthernet0/0/4'] )
- assert_equal( map(CIfObj.get_name, self.if_mng.get_if_list(is_duplicated = False) ), ['GigabitEthernet0/0/1','GigabitEthernet0/0/2'] )
- assert_equal( map(CIfObj.get_name, self.if_mng.get_duplicated_if() ), ['GigabitEthernet0/0/3', 'GigabitEthernet0/0/4'] )
+ assert_equal( list(map(CIfObj.get_name, self.if_mng.get_if_list()) ), ['GigabitEthernet0/0/1','GigabitEthernet0/0/2','GigabitEthernet0/0/3','GigabitEthernet0/0/4'] )
+ assert_equal( list(map(CIfObj.get_name, self.if_mng.get_if_list(is_duplicated = True)) ), ['GigabitEthernet0/0/3','GigabitEthernet0/0/4'] )
+ assert_equal( list(map(CIfObj.get_name, self.if_mng.get_if_list(is_duplicated = False)) ), ['GigabitEthernet0/0/1','GigabitEthernet0/0/2'] )
+ assert_equal( list(map(CIfObj.get_name, self.if_mng.get_duplicated_if()) ), ['GigabitEthernet0/0/3', 'GigabitEthernet0/0/4'] )
# check the classification with vrf name
- assert_equal( map(CDualIfObj.get_vrf_name, self.if_mng.get_dual_if_list() ), [None, 'dup'] )
+ assert_equal( list(map(CDualIfObj.get_vrf_name, self.if_mng.get_dual_if_list()) ), [None, 'dup'] )
def tearDown(self):
pass
diff --git a/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py b/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py
index 14f0014c..a3fcd091 100644
--- a/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py
+++ b/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py
@@ -45,7 +45,7 @@ class CTRexPktBuilderSanitySCapy_Test(pkt_bld_general_test.CGeneralPktBld_Test):
pkt_builder.dump_scripts ()
- print pkt_builder.get_vm_data()
+ print(pkt_builder.get_vm_data())
assert_equal( pkt_builder.get_vm_data(), {'split_by_var': '', 'instructions': [{'name': 'a', 'max_value': 268435466, 'min_value': 268435457, 'init_value': 268435457, 'size': 4, 'type': 'flow_var', 'step':1,'op': 'inc'}, {'is_big_endian': True, 'pkt_offset': 26, 'type': 'write_flow_var', 'name': 'a', 'add_value': 0}, {'pkt_offset': 14, 'type': 'fix_checksum_ipv4'}]} )
@@ -178,7 +178,7 @@ class CTRexPktBuilderSanitySCapy_Test(pkt_bld_general_test.CGeneralPktBld_Test):
build ipv6 packet
"""
- print "start "
+ print("start ")
py='\x55'*(64)
p=Ether()/IPv6()/UDP(dport=12,sport=1025)/py
@@ -300,25 +300,25 @@ class CTRexPktBuilderSanitySCapy_Test(pkt_bld_general_test.CGeneralPktBld_Test):
def test_simple_pkt_loader(self):
p=RawPcapReader("functional_tests/golden/basic_imix_golden.cap")
- print ""
+ print("")
for pkt in p:
- print pkt[1]
- print hexdump(str(pkt[0]))
+ print(pkt[1])
+ print(hexdump(str(pkt[0])))
break;
def test_simple_pkt_loader1(self):
pkt_builder = STLPktBuilder(pkt = "functional_tests/golden/udp_590.cap", build_raw = False);
- print ""
+ print("")
pkt_builder.dump_as_hex()
r = pkt_builder.pkt_raw
- assert_equal(ord(r[1]),0x50)
- assert_equal(ord(r[0]),0x00)
- assert_equal(ord(r[0x240]),0x16)
- assert_equal(ord(r[0x24d]),0x79)
+ assert_equal(safe_ord(r[1]),0x50)
+ assert_equal(safe_ord(r[0]),0x00)
+ assert_equal(safe_ord(r[0x240]),0x16)
+ assert_equal(safe_ord(r[0x24d]),0x79)
assert_equal(len(r),590)
- print len(r)
+ print(len(r))
def test_simple_pkt_loader2(self):
@@ -341,8 +341,8 @@ class CTRexPktBuilderSanitySCapy_Test(pkt_bld_general_test.CGeneralPktBld_Test):
py='\x55'*(64)
p=Ether()/IP()/UDP(dport=12,sport=1025)/py
- pkt_str = str(p);
- print ""
+ pkt_str = bytes(p);
+ print("")
hexdump(pkt_str);
scapy_pkt = Ether(pkt_str);
scapy_pkt.show2();
diff --git a/scripts/automation/regression/functional_tests/stl_basic_tests.py b/scripts/automation/regression/functional_tests/stl_basic_tests.py
index ea515401..2bf97307 100644
--- a/scripts/automation/regression/functional_tests/stl_basic_tests.py
+++ b/scripts/automation/regression/functional_tests/stl_basic_tests.py
@@ -7,9 +7,9 @@ from nose.tools import assert_not_equal
from nose.tools import nottest
from nose.plugins.attrib import attr
from trex import CTRexScenario
-from dpkt import pcap
from trex_stl_lib import trex_stl_sim
from trex_stl_lib.trex_stl_streams import STLProfile
+from trex_stl_lib.trex_stl_packet_builder_scapy import RawPcapReader, RawPcapWriter
import sys
import os
import subprocess
@@ -36,7 +36,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
self.profiles['random_size_9k'] = os.path.join(self.profiles_path, "../udp_rand_len_9k.py")
self.profiles['imix_tuple_gen'] = os.path.join(self.profiles_path, "imix_1pkt_tuple_gen.yaml")
- for k, v in self.profiles.iteritems():
+ for k, v in self.profiles.items():
self.verify_exists(v)
self.valgrind_profiles = [ self.profiles['imix_3pkt_vm'],
@@ -65,23 +65,19 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
def compare_caps (self, cap1, cap2, max_diff_sec = 0.01):
- with open(cap1, 'r') as f1:
- reader1 = pcap.Reader(f1)
- pkts1 = reader1.readpkts()
-
- with open(cap2, 'r') as f2:
- reader2 = pcap.Reader(f2)
- pkts2 = reader2.readpkts()
+ pkts1 = list(RawPcapReader(cap1))
+ pkts2 = list(RawPcapReader(cap2))
assert_equal(len(pkts1), len(pkts2))
- for pkt1, pkt2, i in zip(pkts1, pkts2, xrange(1, len(pkts1))):
- ts1 = pkt1[0]
- ts2 = pkt2[0]
+ for pkt1, pkt2, i in zip(pkts1, pkts2, range(1, len(pkts1))):
+ ts1 = float(pkt1[1][0]) + (float(pkt1[1][1]) / 1e6)
+ ts2 = float(pkt2[1][0]) + (float(pkt2[1][1]) / 1e6)
+
if abs(ts1-ts2) > 0.000005: # 5 nsec
raise AssertionError("TS error: cap files '{0}', '{1}' differ in cap #{2} - '{3}' vs. '{4}'".format(cap1, cap2, i, ts1, ts2))
- if pkt1[1] != pkt2[1]:
+ if pkt1[0] != pkt2[0]:
raise AssertionError("RAW error: cap files '{0}', '{1}' differ in cap #{2}".format(cap1, cap2, i))
@@ -121,6 +117,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
finally:
if not do_no_remove:
os.unlink(output_cap)
+
if test_generated:
try:
generated_filename = input_file.replace('.py', '_GENERATED.py').replace('.yaml', '_GENERATED.py')
@@ -128,19 +125,24 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
profile = STLProfile.load_py(input_file)
elif input_file.endswith('.yaml'):
profile = STLProfile.load_yaml(input_file)
- profile.dump_to_code(generated_filename)
+ profile.dump_to_code(generated_filename)
+
rc = self.run_sim(generated_filename, output_cap, options, silent)
assert_equal(rc, True)
if compare:
self.compare_caps(output_cap, golden_file)
+
except Exception as e:
- print e
+ print(e)
+
finally:
if not do_no_remove_generated:
os.unlink(generated_filename)
- os.unlink(generated_filename + 'c')
+ # python 3 does not generate PYC under the same dir
+ if os.path.exists(generated_filename + 'c'):
+ os.unlink(generated_filename + 'c')
if not do_no_remove:
os.unlink(output_cap)
@@ -148,52 +150,52 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
def test_stl_profiles (self):
p = [
- ["udp_1pkt_1mac_override.py","-m 1 -l 50",True],
- ["syn_attack.py","-m 1 -l 50",True], # can't compare random now
- ["udp_1pkt_1mac.py","-m 1 -l 50",True],
- ["udp_1pkt_mac.py","-m 1 -l 50",True],
- ["udp_1pkt.py","-m 1 -l 50",True],
- ["udp_1pkt_tuple_gen.py","-m 1 -l 50",True],
- ["udp_rand_len_9k.py","-m 1 -l 50",True], # can't do the compare
- ["udp_1pkt_mpls.py","-m 1 -l 50",True],
- ["udp_1pkt_mpls_vm.py","-m 1 ",True],
- ["imix.py","-m 1 -l 100",True],
- ["udp_inc_len_9k.py","-m 1 -l 100",True],
- ["udp_1pkt_range_clients.py","-m 1 -l 100",True],
- ["multi_burst_2st_1000pkt.py","-m 1 -l 100",True],
- ["pcap.py", "-m 1", True],
- ["pcap_with_vm.py", "-m 1", True],
+ ["udp_1pkt_1mac_override.py","-m 1 -l 50",True],
+ ["syn_attack.py","-m 1 -l 50",True], # can't compare random now
+ ["udp_1pkt_1mac.py","-m 1 -l 50",True],
+ ["udp_1pkt_mac.py","-m 1 -l 50",True],
+ ["udp_1pkt.py","-m 1 -l 50",True],
+ ["udp_1pkt_tuple_gen.py","-m 1 -l 50",True],
+ ["udp_rand_len_9k.py","-m 1 -l 50",True], # can't do the compare
+ ["udp_1pkt_mpls.py","-m 1 -l 50",True],
+ ["udp_1pkt_mpls_vm.py","-m 1 ",True],
+ ["imix.py","-m 1 -l 100",True],
+ ["udp_inc_len_9k.py","-m 1 -l 100",True],
+ ["udp_1pkt_range_clients.py","-m 1 -l 100",True],
+ ["multi_burst_2st_1000pkt.py","-m 1 -l 100",True],
+ ["pcap.py", "-m 1", True],
+ ["pcap_with_vm.py", "-m 1", True],
# YAML test
- ["yaml/burst_1000_pkt.yaml","-m 1 -l 100",True],
- ["yaml/burst_1pkt_1burst.yaml","-m 1 -l 100",True],
- ["yaml/burst_1pkt_vm.yaml","-m 1 -l 100",True],
- ["yaml/imix_1pkt.yaml","-m 1 -l 100",True],
- ["yaml/imix_1pkt_2.yaml","-m 1 -l 100",True],
- ["yaml/imix_1pkt_tuple_gen.yaml","-m 1 -l 100",True],
- ["yaml/imix_1pkt_vm.yaml","-m 1 -l 100",True],
- ["udp_1pkt_pcap.py","-m 1 -l 10",True],
- ["udp_3pkt_pcap.py","-m 1 -l 10",True],
- #["udp_1pkt_simple.py","-m 1 -l 3",True],
- ["udp_1pkt_pcap_relative_path.py","-m 1 -l 3",True],
- ["udp_1pkt_tuple_gen_split.py","-m 1 -c 2 -l 100",True],
- ["udp_1pkt_range_clients_split.py","-m 1 -c 2 -l 100",True],
- ["udp_1pkt_vxlan.py","-m 1 -c 1 -l 17",True, False], # can't generate: no VXLAN in Scapy, only in profile
- ["udp_1pkt_ipv6_in_ipv4.py","-m 1 -c 1 -l 17",True],
- ["yaml/imix_3pkt.yaml","-m 50kpps --limit 20 --cores 2",True],
- ["yaml/imix_3pkt_vm.yaml","-m 50kpps --limit 20 --cores 2",True],
- ["udp_1pkt_simple_mac_dst.py","-m 1 -l 1 ",True],
- ["udp_1pkt_simple_mac_src.py","-m 1 -l 1 ",True],
- ["udp_1pkt_simple_mac_dst_src.py","-m 1 -l 1 ",True],
- ["burst_3st_loop_x_times.py","-m 1 -l 20 ",True],
- ["udp_1pkt_mac_step.py","-m 1 -l 20 ",True],
- ["udp_1pkt_mac_mask1.py","-m 1 -l 20 ",True] ,
- ["udp_1pkt_mac_mask2.py","-m 1 -l 20 ",True],
- ["udp_1pkt_mac_mask3.py","-m 1 -l 20 ",True],
- ["udp_1pkt_simple_test2.py","-m 1 -l 10 ",True], # test split of packet with ip option
- ["udp_1pkt_simple_test.py","-m 1 -l 10 ",True],
- ["udp_1pkt_mac_mask5.py","-m 1 -l 30 ",True],
- ["udp_1pkt_range_clients_split_garp.py","-m 1 -l 50",True]
+ ["yaml/burst_1000_pkt.yaml","-m 1 -l 100",True],
+ ["yaml/burst_1pkt_1burst.yaml","-m 1 -l 100",True],
+ ["yaml/burst_1pkt_vm.yaml","-m 1 -l 100",True],
+ ["yaml/imix_1pkt.yaml","-m 1 -l 100",True],
+ ["yaml/imix_1pkt_2.yaml","-m 1 -l 100",True],
+ ["yaml/imix_1pkt_tuple_gen.yaml","-m 1 -l 100",True],
+ ["yaml/imix_1pkt_vm.yaml","-m 1 -l 100",True],
+ ["udp_1pkt_pcap.py","-m 1 -l 10",True],
+ ["udp_3pkt_pcap.py","-m 1 -l 10",True],
+ #["udp_1pkt_simple.py","-m 1 -l 3",True],
+ ["udp_1pkt_pcap_relative_path.py","-m 1 -l 3",True],
+ ["udp_1pkt_tuple_gen_split.py","-m 1 -c 2 -l 100",True],
+ ["udp_1pkt_range_clients_split.py","-m 1 -c 2 -l 100",True],
+ ["udp_1pkt_vxlan.py","-m 1 -c 1 -l 17",True, False], # can't generate: no VXLAN in Scapy, only in profile
+ ["udp_1pkt_ipv6_in_ipv4.py","-m 1 -c 1 -l 17",True],
+ ["yaml/imix_3pkt.yaml","-m 50kpps --limit 20 --cores 2",True],
+ ["yaml/imix_3pkt_vm.yaml","-m 50kpps --limit 20 --cores 2",True],
+ ["udp_1pkt_simple_mac_dst.py","-m 1 -l 1 ",True],
+ ["udp_1pkt_simple_mac_src.py","-m 1 -l 1 ",True],
+ ["udp_1pkt_simple_mac_dst_src.py","-m 1 -l 1 ",True],
+ ["burst_3st_loop_x_times.py","-m 1 -l 20 ",True],
+ ["udp_1pkt_mac_step.py","-m 1 -l 20 ",True],
+ ["udp_1pkt_mac_mask1.py","-m 1 -l 20 ",True] ,
+ ["udp_1pkt_mac_mask2.py","-m 1 -l 20 ",True],
+ ["udp_1pkt_mac_mask3.py","-m 1 -l 20 ",True],
+ ["udp_1pkt_simple_test2.py","-m 1 -l 10 ",True], # test split of packet with ip option
+ ["udp_1pkt_simple_test.py","-m 1 -l 10 ",True],
+ ["udp_1pkt_mac_mask5.py","-m 1 -l 30 ",True],
+ ["udp_1pkt_range_clients_split_garp.py","-m 1 -l 50",True]
];
@@ -240,11 +242,10 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
# valgrind tests - this runs in multi thread as it safe (no output)
def test_valgrind_various_profiles (self):
-
- print "\n"
+ print("\n")
threads = []
for profile in self.valgrind_profiles:
- print "\n*** VALGRIND: testing profile '{0}' ***\n".format(profile)
+ print("\n*** VALGRIND: testing profile '{0}' ***\n".format(profile))
obj = {'t': None, 'rc': None}
t = Thread(target = self.run_sim,
kwargs = {'obj': obj, 'yaml': profile, 'output':None, 'options': "--cores 8 --limit 20 --valgrind", 'silent': True})
diff --git a/scripts/automation/regression/misc_methods.py b/scripts/automation/regression/misc_methods.py
index 783858e8..54e3ba5d 100755
--- a/scripts/automation/regression/misc_methods.py
+++ b/scripts/automation/regression/misc_methods.py
@@ -1,9 +1,12 @@
#!/router/bin/python
+import sys
+if sys.version_info >= (3, 0):
+ import configparser
+else:
+ import ConfigParser
-import ConfigParser
import outer_packages
import yaml
-import sys
from collections import namedtuple
import subprocess, shlex
import os
@@ -13,7 +16,7 @@ TRexConfig = namedtuple('TRexConfig', 'trex, router, tftp')
# debug/development purpose, lists object's attributes and their values
def print_r(obj):
for attr in dir(obj):
- print 'obj.%s %s' % (attr, getattr(obj, attr))
+ print('obj.%s %s' % (attr, getattr(obj, attr)))
def mix_string (str):
"""Convert all string to lowercase letters, and replaces spaces with '_' char"""
@@ -22,20 +25,20 @@ def mix_string (str):
# executes given command, returns tuple (return_code, stdout, stderr)
def run_command(cmd, background = False):
if background:
- print 'Running command in background:', cmd
+ print('Running command in background:', cmd)
with open(os.devnull, 'w') as tempf:
subprocess.Popen(shlex.split(cmd), stdin=tempf, stdout=tempf, stderr=tempf)
return (None,)*3
else:
- print 'Running command:', cmd
+ print('Running command:', cmd)
proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if stdout:
- print 'Stdout:\n%s' % stdout
+ print('Stdout:\n%s' % stdout)
if proc.returncode:
if stderr:
- print 'Stderr:\n%s' % stderr
- print 'Return code: %s' % proc.returncode
+ print('Stderr:\n%s' % stderr)
+ print('Return code: %s' % proc.returncode)
return (proc.returncode, stdout, stderr)
@@ -81,7 +84,7 @@ def get_single_net_client_addr (ip_addr, octetListDict = {'3' : 1}, ip_type = 'i
if ip_type == 'ipv4':
ip_lst = ip_addr.split('.')
- for octet,increment in octetListDict.iteritems():
+ for octet,increment in octetListDict.items():
int_octet = int(octet)
if ((int_octet < 0) or (int_octet > 3)):
raise ValueError('the provided octet is not legal in {0} format'.format(ip_type) )
@@ -96,7 +99,7 @@ def get_single_net_client_addr (ip_addr, octetListDict = {'3' : 1}, ip_type = 'i
else: # this is a ipv6 address, handle accordingly
ip_lst = ip_addr.split(':')
- for octet,increment in octetListDict.iteritems():
+ for octet,increment in octetListDict.items():
int_octet = int(octet)
if ((int_octet < 0) or (int_octet > 7)):
raise ValueError('the provided octet is not legal in {0} format'.format(ip_type) )
@@ -159,11 +162,11 @@ def load_complete_config_file (filepath):
raise ValueError('A clean router configuration wasn`t provided.')
except ValueError:
- print '!!!!!'
+ print("")
raise
except Exception as inst:
- print "\nBad configuration file provided: '{0}'\n".format(filepath)
+ print("\nBad configuration file provided: '{0}'\n".format(filepath))
raise inst
return TRexConfig(trex_config, rtr_config, tftp_config)
@@ -174,8 +177,8 @@ def load_object_config_file (filepath):
config = yaml.load(f)
return config
except Exception as inst:
- print "\nBad configuration file provided: '{0}'\n".format(filepath)
- print inst
+ print("\nBad configuration file provided: '{0}'\n".format(filepath))
+ print(inst)
exit(-1)
@@ -202,7 +205,7 @@ def query_yes_no(question, default="yes"):
while True:
sys.stdout.write(question + prompt)
- choice = raw_input().lower()
+ choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
@@ -227,8 +230,8 @@ def load_benchmark_config_file (filepath):
benchmark_config = yaml.load(f)
except Exception as inst:
- print "\nBad configuration file provided: '{0}'\n".format(filepath)
- print inst
+ print("\nBad configuration file provided: '{0}'\n".format(filepath))
+ print(inst)
exit(-1)
return benchmark_config
diff --git a/scripts/automation/regression/outer_packages.py b/scripts/automation/regression/outer_packages.py
index f55c247d..bec9fe21 100755
--- a/scripts/automation/regression/outer_packages.py
+++ b/scripts/automation/regression/outer_packages.py
@@ -11,28 +11,56 @@ PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(TREX_PATH, 'external_libs'))
PATH_TO_CTRL_PLANE = os.path.abspath(os.path.join(TREX_PATH, 'automation', 'trex_control_plane'))
PATH_STL_API = os.path.abspath(os.path.join(PATH_TO_CTRL_PLANE, 'stl'))
-NIGHTLY_MODULES = ['ansi2html',
- 'enum34-1.0.4',
- 'nose-1.3.4',
- 'rednose-0.4.1',
- 'progressbar-2.2',
- 'termstyle',
- 'dpkt-1.8.6',
- 'yaml-3.11',
+
+NIGHTLY_MODULES = [ {'name': 'ansi2html'},
+ {'name': 'enum34-1.0.4'},
+ {'name': 'rednose-0.4.1'},
+ {'name': 'progressbar-2.2'},
+ {'name': 'termstyle'},
+ {'name': 'pyyaml-3.11', 'py-dep': True},
+ {'name': 'nose-1.3.4', 'py-dep': True}
]
+
+def generate_module_path (module, is_python3, is_64bit, is_cel):
+ platform_path = [module['name']]
+
+ if module.get('py-dep'):
+ platform_path.append('python3' if is_python3 else 'python2')
+
+ if module.get('arch-dep'):
+ platform_path.append('cel59' if is_cel else 'fedora18')
+ platform_path.append('64bit' if is_64bit else '32bit')
+
+ return os.path.normcase(os.path.join(PATH_TO_PYTHON_LIB, *platform_path))
+
+
+def import_module_list(modules_list):
+
+ # platform data
+ is_64bit = platform.architecture()[0] == '64bit'
+ is_python3 = (sys.version_info >= (3, 0))
+ is_cel = os.path.exists('/etc/system-profile')
+
+ # regular modules
+ for p in modules_list:
+ full_path = generate_module_path(p, is_python3, is_64bit, is_cel)
+
+ if not os.path.exists(full_path):
+ print("Unable to find required module library: '{0}'".format(p['name']))
+ print("Please provide the correct path using PATH_TO_PYTHON_LIB variable")
+ print("current path used: '{0}'".format(full_path))
+ exit(0)
+
+ sys.path.insert(1, full_path)
+
+
def import_nightly_modules ():
sys.path.append(TREX_PATH)
sys.path.append(PATH_TO_CTRL_PLANE)
sys.path.append(PATH_STL_API)
import_module_list(NIGHTLY_MODULES)
-def import_module_list (modules_list):
- assert(isinstance(modules_list, list))
- for p in modules_list:
- full_path = os.path.join(PATH_TO_PYTHON_LIB, p)
- fix_path = os.path.normcase(full_path) #CURRENT_PATH+p)
- sys.path.insert(1, full_path)
import_nightly_modules()
diff --git a/scripts/automation/regression/platform_cmd_link.py b/scripts/automation/regression/platform_cmd_link.py
index 3d577baf..247127ca 100755
--- a/scripts/automation/regression/platform_cmd_link.py
+++ b/scripts/automation/regression/platform_cmd_link.py
@@ -5,13 +5,14 @@ import CustomLogger
import misc_methods
import telnetlib
import socket
+from collections import OrderedDict
class CCommandCache(object):
def __init__(self):
self.__gen_clean_data_structure()
def __gen_clean_data_structure (self):
- self.cache = {"IF" : {},
+ self.cache = {"IF" : OrderedDict(),
"CONF" : [],
"EXEC" : []}
@@ -36,31 +37,31 @@ class CCommandCache(object):
def dump_config (self):
# dump IF config:
- print "configure terminal"
- for intf, intf_cmd_list in self.cache['IF'].iteritems():
- print "interface {if_name}".format( if_name = intf )
- print '\n'.join(intf_cmd_list)
+ print("configure terminal")
+ for intf, intf_cmd_list in self.cache['IF'].items():
+ print("interface {if_name}".format( if_name = intf ))
+ print('\n'.join(intf_cmd_list))
if self.cache['IF']:
# add 'exit' note only if if config actually took place
- print 'exit' # exit to global config mode
+ print('exit') # exit to global config mode
# dump global config
if self.cache['CONF']:
- print '\n'.join(self.cache['CONF'])
+ print('\n'.join(self.cache['CONF']))
# exit back to en mode
- print "exit"
+ print("exit")
# dump exec config
if self.cache['EXEC']:
- print '\n'.join(self.cache['EXEC'])
+ print('\n'.join(self.cache['EXEC']))
def get_config_list (self):
conf_list = []
conf_list.append("configure terminal")
- for intf, intf_cmd_list in self.cache['IF'].iteritems():
+ for intf, intf_cmd_list in self.cache['IF'].items():
conf_list.append( "interface {if_name}".format( if_name = intf ) )
conf_list.extend( intf_cmd_list )
if len(conf_list)>1:
@@ -94,7 +95,7 @@ class CCommandLink(object):
def __transmit (self, cmd_list, **kwargs):
self.history.extend(cmd_list)
if not self.silent_mode:
- print '\n'.join(cmd_list) # prompting the pushed platform commands
+ print('\n'.join(cmd_list)) # prompting the pushed platform commands
if not self.virtual_mode:
# transmit the command to platform.
return self.telnet_con.write_ios_cmd(cmd_list, **kwargs)
@@ -181,7 +182,7 @@ class CDeviceCfg(object):
def dump_config (self):
import yaml
- print yaml.dump(self.interfaces_cfg, default_flow_style=False)
+ print(yaml.dump(self.interfaces_cfg, default_flow_style=False))
class CIfObj(object):
_obj_id = 0
@@ -274,7 +275,7 @@ class CIfManager(object):
_ipv6_gen = misc_methods.get_network_addr(ip_type = 'ipv6')
def __init__(self):
- self.interfarces = {}
+ self.interfarces = OrderedDict()
self.dual_intf = []
self.full_device_cfg = None
@@ -339,7 +340,7 @@ class CIfManager(object):
def get_if_list (self, if_type = IFType.All, is_duplicated = None):
result = []
- for if_name,if_obj in self.interfarces.iteritems():
+ for if_name,if_obj in self.interfarces.items():
if (if_type == IFType.All) or ( if_obj.get_if_type() == if_type) :
if (is_duplicated is None) or (if_obj.get_pair_parent().is_duplicated() == is_duplicated):
# append this if_obj only if matches both IFType and is_duplicated conditions
@@ -362,7 +363,7 @@ class CIfManager(object):
def dump_if_config (self):
if self.full_device_cfg is None:
- print "Device configuration isn't loaded.\nPlease load config and try again."
+ print("Device configuration isn't loaded.\nPlease load config and try again.")
else:
self.full_device_cfg.dump_config()
diff --git a/scripts/automation/regression/setups/trex17/benchmark.yaml b/scripts/automation/regression/setups/trex17/benchmark.yaml
new file mode 100644
index 00000000..e5459dce
--- /dev/null
+++ b/scripts/automation/regression/setups/trex17/benchmark.yaml
@@ -0,0 +1,62 @@
+################################################################
+#### T-Rex benchmark configuration file ####
+################################################################
+
+
+test_rx_check :
+ multiplier : 0.8
+ cores : 1
+ rx_sample_rate : 128
+ exp_gbps : 0.5
+ cpu_to_core_ratio : 37270000
+ exp_bw : 1
+ exp_latency : 1
+
+
+test_routing_imix_64 :
+ multiplier : 28
+ cores : 1
+ cpu_to_core_ratio : 280
+ exp_latency : 1
+
+test_routing_imix :
+ multiplier : 0.5
+ cores : 1
+ cpu_to_core_ratio : 1800
+ exp_latency : 1
+
+test_static_routing_imix :
+ stat_route_dict :
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+ multiplier : 0.8
+ cores : 1
+ cpu_to_core_ratio : 1800
+ exp_latency : 1
+
+test_static_routing_imix_asymmetric:
+ stat_route_dict :
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+ multiplier : 0.8
+ cores : 1
+ cpu_to_core_ratio : 1800
+ exp_latency : 1
+
+test_ipv6_simple :
+ multiplier : 0.5
+ cores : 1
+ cpu_to_core_ratio : 30070000
+ cpu2core_custom_dev: YES
+ cpu2core_dev : 0.07
+
+
+test_jumbo:
+ multiplier : 2.8
+ cores : 1 \ No newline at end of file
diff --git a/scripts/automation/regression/setups/trex17/config.yaml b/scripts/automation/regression/setups/trex17/config.yaml
new file mode 100644
index 00000000..cf490b85
--- /dev/null
+++ b/scripts/automation/regression/setups/trex17/config.yaml
@@ -0,0 +1,39 @@
+################################################################
+#### T-Rex nightly test configuration file ####
+################################################################
+
+
+### T-Rex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the t-rex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the Trex
+# modes - list of modes (tagging) of this setup (loopback, virtual etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc. have their limitations in tests)
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image wished to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : csi-trex-17
+ cores : 2
+ modes : [loopback, virt_nics, VM]
diff --git a/scripts/automation/regression/trex.py b/scripts/automation/regression/trex.py
index 8efa41f6..9459e7c6 100644
--- a/scripts/automation/regression/trex.py
+++ b/scripts/automation/regression/trex.py
@@ -96,7 +96,7 @@ class CTRexRunner:
self.yaml)
# self.trex_config['trex_latency'])
- for key, value in kwargs.iteritems():
+ for key, value in kwargs.items():
tmp_key = key.replace('_','-')
dash = ' -' if (len(key)==1) else ' --'
if value == True:
@@ -104,7 +104,7 @@ class CTRexRunner:
else:
trex_cmd += (dash + '{k} {val}'.format( k = tmp_key, val = value ))
- print "\nT-REX COMMAND: ", trex_cmd
+ print("\nT-REX COMMAND: ", trex_cmd)
cmd = 'sshpass.exp %s %s root "cd %s; %s > %s"' % (self.trex_config['trex_password'],
self.trex_config['trex_name'],
@@ -172,7 +172,7 @@ class CTRexRunner:
fin = datetime.datetime.now()
# print "Time difference : ", fin-start
runtime_deviation = abs(( (end_time - start_time)/ (duration+15) ) - 1)
- print "runtime_deviation: %2.0f %%" % ( runtime_deviation*100.0)
+ print("runtime_deviation: %2.0f %%" % ( runtime_deviation*100.0))
if ( runtime_deviation > 0.6 ) :
# If the run stopped immediately - classify as Trex in use or reachability issue
interrupted = True
@@ -183,7 +183,7 @@ class CTRexRunner:
# results = subprocess.Popen(cmd, stdout = open(os.devnull, 'wb'),
# shell=True, preexec_fn=os.setsid)
except KeyboardInterrupt:
- print "\nT-Rex test interrupted by user during traffic generation!!"
+ print("\nT-Rex test interrupted by user during traffic generation!!")
results.killpg(results.pid, signal.SIGTERM) # Send the kill signal to all the process groups
interrupted = True
raise RuntimeError
@@ -245,7 +245,7 @@ class CTRexResult():
Prints nicely the content of self.result dictionary into the screen
"""
for key, value in self.result.items():
- print "{0:20} : \t{1}".format(key, float(value))
+ print("{0:20} : \t{1}".format(key, float(value)))
def update (self, key, val, _str):
""" update (self, key, val, _str) -> None
@@ -273,7 +273,7 @@ class CTRexResult():
elif s[0]=="K":
val = val*1E3
- if self.result.has_key(key):
+ if key in self.result:
if self.result[key] > 0:
if (val/self.result[key] > 0.97 ):
self.result[key]= val
@@ -331,7 +331,7 @@ class CTRexResult():
if match:
key = misc_methods.mix_string(match.group(1))
val = float(match.group(4))
- if d.has_key(key):
+ if key in d:
if stop_read == False:
self.update (key, val, match.group(5))
else:
@@ -345,7 +345,7 @@ class CTRexResult():
if match:
key = misc_methods.mix_string(match.group(1))
val = float(match.group(4))
- if d.has_key(key):
+ if key in d:
if stop_read == False:
self.update (key, val, match.group(5))
else:
@@ -370,7 +370,7 @@ class CTRexResult():
if match:
key = misc_methods.mix_string(match.group(1))
val = float(match.group(3))
- if self.result.has_key(key):
+ if key in self.result:
if (self.result[key] < val): # update only if larger than previous value
self.result[key] = val
else:
@@ -391,7 +391,7 @@ class CTRexResult():
def get_status (self, drop_expected = False):
if (self.error != ""):
- print self.error
+ print(self.error)
return (self.STATUS_ERR_FATAL)
d = self.result
@@ -417,9 +417,9 @@ class CTRexResult():
# expected measurement
expect_vs_measued=d['total-tx']/d['expected-bps']
if ( (expect_vs_measued >1.1) or (expect_vs_measued < 0.9) ) :
- print expect_vs_measued
- print d['total-tx']
- print d['expected-bps']
+ print(expect_vs_measued)
+ print(d['total-tx'])
+ print(d['expected-bps'])
self.reason="measure is not as expected"
return self.STATUS_ERR_BAD_EXPECTED_MEASUREMENT
@@ -442,7 +442,7 @@ def test_TRex_result_parser():
t=CTRexResult('trex.txt');
t.load_file_lines()
t.parse()
- print t.result
+ print(t.result)
diff --git a/scripts/automation/regression/trex_unit_test.py b/scripts/automation/regression/trex_unit_test.py
index c8565c19..4348d004 100755
--- a/scripts/automation/regression/trex_unit_test.py
+++ b/scripts/automation/regression/trex_unit_test.py
@@ -27,6 +27,7 @@ Description:
import os
import sys
import outer_packages
+
import nose
from nose.plugins import Plugin
import logging
@@ -87,7 +88,7 @@ def kill_trex_process(trex_data):
try:
proc_name, pid, full_cmd = re.split('\s+', process, maxsplit=2)
if proc_name.find('t-rex-64') >= 0:
- print 'Killing remote process: %s' % full_cmd
+ print('Killing remote process: %s' % full_cmd)
trex_remote_command(trex_data, 'kill %s' % pid, from_scripts = False)
except:
continue
@@ -184,7 +185,7 @@ class CTRexTestConfiguringPlugin(Plugin):
rsync_command = rsync_template % (new_path, self.pkg, os.path.basename(self.pkg), new_path, new_path, new_path)
return_code, stdout, stderr = trex_remote_command(self.configuration.trex, rsync_command, from_scripts = False)
if return_code:
- print 'Failed copying'
+ print('Failed copying')
sys.exit(-1)
CTRexScenario.scripts_path = new_path
CTRexScenario.is_copied = True
@@ -198,7 +199,7 @@ class CTRexTestConfiguringPlugin(Plugin):
kill_trex_process(self.configuration.trex)
time.sleep(1)
elif check_trex_running(self.configuration.trex):
- print 'TRex is already running'
+ print('TRex is already running')
sys.exit(-1)
@@ -250,7 +251,7 @@ def save_setup_info():
with open('%s/report_%s.info' % (CTRexScenario.report_dir, CTRexScenario.setup_name), 'w') as f:
f.write(setup_info)
except Exception as err:
- print 'Error saving setup info: %s ' % err
+ print('Error saving setup info: %s ' % err)
def set_report_dir (report_dir):
@@ -334,12 +335,12 @@ if __name__ == "__main__":
result = nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin]) and result
except Exception as e:
result = False
- print e
+ print(e)
finally:
save_setup_info()
if (result == True and not CTRexScenario.is_test_list):
- print termstyle.green("""
+ print(termstyle.green("""
..::''''::..
.;'' ``;.
:: :: :: ::
@@ -356,7 +357,7 @@ if __name__ == "__main__":
/ ___/ __ |_\ \_\ \/_/
/_/ /_/ |_/___/___(_)
- """)
+ """))
sys.exit(0)
sys.exit(-1)
diff --git a/scripts/automation/trex_control_plane/client/trex_client.py b/scripts/automation/trex_control_plane/client/trex_client.py
index 9e3944d4..dfd3dc01 100755
--- a/scripts/automation/trex_control_plane/client/trex_client.py
+++ b/scripts/automation/trex_control_plane/client/trex_client.py
@@ -3,12 +3,11 @@
import sys
import os
-try:
- # support import for Python 2
+if __package__:
+ from . import outer_packages
+else:
import outer_packages
-except ImportError:
- # support import for Python 3
- import client.outer_packages
+
import jsonrpclib
from jsonrpclib import ProtocolError, AppError
from common.trex_status_e import TRexStatus
diff --git a/scripts/automation/trex_control_plane/server/outer_packages.py b/scripts/automation/trex_control_plane/server/outer_packages.py
index 3d4f039a..313a93a6 100755
--- a/scripts/automation/trex_control_plane/server/outer_packages.py
+++ b/scripts/automation/trex_control_plane/server/outer_packages.py
@@ -6,7 +6,7 @@ import os
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory
PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, os.pardir, os.pardir, 'external_libs'))
-PATH_TO_PLATFORM_LIB = os.path.abspath(os.path.join(PATH_TO_PYTHON_LIB, 'platform/fedora18'))
+PATH_TO_PLATFORM_LIB = os.path.abspath(os.path.join(PATH_TO_PYTHON_LIB, 'pyzmq-14.5.0', 'python2', 'fedora18', '64bit'))
SERVER_MODULES = ['enum34-1.0.4',
'zmq',
diff --git a/scripts/automation/trex_control_plane/stl/console/trex_console.py b/scripts/automation/trex_control_plane/stl/console/trex_console.py
index 9dbe82c8..8c71065c 100755
--- a/scripts/automation/trex_control_plane/stl/console/trex_console.py
+++ b/scripts/automation/trex_control_plane/stl/console/trex_console.py
@@ -16,6 +16,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
+from __future__ import print_function
+
import subprocess
import cmd
import json
@@ -35,7 +37,7 @@ from trex_stl_lib.utils.common import user_input, get_current_user
from trex_stl_lib.utils import parsing_opts
-import trex_tui
+from . import trex_tui
from functools import wraps
@@ -48,9 +50,9 @@ class ConsoleLogger(LoggerApi):
def write (self, msg, newline = True):
if newline:
- print msg
+ print(msg)
else:
- print msg,
+ print(msg, end=' ')
def flush (self):
sys.stdout.flush()
@@ -66,20 +68,25 @@ class ConsoleLogger(LoggerApi):
def set_window_always_on_top (title):
# we need the GDK module, if not available - ignroe this command
try:
- import gtk.gdk
+ if sys.version_info < (3,0):
+ from gtk import gdk
+ else:
+ #from gi.repository import Gdk as gdk
+ return
+
except ImportError:
return
# search the window and set it as above
- root = gtk.gdk.get_default_root_window()
+ root = gdk.get_default_root_window()
for id in root.property_get('_NET_CLIENT_LIST')[2]:
- w = gtk.gdk.window_foreign_new(id)
+ w = gdk.window_foreign_new(id)
if w:
name = w.property_get('WM_NAME')[2]
if name == title:
w.set_keep_above(True)
- gtk.gdk.window_process_all_updates()
+ gdk.window_process_all_updates()
break
@@ -111,7 +118,7 @@ class TRexGeneralCmd(cmd.Cmd):
# make the directory available for every user
try:
original_umask = os.umask(0)
- os.makedirs(self._history_file_dir, mode = 0777)
+ os.makedirs(self._history_file_dir, mode = 0o777)
finally:
os.umask(original_umask)
@@ -124,14 +131,14 @@ class TRexGeneralCmd(cmd.Cmd):
length = readline.get_current_history_length()
- for i in xrange(1, length + 1):
+ for i in range(1, length + 1):
cmd = readline.get_history_item(i)
- print "{:<5} {:}".format(i, cmd)
+ print("{:<5} {:}".format(i, cmd))
def get_history_item (self, index):
length = readline.get_current_history_length()
if index > length:
- print format_text("please select an index between {0} and {1}".format(0, length))
+ print(format_text("please select an index between {0} and {1}".format(0, length)))
return None
return readline.get_history_item(index)
@@ -191,7 +198,7 @@ class TRexConsole(TRexGeneralCmd):
func_name = func_name[3:]
if not inst.stateless_client.is_connected():
- print format_text("\n'{0}' cannot be executed on offline mode\n".format(func_name), 'bold')
+ print(format_text("\n'{0}' cannot be executed on offline mode\n".format(func_name), 'bold'))
return
ret = f(*args)
@@ -209,11 +216,11 @@ class TRexConsole(TRexGeneralCmd):
func_name = func_name[3:]
if not inst.stateless_client.is_connected():
- print format_text("\n'{0}' cannot be executed on offline mode\n".format(func_name), 'bold')
+ print(format_text("\n'{0}' cannot be executed on offline mode\n".format(func_name), 'bold'))
return
if inst.stateless_client.is_all_ports_acquired():
- print format_text("\n'{0}' cannot be executed on read only mode\n".format(func_name), 'bold')
+ print(format_text("\n'{0}' cannot be executed on read only mode\n".format(func_name), 'bold'))
return
rc = f(*args)
@@ -266,7 +273,7 @@ class TRexConsole(TRexGeneralCmd):
return stop
def default(self, line):
- print "'{0}' is an unrecognized command. type 'help' or '?' for a list\n".format(line)
+ print("'{0}' is an unrecognized command. type 'help' or '?' for a list\n".format(line))
@staticmethod
def tree_autocomplete(text):
@@ -303,20 +310,20 @@ class TRexConsole(TRexGeneralCmd):
def do_verbose(self, line):
'''Shows or set verbose mode\n'''
if line == "":
- print "\nverbose is " + ("on\n" if self.verbose else "off\n")
+ print("\nverbose is " + ("on\n" if self.verbose else "off\n"))
elif line == "on":
self.verbose = True
self.stateless_client.set_verbose("high")
- print format_text("\nverbose set to on\n", 'green', 'bold')
+ print(format_text("\nverbose set to on\n", 'green', 'bold'))
elif line == "off":
self.verbose = False
self.stateless_client.set_verbose("normal")
- print format_text("\nverbose set to off\n", 'green', 'bold')
+ print(format_text("\nverbose set to off\n", 'green', 'bold'))
else:
- print format_text("\nplease specify 'on' or 'off'\n", 'bold')
+ print(format_text("\nplease specify 'on' or 'off'\n", 'bold'))
# show history
def help_history (self):
@@ -344,26 +351,26 @@ class TRexConsole(TRexGeneralCmd):
'''Maps ports topology\n'''
ports = self.stateless_client.get_acquired_ports()
if not ports:
- print "No ports acquired\n"
+ print("No ports acquired\n")
with self.stateless_client.logger.supress():
table = stl_map_ports(self.stateless_client, ports = ports)
- print format_text('\nAcquired ports topology:\n', 'bold', 'underline')
+ print(format_text('\nAcquired ports topology:\n', 'bold', 'underline'))
# bi-dir ports
- print format_text('Bi-directional ports:\n','underline')
+ print(format_text('Bi-directional ports:\n','underline'))
for port_a, port_b in table['bi']:
- print "port {0} <--> port {1}".format(port_a, port_b)
+ print("port {0} <--> port {1}".format(port_a, port_b))
- print ""
+ print("")
# unknown ports
- print format_text('Mapping unknown:\n','underline')
+ print(format_text('Mapping unknown:\n','underline'))
for port in table['unknown']:
- print "port {0}".format(port)
- print ""
+ print("port {0}".format(port))
+ print("")
@@ -394,7 +401,7 @@ class TRexConsole(TRexGeneralCmd):
if cmd == None:
return
- print "Executing '{0}'".format(cmd)
+ print("Executing '{0}'".format(cmd))
return self.onecmd(cmd)
@@ -541,11 +548,11 @@ class TRexConsole(TRexGeneralCmd):
events = self.stateless_client.get_events()
for ev in events:
- print ev
+ print(ev)
if opts.clear:
self.stateless_client.clear_events()
- print format_text("\n\nEvent log was cleared\n\n")
+ print(format_text("\n\nEvent log was cleared\n\n"))
def complete_profile(self, text, line, begidx, endidx):
@@ -571,7 +578,7 @@ class TRexConsole(TRexGeneralCmd):
if opts.xterm:
if not os.path.exists('/usr/bin/xterm'):
- print format_text("XTERM does not exists on this machine", 'bold')
+ print(format_text("XTERM does not exists on this machine", 'bold'))
return
info = self.stateless_client.get_connection_info()
@@ -616,8 +623,8 @@ class TRexConsole(TRexGeneralCmd):
func()
return
- print "\nSupported Console Commands:"
- print "----------------------------\n"
+ print("\nSupported Console Commands:")
+ print("----------------------------\n")
cmds = [x[3:] for x in self.get_names() if x.startswith("do_")]
hidden = ['EOF', 'q', 'exit', 'h', 'shell']
@@ -635,7 +642,7 @@ class TRexConsole(TRexGeneralCmd):
help = "*** Undocumented Function ***\n"
l=help.splitlines()
- print "{:<30} {:<30}".format(cmd + " - ",l[0] )
+ print("{:<30} {:<30}".format(cmd + " - ",l[0] ))
# a custorm cmdloop wrapper
def start(self):
@@ -647,7 +654,7 @@ class TRexConsole(TRexGeneralCmd):
if not readline.get_line_buffer():
raise KeyboardInterrupt
else:
- print ""
+ print("")
self.intro = None
continue
@@ -691,7 +698,7 @@ def run_script_file (self, filename, stateless_client):
stateless_client.logger.log(format_text("Executing line {0} : '{1}'\n".format(index, line)))
if not cmd in cmd_table:
- print "\n*** Error at line {0} : '{1}'\n".format(index, line)
+ print("\n*** Error at line {0} : '{1}'\n".format(index, line))
stateless_client.logger.log(format_text("unknown command '{0}'\n".format(cmd), 'bold'))
return False
@@ -828,7 +835,7 @@ def main():
console.start()
except KeyboardInterrupt as e:
- print "\n\n*** Caught Ctrl + C... Exiting...\n\n"
+ print("\n\n*** Caught Ctrl + C... Exiting...\n\n")
finally:
with stateless_client.logger.supress():
diff --git a/scripts/automation/trex_control_plane/stl/console/trex_tui.py b/scripts/automation/trex_control_plane/stl/console/trex_tui.py
index 02b00b78..88c53d10 100644
--- a/scripts/automation/trex_control_plane/stl/console/trex_tui.py
+++ b/scripts/automation/trex_control_plane/stl/console/trex_tui.py
@@ -4,7 +4,11 @@ import os
import time
from collections import OrderedDict
import datetime
-from cStringIO import StringIO
+
+if sys.version_info > (3,0):
+ from io import StringIO
+else:
+ from cStringIO import StringIO
from trex_stl_lib.utils.text_opts import *
from trex_stl_lib.utils import text_tables
@@ -22,9 +26,9 @@ class SimpleBar(object):
def show (self):
if self.desc:
- print format_text("{0} {1}".format(self.desc, self.pattern[self.index]), 'bold')
+ print(format_text("{0} {1}".format(self.desc, self.pattern[self.index]), 'bold'))
else:
- print format_text("{0}".format(self.pattern[self.index]), 'bold')
+ print(format_text("{0}".format(self.pattern[self.index]), 'bold'))
self.index = (self.index + 1) % self.pattern_len
@@ -66,7 +70,7 @@ class TrexTUIDashBoard(TrexTUIPanel):
def show (self):
stats = self.stateless_client._get_formatted_stats(self.ports)
# print stats to screen
- for stat_type, stat_data in stats.iteritems():
+ for stat_type, stat_data in stats.items():
text_tables.print_table_with_header(stat_data.text_table, stat_type)
@@ -153,7 +157,7 @@ class TrexTUIPort(TrexTUIPanel):
def show (self):
stats = self.stateless_client._get_formatted_stats([self.port_id])
# print stats to screen
- for stat_type, stat_data in stats.iteritems():
+ for stat_type, stat_data in stats.items():
text_tables.print_table_with_header(stat_data.text_table, stat_type)
def get_key_actions (self):
@@ -233,7 +237,7 @@ class TrexTUIStreamsStats(TrexTUIPanel):
def show (self):
stats = self.stateless_client._get_formatted_stats(port_id_list = None, stats_mask = trex_stl_stats.SS_COMPAT)
# print stats to screen
- for stat_type, stat_data in stats.iteritems():
+ for stat_type, stat_data in stats.items():
text_tables.print_table_with_header(stat_data.text_table, stat_type)
pass
@@ -261,10 +265,10 @@ class TrexTUILog():
if cut < 0:
cut = 0
- print format_text("\nLog:", 'bold', 'underline')
+ print(format_text("\nLog:", 'bold', 'underline'))
for msg in self.log[cut:]:
- print msg
+ print(msg)
# Panels manager (contains server panels)
@@ -304,7 +308,7 @@ class TrexTUIPanelManager():
def generate_legend (self):
self.legend = "\n{:<12}".format("browse:")
- for k, v in self.key_actions.iteritems():
+ for k, v in self.key_actions.items():
if v['show']:
x = "'{0}' - {1}, ".format(k, v['legend'])
self.legend += "{:}".format(x)
@@ -313,7 +317,7 @@ class TrexTUIPanelManager():
self.legend += "\n{:<12}".format(self.main_panel.get_name() + ":")
- for k, v in self.main_panel.get_key_actions().iteritems():
+ for k, v in self.main_panel.get_key_actions().items():
if v['show']:
x = "'{0}' - {1}, ".format(k, v['legend'])
self.legend += "{:}".format(x)
@@ -326,7 +330,7 @@ class TrexTUIPanelManager():
self.dis_bar.show()
def print_legend (self):
- print format_text(self.legend, 'bold')
+ print(format_text(self.legend, 'bold'))
# on window switch or turn on / off of the TUI we call this
@@ -405,7 +409,7 @@ class TrexTUI():
def handle_key_input (self):
# try to read a single key
- ch = os.read(sys.stdin.fileno(), 1)
+ ch = os.read(sys.stdin.fileno(), 1).decode()
if ch != None and len(ch) > 0:
return (self.pm.handle_key(ch), True)
@@ -473,7 +477,7 @@ class TrexTUI():
# restore
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)
- print ""
+ print("")
# draw once
@@ -488,7 +492,7 @@ class TrexTUI():
sys.stdout = old_stdout
self.clear_screen()
- print mystdout.getvalue()
+ print(mystdout.getvalue())
sys.stdout.flush()
self.draw_policer = 0
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows.py b/scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows.py
index 05fff67c..ff16d397 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows.py
@@ -73,7 +73,7 @@ def simple_burst ():
c.clear_stats()
# choose rate and start traffic for 10 seconds on 5 mpps
- print "Running 5 Mpps on ports 0, 1 for 10 seconds..."
+ print("Running 5 Mpps on ports 0, 1 for 10 seconds...")
c.start(ports = [0, 1], mult = "5mpps", duration = 10)
# block until done
@@ -82,14 +82,14 @@ def simple_burst ():
# read the stats after the test
stats = c.get_stats()
- print json.dumps(stats[0], indent = 4, separators=(',', ': '), sort_keys = True)
- print json.dumps(stats[1], indent = 4, separators=(',', ': '), sort_keys = True)
+ print(json.dumps(stats[0], indent = 4, separators=(',', ': '), sort_keys = True))
+ print(json.dumps(stats[1], indent = 4, separators=(',', ': '), sort_keys = True))
lost_a = stats[0]["opackets"] - stats[1]["ipackets"]
lost_b = stats[1]["opackets"] - stats[0]["ipackets"]
- print "\npackets lost from 0 --> 1: {0} pkts".format(lost_a)
- print "packets lost from 1 --> 0: {0} pkts".format(lost_b)
+ print("\npackets lost from 0 --> 1: {0} pkts".format(lost_a))
+ print("packets lost from 1 --> 0: {0} pkts".format(lost_b))
if (lost_a == 0) and (lost_b == 0):
passed = True
@@ -98,15 +98,15 @@ def simple_burst ():
except STLError as e:
passed = False
- print e
+ print(e)
finally:
c.disconnect()
if passed:
- print "\nTest has passed :-)\n"
+ print("\nTest has passed :-)\n")
else:
- print "\nTest has failed :-(\n"
+ print("\nTest has failed :-(\n")
while True:
# run the tests
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows1.py b/scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows1.py
index 264d985e..6e08a0fa 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows1.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows1.py
@@ -73,7 +73,7 @@ def simple_burst ():
c.clear_stats()
# choose rate and start traffic for 10 seconds on 5 mpps
- print "Running 5 Mpps on ports 0, 1 for 10 seconds..."
+ print("Running 5 Mpps on ports 0, 1 for 10 seconds...")
c.start(ports = [2, 3], mult = "5mpps", duration = 10)
# block until done
@@ -82,14 +82,14 @@ def simple_burst ():
# read the stats after the test
stats = c.get_stats()
- print json.dumps(stats[2], indent = 4, separators=(',', ': '), sort_keys = True)
- print json.dumps(stats[3], indent = 4, separators=(',', ': '), sort_keys = True)
+ print(json.dumps(stats[2], indent = 4, separators=(',', ': '), sort_keys = True))
+ print(json.dumps(stats[3], indent = 4, separators=(',', ': '), sort_keys = True))
lost_a = stats[2]["opackets"] - stats[3]["ipackets"]
lost_b = stats[3]["opackets"] - stats[2]["ipackets"]
- print "\npackets lost from 0 --> 1: {0} pkts".format(lost_a)
- print "packets lost from 1 --> 0: {0} pkts".format(lost_b)
+ print("\npackets lost from 0 --> 1: {0} pkts".format(lost_a))
+ print("packets lost from 1 --> 0: {0} pkts".format(lost_b))
if (lost_a == 0) and (lost_b == 0):
passed = True
@@ -98,15 +98,15 @@ def simple_burst ():
except STLError as e:
passed = False
- print e
+ print(e)
finally:
c.disconnect()
if passed:
- print "\nTest has passed :-)\n"
+ print("\nTest has passed :-)\n")
else:
- print "\nTest has failed :-(\n"
+ print("\nTest has failed :-(\n")
while True :
# run the tests
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py b/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py
index fa6e67c3..d938852e 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py
@@ -6,7 +6,7 @@ import pprint
def rx_example (tx_port, rx_port, burst_size):
- print "\nGoing to inject {0} packets on port {1} - checking RX stats on port {2}\n".format(burst_size, tx_port, rx_port)
+ print("\nGoing to inject {0} packets on port {1} - checking RX stats on port {2}\n".format(burst_size, tx_port, rx_port))
# create client
c = STLClient()
@@ -32,10 +32,10 @@ def rx_example (tx_port, rx_port, burst_size):
# add both streams to ports
c.add_streams([s1], ports = [tx_port])
- print "\ninjecting {0} packets on port {1}\n".format(total_pkts, tx_port)
+ print("\ninjecting {0} packets on port {1}\n".format(total_pkts, tx_port))
for i in range(0, 10):
- print "\nStarting iteration: {0}:".format(i)
+ print("\nStarting iteration: {0}:".format(i))
rc = rx_iteration(c, tx_port, rx_port, total_pkts, pkt.get_pkt_len())
if not rc:
passed = False
@@ -44,15 +44,15 @@ def rx_example (tx_port, rx_port, burst_size):
except STLError as e:
passed = False
- print e
+ print(e)
finally:
c.disconnect()
if passed:
- print "\nTest has passed :-)\n"
+ print("\nTest has passed :-)\n")
else:
- print "\nTest has failed :-(\n"
+ print("\nTest has failed :-(\n")
# RX one iteration
def rx_iteration (c, tx_port, rx_port, total_pkts, pkt_len):
@@ -64,7 +64,7 @@ def rx_iteration (c, tx_port, rx_port, total_pkts, pkt_len):
flow_stats = c.get_stats()['flow_stats'].get(5)
if not flow_stats:
- print "no flow stats available"
+ print("no flow stats available")
return False
tx_pkts = flow_stats['tx_pkts'].get(tx_port, 0)
@@ -72,25 +72,25 @@ def rx_iteration (c, tx_port, rx_port, total_pkts, pkt_len):
rx_pkts = flow_stats['rx_pkts'].get(rx_port, 0)
if tx_pkts != total_pkts:
- print "TX pkts mismatch - got: {0}, expected: {1}".format(tx_pkts, total_pkts)
+ print("TX pkts mismatch - got: {0}, expected: {1}".format(tx_pkts, total_pkts))
pprint.pprint(flow_stats)
return False
else:
- print "TX pkts match - {0}".format(tx_pkts)
+ print("TX pkts match - {0}".format(tx_pkts))
if tx_bytes != (total_pkts * pkt_len):
- print "TX bytes mismatch - got: {0}, expected: {1}".format(tx_bytes, (total_pkts * pkt_len))
+ print("TX bytes mismatch - got: {0}, expected: {1}".format(tx_bytes, (total_pkts * pkt_len)))
pprint.pprint(flow_stats)
return False
else:
- print "TX bytes match - {0}".format(tx_bytes)
+ print("TX bytes match - {0}".format(tx_bytes))
if rx_pkts != total_pkts:
- print "RX pkts mismatch - got: {0}, expected: {1}".format(rx_pkts, total_pkts)
+ print("RX pkts mismatch - got: {0}, expected: {1}".format(rx_pkts, total_pkts))
pprint.pprint(flow_stats)
return False
else:
- print "RX pkts match - {0}".format(rx_pkts)
+ print("RX pkts match - {0}".format(rx_pkts))
return True
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_imix.py b/scripts/automation/trex_control_plane/stl/examples/stl_imix.py
index 94165614..56fd3cfd 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_imix.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_imix.py
@@ -34,7 +34,7 @@ def imix_test (server):
dir_0 = [x[0] for x in table['bi']]
dir_1 = [x[1] for x in table['bi']]
- print "Mapped ports to sides {0} <--> {1}".format(dir_0, dir_1)
+ print("Mapped ports to sides {0} <--> {1}".format(dir_0, dir_1))
# load IMIX profile
profile = STLProfile.load_py('../../../../stl/imix.py')
@@ -50,7 +50,7 @@ def imix_test (server):
# choose rate and start traffic for 10 seconds on 5 mpps
duration = 10
mult = "30%"
- print "Injecting {0} <--> {1} on total rate of '{2}' for {3} seconds".format(dir_0, dir_1, mult, duration)
+ print("Injecting {0} <--> {1} on total rate of '{2}' for {3} seconds".format(dir_0, dir_1, mult, duration))
c.start(ports = (dir_0 + dir_1), mult = mult, duration = duration, total = True)
@@ -75,11 +75,11 @@ def imix_test (server):
lost_0 = dir_0_opackets - dir_1_ipackets
lost_1 = dir_1_opackets - dir_0_ipackets
- print "\nPackets injected from {0}: {1:,}".format(dir_0, dir_0_opackets)
- print "Packets injected from {0}: {1:,}".format(dir_1, dir_1_opackets)
+ print("\nPackets injected from {0}: {1:,}".format(dir_0, dir_0_opackets))
+ print("Packets injected from {0}: {1:,}".format(dir_1, dir_1_opackets))
- print "\npackets lost from {0} --> {1}: {2:,} pkts".format(dir_0, dir_0, lost_0)
- print "packets lost from {0} --> {1}: {2:,} pkts".format(dir_1, dir_1, lost_1)
+ print("\npackets lost from {0} --> {1}: {2:,} pkts".format(dir_0, dir_0, lost_0))
+ print("packets lost from {0} --> {1}: {2:,} pkts".format(dir_1, dir_1, lost_1))
if (lost_0 <= 0) and (lost_1 <= 0): # less or equal because we might have incoming arps etc.
passed = True
@@ -89,16 +89,16 @@ def imix_test (server):
except STLError as e:
passed = False
- print e
+ print(e)
finally:
c.disconnect()
if passed:
- print "\nTest has passed :-)\n"
+ print("\nTest has passed :-)\n")
sys.exit(0)
else:
- print "\nTest has failed :-(\n"
+ print("\nTest has failed :-(\n")
sys.exit(-1)
parser = argparse.ArgumentParser(description="Example for TRex Stateless, sending IMIX traffic")
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_pcap.py b/scripts/automation/trex_control_plane/stl/examples/stl_pcap.py
index a729a572..317f44c7 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_pcap.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_pcap.py
@@ -31,7 +31,7 @@ def inject_pcap (pcap_file, port, loop_count, ipg_usec, use_vm):
profile = STLProfile.load_pcap(pcap_file, ipg_usec = ipg_usec, loop_count = loop_count, vm = vm)
- print "Loaded pcap {0} with {1} packets...\n".format(pcap_file, len(profile))
+ print("Loaded pcap {0} with {1} packets...\n".format(pcap_file, len(profile)))
# uncomment this for simulator run
#STLSim().run(profile.get_streams(), outfile = 'out.cap')
@@ -47,10 +47,10 @@ def inject_pcap (pcap_file, port, loop_count, ipg_usec, use_vm):
stats = c.get_stats()
opackets = stats[port]['opackets']
- print "{0} packets were Tx on port {1}\n".format(opackets, port)
+ print("{0} packets were Tx on port {1}\n".format(opackets, port))
except STLError as e:
- print e
+ print(e)
finally:
c.disconnect()
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_profile.py b/scripts/automation/trex_control_plane/stl/examples/stl_profile.py
index ad9d525a..3ae5f855 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_profile.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_profile.py
@@ -24,11 +24,11 @@ def simple ():
try:
profile = STLProfile.load(profile_file)
except STLError as e:
- print format_text("\nError while loading profile '{0}'\n".format(opts.file[0]), 'bold')
- print e.brief() + "\n"
+ print(format_text("\nError while loading profile '{0}'\n".format(opts.file[0]), 'bold'))
+ print(e.brief() + "\n")
return
- print profile.dump_to_yaml()
+ print(profile.dump_to_yaml())
c.remove_all_streams(my_ports)
@@ -43,15 +43,15 @@ def simple ():
except STLError as e:
passed = False
- print e
+ print(e)
finally:
c.disconnect()
if passed:
- print "\nTest has passed :-)\n"
+ print("\nTest has passed :-)\n")
else:
- print "\nTest has failed :-(\n"
+ print("\nTest has failed :-(\n")
# run the tests
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_run_udp_simple.py b/scripts/automation/trex_control_plane/stl/examples/stl_run_udp_simple.py
index 465b3dde..d06414e4 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_run_udp_simple.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_run_udp_simple.py
@@ -108,7 +108,7 @@ def simple_burst (duration = 10, frame_size = 9000, speed = '1gbps'):
c.clear_stats()
# choose rate and start traffic for 10 seconds on 5 mpps
- print "Running {0} on ports 0, 1 for 10 seconds, UDP {1}...".format(speed,frame_size+4)
+ print("Running {0} on ports 0, 1 for 10 seconds, UDP {1}...".format(speed,frame_size+4))
c.start(ports = [0, 1], mult = speed, duration = duration)
# block until done
@@ -118,14 +118,14 @@ def simple_burst (duration = 10, frame_size = 9000, speed = '1gbps'):
stats = c.get_stats()
#print stats
- print json.dumps(stats[0], indent = 4, separators=(',', ': '), sort_keys = True)
- print json.dumps(stats[1], indent = 4, separators=(',', ': '), sort_keys = True)
+ print(json.dumps(stats[0], indent = 4, separators=(',', ': '), sort_keys = True))
+ print(json.dumps(stats[1], indent = 4, separators=(',', ': '), sort_keys = True))
lost_a = stats[0]["opackets"] - stats[1]["ipackets"]
lost_b = stats[1]["opackets"] - stats[0]["ipackets"]
- print "\npackets lost from 0 --> 1: {0} pkts".format(lost_a)
- print "packets lost from 1 --> 0: {0} pkts".format(lost_b)
+ print("\npackets lost from 0 --> 1: {0} pkts".format(lost_a))
+ print("packets lost from 1 --> 0: {0} pkts".format(lost_b))
if (lost_a == 0) and (lost_b == 0):
passed = True
@@ -134,15 +134,15 @@ def simple_burst (duration = 10, frame_size = 9000, speed = '1gbps'):
except STLError as e:
passed = False
- print e
+ print(e)
finally:
c.disconnect()
if passed:
- print "\nPASSED\n"
+ print("\nPASSED\n")
else:
- print "\nFAILED\n"
+ print("\nFAILED\n")
def process_options ():
parser = argparse.ArgumentParser(usage="""
@@ -202,7 +202,7 @@ def process_options ():
version=H_VER )
t_global.args = parser.parse_args();
- print t_global.args
+ print(t_global.args)
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_simple_burst.py b/scripts/automation/trex_control_plane/stl/examples/stl_simple_burst.py
index ed0cb93a..29341674 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_simple_burst.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_simple_burst.py
@@ -33,7 +33,7 @@ def simple_burst ():
stream_ids = c.add_streams([s1, s2], ports = [0, 3])
# run 5 times
- for i in xrange(1, 6):
+ for i in range(1, 6):
c.clear_stats()
c.start(ports = [0, 3], mult = "1gbps")
c.wait_on_traffic(ports = [0, 3])
@@ -41,22 +41,22 @@ def simple_burst ():
stats = c.get_stats()
ipackets = stats['total']['ipackets']
- print "Test iteration {0} - Packets Received: {1} ".format(i, ipackets)
+ print("Test iteration {0} - Packets Received: {1} ".format(i, ipackets))
# (5000 + 3000) * 2 ports = 16,000
if (ipackets != (16000)):
passed = False
except STLError as e:
passed = False
- print e
+ print(e)
finally:
c.disconnect()
if passed:
- print "\nTest has passed :-)\n"
+ print("\nTest has passed :-)\n")
else:
- print "\nTest has failed :-(\n"
+ print("\nTest has failed :-(\n")
# run the tests
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py b/scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py
index dc9b2b2b..03909e65 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py
@@ -19,14 +19,14 @@ def simple ():
# prepare our ports
c.reset(ports = my_ports)
- print (" is connected {0}".format(c.is_connected()))
+ print((" is connected {0}".format(c.is_connected())))
- print (" number of ports {0}".format(c.get_port_count()))
- print (" acquired_ports {0}".format(c.get_acquired_ports()))
+ print((" number of ports {0}".format(c.get_port_count())))
+ print((" acquired_ports {0}".format(c.get_acquired_ports())))
# port stats
- print c.get_stats(my_ports)
+ print(c.get_stats(my_ports))
# port info
- print c.get_port_info(my_ports)
+ print(c.get_port_info(my_ports))
c.ping()
@@ -43,15 +43,15 @@ def simple ():
except STLError as e:
passed = False
- print e
+ print(e)
finally:
c.disconnect()
if passed:
- print "\nTest has passed :-)\n"
+ print("\nTest has passed :-)\n")
else:
- print "\nTest has failed :-(\n"
+ print("\nTest has failed :-(\n")
# run the tests
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/__init__.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/__init__.py
index 8488a80a..ba9459c1 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/__init__.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/__init__.py
@@ -4,8 +4,4 @@ if sys.version_info < (2, 7):
print("\n**** TRex STL pacakge requires Python version >= 2.7 ***\n")
exit(-1)
-if sys.version_info >= (3, 0):
- print("\n**** TRex STL pacakge does not support Python 3 (yet) ***\n")
- exit(-1)
-
-import trex_stl_ext
+from . import trex_stl_ext
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/api.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/api.py
index 9b8f9f79..bd95a20a 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/api.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/api.py
@@ -1,18 +1,18 @@
# client and exceptions
-from trex_stl_exceptions import *
-from trex_stl_client import STLClient, LoggerApi
+from .trex_stl_exceptions import *
+from .trex_stl_client import STLClient, LoggerApi
# streams
-from trex_stl_streams import *
+from .trex_stl_streams import *
# packet builder
-from trex_stl_packet_builder_scapy import *
+from .trex_stl_packet_builder_scapy import *
from scapy.all import *
# simulator
-from trex_stl_sim import STLSim
+from .trex_stl_sim import STLSim
# std lib (various lib functions)
-from trex_stl_std import *
+from .trex_stl_std import *
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py
index ae6cb497..0f0fe83e 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py
@@ -8,11 +8,11 @@ import zmq
import re
import random
-from trex_stl_jsonrpc_client import JsonRpcClient, BatchMessage
+from .trex_stl_jsonrpc_client import JsonRpcClient, BatchMessage
-from utils.text_opts import *
-from trex_stl_stats import *
-from trex_stl_types import *
+from .utils.text_opts import *
+from .trex_stl_stats import *
+from .trex_stl_types import *
# basic async stats class
class CTRexAsyncStats(object):
@@ -102,7 +102,7 @@ class CTRexAsyncStatsManager():
port_stats = {}
# filter the values per port and general
- for key, value in snapshot.iteritems():
+ for key, value in snapshot.items():
# match a pattern of ports
m = re.search('(.*)\-([0-8])', key)
@@ -124,7 +124,7 @@ class CTRexAsyncStatsManager():
self.general_stats.update(general_stats)
# update all ports
- for port_id, data in port_stats.iteritems():
+ for port_id, data in port_stats.items():
if not port_id in self.port_stats:
self.port_stats[port_id] = CTRexAsyncStatsPort()
@@ -209,7 +209,7 @@ class CTRexAsyncClient():
def _run (self):
# socket must be created on the same thread
- self.socket.setsockopt(zmq.SUBSCRIBE, '')
+ self.socket.setsockopt(zmq.SUBSCRIBE, b'')
self.socket.setsockopt(zmq.RCVTIMEO, 5000)
self.socket.connect(self.tr)
@@ -316,7 +316,7 @@ class CTRexAsyncClient():
return rc
# fast loop
- for i in xrange(0, 100):
+ for i in range(0, 100):
if self.async_barrier['ack']:
break
time.sleep(0.001)
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
index 33a2cd21..25e35423 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
@@ -1,18 +1,18 @@
#!/router/bin/python
# for API usage the path name must be full
-from trex_stl_lib.trex_stl_exceptions import *
-from trex_stl_lib.trex_stl_streams import *
+from .trex_stl_exceptions import *
+from .trex_stl_streams import *
-from trex_stl_jsonrpc_client import JsonRpcClient, BatchMessage
-import trex_stl_stats
+from .trex_stl_jsonrpc_client import JsonRpcClient, BatchMessage
+from . import trex_stl_stats
-from trex_stl_port import Port
-from trex_stl_types import *
-from trex_stl_async_client import CTRexAsyncClient
+from .trex_stl_port import Port
+from .trex_stl_types import *
+from .trex_stl_async_client import CTRexAsyncClient
-from utils import parsing_opts, text_tables, common
-from utils.text_opts import *
+from .utils import parsing_opts, text_tables, common
+from .utils.text_opts import *
from functools import wraps
from collections import namedtuple
@@ -47,7 +47,7 @@ class LoggerApi(object):
raise Exception("implement this")
def set_verbose (self, level):
- if not level in xrange(self.VERBOSE_QUIET, self.VERBOSE_HIGH + 1):
+ if not level in range(self.VERBOSE_QUIET, self.VERBOSE_HIGH + 1):
raise ValueError("bad value provided for logger")
self.level = level
@@ -113,9 +113,9 @@ class DefaultLogger(LoggerApi):
def write (self, msg, newline = True):
if newline:
- print msg
+ print(msg)
else:
- print msg,
+            sys.stdout.write(msg)
def flush (self):
sys.stdout.flush()
@@ -165,13 +165,13 @@ class AsyncEventHandler(object):
port_stats = {}
# filter the values per port and general
- for key, value in dump_data.iteritems():
+ for key, value in dump_data.items():
# match a pattern of ports
m = re.search('(.*)\-(\d+)', key)
if m:
port_id = int(m.group(2))
field_name = m.group(1)
- if self.client.ports.has_key(port_id):
+ if port_id in self.client.ports:
if not port_id in port_stats:
port_stats[port_id] = {}
port_stats[port_id][field_name] = value
@@ -185,7 +185,7 @@ class AsyncEventHandler(object):
self.client.global_stats.update(global_stats, baseline)
# update all ports
- for port_id, data in port_stats.iteritems():
+ for port_id, data in port_stats.items():
self.client.ports[port_id].port_stats.update(data, baseline)
@@ -351,7 +351,7 @@ class CCommLink(object):
if self.virtual:
self._prompt_virtual_tx_msg()
_, msg = self.rpc_link.create_jsonrpc_v2(method_name, params)
- print msg
+ print(msg)
return
else:
return self.rpc_link.invoke_rpc_method(method_name, params)
@@ -359,9 +359,9 @@ class CCommLink(object):
def transmit_batch(self, batch_list):
if self.virtual:
self._prompt_virtual_tx_msg()
- print [msg
+ print([msg
for _, msg in [self.rpc_link.create_jsonrpc_v2(command.method, command.params)
- for command in batch_list]]
+ for command in batch_list]])
else:
batch = self.rpc_link.create_batch()
for command in batch_list:
@@ -370,8 +370,8 @@ class CCommLink(object):
return batch.invoke()
def _prompt_virtual_tx_msg(self):
- print "Transmitting virtually over tcp://{server}:{port}".format(server=self.server,
- port=self.port)
+ print("Transmitting virtually over tcp://{server}:{port}".format(server=self.server,
+ port=self.port))
@@ -710,7 +710,7 @@ class STLClient(object):
self.supported_cmds = rc.data()
# create ports
- for port_id in xrange(self.system_info["port_count"]):
+ for port_id in range(self.system_info["port_count"]):
info = self.system_info['ports'][port_id]
self.ports[port_id] = Port(port_id,
@@ -781,7 +781,7 @@ class STLClient(object):
port_stats = self.ports[port_id].get_stats()
stats[port_id] = port_stats
- for k, v in port_stats.iteritems():
+ for k, v in port_stats.items():
if not k in total:
total[k] = v
else:
@@ -826,9 +826,10 @@ class STLClient(object):
# stats
def _get_formatted_stats(self, port_id_list, stats_mask = trex_stl_stats.COMPACT):
- stats_opts = trex_stl_stats.ALL_STATS_OPTS.intersection(stats_mask)
- stats_obj = {}
+ stats_opts = common.list_intersect(trex_stl_stats.ALL_STATS_OPTS, stats_mask)
+
+ stats_obj = OrderedDict()
for stats_type in stats_opts:
stats_obj.update(self.stats_generator.generate_single_statistic(port_id_list, stats_type))
@@ -859,7 +860,7 @@ class STLClient(object):
@staticmethod
def __get_mask_keys(ok_values={True}, **kwargs):
masked_keys = set()
- for key, val in kwargs.iteritems():
+ for key, val in kwargs.items():
if val in ok_values:
masked_keys.add(key)
return masked_keys
@@ -1074,30 +1075,30 @@ class STLClient(object):
"""
- return self.ports.keys()
+ return list(self.ports)
# get all acquired ports
def get_acquired_ports(self):
return [port_id
- for port_id, port_obj in self.ports.iteritems()
+ for port_id, port_obj in self.ports.items()
if port_obj.is_acquired()]
# get all active ports (TX or pause)
def get_active_ports(self):
return [port_id
- for port_id, port_obj in self.ports.iteritems()
+ for port_id, port_obj in self.ports.items()
if port_obj.is_active()]
# get paused ports
def get_paused_ports (self):
return [port_id
- for port_id, port_obj in self.ports.iteritems()
+ for port_id, port_obj in self.ports.items()
if port_obj.is_paused()]
# get all TX ports
def get_transmitting_ports (self):
return [port_id
- for port_id, port_obj in self.ports.iteritems()
+ for port_id, port_obj in self.ports.items()
if port_obj.is_transmitting()]
@@ -2148,13 +2149,13 @@ class STLClient(object):
# set to show all stats if no filter was given
mask = trex_stl_stats.ALL_STATS_OPTS
- stats_opts = trex_stl_stats.ALL_STATS_OPTS.intersection(mask)
+ stats_opts = common.list_intersect(trex_stl_stats.ALL_STATS_OPTS, mask)
stats = self._get_formatted_stats(opts.ports, mask)
# print stats to screen
- for stat_type, stat_data in stats.iteritems():
+ for stat_type, stat_data in stats.items():
text_tables.print_table_with_header(stat_data.text_table, stat_type)
@@ -2179,7 +2180,7 @@ class STLClient(object):
else:
# print stats to screen
- for stream_hdr, port_streams_data in streams.iteritems():
+ for stream_hdr, port_streams_data in streams.items():
text_tables.print_table_with_header(port_streams_data.text_table,
header= stream_hdr.split(":")[0] + ":",
untouched_header= stream_hdr.split(":")[1])
@@ -2234,21 +2235,16 @@ class STLClient(object):
else:
self.stop(active_ports)
- try:
- # pcap injection removes all previous streams from the ports
- self.remove_all_streams(ports = opts.ports)
+ # pcap injection removes all previous streams from the ports
+ self.remove_all_streams(ports = opts.ports)
- profile = STLProfile.load_pcap(opts.file[0],
- opts.ipg_usec,
- opts.speedup,
- opts.count)
+ profile = STLProfile.load_pcap(opts.file[0],
+ opts.ipg_usec,
+ opts.speedup,
+ opts.count)
- id_list = self.add_streams(profile.get_streams(), opts.ports)
- self.start(ports = opts.ports, duration = opts.duration, force = opts.force)
-
- except STLError as e:
- stl.logger.log(e.brief())
- return
+ id_list = self.add_streams(profile.get_streams(), opts.ports)
+ self.start(ports = opts.ports, duration = opts.duration, force = opts.force)
return True
@@ -2268,11 +2264,7 @@ class STLClient(object):
if opts is None:
return
- try:
- self.set_port_attr(opts.ports, opts.prom)
- except STLError as e:
- stl.logger.log(brief())
- return
+ self.set_port_attr(opts.ports, opts.prom)
@@ -2304,7 +2296,7 @@ class STLClient(object):
if profile_type == 'python':
self.logger.log('Type: {:^12}'.format('Python Module'))
- self.logger.log('Tunables: {:^12}'.format(['{0} = {1}'.format(k ,v) for k, v in info['tunables'].iteritems()]))
+ self.logger.log('Tunables: {:^12}'.format(['{0} = {1}'.format(k ,v) for k, v in info['tunables'].items()]))
elif profile_type == 'yaml':
self.logger.log('Type: {:^12}'.format('YAML'))
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_exceptions.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_exceptions.py
index e84b032b..585af231 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_exceptions.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_exceptions.py
@@ -1,7 +1,7 @@
import os
import sys
-from utils.text_opts import *
+from .utils.text_opts import *
# basic error for API
class STLError(Exception):
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py
index 9289e513..c614c4bd 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py
@@ -1,6 +1,7 @@
import sys
import os
import warnings
+import platform
# if not set - set it to default
TREX_STL_EXT_PATH = os.environ.get('TREX_STL_EXT_PATH')
@@ -13,74 +14,49 @@ if not TREX_STL_EXT_PATH:
# the modules required
-CLIENT_UTILS_MODULES = ['dpkt-1.8.6',
- 'yaml-3.11',
- 'texttable-0.8.4',
- 'scapy-2.3.1'
+# py-dep requires python2/python3 directories
+# arch-dep requires cel59/fedora and 32bit/64bit directories
+CLIENT_UTILS_MODULES = [ {'name': 'texttable-0.8.4'},
+ {'name': 'pyyaml-3.11', 'py-dep': True},
+ {'name': 'scapy-2.3.1', 'py-dep': True},
+ {'name': 'pyzmq-14.5.0', 'py-dep': True, 'arch-dep': True}
]
+def generate_module_path (module, is_python3, is_64bit, is_cel):
+ platform_path = [module['name']]
+
+ if module.get('py-dep'):
+ platform_path.append('python3' if is_python3 else 'python2')
+
+ if module.get('arch-dep'):
+ platform_path.append('cel59' if is_cel else 'fedora18')
+ platform_path.append('64bit' if is_64bit else '32bit')
+
+ return os.path.normcase(os.path.join(TREX_STL_EXT_PATH, *platform_path))
+
+
def import_module_list(modules_list):
- assert(isinstance(modules_list, list))
+ # platform data
+ is_64bit = platform.architecture()[0] == '64bit'
+ is_python3 = (sys.version_info >= (3, 0))
+ is_cel = os.path.exists('/etc/system-profile')
+
+ # regular modules
for p in modules_list:
- full_path = os.path.join(TREX_STL_EXT_PATH, p)
- fix_path = os.path.normcase(full_path)
+ full_path = generate_module_path(p, is_python3, is_64bit, is_cel)
- if not os.path.exists(fix_path):
- print "Unable to find required module library: '{0}'".format(p)
- print "Please provide the correct path using TREX_STL_EXT_PATH variable"
- print "current path used: '{0}'".format(TREX_STL_EXT_PATH)
+ if not os.path.exists(full_path):
+ print("Unable to find required module library: '{0}'".format(p['name']))
+ print("Please provide the correct path using TREX_STL_EXT_PATH variable")
+ print("current path used: '{0}'".format(full_path))
exit(0)
sys.path.insert(1, full_path)
-# TODO; REFACTOR THIS....it looks horrible
-def import_platform_dirs ():
- # handle platform dirs
-
- # try fedora 18 first and then cel5.9
- # we are using the ZMQ module to determine the right platform
-
- full_path = os.path.join(TREX_STL_EXT_PATH, 'platform/fedora18')
- fix_path = os.path.normcase(full_path)
- sys.path.insert(0, full_path)
- try:
- # try to import and delete it from the namespace
- import zmq
- del zmq
- return
- except:
- sys.path.pop(0)
- pass
-
- full_path = os.path.join(TREX_STL_EXT_PATH, 'platform/cel59')
- fix_path = os.path.normcase(full_path)
- sys.path.insert(0, full_path)
- try:
- # try to import and delete it from the namespace
- import zmq
- del zmq
- return
- except:
- sys.path.pop(0)
- pass
-
- full_path = os.path.join(TREX_STL_EXT_PATH, 'platform/cel59/32bit')
- fix_path = os.path.normcase(full_path)
- sys.path.insert(0, full_path)
- try:
- # try to import and delete it from the namespace
- import zmq
- del zmq
- return
-
- except:
- sys.path.pop(0)
- sys.modules['zmq'] = None
- warnings.warn("unable to determine platform type for ZMQ import")
-
-
+
+
+
import_module_list(CLIENT_UTILS_MODULES)
-import_platform_dirs()
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_hltapi.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_hltapi.py
index 23ecaf83..9387c3a6 100755
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_hltapi.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_hltapi.py
@@ -172,11 +172,12 @@ import sys
import os
import socket
import copy
-from trex_stl_lib.api import *
-from trex_stl_types import *
-from utils.common import get_number
from collections import defaultdict
+from trex_stl_lib.api import *
+from .trex_stl_types import *
+from .utils.common import get_number
+
class HLT_ERR(dict):
def __init__(self, log = 'Unknown error', **kwargs):
@@ -877,11 +878,15 @@ def generate_packet(**user_kwargs):
raise STLError('mac_src_count has to be at least 1')
if count > 0 or kwargs['mac_src_mode'] == 'random':
mac_src = ipv4_str_to_num(mac2str(kwargs['mac_src'])[2:]) # take only 32 lsb
+
step = kwargs['mac_src_step']
- if step < 1:
- raise STLError('mac_src_step has to be at least 1')
+
if type(step) is str:
step = ipv4_str_to_num(mac2str(step)[2:]) # take only 32 lsb
+
+ if step < 1:
+ raise STLError('mac_src_step has to be at least 1')
+
if kwargs['mac_src_mode'] == 'increment':
add_val = mac_src - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
@@ -913,10 +918,13 @@ def generate_packet(**user_kwargs):
if count > 0 or kwargs['mac_dst_mode'] == 'random':
mac_dst = ipv4_str_to_num(mac2str(kwargs['mac_dst'])[2:]) # take only 32 lsb
step = kwargs['mac_dst_step']
- if step < 1:
- raise STLError('mac_dst_step has to be at least 1')
+
if type(step) is str:
step = ipv4_str_to_num(mac2str(step)[2:]) # take only 32 lsb
+
+ if step < 1:
+ raise STLError('mac_dst_step has to be at least 1')
+
if kwargs['mac_dst_mode'] == 'increment':
add_val = mac_dst - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
@@ -1037,10 +1045,12 @@ def generate_packet(**user_kwargs):
if type(ip_src_addr) is str:
ip_src_addr = ipv4_str_to_num(is_valid_ipv4(ip_src_addr))
step = kwargs['ip_src_step']
- if step < 1:
- raise STLError('ip_src_step has to be at least 1')
if type(step) is str:
step = ipv4_str_to_num(is_valid_ipv4(step))
+
+ if step < 1:
+ raise STLError('ip_src_step has to be at least 1')
+
if kwargs['ip_src_mode'] == 'increment':
add_val = ip_src_addr - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
@@ -1075,10 +1085,13 @@ def generate_packet(**user_kwargs):
if type(ip_dst_addr) is str:
ip_dst_addr = ipv4_str_to_num(is_valid_ipv4(ip_dst_addr))
step = kwargs['ip_dst_step']
- if step < 1:
- raise STLError('ip_dst_step has to be at least 1')
+
if type(step) is str:
step = ipv4_str_to_num(is_valid_ipv4(step))
+
+ if step < 1:
+ raise STLError('ip_dst_step has to be at least 1')
+
if kwargs['ip_dst_mode'] == 'increment':
add_val = ip_dst_addr - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
@@ -1131,10 +1144,13 @@ def generate_packet(**user_kwargs):
if count > 0 or kwargs['ipv6_src_mode'] == 'random':
ipv6_src_addr_num = ipv4_str_to_num(is_valid_ipv6(kwargs['ipv6_src_addr'])[-4:])
step = kwargs['ipv6_src_step']
- if step < 1:
- raise STLError('ipv6_src_step has to be at least 1')
+
if type(step) is str: # convert ipv6 step to number
step = ipv4_str_to_num(is_valid_ipv6(step)[-4:])
+
+ if step < 1:
+ raise STLError('ipv6_src_step has to be at least 1')
+
if kwargs['ipv6_src_mode'] == 'increment':
add_val = ipv6_src_addr_num - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
@@ -1166,10 +1182,13 @@ def generate_packet(**user_kwargs):
if count > 0 or kwargs['ipv6_dst_mode'] == 'random':
ipv6_dst_addr_num = ipv4_str_to_num(is_valid_ipv6(kwargs['ipv6_dst_addr'])[-4:])
step = kwargs['ipv6_dst_step']
- if step < 1:
- raise STLError('ipv6_dst_step has to be at least 1')
+
if type(step) is str: # convert ipv6 step to number
step = ipv4_str_to_num(is_valid_ipv6(step)[-4:])
+
+ if step < 1:
+ raise STLError('ipv6_dst_step has to be at least 1')
+
if kwargs['ipv6_dst_mode'] == 'increment':
add_val = ipv6_dst_addr_num - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
index 7b284cb5..166fd64e 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
@@ -4,11 +4,11 @@ import zmq
import json
import re
from collections import namedtuple
-from trex_stl_types import *
-from utils.common import random_id_gen
import zlib
import struct
+from .trex_stl_types import *
+from .utils.common import random_id_gen
class bcolors:
BLUE = '\033[94m'
@@ -100,7 +100,7 @@ class JsonRpcClient(object):
msg["params"] = params
- msg["id"] = self.id_gen.next()
+ msg["id"] = next(self.id_gen)
if encode:
return id, json.dumps(msg)
@@ -143,16 +143,21 @@ class JsonRpcClient(object):
if self.logger.check_verbose(self.logger.VERBOSE_HIGH):
self.verbose_msg("Sending Request To Server:\n\n" + self.pretty_json(msg) + "\n")
- if len(msg) > self.MSG_COMPRESS_THRESHOLD:
- response = self.send_raw_msg(self.compress_msg(msg))
+ # encode string to buffer
+ buffer = msg.encode()
+
+ if len(buffer) > self.MSG_COMPRESS_THRESHOLD:
+ response = self.send_raw_msg(self.compress_msg(buffer))
if response:
response = self.decompress_msg(response)
else:
- response = self.send_raw_msg(msg)
+ response = self.send_raw_msg(buffer)
if not response:
return response
+ # return to string
+ response = response.decode()
# print after
if self.logger.check_verbose(self.logger.VERBOSE_HIGH):
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py
index 643b024c..a7064853 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py
@@ -7,9 +7,10 @@ import yaml
import binascii
import base64
import inspect
+import copy
-from trex_stl_packet_builder_interface import CTrexPktBuilderInterface
-from trex_stl_types import *
+from .trex_stl_packet_builder_interface import CTrexPktBuilderInterface
+from .trex_stl_types import *
from scapy.all import *
class CTRexPacketBuildException(Exception):
@@ -28,22 +29,30 @@ class CTRexPacketBuildException(Exception):
################################################################################################
+def safe_ord (c):
+ if type(c) is str:
+ return ord(c)
+ elif type(c) is int:
+ return c
+ else:
+ raise TypeError("cannot convert: {0} of type: {1}".format(c, type(c)))
+
def _buffer_to_num(str_buffer):
- validate_type('str_buffer', str_buffer, str)
+ validate_type('str_buffer', str_buffer, bytes)
res=0
for i in str_buffer:
res = res << 8
- res += ord(i)
+ res += safe_ord(i)
return res
def ipv4_str_to_num (ipv4_buffer):
- validate_type('ipv4_buffer', ipv4_buffer, str)
+ validate_type('ipv4_buffer', ipv4_buffer, bytes)
assert len(ipv4_buffer)==4, 'size of ipv4_buffer is not 4'
return _buffer_to_num(ipv4_buffer)
def mac_str_to_num (mac_buffer):
- validate_type('mac_buffer', mac_buffer, str)
+ validate_type('mac_buffer', mac_buffer, bytes)
assert len(mac_buffer)==6, 'size of mac_buffer is not 6'
return _buffer_to_num(mac_buffer)
@@ -52,10 +61,10 @@ def is_valid_ipv4(ip_addr):
"""
return buffer in network order
"""
- if type(ip_addr)==str and len(ip_addr) == 4:
+ if type(ip_addr) == bytes and len(ip_addr) == 4:
return ip_addr
- if type(ip_addr)==int :
+ if type(ip_addr)== int:
ip_addr = socket.inet_ntoa(struct.pack("!I", ip_addr))
try:
@@ -70,7 +79,7 @@ def is_valid_ipv6(ipv6_addr):
"""
return buffer in network order
"""
- if type(ipv6_addr)==str and len(ipv6_addr) == 16:
+ if type(ipv6_addr) == bytes and len(ipv6_addr) == 16:
return ipv6_addr
try:
return socket.inet_pton(socket.AF_INET6, ipv6_addr)
@@ -347,15 +356,15 @@ class CTRexVmEngine(object):
def dump (self):
cnt=0;
for obj in self.ins:
- print "ins",cnt
+ print("ins",cnt)
cnt = cnt +1
- print obj.__dict__
+ print(obj.__dict__)
def dump_bjson (self):
- print json.dumps(self.get_json(), sort_keys=True, indent=4)
+ print(json.dumps(self.get_json(), sort_keys=True, indent=4))
def dump_as_yaml (self):
- print yaml.dump(self.get_json(), default_flow_style=False)
+ print(yaml.dump(self.get_json(), default_flow_style=False))
@@ -517,10 +526,10 @@ class CTRexVmDescBase(object):
return self.get_obj().__dict__
def dump_bjson(self):
- print json.dumps(self.get_json(), sort_keys=True, indent=4)
+ print(json.dumps(self.get_json(), sort_keys=True, indent=4))
def dump_as_yaml(self):
- print yaml.dump(self.get_json(), default_flow_style=False)
+ print(yaml.dump(self.get_json(), default_flow_style=False))
def get_var_ref (self):
@@ -1127,7 +1136,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
pkt : string,
Scapy or pcap file filename a scapy packet
- pkt_buffer : string
+ pkt_buffer : bytes
a packet as buffer
vm : list or base on :class:`trex_stl_lib.trex_stl_packet_builder_scapy.STLScVmRaw`
@@ -1147,6 +1156,9 @@ class STLPktBuilder(CTrexPktBuilderInterface):
"""
super(STLPktBuilder, self).__init__()
+ validate_type('pkt', pkt, (type(None), str, Packet))
+ validate_type('pkt_buffer', pkt_buffer, (type(None), bytes))
+
self.pkt = None # as input
self.pkt_raw = None # from raw pcap file
self.vm_scripts = [] # list of high level instructions
@@ -1185,7 +1197,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
def dump_vm_data_as_yaml(self):
- print yaml.dump(self.get_vm_data(), default_flow_style=False)
+ print(yaml.dump(self.get_vm_data(), default_flow_style=False))
def get_vm_data(self):
"""
@@ -1223,8 +1235,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
"""
pkt_buf = self._get_pkt_as_str()
-
- return {'binary': base64.b64encode(pkt_buf) if encode else pkt_buf,
+ return {'binary': base64.b64encode(pkt_buf).decode() if encode else pkt_buf,
'meta': self.metadata}
@@ -1239,7 +1250,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
def dump_as_hex (self):
pkt_buf = self._get_pkt_as_str()
- print hexdump(pkt_buf)
+ print(hexdump(pkt_buf))
def pkt_layers_desc (self):
"""
@@ -1257,7 +1268,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
def set_pkt_as_str (self, pkt_buffer):
- validate_type('pkt_buffer', pkt_buffer, str)
+ validate_type('pkt_buffer', pkt_buffer, bytes)
self.pkt_raw = pkt_buffer
@@ -1279,7 +1290,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
for pkt in p:
was_set=True;
- self.pkt_raw = str(pkt[0])
+ self.pkt_raw = pkt[0]
break
if not was_set :
raise CTRexPacketBuildException(-14, "no buffer inside the pcap file {0}".format(f_path))
@@ -1389,7 +1400,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
if var_names :
for var_name in var_names:
- if vars.has_key(var_name):
+ if var_name in vars:
raise CTRexPacketBuildException(-11,("variable %s define twice ") % (var_name) );
else:
vars[var_name]=1
@@ -1398,7 +1409,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
for desc in obj.commands:
var_name = desc.get_var_ref()
if var_name :
- if not vars.has_key(var_name):
+ if not var_name in vars:
raise CTRexPacketBuildException(-11,("variable %s does not exists ") % (var_name) );
desc.compile(self);
@@ -1451,10 +1462,13 @@ class STLPktBuilder(CTrexPktBuilderInterface):
return p_utl.get_field_offet_by_str(field_name)
def _get_pkt_as_str(self):
+
if self.pkt:
- return str(self.pkt)
+ return bytes(self.pkt)
+
if self.pkt_raw:
return self.pkt_raw
+
raise CTRexPacketBuildException(-11, 'empty packet');
def _add_tuple_gen(self,tuple_gen):
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py
index 0558360d..47124114 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py
@@ -1,11 +1,12 @@
from collections import namedtuple, OrderedDict
-from trex_stl_packet_builder_scapy import STLPktBuilder
-from trex_stl_streams import STLStream
+from .trex_stl_packet_builder_scapy import STLPktBuilder
+from .trex_stl_streams import STLStream
+from .trex_stl_types import *
+from . import trex_stl_stats
+
import base64
-import trex_stl_stats
-from trex_stl_types import *
import time
import copy
@@ -59,7 +60,7 @@ class Port(object):
self.port_stats = trex_stl_stats.CPortStats(self)
- self.next_available_id = long(1)
+ self.next_available_id = 1
def err(self, msg):
@@ -138,7 +139,7 @@ class Port(object):
raise Exception("port {0}: bad state received from server '{1}'".format(self.port_id, port_state))
- self.next_available_id = long(rc.data()['max_stream_id']) + 1
+ self.next_available_id = int(rc.data()['max_stream_id']) + 1
# attributes
self.attr = rc.data()['attr']
@@ -151,7 +152,7 @@ class Port(object):
if rc.bad():
return self.err(rc.err())
- for k, v in rc.data()['streams'].iteritems():
+ for k, v in rc.data()['streams'].items():
self.streams[k] = {'next_id': v['next_stream_id'],
'pkt' : base64.b64decode(v['packet']['binary']),
'mode' : v['mode']['type'],
@@ -488,21 +489,21 @@ class Port(object):
rate = self.get_profile()['rate']
graph = self.get_profile()['graph']
- print format_text("Profile Map Per Port\n", 'underline', 'bold')
+ print(format_text("Profile Map Per Port\n", 'underline', 'bold'))
factor = mult_to_factor(mult, rate['max_bps_l2'], rate['max_pps'], rate['max_line_util'])
- print "Profile max BPS L2 (base / req): {:^12} / {:^12}".format(format_num(rate['max_bps_l2'], suffix = "bps"),
- format_num(rate['max_bps_l2'] * factor, suffix = "bps"))
+ print("Profile max BPS L2 (base / req): {:^12} / {:^12}".format(format_num(rate['max_bps_l2'], suffix = "bps"),
+ format_num(rate['max_bps_l2'] * factor, suffix = "bps")))
- print "Profile max BPS L1 (base / req): {:^12} / {:^12}".format(format_num(rate['max_bps_l1'], suffix = "bps"),
- format_num(rate['max_bps_l1'] * factor, suffix = "bps"))
+ print("Profile max BPS L1 (base / req): {:^12} / {:^12}".format(format_num(rate['max_bps_l1'], suffix = "bps"),
+ format_num(rate['max_bps_l1'] * factor, suffix = "bps")))
- print "Profile max PPS (base / req): {:^12} / {:^12}".format(format_num(rate['max_pps'], suffix = "pps"),
- format_num(rate['max_pps'] * factor, suffix = "pps"),)
+ print("Profile max PPS (base / req): {:^12} / {:^12}".format(format_num(rate['max_pps'], suffix = "pps"),
+ format_num(rate['max_pps'] * factor, suffix = "pps"),))
- print "Profile line util. (base / req): {:^12} / {:^12}".format(format_percentage(rate['max_line_util']),
- format_percentage(rate['max_line_util'] * factor))
+ print("Profile line util. (base / req): {:^12} / {:^12}".format(format_percentage(rate['max_line_util']),
+ format_percentage(rate['max_line_util'] * factor)))
# duration
@@ -517,9 +518,9 @@ class Port(object):
exp_time_factor_sec = duration
- print "Duration (base / req): {:^12} / {:^12}".format(format_time(exp_time_base_sec),
- format_time(exp_time_factor_sec))
- print "\n"
+ print("Duration (base / req): {:^12} / {:^12}".format(format_time(exp_time_base_sec),
+ format_time(exp_time_factor_sec)))
+ print("\n")
# generate port info
def get_info (self):
@@ -576,7 +577,7 @@ class Port(object):
return {}
data = {}
- for id, obj in self.streams.iteritems():
+ for id, obj in self.streams.items():
# lazy build scapy repr.
if not 'pkt_type' in obj:
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py
index 00fa6a93..18678e3e 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@@ -17,58 +16,26 @@ See the License for the specific language governing permissions and
limitations under the License.
"""
# simulator can be run as a standalone
-import trex_stl_ext
+from . import trex_stl_ext
+from .trex_stl_exceptions import *
+from .trex_stl_streams import *
+from .utils import parsing_opts
+from .trex_stl_client import STLClient
+from .utils import pcap
-from trex_stl_exceptions import *
from yaml import YAMLError
-from trex_stl_streams import *
-from utils import parsing_opts
-from trex_stl_client import STLClient
import re
import json
-
-
import argparse
import tempfile
import subprocess
import os
-from dpkt import pcap
from operator import itemgetter
class BpSimException(Exception):
pass
-def merge_cap_files (pcap_file_list, out_filename, delete_src = False):
-
- out_pkts = []
- if not all([os.path.exists(f) for f in pcap_file_list]):
- print "failed to merge cap file list...\nnot all files exist\n"
- return
-
- # read all packets to a list
- for src in pcap_file_list:
- f = open(src, 'r')
- reader = pcap.Reader(f)
- pkts = reader.readpkts()
- out_pkts += pkts
- f.close()
- if delete_src:
- os.unlink(src)
-
- # sort by the timestamp
- out_pkts = sorted(out_pkts, key=itemgetter(0))
-
-
- out = open(out_filename, 'w')
- out_writer = pcap.Writer(out)
-
- for ts, pkt in out_pkts:
- out_writer.writepkt(pkt, ts)
-
- out.close()
-
-
# stateless simulation
class STLSim(object):
@@ -214,16 +181,16 @@ class STLSim(object):
duration = duration))
if mode == 'json':
- print json.dumps(cmds_json, indent = 4, separators=(',', ': '), sort_keys = True)
+ print(json.dumps(cmds_json, indent = 4, separators=(',', ': '), sort_keys = True))
return
elif mode == 'yaml':
- print STLProfile(stream_list).dump_to_yaml()
+ print(STLProfile(stream_list).dump_to_yaml())
return
elif mode == 'pkt':
- print STLProfile(stream_list).dump_as_pkt();
+ print(STLProfile(stream_list).dump_as_pkt())
return
elif mode == 'native':
- print STLProfile(stream_list).dump_to_code()
+ print(STLProfile(stream_list).dump_to_code())
return
@@ -247,7 +214,7 @@ class STLSim(object):
# write to temp file
f = tempfile.NamedTemporaryFile(delete = False)
- msg = json.dumps(cmds_json)
+ msg = json.dumps(cmds_json).encode()
f.write(msg)
f.close()
@@ -296,10 +263,10 @@ class STLSim(object):
elif self.mode == 'gdb':
cmd = ['/usr/bin/gdb', '--args'] + cmd
- print "executing command: '{0}'".format(" ".join(cmd))
+ print("executing command: '{0}'".format(" ".join(cmd)))
if self.silent:
- FNULL = open(os.devnull, 'w')
+ FNULL = open(os.devnull, 'wb')
rc = subprocess.call(cmd, stdout=FNULL)
else:
rc = subprocess.call(cmd)
@@ -321,9 +288,9 @@ class STLSim(object):
return
- print "Mering cores output to a single pcap file...\n"
- inputs = ["{0}-{1}".format(self.outfile, index) for index in xrange(0, self.dp_core_count)]
- merge_cap_files(inputs, self.outfile, delete_src = True)
+ print("Merging cores output to a single pcap file...\n")
+ inputs = ["{0}-{1}".format(self.outfile, index) for index in range(0, self.dp_core_count)]
+ pcap.merge_cap_files(inputs, self.outfile, delete_src = True)
@@ -362,7 +329,7 @@ def setParserOptions():
dest = "dp_core_count",
default = 1,
type = int,
- choices = xrange(1, 9))
+ choices = list(range(1, 9)))
parser.add_argument("-n", "--core_index",
help = "Record only a specific core",
@@ -457,7 +424,7 @@ def setParserOptions():
def validate_args (parser, options):
if options.dp_core_index:
- if not options.dp_core_index in xrange(0, options.dp_core_count):
+ if not options.dp_core_index in range(0, options.dp_core_count):
parser.error("DP core index valid range is 0 to {0}".format(options.dp_core_count - 1))
# zero is ok - no limit, but other values must be at least as the number of cores
@@ -504,11 +471,11 @@ def main (args = None):
tunables = options.tunables)
except KeyboardInterrupt as e:
- print "\n\n*** Caught Ctrl + C... Exiting...\n\n"
+ print("\n\n*** Caught Ctrl + C... Exiting...\n\n")
return (-1)
except STLError as e:
- print e
+ print(e)
return (-1)
return (0)
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
index 12bf881a..18c49d4e 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
@@ -1,9 +1,9 @@
#!/router/bin/python
-from utils import text_tables
-from utils.text_opts import format_text, format_threshold, format_num
+from .utils import text_tables
+from .utils.text_opts import format_text, format_threshold, format_num
-from trex_stl_async_client import CTRexAsyncStats
+from .trex_stl_async_client import CTRexAsyncStats
from collections import namedtuple, OrderedDict, deque
import copy
@@ -20,15 +20,15 @@ PORT_STATS = 'p'
PORT_STATUS = 'ps'
STREAMS_STATS = 's'
-ALL_STATS_OPTS = {GLOBAL_STATS, PORT_STATS, PORT_STATUS, STREAMS_STATS}
-COMPACT = {GLOBAL_STATS, PORT_STATS}
-SS_COMPAT = {GLOBAL_STATS, STREAMS_STATS}
+ALL_STATS_OPTS = [GLOBAL_STATS, PORT_STATS, PORT_STATUS, STREAMS_STATS]
+COMPACT = [GLOBAL_STATS, PORT_STATS]
+SS_COMPAT = [GLOBAL_STATS, STREAMS_STATS]
ExportableStats = namedtuple('ExportableStats', ['raw_data', 'text_table'])
# deep mrege of dicts dst = src + dst
def deep_merge_dicts (dst, src):
- for k, v in src.iteritems():
+ for k, v in src.items():
# if not exists - deep copy it
if not k in dst:
dst[k] = copy.deepcopy(v)
@@ -57,10 +57,10 @@ def is_intable (value):
def calculate_diff (samples):
total = 0.0
- weight_step = 1.0 / sum(xrange(0, len(samples)))
+ weight_step = 1.0 / sum(range(0, len(samples)))
weight = weight_step
- for i in xrange(0, len(samples) - 1):
+ for i in range(0, len(samples) - 1):
current = samples[i] if samples[i] > 0 else 1
next = samples[i + 1] if samples[i + 1] > 0 else 1
@@ -77,10 +77,10 @@ def calculate_diff (samples):
def calculate_diff_raw (samples):
total = 0.0
- weight_step = 1.0 / sum(xrange(0, len(samples)))
+ weight_step = 1.0 / sum(range(0, len(samples)))
weight = weight_step
- for i in xrange(0, len(samples) - 1):
+ for i in range(0, len(samples) - 1):
current = samples[i]
next = samples[i + 1]
@@ -140,7 +140,7 @@ class CTRexInfoGenerator(object):
stats_table.set_cols_align(["l", "l"])
stats_table.add_rows([[k.replace("_", " ").title(), v]
- for k, v in stats_data.iteritems()],
+ for k, v in stats_data.items()],
header=False)
return {"global_statistics": ExportableStats(stats_data, stats_table)}
@@ -156,7 +156,7 @@ class CTRexInfoGenerator(object):
stats_table.set_cols_dtype(['t'] + ['t'] * stream_count)
stats_table.add_rows([[k] + v
- for k, v in sstats_data.iteritems()],
+ for k, v in sstats_data.items()],
header=False)
header = ["PG ID"] + [key for key in streams_keys]
@@ -221,7 +221,7 @@ class CTRexInfoGenerator(object):
stats_table.set_cols_dtype(['t'] + ['t'] * total_cols)
stats_table.add_rows([[k] + v
- for k, v in per_field_stats.iteritems()],
+ for k, v in per_field_stats.items()],
header=False)
stats_table.header(header)
@@ -288,7 +288,7 @@ class CTRexInfoGenerator(object):
stats_table.set_cols_dtype(['t'] + ['t'] * total_cols)
stats_table.add_rows([[k] + v
- for k, v in per_field_stats.iteritems()],
+ for k, v in per_field_stats.items()],
header=False)
stats_table.header(header)
@@ -328,7 +328,7 @@ class CTRexInfoGenerator(object):
stats_table.set_cols_width([15] + [20] * len(relevant_ports))
stats_table.add_rows([[k] + v
- for k, v in per_field_status.iteritems()],
+ for k, v in per_field_status.items()],
header=False)
stats_table.header(["port"] + [port.port_id
for port in relevant_ports])
@@ -350,7 +350,7 @@ class CTRexInfoGenerator(object):
p_type_field_len = 0
- for stream_id, stream_id_sum in return_streams_data['streams'].iteritems():
+ for stream_id, stream_id_sum in return_streams_data['streams'].items():
stream_id_sum['packet_type'] = self._trim_packet_headers(stream_id_sum['packet_type'], 30)
p_type_field_len = max(p_type_field_len, len(stream_id_sum['packet_type']))
@@ -360,7 +360,7 @@ class CTRexInfoGenerator(object):
info_table.set_cols_dtype(["t"] + ["t"] + ["t"] + ["t"] + ["t"] + ["t"])
info_table.add_rows([v.values()
- for k, v in return_streams_data['streams'].iteritems()],
+ for k, v in return_streams_data['streams'].items()],
header=False)
info_table.header(["ID", "packet type", "length", "mode", "rate", "next stream"])
@@ -370,17 +370,17 @@ class CTRexInfoGenerator(object):
def __get_relevant_ports(self, port_id_list):
# fetch owned ports
ports = [port_obj
- for _, port_obj in self._ports_dict.iteritems()
+ for _, port_obj in self._ports_dict.items()
if port_obj.port_id in port_id_list]
# display only the first FOUR options, by design
if len(ports) > 4:
- print format_text("[WARNING]: ", 'magenta', 'bold'), format_text("displaying up to 4 ports", 'magenta')
+ self.logger.log(format_text("[WARNING]: ", 'magenta', 'bold'), format_text("displaying up to 4 ports", 'magenta'))
ports = ports[:4]
return ports
def __update_per_field_dict(self, dict_src_data, dict_dest_ref):
- for key, val in dict_src_data.iteritems():
+ for key, val in dict_src_data.items():
if key in dict_dest_ref:
dict_dest_ref[key].append(val)
@@ -634,7 +634,7 @@ class CPortStats(CTRexStats):
@staticmethod
def __merge_dicts (target, src):
- for k, v in src.iteritems():
+ for k, v in src.items():
if k in target:
target[k] += v
else:
@@ -799,7 +799,7 @@ class CRxStats(CTRexStats):
# does the current snapshot has this field ?
if field in current_pg:
- for port, pv in current_pg[field].iteritems():
+ for port, pv in current_pg[field].items():
if not is_intable(port):
continue
@@ -807,7 +807,7 @@ class CRxStats(CTRexStats):
# sum up
total = None
- for port, pv in output[field].iteritems():
+ for port, pv in output[field].items():
if not is_intable(port):
continue
if total is None:
@@ -953,7 +953,7 @@ class CRxStats(CTRexStats):
def get_stats (self):
stats = {}
- for pg_id, value in self.latest_stats.iteritems():
+ for pg_id, value in self.latest_stats.items():
# skip non ints
if not is_intable(pg_id):
continue
@@ -962,7 +962,7 @@ class CRxStats(CTRexStats):
for field in ['tx_pkts', 'tx_bytes', 'rx_pkts']:
stats[int(pg_id)][field] = {'total': self.get_rel([pg_id, field, 'total'])}
- for port, pv in value[field].iteritems():
+ for port, pv in value[field].items():
try:
int(port)
except ValueError:
@@ -976,7 +976,7 @@ class CRxStats(CTRexStats):
def generate_stats (self):
# for TUI - maximum 4
- pg_ids = filter(is_intable, self.latest_stats.keys())[:4]
+ pg_ids = list(filter(is_intable, self.latest_stats.keys()))[:4]
cnt = len(pg_ids)
formatted_stats = OrderedDict([ ('Tx pps', []),
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py
index 20600791..30fdb2dd 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py
@@ -1,5 +1,5 @@
-from trex_stl_streams import *
-from trex_stl_packet_builder_scapy import *
+from .trex_stl_streams import *
+from .trex_stl_packet_builder_scapy import *
# map ports
# will destroy all streams/data on the ports
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
index b14353f4..4f8ce3e6 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
@@ -1,9 +1,9 @@
#!/router/bin/python
-from trex_stl_exceptions import *
-from trex_stl_types import verify_exclusive_arg, validate_type
-from trex_stl_packet_builder_interface import CTrexPktBuilderInterface
-from trex_stl_packet_builder_scapy import STLPktBuilder, Ether, IP, UDP, TCP, RawPcapReader
+from .trex_stl_exceptions import *
+from .trex_stl_types import verify_exclusive_arg, validate_type
+from .trex_stl_packet_builder_interface import CTrexPktBuilderInterface
+from .trex_stl_packet_builder_scapy import STLPktBuilder, Ether, IP, UDP, TCP, RawPcapReader
from collections import OrderedDict, namedtuple
from scapy.utils import ltoa
@@ -12,8 +12,8 @@ import yaml
import base64
import string
import traceback
-from types import NoneType
import copy
+import imp
# base class for TX mode
class STLTXMode(object):
@@ -318,11 +318,11 @@ class STLStream(object):
# type checking
validate_type('mode', mode, STLTXMode)
- validate_type('packet', packet, (NoneType, CTrexPktBuilderInterface))
+ validate_type('packet', packet, (type(None), CTrexPktBuilderInterface))
validate_type('enabled', enabled, bool)
validate_type('self_start', self_start, bool)
validate_type('isg', isg, (int, float))
- validate_type('stream_id', stream_id, (NoneType, int))
+ validate_type('stream_id', stream_id, (type(None), int))
validate_type('random_seed',random_seed,int);
if (type(mode) == STLTXCont) and (next != None):
@@ -474,12 +474,12 @@ class STLStream(object):
def to_pkt_dump (self):
""" print packet description from scapy """
if self.name:
- print "Stream Name: ",self.name
+ print("Stream Name: ",self.name)
scapy_b = self.scapy_pkt_builder;
if scapy_b and isinstance(scapy_b,STLPktBuilder):
scapy_b.to_pkt_dump()
else:
- print "Nothing to dump"
+ print("Nothing to dump")
@@ -525,17 +525,26 @@ class STLStream(object):
if payload:
payload.remove_payload() # fcs etc.
data = payload.fields.get('load', '')
- replchars = re.compile('(\s|/|\'|\\\|[^' + re.escape(string.printable) + '])') # convert bad chars to hex
- new_data = replchars.sub(self.__replchars_to_hex, data)
- payload_start = packet_command.find("Raw(load='")
+
+ good_printable = [c for c in string.printable if ord(c) not in range(32)]
+ good_printable.remove("'")
+
+ if type(data) is str:
+ new_data = ''.join([c if c in good_printable else r'\x{0:02x}'.format(ord(c)) for c in data])
+ else:
+ new_data = ''.join([chr(c) if chr(c) in good_printable else r'\x{0:02x}'.format(c) for c in data])
+
+ payload_start = packet_command.find("Raw(load=")
if payload_start != -1:
packet_command = packet_command[:payload_start-1]
layers = packet_command.split('/')
+
if payload:
if len(new_data) and new_data == new_data[0] * len(new_data):
layers.append("Raw(load='%s' * %s)" % (new_data[0], len(new_data)))
else:
layers.append("Raw(load='%s')" % new_data)
+
packet_code = 'packet = (' + (' / \n ').join(layers) + ')'
vm_list = []
for inst in self.fields['vm']['instructions']:
@@ -844,7 +853,7 @@ class STLProfile(object):
tunables = func.__code__.co_varnames[1:argc]
# fetch defaults
- defaults = func.func_defaults
+ defaults = func.__defaults__
if len(defaults) != (argc - 1):
raise STLError("Module should provide default values for all arguments on get_streams()")
@@ -868,8 +877,8 @@ class STLProfile(object):
try:
file = os.path.basename(python_file).split('.')[0]
- module = __import__(file, globals(), locals(), [], -1)
- reload(module) # reload the update
+ module = __import__(file, globals(), locals(), [], 0)
+ imp.reload(module) # reload the update
t = STLProfile.get_module_tunables(module)
for arg in kwargs:
@@ -931,7 +940,7 @@ class STLProfile(object):
raise STLError("file '{0}' does not exists".format(pcap_file))
# make sure IPG is not less than 1 usec
- if ipg_usec < 1:
+ if ipg_usec is not None and ipg_usec < 1:
raise STLError("ipg_usec cannot be less than 1 usec: '{0}'".format(ipg_usec))
if loop_count < 0:
@@ -1018,9 +1027,9 @@ class STLProfile(object):
""" dump the profile as scapy packet. in case it is raw convert to scapy and dump it"""
cnt=0;
for stream in self.streams:
- print "======================="
- print "Stream %d" % cnt
- print "======================="
+ print("=======================")
+ print("Stream %d" % cnt)
+ print("=======================")
cnt = cnt +1
stream.to_pkt_dump()
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py
index 4b599f16..e5305c78 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py
@@ -1,7 +1,7 @@
from collections import namedtuple
-from utils.text_opts import *
-from trex_stl_exceptions import *
+from .utils.text_opts import *
+from .trex_stl_exceptions import *
import types
RpcCmdData = namedtuple('RpcCmdData', ['method', 'params'])
@@ -26,6 +26,8 @@ class RC():
def __nonzero__ (self):
return self.good()
+ def __bool__ (self):
+ return self.good()
def add (self, rc):
self.rc_list += rc.rc_list
@@ -60,9 +62,9 @@ class RC():
def prn_func (self, msg, newline = True):
if newline:
- print msg
+ print(msg)
else:
- print msg,
+ print(msg, end=' ')
def annotate (self, log_func = None, desc = None, show_status = True):
@@ -76,12 +78,12 @@ class RC():
if self.bad():
# print all the errors
- print ""
+ print("")
for x in self.rc_list:
if not x.rc:
log_func(format_text("\n{0}".format(x.data), 'bold'))
- print ""
+ print("")
if show_status:
log_func(format_text("[FAILED]\n", 'red', 'bold'))
@@ -135,6 +137,6 @@ def validate_type(arg_name, arg, valid_types):
# throws STLError if not exactly one argument is present
def verify_exclusive_arg (args_list):
- if not (len(filter(lambda x: x is not None, args_list)) == 1):
+ if not (len(list(filter(lambda x: x is not None, args_list))) == 1):
raise STLError('exactly one parameter from {0} should be provided'.format(args_list))
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py
index 9490c1b0..ae74e932 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py
@@ -54,3 +54,7 @@ def get_number(input):
return int(input)
except:
return None
+
+def list_intersect(l1, l2):
+ return list(filter(lambda x: x in l2, l1))
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/pcap.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/pcap.py
new file mode 100644
index 00000000..ab4f98a7
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/pcap.py
@@ -0,0 +1,29 @@
+import os
+from ..trex_stl_packet_builder_scapy import RawPcapReader, RawPcapWriter
+
+
+def __ts_key (a):
+ return float(a[1][0]) + (float(a[1][1]) / 1e6)
+
+def merge_cap_files (pcap_file_list, out_filename, delete_src = False):
+
+ if not all([os.path.exists(f) for f in pcap_file_list]):
+ print("failed to merge cap file list...\nnot all files exist\n")
+ return
+
+ out_pkts = []
+ for src in pcap_file_list:
+ pkts = RawPcapReader(src)
+ out_pkts += pkts
+ if delete_src:
+ os.unlink(src)
+
+ # sort by timestamp
+ out_pkts = sorted(out_pkts, key = __ts_key)
+
+ writer = RawPcapWriter(out_filename, linktype = 1)
+
+ writer._write_header(None)
+ for pkt in out_pkts:
+ writer._write_packet(pkt[0], sec=pkt[1][0], usec=pkt[1][1], caplen=pkt[1][2], wirelen=None)
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py
index 78a0ab1f..bc2d44f4 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py
@@ -19,8 +19,8 @@ TEXT_CODES = {'bold': {'start': '\x1b[1m',
'end': '\x1b[24m'}}
class TextCodesStripper:
- keys = [re.escape(v['start']) for k,v in TEXT_CODES.iteritems()]
- keys += [re.escape(v['end']) for k,v in TEXT_CODES.iteritems()]
+ keys = [re.escape(v['start']) for k,v in TEXT_CODES.items()]
+ keys += [re.escape(v['end']) for k,v in TEXT_CODES.items()]
pattern = re.compile("|".join(keys))
@staticmethod
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py
index 07753fda..8917cd28 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py
@@ -1,5 +1,5 @@
from texttable import Texttable
-from text_opts import format_text
+from .text_opts import format_text
class TRexTextTable(Texttable):
@@ -22,9 +22,8 @@ def generate_trex_stats_table():
def print_table_with_header(texttable_obj, header="", untouched_header=""):
header = header.replace("_", " ").title() + untouched_header
- print format_text(header, 'cyan', 'underline') + "\n"
-
- print (texttable_obj.draw() + "\n").encode('utf-8')
+ print(format_text(header, 'cyan', 'underline') + "\n")
+ print((texttable_obj.draw() + "\n"))
if __name__ == "__main__":
pass
diff --git a/scripts/external_libs/dpkt-1.8.6/AUTHORS b/scripts/external_libs/dpkt-1.8.6/AUTHORS
deleted file mode 100644
index dcefbef9..00000000
--- a/scripts/external_libs/dpkt-1.8.6/AUTHORS
+++ /dev/null
@@ -1,60 +0,0 @@
-
-Original author
----------------
-
-Dug Song <dugsong@monkey.org>
-
-
-Contributors
-------------
-
-Timur Alperovich <timuralp@umich.edu>
- radiotap module
-
-Nic Bellamy <nic.bellamy@vadacom.co.nz>
- HTTP header parsing fix
-
-the grugq <thegrugq@gmail.com>
- better RTP module
-
-David Helder <dhelder@gizmolabs.org>
- bug fixes
-
-Przemyslaw Karwasiecki <karwas@gmail.com>
- TABLE_DUMP in MRT module
-
-Reza Lotun <rlotun@cs.ubc.ca>
- MetaPacket cleanup
-
-Jeff Nathan <jeff@snort.org>
- bug fixes
-
-Tim Newsham <newsham@lava.net>
- IPv6 bugfixing and improvements
-
-keisuke.nishimoto@gmail.com
- Snoop file parser
-
-Jon Oberheide <jon@oberheide.org>
- STUN, H.225, TPKT, NTP, RIP, Diameter, SCTP, BGP, MRT, RX modules
-
-plotnikoff@gmail.com
- handle dynamic imports from py2exe/freeze.py/zipped egg packages
-
-simdream@gmail.com
- handle multiple cookie values in HTTP
-
-Owen Stephens <owen@owenstephens.co.uk>
- IP6 extension header support
-
-Robert Stone <otaku@monkey.org>
- Netflow and QQ modules
-
-Thomas Taranowski <thomastaranowski@yahoo.com>
- dnet IP checksum bug on i386
-
-Jirka Vejrazka
- bug fixes
-
-Tim Yardley <yardley@gmail.com>
- DHCP definitions
diff --git a/scripts/external_libs/dpkt-1.8.6/CHANGES b/scripts/external_libs/dpkt-1.8.6/CHANGES
deleted file mode 100644
index a5f05121..00000000
--- a/scripts/external_libs/dpkt-1.8.6/CHANGES
+++ /dev/null
@@ -1,71 +0,0 @@
-dpkg-1.8:
- - fix a typo in vrrp.py
- - fix IPv4 and IPv6 packet to correctly handle zero payload length
- - store cipher_suite as int in TLSServerHello to allow app-specific messages
- - improve SSL parsing
-
-dpkt-1.7:
- - handle dynamic imports from py2exe/freeze.py/zipped egg
- packages, from plotnikoff
- - decode Ethernet MPLS labels, Cisco ISL VLAN tags, 802.2 LLC fields
- - handle multiply-defined HTTP headers from simdream
- - add IPv6 extension header support (minus ESP) from Owen Stephens
- - add radiotap module from Timur Alperovich
- - add IEEE80211 module from Jon Oberheide
- - add RFB module from Jon Oberheide
- - fix IP6 checksum to include options
- - rename 'as' to 'asn' field in BGP header
- - fix transport-layer checksum in IP6
- - handle improper TCP header offset
- - fix SSL typo
- - handle malformed ICMP headers
- - added RX module from Jon Oberheide
- - fixed loopback module IP/IP6 decoding
- - set transport-layer (TCP, UDP) checksum in IP
- - MRT module fixes
- - fix pcap.Writer timestamp calculation
-
-dpkt-1.6:
- - DNS RR packing fixed
- - added STUN, H.225, TPKT, NTP, RIP, Diameter, SCTP,
- BGP, and MRT modules from Jon Oberheide
- - new dpkt.NeedData exception
-
-dpkt-1.5:
- - IP6 checksum fix
- - __getitem__() interface to Packet (e.g. ip['src'] == ip.src)
- - faster Ethernet, IP, PPP module loading
- - support any endianness capture file in pcap module,
- and export a pypcap-compatible Reader
- - additional CDP definitions
- - replaced rtp module with the grugq's version
- - added QQ module from Robert Stone
- - added gzip module
- - added PPPoE module
- - added RADIUS module
-
-dpkt-1.4:
- - fix IP checksum bug on i386, caught by Thomas Taranowski
-
-dpkt-1.3:
- - autoload IP, Ethernet dispatch tables
- - IP6 bugfixes from Tim Newsham
- - additional DHCP definitions from Tim Yardley
- - HTTP bugfixes and abstraction (see SIP)
- - RPC bugfixes
- - added pypcap-compatible PcapReader
- - added Linux libpcap "cooked" capture module
- - added preliminary SSL module
- - added SIP module
- - added SCCP module
- - added RTP module
- - added Portmap module
-
-dpkt-1.2:
- - changed license from GPL to BSD
- - added DTP module
- - added HTTP module
- - added DNS RR decodes
- - added enough PPP to decode PPTP GRE encapsulation
-
-# $Id: CHANGES 379 2006-07-27 05:23:19Z dugsong $
diff --git a/scripts/external_libs/dpkt-1.8.6/LICENSE b/scripts/external_libs/dpkt-1.8.6/LICENSE
deleted file mode 100644
index 99d14371..00000000
--- a/scripts/external_libs/dpkt-1.8.6/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-
- Copyright (c) 2004 Dug Song <dugsong@monkey.org>
- All rights reserved, all wrongs reversed.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- 3. The names of the authors and copyright holders may not be used to
- endorse or promote products derived from this software without
- specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/scripts/external_libs/dpkt-1.8.6/MANIFEST.in b/scripts/external_libs/dpkt-1.8.6/MANIFEST.in
deleted file mode 100644
index e96d54c1..00000000
--- a/scripts/external_libs/dpkt-1.8.6/MANIFEST.in
+++ /dev/null
@@ -1,2 +0,0 @@
-
-include AUTHORS CHANGES README.rst LICENSE
diff --git a/scripts/external_libs/dpkt-1.8.6/PKG-INFO b/scripts/external_libs/dpkt-1.8.6/PKG-INFO
deleted file mode 100644
index e82397e7..00000000
--- a/scripts/external_libs/dpkt-1.8.6/PKG-INFO
+++ /dev/null
@@ -1,122 +0,0 @@
-Metadata-Version: 1.1
-Name: dpkt
-Version: 1.8.6
-Summary: fast, simple packet creation / parsing, with definitions for the basic TCP/IP protocols
-Home-page: http://dpkt.googlecode.com/
-Author: Dug Song <dugsong@monkey.org>
-Author-email: UNKNOWN
-License: BSD
-Description:
- ====
- dpkt
- ====
-
- | |docs| |travis| |coveralls| |landscape| |version|
- | |downloads| |wheel| |supported-versions| |supported-implementations|
-
- .. |docs| image:: https://readthedocs.org/projects/dpkt/badge/?style=flat
- :target: https://readthedocs.org/projects/dpkt
- :alt: Documentation Status
-
- .. |travis| image:: http://img.shields.io/travis/kbandla/dpkt/master.png?style=flat
- :alt: Travis-CI Build Status
- :target: https://travis-ci.org/kbandla/dpkt
-
- .. |coveralls| image:: http://img.shields.io/coveralls/kbandla/dpkt/master.png?style=flat
- :alt: Coverage Status
- :target: https://coveralls.io/r/kbandla/dpkt
-
- .. |landscape| image:: https://landscape.io/github/kbandla/dpkt/master/landscape.svg?style=flat
- :target: https://landscape.io/github/kbandla/dpkt/master
- :alt: Code Quality Status
-
- .. |version| image:: http://img.shields.io/pypi/v/dpkt.png?style=flat
- :alt: PyPI Package latest release
- :target: https://pypi.python.org/pypi/dpkt
-
- .. |downloads| image:: http://img.shields.io/pypi/dm/dpkt.png?style=flat
- :alt: PyPI Package monthly downloads
- :target: https://pypi.python.org/pypi/dpkt
-
- .. |wheel| image:: https://pypip.in/wheel/dpkt/badge.png?style=flat
- :alt: PyPI Wheel
- :target: https://pypi.python.org/pypi/dpkt
-
- .. |supported-versions| image:: https://pypip.in/py_versions/dpkt/badge.png?style=flat
- :alt: Supported versions
- :target: https://pypi.python.org/pypi/dpkt
-
- .. |supported-implementations| image:: https://pypip.in/implementation/dpkt/badge.png?style=flat
- :alt: Supported implementations
- :target: https://pypi.python.org/pypi/dpkt
-
- Installation
- ============
-
- ::
-
- pip install dpkt
-
- Documentation
- =============
-
- https://dpkt.readthedocs.org/
-
- Development
- ===========
-
- To run the all tests run::
-
- tox
-
-
- Deviations from upstream
- ~~~~~~~~~~~~~~~~~~~~~~~~
-
- This code is based on `dpkt code <https://code.google.com/p/dpkt/>`__ lead by Dug Song.
-
- At this point, this is not the exact `upstream
- version <https://code.google.com/p/dpkt/>`__. If you are looking for the
- latest stock dpkt, please get it from the above link.
-
- Almost all of the upstream changes are pulled. However, some modules are
- not. Here is a list of the changes:
-
- - `dpkt/dpkt.py <https://github.com/kbandla/dpkt/commit/336fe02b0e2f00b382d91cd42558a69eec16d6c7>`__:
- decouple dnet from dpkt
- - `dpkt/dns.py <https://github.com/kbandla/dpkt/commit/2bf3cde213144391fd90488d12f9ccce51b5fbca>`__
- : parse some more DNS flags
-
- Examples
- --------
-
- [@jonoberheide's](https://twitter.com/jonoberheide) old examples still
- apply:
-
- - `dpkt Tutorial #1: ICMP
- Echo <https://jon.oberheide.org/blog/2008/08/25/dpkt-tutorial-1-icmp-echo/>`__
- - `dpkt Tutorial #2: Parsing a PCAP
- File <https://jon.oberheide.org/blog/2008/10/15/dpkt-tutorial-2-parsing-a-pcap-file/>`__
- - `dpkt Tutorial #3: dns
- spoofing <https://jon.oberheide.org/blog/2008/12/20/dpkt-tutorial-3-dns-spoofing/>`__
- - `dpkt Tutorial #4: AS Paths from
- MRT/BGP <https://jon.oberheide.org/blog/2009/03/25/dpkt-tutorial-4-as-paths-from-mrt-bgp/>`__
-
- `Jeff Silverman <https://github.com/jeffsilverm>`__ has some
- `code <https://github.com/jeffsilverm/dpkt_doc>`__ and
- `documentation <http://www.commercialventvac.com/dpkt.html>`__.
-
- LICENSE
- -------
-
- BSD 3-Clause License, as the upstream project
-
-Platform: UNKNOWN
-Classifier: Development Status :: 4 - Beta
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Natural Language :: English
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Programming Language :: Python :: Implementation :: PyPy
diff --git a/scripts/external_libs/dpkt-1.8.6/README.rst b/scripts/external_libs/dpkt-1.8.6/README.rst
deleted file mode 100644
index 9339c574..00000000
--- a/scripts/external_libs/dpkt-1.8.6/README.rst
+++ /dev/null
@@ -1,104 +0,0 @@
-
-====
-dpkt
-====
-
-| |docs| |travis| |coveralls| |landscape| |version|
-| |downloads| |wheel| |supported-versions| |supported-implementations|
-
-.. |docs| image:: https://readthedocs.org/projects/dpkt/badge/?style=flat
- :target: https://readthedocs.org/projects/dpkt
- :alt: Documentation Status
-
-.. |travis| image:: http://img.shields.io/travis/kbandla/dpkt/master.png?style=flat
- :alt: Travis-CI Build Status
- :target: https://travis-ci.org/kbandla/dpkt
-
-.. |coveralls| image:: http://img.shields.io/coveralls/kbandla/dpkt/master.png?style=flat
- :alt: Coverage Status
- :target: https://coveralls.io/r/kbandla/dpkt
-
-.. |landscape| image:: https://landscape.io/github/kbandla/dpkt/master/landscape.svg?style=flat
- :target: https://landscape.io/github/kbandla/dpkt/master
- :alt: Code Quality Status
-
-.. |version| image:: http://img.shields.io/pypi/v/dpkt.png?style=flat
- :alt: PyPI Package latest release
- :target: https://pypi.python.org/pypi/dpkt
-
-.. |downloads| image:: http://img.shields.io/pypi/dm/dpkt.png?style=flat
- :alt: PyPI Package monthly downloads
- :target: https://pypi.python.org/pypi/dpkt
-
-.. |wheel| image:: https://pypip.in/wheel/dpkt/badge.png?style=flat
- :alt: PyPI Wheel
- :target: https://pypi.python.org/pypi/dpkt
-
-.. |supported-versions| image:: https://pypip.in/py_versions/dpkt/badge.png?style=flat
- :alt: Supported versions
- :target: https://pypi.python.org/pypi/dpkt
-
-.. |supported-implementations| image:: https://pypip.in/implementation/dpkt/badge.png?style=flat
- :alt: Supported implementations
- :target: https://pypi.python.org/pypi/dpkt
-
-Installation
-============
-
-::
-
- pip install dpkt
-
-Documentation
-=============
-
-https://dpkt.readthedocs.org/
-
-Development
-===========
-
-To run the all tests run::
-
- tox
-
-
-Deviations from upstream
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-This code is based on `dpkt code <https://code.google.com/p/dpkt/>`__ lead by Dug Song.
-
-At this point, this is not the exact `upstream
-version <https://code.google.com/p/dpkt/>`__. If you are looking for the
-latest stock dpkt, please get it from the above link.
-
-Almost all of the upstream changes are pulled. However, some modules are
-not. Here is a list of the changes:
-
-- `dpkt/dpkt.py <https://github.com/kbandla/dpkt/commit/336fe02b0e2f00b382d91cd42558a69eec16d6c7>`__:
- decouple dnet from dpkt
-- `dpkt/dns.py <https://github.com/kbandla/dpkt/commit/2bf3cde213144391fd90488d12f9ccce51b5fbca>`__
- : parse some more DNS flags
-
-Examples
---------
-
-[@jonoberheide's](https://twitter.com/jonoberheide) old examples still
-apply:
-
-- `dpkt Tutorial #1: ICMP
- Echo <https://jon.oberheide.org/blog/2008/08/25/dpkt-tutorial-1-icmp-echo/>`__
-- `dpkt Tutorial #2: Parsing a PCAP
- File <https://jon.oberheide.org/blog/2008/10/15/dpkt-tutorial-2-parsing-a-pcap-file/>`__
-- `dpkt Tutorial #3: dns
- spoofing <https://jon.oberheide.org/blog/2008/12/20/dpkt-tutorial-3-dns-spoofing/>`__
-- `dpkt Tutorial #4: AS Paths from
- MRT/BGP <https://jon.oberheide.org/blog/2009/03/25/dpkt-tutorial-4-as-paths-from-mrt-bgp/>`__
-
-`Jeff Silverman <https://github.com/jeffsilverm>`__ has some
-`code <https://github.com/jeffsilverm/dpkt_doc>`__ and
-`documentation <http://www.commercialventvac.com/dpkt.html>`__.
-
-LICENSE
--------
-
-BSD 3-Clause License, as the upstream project
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/PKG-INFO b/scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/PKG-INFO
deleted file mode 100644
index e82397e7..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/PKG-INFO
+++ /dev/null
@@ -1,122 +0,0 @@
-Metadata-Version: 1.1
-Name: dpkt
-Version: 1.8.6
-Summary: fast, simple packet creation / parsing, with definitions for the basic TCP/IP protocols
-Home-page: http://dpkt.googlecode.com/
-Author: Dug Song <dugsong@monkey.org>
-Author-email: UNKNOWN
-License: BSD
-Description:
- ====
- dpkt
- ====
-
- | |docs| |travis| |coveralls| |landscape| |version|
- | |downloads| |wheel| |supported-versions| |supported-implementations|
-
- .. |docs| image:: https://readthedocs.org/projects/dpkt/badge/?style=flat
- :target: https://readthedocs.org/projects/dpkt
- :alt: Documentation Status
-
- .. |travis| image:: http://img.shields.io/travis/kbandla/dpkt/master.png?style=flat
- :alt: Travis-CI Build Status
- :target: https://travis-ci.org/kbandla/dpkt
-
- .. |coveralls| image:: http://img.shields.io/coveralls/kbandla/dpkt/master.png?style=flat
- :alt: Coverage Status
- :target: https://coveralls.io/r/kbandla/dpkt
-
- .. |landscape| image:: https://landscape.io/github/kbandla/dpkt/master/landscape.svg?style=flat
- :target: https://landscape.io/github/kbandla/dpkt/master
- :alt: Code Quality Status
-
- .. |version| image:: http://img.shields.io/pypi/v/dpkt.png?style=flat
- :alt: PyPI Package latest release
- :target: https://pypi.python.org/pypi/dpkt
-
- .. |downloads| image:: http://img.shields.io/pypi/dm/dpkt.png?style=flat
- :alt: PyPI Package monthly downloads
- :target: https://pypi.python.org/pypi/dpkt
-
- .. |wheel| image:: https://pypip.in/wheel/dpkt/badge.png?style=flat
- :alt: PyPI Wheel
- :target: https://pypi.python.org/pypi/dpkt
-
- .. |supported-versions| image:: https://pypip.in/py_versions/dpkt/badge.png?style=flat
- :alt: Supported versions
- :target: https://pypi.python.org/pypi/dpkt
-
- .. |supported-implementations| image:: https://pypip.in/implementation/dpkt/badge.png?style=flat
- :alt: Supported implementations
- :target: https://pypi.python.org/pypi/dpkt
-
- Installation
- ============
-
- ::
-
- pip install dpkt
-
- Documentation
- =============
-
- https://dpkt.readthedocs.org/
-
- Development
- ===========
-
- To run the all tests run::
-
- tox
-
-
- Deviations from upstream
- ~~~~~~~~~~~~~~~~~~~~~~~~
-
- This code is based on `dpkt code <https://code.google.com/p/dpkt/>`__ lead by Dug Song.
-
- At this point, this is not the exact `upstream
- version <https://code.google.com/p/dpkt/>`__. If you are looking for the
- latest stock dpkt, please get it from the above link.
-
- Almost all of the upstream changes are pulled. However, some modules are
- not. Here is a list of the changes:
-
- - `dpkt/dpkt.py <https://github.com/kbandla/dpkt/commit/336fe02b0e2f00b382d91cd42558a69eec16d6c7>`__:
- decouple dnet from dpkt
- - `dpkt/dns.py <https://github.com/kbandla/dpkt/commit/2bf3cde213144391fd90488d12f9ccce51b5fbca>`__
- : parse some more DNS flags
-
- Examples
- --------
-
- [@jonoberheide's](https://twitter.com/jonoberheide) old examples still
- apply:
-
- - `dpkt Tutorial #1: ICMP
- Echo <https://jon.oberheide.org/blog/2008/08/25/dpkt-tutorial-1-icmp-echo/>`__
- - `dpkt Tutorial #2: Parsing a PCAP
- File <https://jon.oberheide.org/blog/2008/10/15/dpkt-tutorial-2-parsing-a-pcap-file/>`__
- - `dpkt Tutorial #3: dns
- spoofing <https://jon.oberheide.org/blog/2008/12/20/dpkt-tutorial-3-dns-spoofing/>`__
- - `dpkt Tutorial #4: AS Paths from
- MRT/BGP <https://jon.oberheide.org/blog/2009/03/25/dpkt-tutorial-4-as-paths-from-mrt-bgp/>`__
-
- `Jeff Silverman <https://github.com/jeffsilverm>`__ has some
- `code <https://github.com/jeffsilverm/dpkt_doc>`__ and
- `documentation <http://www.commercialventvac.com/dpkt.html>`__.
-
- LICENSE
- -------
-
- BSD 3-Clause License, as the upstream project
-
-Platform: UNKNOWN
-Classifier: Development Status :: 4 - Beta
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Natural Language :: English
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Programming Language :: Python :: Implementation :: PyPy
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/SOURCES.txt b/scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/SOURCES.txt
deleted file mode 100644
index c2521e2a..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-AUTHORS
-CHANGES
-LICENSE
-MANIFEST.in
-README.rst
-setup.cfg
-setup.py
-dpkt/__init__.py
-dpkt/ah.py
-dpkt/aim.py
-dpkt/aoe.py
-dpkt/aoeata.py
-dpkt/aoecfg.py
-dpkt/arp.py
-dpkt/asn1.py
-dpkt/bgp.py
-dpkt/cdp.py
-dpkt/crc32c.py
-dpkt/dhcp.py
-dpkt/diameter.py
-dpkt/dns.py
-dpkt/dpkt.py
-dpkt/dtp.py
-dpkt/esp.py
-dpkt/ethernet.py
-dpkt/gre.py
-dpkt/gzip.py
-dpkt/h225.py
-dpkt/hsrp.py
-dpkt/http.py
-dpkt/icmp.py
-dpkt/icmp6.py
-dpkt/ieee80211.py
-dpkt/igmp.py
-dpkt/ip.py
-dpkt/ip6.py
-dpkt/ipx.py
-dpkt/llc.py
-dpkt/loopback.py
-dpkt/mrt.py
-dpkt/netbios.py
-dpkt/netflow.py
-dpkt/ntp.py
-dpkt/ospf.py
-dpkt/pcap.py
-dpkt/pim.py
-dpkt/pmap.py
-dpkt/ppp.py
-dpkt/pppoe.py
-dpkt/qq.py
-dpkt/radiotap.py
-dpkt/radius.py
-dpkt/rfb.py
-dpkt/rip.py
-dpkt/rpc.py
-dpkt/rtp.py
-dpkt/rx.py
-dpkt/sccp.py
-dpkt/sctp.py
-dpkt/sip.py
-dpkt/sll.py
-dpkt/smb.py
-dpkt/snoop.py
-dpkt/ssl.py
-dpkt/ssl_ciphersuites.py
-dpkt/stp.py
-dpkt/stun.py
-dpkt/tcp.py
-dpkt/telnet.py
-dpkt/tftp.py
-dpkt/tns.py
-dpkt/tpkt.py
-dpkt/udp.py
-dpkt/vrrp.py
-dpkt/yahoo.py
-dpkt.egg-info/PKG-INFO
-dpkt.egg-info/SOURCES.txt
-dpkt.egg-info/dependency_links.txt
-dpkt.egg-info/not-zip-safe
-dpkt.egg-info/top_level.txt \ No newline at end of file
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/dependency_links.txt b/scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/dependency_links.txt
deleted file mode 100644
index 8b137891..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/not-zip-safe b/scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/not-zip-safe
deleted file mode 100644
index 8b137891..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/not-zip-safe
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/top_level.txt b/scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/top_level.txt
deleted file mode 100644
index 4daab81a..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-dpkt
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/__init__.py b/scripts/external_libs/dpkt-1.8.6/dpkt/__init__.py
deleted file mode 100644
index 31d6281d..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/__init__.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""fast, simple packet creation and parsing."""
-
-__author__ = 'Dug Song <dugsong@monkey.org>'
-__copyright__ = 'Copyright (c) 2004 Dug Song'
-__license__ = 'BSD'
-__url__ = 'http://dpkt.googlecode.com/'
-__version__ = '1.8.6'
-
-from dpkt import *
-
-import ah
-import aim
-import arp
-import asn1
-import bgp
-import cdp
-import dhcp
-import diameter
-import dns
-import dtp
-import esp
-import ethernet
-import gre
-import gzip
-import h225
-import hsrp
-import http
-import icmp
-import icmp6
-import ieee80211
-import igmp
-import ip
-import ip6
-import ipx
-import llc
-import loopback
-import mrt
-import netbios
-import netflow
-import ntp
-import ospf
-import pcap
-import pim
-import pmap
-import ppp
-import pppoe
-import qq
-import radiotap
-import radius
-import rfb
-import rip
-import rpc
-import rtp
-import rx
-import sccp
-import sctp
-import sip
-import sll
-import smb
-import ssl
-import stp
-import stun
-import tcp
-import telnet
-import tftp
-import tns
-import tpkt
-import udp
-import vrrp
-import yahoo
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/ah.py b/scripts/external_libs/dpkt-1.8.6/dpkt/ah.py
deleted file mode 100644
index 87def27e..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/ah.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# $Id: ah.py 34 2007-01-28 07:54:20Z dugsong $
-
-"""Authentication Header."""
-
-import dpkt
-
-class AH(dpkt.Packet):
- __hdr__ = (
- ('nxt', 'B', 0),
- ('len', 'B', 0), # payload length
- ('rsvd', 'H', 0),
- ('spi', 'I', 0),
- ('seq', 'I', 0)
- )
- auth = ''
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.auth = self.data[:self.len]
- buf = self.data[self.len:]
- import ip
- try:
- self.data = ip.IP.get_proto(self.nxt)(buf)
- setattr(self, self.data.__class__.__name__.lower(), self.data)
- except (KeyError, dpkt.UnpackError):
- self.data = buf
-
- def __len__(self):
- return self.__hdr_len__ + len(self.auth) + len(self.data)
-
- def __str__(self):
- return self.pack_hdr() + str(self.auth) + str(self.data)
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/aim.py b/scripts/external_libs/dpkt-1.8.6/dpkt/aim.py
deleted file mode 100644
index 0fb58063..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/aim.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# $Id: aim.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""AOL Instant Messenger."""
-
-import dpkt
-import struct
-
-# OSCAR: http://iserverd1.khstu.ru/oscar/
-
-class FLAP(dpkt.Packet):
- __hdr__ = (
- ('ast', 'B', 0x2a), # '*'
- ('type', 'B', 0),
- ('seq', 'H', 0),
- ('len', 'H', 0)
- )
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- if self.ast != 0x2a:
- raise dpkt.UnpackError('invalid FLAP header')
- if len(self.data) < self.len:
- raise dpkt.NeedData, '%d left, %d needed' % (len(self.data), self.len)
-
-class SNAC(dpkt.Packet):
- __hdr__ = (
- ('family', 'H', 0),
- ('subtype', 'H', 0),
- ('flags', 'H', 0),
- ('reqid', 'I', 0)
- )
-
-def tlv(buf):
- n = 4
- try:
- t, l = struct.unpack('>HH', buf[:n])
- except struct.error:
- raise dpkt.UnpackError
- v = buf[n:n+l]
- if len(v) < l:
- raise dpkt.NeedData
- buf = buf[n+l:]
- return (t,l,v, buf)
-
-# TOC 1.0: http://jamwt.com/Py-TOC/PROTOCOL
-
-# TOC 2.0: http://www.firestuff.org/projects/firetalk/doc/toc2.txt
-
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/aoe.py b/scripts/external_libs/dpkt-1.8.6/dpkt/aoe.py
deleted file mode 100644
index 45a1eaf2..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/aoe.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""ATA over Ethernet Protocol."""
-
-import struct
-
-
-import dpkt
-
-
-class AOE(dpkt.Packet):
- __hdr__ = (
- ('ver_fl', 'B', 0x10),
- ('err', 'B', 0),
- ('maj', 'H', 0),
- ('min', 'B', 0),
- ('cmd', 'B', 0),
- ('tag', 'I', 0),
- )
- _cmdsw = {}
-
- def _get_ver(self): return self.ver_fl >> 4
- def _set_ver(self, ver): self.ver_fl = (ver << 4) | (self.ver_fl & 0xf)
- ver = property(_get_ver, _set_ver)
-
- def _get_fl(self): return self.ver_fl & 0xf
- def _set_fl(self, fl): self.ver_fl = (self.ver_fl & 0xf0) | fl
- fl = property(_get_fl, _set_fl)
-
- def set_cmd(cls, cmd, pktclass):
- cls._cmdsw[cmd] = pktclass
- set_cmd = classmethod(set_cmd)
-
- def get_cmd(cls, cmd):
- return cls._cmdsw[cmd]
- get_cmd = classmethod(get_cmd)
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- try:
- self.data = self._cmdsw[self.cmd](self.data)
- setattr(self, self.data.__class__.__name__.lower(), self.data)
- except (KeyError, struct.error, dpkt.UnpackError):
- pass
-
- def pack_hdr(self):
- try:
- return dpkt.Packet.pack_hdr(self)
- except struct.error, e:
- raise dpkt.PackError(str(e))
-
-
-AOE_CMD_ATA = 0
-AOE_CMD_CFG = 1
-AOE_FLAG_RSP = 1 << 3
-
-
-def __load_cmds():
- prefix = 'AOE_CMD_'
- g = globals()
- for k, v in g.iteritems():
- if k.startswith(prefix):
- name = 'aoe' + k[len(prefix):].lower()
- try:
- mod = __import__(name, g)
- except ImportError:
- continue
- AOE.set_cmd(v, getattr(mod, name.upper()))
-
-
-if not AOE._cmdsw:
- __load_cmds()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/aoeata.py b/scripts/external_libs/dpkt-1.8.6/dpkt/aoeata.py
deleted file mode 100644
index 67e2ca11..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/aoeata.py
+++ /dev/null
@@ -1,34 +0,0 @@
-'''ATA over Ethernet ATA command'''
-
-import dpkt, aoe
-
-ATA_DEVICE_IDENTIFY = 0xec
-
-class AOEATA(dpkt.Packet):
- __hdr__ = (
- ('aflags', 'B', 0),
- ('errfeat', 'B', 0),
- ('scnt', 'B', 0),
- ('cmdstat', 'B', ATA_DEVICE_IDENTIFY),
- ('lba0', 'B', 0),
- ('lba1', 'B', 0),
- ('lba2', 'B', 0),
- ('lba3', 'B', 0),
- ('lba4', 'B', 0),
- ('lba5', 'B', 0),
- ('res', 'H', 0),
- )
-
- # XXX: in unpack, switch on ATA command like icmp does on type
-
-
-if __name__ == '__main__':
- import unittest
-
- class AOEATATestCase(unittest.TestCase):
- def test_AOEATA(self):
- s = '\x03\x0a\x6b\x19\x00\x00\x00\x00\x45\x00\x00\x28\x94\x1f\x00\x00\xe3\x06\x99\xb4\x23\x2b\x24\x00\xde\x8e\x84\x42\xab\xd1\x00\x50\x00\x35\xe1\x29\x20\xd9\x00\x00\x00\x22\x9b\xf0\xe2\x04\x65\x6b'
- aoeata = AOEATA(s)
- self.failUnless(str(aoeata) == s)
-
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/aoecfg.py b/scripts/external_libs/dpkt-1.8.6/dpkt/aoecfg.py
deleted file mode 100644
index 00831504..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/aoecfg.py
+++ /dev/null
@@ -1,24 +0,0 @@
-'''ATA over Ethernet ATA command'''
-
-import dpkt
-
-class AOECFG(dpkt.Packet):
- __hdr__ = (
- ('bufcnt', 'H', 0),
- ('fwver', 'H', 0),
- ('scnt', 'B', 0),
- ('aoeccmd', 'B', 0),
- ('cslen', 'H', 0),
- )
-
-
-if __name__ == '__main__':
- import unittest
-
- class AOECFGTestCase(unittest.TestCase):
- def test_AOECFG(self):
- s = '\x01\x02\x03\x04\x05\x06\x11\x12\x13\x14\x15\x16\x88\xa2\x10\x00\x00\x01\x02\x01\x80\x00\x00\x00\x12\x34\x00\x00\x00\x00\x04\x00' + '\0xed' * 1024
- aoecfg = AOECFG(s[14+10:])
- self.failUnless(aoecfg.bufcnt == 0x1234)
-
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/arp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/arp.py
deleted file mode 100644
index 6e742ee1..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/arp.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# $Id: arp.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Address Resolution Protocol."""
-
-import dpkt
-
-# Hardware address format
-ARP_HRD_ETH = 0x0001 # ethernet hardware
-ARP_HRD_IEEE802 = 0x0006 # IEEE 802 hardware
-
-# Protocol address format
-ARP_PRO_IP = 0x0800 # IP protocol
-
-# ARP operation
-ARP_OP_REQUEST = 1 # request to resolve ha given pa
-ARP_OP_REPLY = 2 # response giving hardware address
-ARP_OP_REVREQUEST = 3 # request to resolve pa given ha
-ARP_OP_REVREPLY = 4 # response giving protocol address
-
-class ARP(dpkt.Packet):
- __hdr__ = (
- ('hrd', 'H', ARP_HRD_ETH),
- ('pro', 'H', ARP_PRO_IP),
- ('hln', 'B', 6), # hardware address length
- ('pln', 'B', 4), # protocol address length
- ('op', 'H', ARP_OP_REQUEST),
- ('sha', '6s', ''),
- ('spa', '4s', ''),
- ('tha', '6s', ''),
- ('tpa', '4s', '')
- )
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/asn1.py b/scripts/external_libs/dpkt-1.8.6/dpkt/asn1.py
deleted file mode 100644
index 9a088107..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/asn1.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# $Id: asn1.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Abstract Syntax Notation #1."""
-
-import struct, time
-import dpkt
-
-# Type class
-CLASSMASK = 0xc0
-UNIVERSAL = 0x00
-APPLICATION = 0x40
-CONTEXT = 0x80
-PRIVATE = 0xc0
-
-# Constructed (vs. primitive)
-CONSTRUCTED = 0x20
-
-# Universal-class tags
-TAGMASK = 0x1f
-INTEGER = 2
-BIT_STRING = 3 # arbitrary bit string
-OCTET_STRING = 4 # arbitrary octet string
-NULL = 5
-OID = 6 # object identifier
-SEQUENCE = 16 # ordered collection of types
-SET = 17 # unordered collection of types
-PRINT_STRING = 19 # printable string
-T61_STRING = 20 # T.61 (8-bit) character string
-IA5_STRING = 22 # ASCII
-UTC_TIME = 23
-
-def utctime(buf):
- """Convert ASN.1 UTCTime string to UTC float."""
- yy = int(buf[:2])
- mm = int(buf[2:4])
- dd = int(buf[4:6])
- hh = int(buf[6:8])
- mm = int(buf[8:10])
- try:
- ss = int(buf[10:12])
- buf = buf[12:]
- except TypeError:
- ss = 0
- buf = buf[10:]
- if buf[0] == '+':
- hh -= int(buf[1:3])
- mm -= int(buf[3:5])
- elif buf[0] == '-':
- hh += int(buf[1:3])
- mm += int(buf[3:5])
- return time.mktime((2000 + yy, mm, dd, hh, mm, ss, 0, 0, 0))
-
-def decode(buf):
- """Sleazy ASN.1 decoder.
- Return list of (id, value) tuples from ASN.1 BER/DER encoded buffer.
- """
- msg = []
- while buf:
- t = ord(buf[0])
- constructed = t & CONSTRUCTED
- tag = t & TAGMASK
- l = ord(buf[1])
- c = 0
- if constructed and l == 128:
- # XXX - constructed, indefinite length
- msg.append(t, decode(buf[2:]))
- elif l >= 128:
- c = l & 127
- if c == 1:
- l = ord(buf[2])
- elif c == 2:
- l = struct.unpack('>H', buf[2:4])[0]
- elif c == 3:
- l = struct.unpack('>I', buf[1:5])[0] & 0xfff
- c = 2
- elif c == 4:
- l = struct.unpack('>I', buf[2:6])[0]
- else:
- # XXX - can be up to 127 bytes, but...
- raise dpkt.UnpackError('excessive long-form ASN.1 length %d' % l)
-
- # Skip type, length
- buf = buf[2+c:]
-
- # Parse content
- if constructed:
- msg.append((t, decode(buf)))
- elif tag == INTEGER:
- if l == 0:
- n = 0
- elif l == 1:
- n = ord(buf[0])
- elif l == 2:
- n = struct.unpack('>H', buf[:2])[0]
- elif l == 3:
- n = struct.unpack('>I', buf[:4])[0] >> 8
- elif l == 4:
- n = struct.unpack('>I', buf[:4])[0]
- else:
- raise dpkt.UnpackError('excessive integer length > %d bytes' % l)
- msg.append((t, n))
- elif tag == UTC_TIME:
- msg.append((t, utctime(buf[:l])))
- else:
- msg.append((t, buf[:l]))
-
- # Skip content
- buf = buf[l:]
- return msg
-
-if __name__ == '__main__':
- import unittest
-
- class ASN1TestCase(unittest.TestCase):
- def test_asn1(self):
- s = '0\x82\x02Q\x02\x01\x0bc\x82\x02J\x04xcn=Douglas J Song 1, ou=Information Technology Division, ou=Faculty and Staff, ou=People, o=University of Michigan, c=US\n\x01\x00\n\x01\x03\x02\x01\x00\x02\x01\x00\x01\x01\x00\x87\x0bobjectclass0\x82\x01\xb0\x04\rmemberOfGroup\x04\x03acl\x04\x02cn\x04\x05title\x04\rpostalAddress\x04\x0ftelephoneNumber\x04\x04mail\x04\x06member\x04\thomePhone\x04\x11homePostalAddress\x04\x0bobjectClass\x04\x0bdescription\x04\x18facsimileTelephoneNumber\x04\x05pager\x04\x03uid\x04\x0cuserPassword\x04\x08joinable\x04\x10associatedDomain\x04\x05owner\x04\x0erfc822ErrorsTo\x04\x08ErrorsTo\x04\x10rfc822RequestsTo\x04\nRequestsTo\x04\tmoderator\x04\nlabeledURL\x04\nonVacation\x04\x0fvacationMessage\x04\x05drink\x04\x0elastModifiedBy\x04\x10lastModifiedTime\x04\rmodifiersname\x04\x0fmodifytimestamp\x04\x0ccreatorsname\x04\x0fcreatetimestamp'
- self.failUnless(decode(s) == [(48, [(2, 11), (99, [(4, 'cn=Douglas J Song 1, ou=Information Technology Division, ou=Faculty and Staff, ou=People, o=University of Michigan, c=US'), (10, '\x00'), (10, '\x03'), (2, 0), (2, 0), (1, '\x00'), (135, 'objectclass'), (48, [(4, 'memberOfGroup'), (4, 'acl'), (4, 'cn'), (4, 'title'), (4, 'postalAddress'), (4, 'telephoneNumber'), (4, 'mail'), (4, 'member'), (4, 'homePhone'), (4, 'homePostalAddress'), (4, 'objectClass'), (4, 'description'), (4, 'facsimileTelephoneNumber'), (4, 'pager'), (4, 'uid'), (4, 'userPassword'), (4, 'joinable'), (4, 'associatedDomain'), (4, 'owner'), (4, 'rfc822ErrorsTo'), (4, 'ErrorsTo'), (4, 'rfc822RequestsTo'), (4, 'RequestsTo'), (4, 'moderator'), (4, 'labeledURL'), (4, 'onVacation'), (4, 'vacationMessage'), (4, 'drink'), (4, 'lastModifiedBy'), (4, 'lastModifiedTime'), (4, 'modifiersname'), (4, 'modifytimestamp'), (4, 'creatorsname'), (4, 'createtimestamp')])])])])
-
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/bgp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/bgp.py
deleted file mode 100644
index b9fb26a0..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/bgp.py
+++ /dev/null
@@ -1,760 +0,0 @@
-# $Id: bgp.py 76 2011-01-06 15:51:30Z dugsong $
-
-"""Border Gateway Protocol."""
-
-import dpkt
-import struct, socket
-
-# Border Gateway Protocol 4 - RFC 4271
-# Communities Attribute - RFC 1997
-# Capabilities - RFC 3392
-# Route Refresh - RFC 2918
-# Route Reflection - RFC 4456
-# Confederations - RFC 3065
-# Cease Subcodes - RFC 4486
-# NOPEER Community - RFC 3765
-# Multiprotocol Extensions - 2858
-
-# Message Types
-OPEN = 1
-UPDATE = 2
-NOTIFICATION = 3
-KEEPALIVE = 4
-ROUTE_REFRESH = 5
-
-# Attribute Types
-ORIGIN = 1
-AS_PATH = 2
-NEXT_HOP = 3
-MULTI_EXIT_DISC = 4
-LOCAL_PREF = 5
-ATOMIC_AGGREGATE = 6
-AGGREGATOR = 7
-COMMUNITIES = 8
-ORIGINATOR_ID = 9
-CLUSTER_LIST = 10
-MP_REACH_NLRI = 14
-MP_UNREACH_NLRI = 15
-
-# Origin Types
-ORIGIN_IGP = 0
-ORIGIN_EGP = 1
-INCOMPLETE = 2
-
-# AS Path Types
-AS_SET = 1
-AS_SEQUENCE = 2
-AS_CONFED_SEQUENCE = 3
-AS_CONFED_SET = 4
-
-# Reserved Communities Types
-NO_EXPORT = 0xffffff01L
-NO_ADVERTISE = 0xffffff02L
-NO_EXPORT_SUBCONFED = 0xffffff03L
-NO_PEER = 0xffffff04L
-
-# Common AFI types
-AFI_IPV4 = 1
-AFI_IPV6 = 2
-
-# Multiprotocol SAFI types
-SAFI_UNICAST = 1
-SAFI_MULTICAST = 2
-SAFI_UNICAST_MULTICAST = 3
-
-# OPEN Message Optional Parameters
-AUTHENTICATION = 1
-CAPABILITY = 2
-
-# Capability Types
-CAP_MULTIPROTOCOL = 1
-CAP_ROUTE_REFRESH = 2
-
-# NOTIFICATION Error Codes
-MESSAGE_HEADER_ERROR = 1
-OPEN_MESSAGE_ERROR = 2
-UPDATE_MESSAGE_ERROR = 3
-HOLD_TIMER_EXPIRED = 4
-FSM_ERROR = 5
-CEASE = 6
-
-# Message Header Error Subcodes
-CONNECTION_NOT_SYNCHRONIZED = 1
-BAD_MESSAGE_LENGTH = 2
-BAD_MESSAGE_TYPE = 3
-
-# OPEN Message Error Subcodes
-UNSUPPORTED_VERSION_NUMBER = 1
-BAD_PEER_AS = 2
-BAD_BGP_IDENTIFIER = 3
-UNSUPPORTED_OPTIONAL_PARAMETER = 4
-AUTHENTICATION_FAILURE = 5
-UNACCEPTABLE_HOLD_TIME = 6
-UNSUPPORTED_CAPABILITY = 7
-
-# UPDATE Message Error Subcodes
-MALFORMED_ATTRIBUTE_LIST = 1
-UNRECOGNIZED_ATTRIBUTE = 2
-MISSING_ATTRIBUTE = 3
-ATTRIBUTE_FLAGS_ERROR = 4
-ATTRIBUTE_LENGTH_ERROR = 5
-INVALID_ORIGIN_ATTRIBUTE = 6
-AS_ROUTING_LOOP = 7
-INVALID_NEXT_HOP_ATTRIBUTE = 8
-OPTIONAL_ATTRIBUTE_ERROR = 9
-INVALID_NETWORK_FIELD = 10
-MALFORMED_AS_PATH = 11
-
-# Cease Error Subcodes
-MAX_NUMBER_OF_PREFIXES_REACHED = 1
-ADMINISTRATIVE_SHUTDOWN = 2
-PEER_DECONFIGURED = 3
-ADMINISTRATIVE_RESET = 4
-CONNECTION_REJECTED = 5
-OTHER_CONFIGURATION_CHANGE = 6
-CONNECTION_COLLISION_RESOLUTION = 7
-OUT_OF_RESOURCES = 8
-
-
-class BGP(dpkt.Packet):
- __hdr__ = (
- ('marker', '16s', '\xff' * 16),
- ('len', 'H', 0),
- ('type', 'B', OPEN)
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.data = self.data[:self.len - self.__hdr_len__]
- if self.type == OPEN:
- self.data = self.open = self.Open(self.data)
- elif self.type == UPDATE:
- self.data = self.update = self.Update(self.data)
- elif self.type == NOTIFICATION:
- self.data = self.notifiation = self.Notification(self.data)
- elif self.type == KEEPALIVE:
- self.data = self.keepalive = self.Keepalive(self.data)
- elif self.type == ROUTE_REFRESH:
- self.data = self.route_refresh = self.RouteRefresh(self.data)
-
- class Open(dpkt.Packet):
- __hdr__ = (
- ('v', 'B', 4),
- ('asn', 'H', 0),
- ('holdtime', 'H', 0),
- ('identifier', 'I', 0),
- ('param_len', 'B', 0)
- )
- __hdr_defaults__ = {
- 'parameters': []
- }
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- l = []
- plen = self.param_len
- while plen > 0:
- param = self.Parameter(self.data)
- self.data = self.data[len(param):]
- plen -= len(param)
- l.append(param)
- self.data = self.parameters = l
-
- def __len__(self):
- return self.__hdr_len__ + \
- sum(map(len, self.parameters))
-
- def __str__(self):
- params = ''.join(map(str, self.parameters))
- self.param_len = len(params)
- return self.pack_hdr() + params
-
- class Parameter(dpkt.Packet):
- __hdr__ = (
- ('type', 'B', 0),
- ('len', 'B', 0)
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.data = self.data[:self.len]
-
- if self.type == AUTHENTICATION:
- self.data = self.authentication = self.Authentication(self.data)
- elif self.type == CAPABILITY:
- self.data = self.capability = self.Capability(self.data)
-
- class Authentication(dpkt.Packet):
- __hdr__ = (
- ('code', 'B', 0),
- )
-
- class Capability(dpkt.Packet):
- __hdr__ = (
- ('code', 'B', 0),
- ('len', 'B', 0)
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.data = self.data[:self.len]
-
-
- class Update(dpkt.Packet):
- __hdr_defaults__ = {
- 'withdrawn': [],
- 'attributes': [],
- 'announced': []
- }
-
- def unpack(self, buf):
- self.data = buf
-
- # Withdrawn Routes
- wlen = struct.unpack('>H', self.data[:2])[0]
- self.data = self.data[2:]
- l = []
- while wlen > 0:
- route = RouteIPV4(self.data)
- self.data = self.data[len(route):]
- wlen -= len(route)
- l.append(route)
- self.withdrawn = l
-
- # Path Attributes
- plen = struct.unpack('>H', self.data[:2])[0]
- self.data = self.data[2:]
- l = []
- while plen > 0:
- attr = self.Attribute(self.data)
- self.data = self.data[len(attr):]
- plen -= len(attr)
- l.append(attr)
- self.attributes = l
-
- # Announced Routes
- l = []
- while self.data:
- route = RouteIPV4(self.data)
- self.data = self.data[len(route):]
- l.append(route)
- self.announced = l
-
- def __len__(self):
- return 2 + sum(map(len, self.withdrawn)) + \
- 2 + sum(map(len, self.attributes)) + \
- sum(map(len, self.announced))
-
- def __str__(self):
- return struct.pack('>H', sum(map(len, self.withdrawn))) + \
- ''.join(map(str, self.withdrawn)) + \
- struct.pack('>H', sum(map(len, self.attributes))) + \
- ''.join(map(str, self.attributes)) + \
- ''.join(map(str, self.announced))
-
- class Attribute(dpkt.Packet):
- __hdr__ = (
- ('flags', 'B', 0),
- ('type', 'B', 0)
- )
-
- def _get_o(self):
- return (self.flags >> 7) & 0x1
- def _set_o(self, o):
- self.flags = (self.flags & ~0x80) | ((o & 0x1) << 7)
- optional = property(_get_o, _set_o)
-
- def _get_t(self):
- return (self.flags >> 6) & 0x1
- def _set_t(self, t):
- self.flags = (self.flags & ~0x40) | ((t & 0x1) << 6)
- transitive = property(_get_t, _set_t)
-
- def _get_p(self):
- return (self.flags >> 5) & 0x1
- def _set_p(self, p):
- self.flags = (self.flags & ~0x20) | ((p & 0x1) << 5)
- partial = property(_get_p, _set_p)
-
- def _get_e(self):
- return (self.flags >> 4) & 0x1
- def _set_e(self, e):
- self.flags = (self.flags & ~0x10) | ((e & 0x1) << 4)
- extended_length = property(_get_e, _set_e)
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
-
- if self.extended_length:
- self.len = struct.unpack('>H', self.data[:2])[0]
- self.data = self.data[2:]
- else:
- self.len = struct.unpack('B', self.data[:1])[0]
- self.data = self.data[1:]
-
- self.data = self.data[:self.len]
-
- if self.type == ORIGIN:
- self.data = self.origin = self.Origin(self.data)
- elif self.type == AS_PATH:
- self.data = self.as_path = self.ASPath(self.data)
- elif self.type == NEXT_HOP:
- self.data = self.next_hop = self.NextHop(self.data)
- elif self.type == MULTI_EXIT_DISC:
- self.data = self.multi_exit_disc = self.MultiExitDisc(self.data)
- elif self.type == LOCAL_PREF:
- self.data = self.local_pref = self.LocalPref(self.data)
- elif self.type == ATOMIC_AGGREGATE:
- self.data = self.atomic_aggregate = self.AtomicAggregate(self.data)
- elif self.type == AGGREGATOR:
- self.data = self.aggregator = self.Aggregator(self.data)
- elif self.type == COMMUNITIES:
- self.data = self.communities = self.Communities(self.data)
- elif self.type == ORIGINATOR_ID:
- self.data = self.originator_id = self.OriginatorID(self.data)
- elif self.type == CLUSTER_LIST:
- self.data = self.cluster_list = self.ClusterList(self.data)
- elif self.type == MP_REACH_NLRI:
- self.data = self.mp_reach_nlri = self.MPReachNLRI(self.data)
- elif self.type == MP_UNREACH_NLRI:
- self.data = self.mp_unreach_nlri = self.MPUnreachNLRI(self.data)
-
- def __len__(self):
- if self.extended_length:
- attr_len = 2
- else:
- attr_len = 1
- return self.__hdr_len__ + \
- attr_len + \
- len(self.data)
-
- def __str__(self):
- if self.extended_length:
- attr_len_str = struct.pack('>H', self.len)
- else:
- attr_len_str = struct.pack('B', self.len)
- return self.pack_hdr() + \
- attr_len_str + \
- str(self.data)
-
- class Origin(dpkt.Packet):
- __hdr__ = (
- ('type', 'B', ORIGIN_IGP),
- )
-
- class ASPath(dpkt.Packet):
- __hdr_defaults__ = {
- 'segments': []
- }
-
- def unpack(self, buf):
- self.data = buf
- l = []
- while self.data:
- seg = self.ASPathSegment(self.data)
- self.data = self.data[len(seg):]
- l.append(seg)
- self.data = self.segments = l
-
- def __len__(self):
- return sum(map(len, self.data))
-
- def __str__(self):
- return ''.join(map(str, self.data))
-
- class ASPathSegment(dpkt.Packet):
- __hdr__ = (
- ('type', 'B', 0),
- ('len', 'B', 0)
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- l = []
- for i in range(self.len):
- AS = struct.unpack('>H', self.data[:2])[0]
- self.data = self.data[2:]
- l.append(AS)
- self.data = self.path = l
-
- def __len__(self):
- return self.__hdr_len__ + \
- 2 * len(self.path)
-
- def __str__(self):
- as_str = ''
- for AS in self.path:
- as_str += struct.pack('>H', AS)
- return self.pack_hdr() + \
- as_str
-
- class NextHop(dpkt.Packet):
- __hdr__ = (
- ('ip', 'I', 0),
- )
-
- class MultiExitDisc(dpkt.Packet):
- __hdr__ = (
- ('value', 'I', 0),
- )
-
- class LocalPref(dpkt.Packet):
- __hdr__ = (
- ('value', 'I', 0),
- )
-
- class AtomicAggregate(dpkt.Packet):
- def unpack(self, buf):
- pass
-
- def __len__(self):
- return 0
-
- def __str__(self):
- return ''
-
- class Aggregator(dpkt.Packet):
- __hdr__ = (
- ('asn', 'H', 0),
- ('ip', 'I', 0)
- )
-
- class Communities(dpkt.Packet):
- __hdr_defaults__ = {
- 'list': []
- }
-
- def unpack(self, buf):
- self.data = buf
- l = []
- while self.data:
- val = struct.unpack('>I', self.data[:4])[0]
- if (val >= 0x00000000L and val <= 0x0000ffffL) or \
- (val >= 0xffff0000L and val <= 0xffffffffL):
- comm = self.ReservedCommunity(self.data[:4])
- else:
- comm = self.Community(self.data[:4])
- self.data = self.data[len(comm):]
- l.append(comm)
- self.data = self.list = l
-
- def __len__(self):
- return sum(map(len, self.data))
-
- def __str__(self):
- return ''.join(map(str, self.data))
-
- class Community(dpkt.Packet):
- __hdr__ = (
- ('asn', 'H', 0),
- ('value', 'H', 0)
- )
-
- class ReservedCommunity(dpkt.Packet):
- __hdr__ = (
- ('value', 'I', 0),
- )
-
- class OriginatorID(dpkt.Packet):
- __hdr__ = (
- ('value', 'I', 0),
- )
-
- class ClusterList(dpkt.Packet):
- __hdr_defaults__ = {
- 'list': []
- }
-
- def unpack(self, buf):
- self.data = buf
- l = []
- while self.data:
- id = struct.unpack('>I', self.data[:4])[0]
- self.data = self.data[4:]
- l.append(id)
- self.data = self.list = l
-
- def __len__(self):
- return 4 * len(self.list)
-
- def __str__(self):
- cluster_str = ''
- for val in self.list:
- cluster_str += struct.pack('>I', val)
- return cluster_str
-
- class MPReachNLRI(dpkt.Packet):
- __hdr__ = (
- ('afi', 'H', AFI_IPV4),
- ('safi', 'B', SAFI_UNICAST),
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
-
- # Next Hop
- nlen = struct.unpack('B', self.data[:1])[0]
- self.data = self.data[1:]
- self.next_hop = self.data[:nlen]
- self.data = self.data[nlen:]
-
- # SNPAs
- l = []
- num_snpas = struct.unpack('B', self.data[:1])[0]
- self.data = self.data[1:]
- for i in range(num_snpas):
- snpa = self.SNPA(self.data)
- self.data = self.data[len(snpa):]
- l.append(snpa)
- self.snpas = l
-
- if self.afi == AFI_IPV4:
- Route = RouteIPV4
- elif self.afi == AFI_IPV6:
- Route = RouteIPV6
- else:
- Route = RouteGeneric
-
- # Announced Routes
- l = []
- while self.data:
- route = Route(self.data)
- self.data = self.data[len(route):]
- l.append(route)
- self.data = self.announced = l
-
- def __len__(self):
- return self.__hdr_len__ + \
- 1 + len(self.next_hop) + \
- 1 + sum(map(len, self.snpas)) + \
- sum(map(len, self.announced))
-
- def __str__(self):
- return self.pack_hdr() + \
- struct.pack('B', len(self.next_hop)) + \
- str(self.next_hop) + \
- struct.pack('B', len(self.snpas)) + \
- ''.join(map(str, self.snpas)) + \
- ''.join(map(str, self.announced))
-
- class SNPA:
- __hdr__ = (
- ('len', 'B', 0),
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.data = self.data[:(self.len + 1) / 2]
-
- class MPUnreachNLRI(dpkt.Packet):
- __hdr__ = (
- ('afi', 'H', AFI_IPV4),
- ('safi', 'B', SAFI_UNICAST),
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
-
- if self.afi == AFI_IPV4:
- Route = RouteIPV4
- elif self.afi == AFI_IPV6:
- Route = RouteIPV6
- else:
- Route = RouteGeneric
-
- # Withdrawn Routes
- l = []
- while self.data:
- route = Route(self.data)
- self.data = self.data[len(route):]
- l.append(route)
- self.data = self.withdrawn = l
-
- def __len__(self):
- return self.__hdr_len__ + \
- sum(map(len, self.data))
-
- def __str__(self):
- return self.pack_hdr() + \
- ''.join(map(str, self.data))
-
-
- class Notification(dpkt.Packet):
- __hdr__ = (
- ('code', 'B', 0),
- ('subcode', 'B', 0),
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.error = self.data
-
-
- class Keepalive(dpkt.Packet):
- def unpack(self, buf):
- pass
-
- def __len__(self):
- return 0
-
- def __str__(self):
- return ''
-
-
- class RouteRefresh(dpkt.Packet):
- __hdr__ = (
- ('afi', 'H', AFI_IPV4),
- ('rsvd', 'B', 0),
- ('safi', 'B', SAFI_UNICAST)
- )
-
-
-class RouteGeneric(dpkt.Packet):
- __hdr__ = (
- ('len', 'B', 0),
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.data = self.prefix = self.data[:(self.len + 7) / 8]
-
-class RouteIPV4(dpkt.Packet):
- __hdr__ = (
- ('len', 'B', 0),
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- tmp = self.data[:(self.len + 7) / 8]
- tmp += (4 - len(tmp)) * '\x00'
- self.data = self.prefix = tmp
-
- def __repr__(self):
- cidr = '%s/%d' % (socket.inet_ntoa(self.prefix), self.len)
- return '%s(%s)' % (self.__class__.__name__, cidr)
-
- def __len__(self):
- return self.__hdr_len__ + \
- (self.len + 7) / 8
-
- def __str__(self):
- return self.pack_hdr() + \
- self.prefix[:(self.len + 7) / 8]
-
-class RouteIPV6(dpkt.Packet):
- __hdr__ = (
- ('len', 'B', 0),
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- tmp = self.data[:(self.len + 7) / 8]
- tmp += (16 - len(tmp)) * '\x00'
- self.data = self.prefix = tmp
-
- def __len__(self):
- return self.__hdr_len__ + \
- (self.len + 7) / 8
-
- def __str__(self):
- return self.pack_hdr() + \
- self.prefix[:(self.len + 7) / 8]
-
-
-if __name__ == '__main__':
- import unittest
-
- class BGPTestCase(unittest.TestCase):
- def testPack(self):
- b1 = BGP(self.bgp1)
- self.failUnless(self.bgp1 == str(b1))
- b2 = BGP(self.bgp2)
- self.failUnless(self.bgp2 == str(b2))
- b3 = BGP(self.bgp3)
- self.failUnless(self.bgp3 == str(b3))
- b4 = BGP(self.bgp4)
- self.failUnless(self.bgp4 == str(b4))
-
- def testUnpack(self):
- b1 = BGP(self.bgp1)
- self.failUnless(b1.len == 19)
- self.failUnless(b1.type == KEEPALIVE)
- self.failUnless(b1.keepalive is not None)
-
- b2 = BGP(self.bgp2)
- self.failUnless(b2.type == UPDATE)
- self.failUnless(len(b2.update.withdrawn) == 0)
- self.failUnless(len(b2.update.announced) == 1)
- self.failUnless(len(b2.update.attributes) == 9)
- a = b2.update.attributes[1]
- self.failUnless(a.type == AS_PATH)
- self.failUnless(a.len == 10)
- self.failUnless(len(a.as_path.segments) == 2)
- s = a.as_path.segments[0]
- self.failUnless(s.type == AS_SET)
- self.failUnless(s.len == 2)
- self.failUnless(len(s.path) == 2)
- self.failUnless(s.path[0] == 500)
-
- a = b2.update.attributes[6]
- self.failUnless(a.type == COMMUNITIES)
- self.failUnless(a.len == 12)
- self.failUnless(len(a.communities.list) == 3)
- c = a.communities.list[0]
- self.failUnless(c.asn == 65215)
- self.failUnless(c.value == 1)
- r = b2.update.announced[0]
- self.failUnless(r.len == 22)
- self.failUnless(r.prefix == '\xc0\xa8\x04\x00')
-
- b3 = BGP(self.bgp3)
- self.failUnless(b3.type == UPDATE)
- self.failUnless(len(b3.update.withdrawn) == 0)
- self.failUnless(len(b3.update.announced) == 0)
- self.failUnless(len(b3.update.attributes) == 6)
- a = b3.update.attributes[0]
- self.failUnless(a.optional == False)
- self.failUnless(a.transitive == True)
- self.failUnless(a.partial == False)
- self.failUnless(a.extended_length == False)
- self.failUnless(a.type == ORIGIN)
- self.failUnless(a.len == 1)
- o = a.origin
- self.failUnless(o.type == ORIGIN_IGP)
- a = b3.update.attributes[5]
- self.failUnless(a.optional == True)
- self.failUnless(a.transitive == False)
- self.failUnless(a.partial == False)
- self.failUnless(a.extended_length == True)
- self.failUnless(a.type == MP_REACH_NLRI)
- self.failUnless(a.len == 30)
- m = a.mp_reach_nlri
- self.failUnless(m.afi == AFI_IPV4)
- self.failUnless(len(m.snpas) == 0)
- self.failUnless(len(m.announced) == 1)
- p = m.announced[0]
- self.failUnless(p.len == 96)
-
- b4 = BGP(self.bgp4)
- self.failUnless(b4.len == 45)
- self.failUnless(b4.type == OPEN)
- self.failUnless(b4.open.asn == 237)
- self.failUnless(b4.open.param_len == 16)
- self.failUnless(len(b4.open.parameters) == 3)
- p = b4.open.parameters[0]
- self.failUnless(p.type == CAPABILITY)
- self.failUnless(p.len == 6)
- c = p.capability
- self.failUnless(c.code == CAP_MULTIPROTOCOL)
- self.failUnless(c.len == 4)
- self.failUnless(c.data == '\x00\x01\x00\x01')
- c = b4.open.parameters[2].capability
- self.failUnless(c.code == CAP_ROUTE_REFRESH)
- self.failUnless(c.len == 0)
-
- bgp1 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x13\x04'
- bgp2 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x63\x02\x00\x00\x00\x48\x40\x01\x01\x00\x40\x02\x0a\x01\x02\x01\xf4\x01\xf4\x02\x01\xfe\xbb\x40\x03\x04\xc0\xa8\x00\x0f\x40\x05\x04\x00\x00\x00\x64\x40\x06\x00\xc0\x07\x06\xfe\xba\xc0\xa8\x00\x0a\xc0\x08\x0c\xfe\xbf\x00\x01\x03\x16\x00\x04\x01\x54\x00\xfa\x80\x09\x04\xc0\xa8\x00\x0f\x80\x0a\x04\xc0\xa8\x00\xfa\x16\xc0\xa8\x04'
- bgp3 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x79\x02\x00\x00\x00\x62\x40\x01\x01\x00\x40\x02\x00\x40\x05\x04\x00\x00\x00\x64\xc0\x10\x08\x00\x02\x01\x2c\x00\x00\x01\x2c\xc0\x80\x24\x00\x00\xfd\xe9\x40\x01\x01\x00\x40\x02\x04\x02\x01\x15\xb3\x40\x05\x04\x00\x00\x00\x2c\x80\x09\x04\x16\x05\x05\x05\x80\x0a\x04\x16\x05\x05\x05\x90\x0e\x00\x1e\x00\x01\x80\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x04\x04\x04\x00\x60\x18\x77\x01\x00\x00\x01\xf4\x00\x00\x01\xf4\x85'
- bgp4 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x2d\x01\x04\x00\xed\x00\x5a\xc6\x6e\x83\x7d\x10\x02\x06\x01\x04\x00\x01\x00\x01\x02\x02\x80\x00\x02\x02\x02\x00'
-
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/cdp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/cdp.py
deleted file mode 100644
index 71c2c6ba..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/cdp.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# $Id: cdp.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Cisco Discovery Protocol."""
-
-import struct
-import dpkt
-
-CDP_DEVID = 1 # string
-CDP_ADDRESS = 2
-CDP_PORTID = 3 # string
-CDP_CAPABILITIES = 4 # 32-bit bitmask
-CDP_VERSION = 5 # string
-CDP_PLATFORM = 6 # string
-CDP_IPPREFIX = 7
-
-CDP_VTP_MGMT_DOMAIN = 9 # string
-CDP_NATIVE_VLAN = 10 # 16-bit integer
-CDP_DUPLEX = 11 # 8-bit boolean
-CDP_TRUST_BITMAP = 18 # 8-bit bitmask0x13
-CDP_UNTRUST_COS = 19 # 8-bit port
-CDP_SYSTEM_NAME = 20 # string
-CDP_SYSTEM_OID = 21 # 10-byte binary string
-CDP_MGMT_ADDRESS = 22 # 32-bit number of addrs, Addresses
-CDP_LOCATION = 23 # string
-
-class CDP(dpkt.Packet):
- __hdr__ = (
- ('version', 'B', 2),
- ('ttl', 'B', 180),
- ('sum', 'H', 0)
- )
- class Address(dpkt.Packet):
- # XXX - only handle NLPID/IP for now
- __hdr__ = (
- ('ptype', 'B', 1), # protocol type (NLPID)
- ('plen', 'B', 1), # protocol length
- ('p', 'B', 0xcc), # IP
- ('alen', 'H', 4) # address length
- )
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.data = self.data[:self.alen]
-
- class TLV(dpkt.Packet):
- __hdr__ = (
- ('type', 'H', 0),
- ('len', 'H', 4)
- )
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.data = self.data[:self.len - 4]
- if self.type == CDP_ADDRESS:
- n = struct.unpack('>I', self.data[:4])[0]
- buf = self.data[4:]
- l = []
- for i in range(n):
- a = CDP.Address(buf)
- l.append(a)
- buf = buf[len(a):]
- self.data = l
-
- def __len__(self):
- if self.type == CDP_ADDRESS:
- n = 4 + sum(map(len, self.data))
- else:
- n = len(self.data)
- return self.__hdr_len__ + n
-
- def __str__(self):
- self.len = len(self)
- if self.type == CDP_ADDRESS:
- s = struct.pack('>I', len(self.data)) + \
- ''.join(map(str, self.data))
- else:
- s = self.data
- return self.pack_hdr() + s
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- buf = self.data
- l = []
- while buf:
- tlv = self.TLV(buf)
- l.append(tlv)
- buf = buf[len(tlv):]
- self.data = l
-
- def __len__(self):
- return self.__hdr_len__ + sum(map(len, self.data))
-
- def __str__(self):
- data = ''.join(map(str, self.data))
- if not self.sum:
- self.sum = dpkt.in_cksum(self.pack_hdr() + data)
- return self.pack_hdr() + data
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/crc32c.py b/scripts/external_libs/dpkt-1.8.6/dpkt/crc32c.py
deleted file mode 100644
index 45593882..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/crc32c.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# $Id: crc32c.py 23 2006-11-08 15:45:33Z dugsong $
-
-import array
-
-# CRC-32C Checksum
-# http://tools.ietf.org/html/rfc3309
-
-crc32c_table = (
- 0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L, 0xC79A971FL,
- 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL, 0x8AD958CFL, 0x78B2DBCCL,
- 0x6BE22838L, 0x9989AB3BL, 0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L,
- 0x5E133C24L, 0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL,
- 0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L, 0x9A879FA0L,
- 0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L, 0x5D1D08BFL, 0xAF768BBCL,
- 0xBC267848L, 0x4E4DFB4BL, 0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L,
- 0x33ED7D2AL, 0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L,
- 0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L, 0x6DFE410EL,
- 0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL, 0x30E349B1L, 0xC288CAB2L,
- 0xD1D83946L, 0x23B3BA45L, 0xF779DEAEL, 0x05125DADL, 0x1642AE59L,
- 0xE4292D5AL, 0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL,
- 0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L, 0x417B1DBCL,
- 0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L, 0x86E18AA3L, 0x748A09A0L,
- 0x67DAFA54L, 0x95B17957L, 0xCBA24573L, 0x39C9C670L, 0x2A993584L,
- 0xD8F2B687L, 0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L,
- 0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L, 0x96BF4DCCL,
- 0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L, 0xDBFC821CL, 0x2997011FL,
- 0x3AC7F2EBL, 0xC8AC71E8L, 0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L,
- 0x0F36E6F7L, 0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L,
- 0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L, 0xEB1FCBADL,
- 0x197448AEL, 0x0A24BB5AL, 0xF84F3859L, 0x2C855CB2L, 0xDEEEDFB1L,
- 0xCDBE2C45L, 0x3FD5AF46L, 0x7198540DL, 0x83F3D70EL, 0x90A324FAL,
- 0x62C8A7F9L, 0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L,
- 0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L, 0x3CDB9BDDL,
- 0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L, 0x82F63B78L, 0x709DB87BL,
- 0x63CD4B8FL, 0x91A6C88CL, 0x456CAC67L, 0xB7072F64L, 0xA457DC90L,
- 0x563C5F93L, 0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L,
- 0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL, 0x92A8FC17L,
- 0x60C37F14L, 0x73938CE0L, 0x81F80FE3L, 0x55326B08L, 0xA759E80BL,
- 0xB4091BFFL, 0x466298FCL, 0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL,
- 0x0B21572CL, 0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L,
- 0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L, 0x65D122B9L,
- 0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL, 0x2892ED69L, 0xDAF96E6AL,
- 0xC9A99D9EL, 0x3BC21E9DL, 0xEF087A76L, 0x1D63F975L, 0x0E330A81L,
- 0xFC588982L, 0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL,
- 0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L, 0x38CC2A06L,
- 0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L, 0xFF56BD19L, 0x0D3D3E1AL,
- 0x1E6DCDEEL, 0xEC064EEDL, 0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L,
- 0xD0DDD530L, 0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL,
- 0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL, 0x8ECEE914L,
- 0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L, 0xD3D3E1ABL, 0x21B862A8L,
- 0x32E8915CL, 0xC083125FL, 0x144976B4L, 0xE622F5B7L, 0xF5720643L,
- 0x07198540L, 0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L,
- 0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL, 0xE330A81AL,
- 0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL, 0x24AA3F05L, 0xD6C1BC06L,
- 0xC5914FF2L, 0x37FACCF1L, 0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L,
- 0x7AB90321L, 0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL,
- 0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L, 0x34F4F86AL,
- 0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL, 0x79B737BAL, 0x8BDCB4B9L,
- 0x988C474DL, 0x6AE7C44EL, 0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L,
- 0xAD7D5351L
- )
-
-def add(crc, buf):
- buf = array.array('B', buf)
- for b in buf:
- crc = (crc >> 8) ^ crc32c_table[(crc ^ b) & 0xff]
- return crc
-
-def done(crc):
- tmp = ~crc & 0xffffffffL
- b0 = tmp & 0xff
- b1 = (tmp >> 8) & 0xff
- b2 = (tmp >> 16) & 0xff
- b3 = (tmp >> 24) & 0xff
- crc = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3
- return crc
-
-def cksum(buf):
- """Return computed CRC-32c checksum."""
- return done(add(0xffffffffL, buf))
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/dhcp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/dhcp.py
deleted file mode 100644
index 9916884a..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/dhcp.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# $Id: dhcp.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Dynamic Host Configuration Protocol."""
-
-import arp, dpkt
-
-DHCP_OP_REQUEST = 1
-DHCP_OP_REPLY = 2
-
-DHCP_MAGIC = 0x63825363
-
-# DHCP option codes
-DHCP_OPT_NETMASK = 1 # I: subnet mask
-DHCP_OPT_TIMEOFFSET = 2
-DHCP_OPT_ROUTER = 3 # s: list of router ips
-DHCP_OPT_TIMESERVER = 4
-DHCP_OPT_NAMESERVER = 5
-DHCP_OPT_DNS_SVRS = 6 # s: list of DNS servers
-DHCP_OPT_LOGSERV = 7
-DHCP_OPT_COOKIESERV = 8
-DHCP_OPT_LPRSERV = 9
-DHCP_OPT_IMPSERV = 10
-DHCP_OPT_RESSERV = 11
-DHCP_OPT_HOSTNAME = 12 # s: client hostname
-DHCP_OPT_BOOTFILESIZE = 13
-DHCP_OPT_DUMPFILE = 14
-DHCP_OPT_DOMAIN = 15 # s: domain name
-DHCP_OPT_SWAPSERV = 16
-DHCP_OPT_ROOTPATH = 17
-DHCP_OPT_EXTENPATH = 18
-DHCP_OPT_IPFORWARD = 19
-DHCP_OPT_SRCROUTE = 20
-DHCP_OPT_POLICYFILTER = 21
-DHCP_OPT_MAXASMSIZE = 22
-DHCP_OPT_IPTTL = 23
-DHCP_OPT_MTUTIMEOUT = 24
-DHCP_OPT_MTUTABLE = 25
-DHCP_OPT_MTUSIZE = 26
-DHCP_OPT_LOCALSUBNETS = 27
-DHCP_OPT_BROADCASTADDR = 28
-DHCP_OPT_DOMASKDISCOV = 29
-DHCP_OPT_MASKSUPPLY = 30
-DHCP_OPT_DOROUTEDISC = 31
-DHCP_OPT_ROUTERSOLICIT = 32
-DHCP_OPT_STATICROUTE = 33
-DHCP_OPT_TRAILERENCAP = 34
-DHCP_OPT_ARPTIMEOUT = 35
-DHCP_OPT_ETHERENCAP = 36
-DHCP_OPT_TCPTTL = 37
-DHCP_OPT_TCPKEEPALIVE = 38
-DHCP_OPT_TCPALIVEGARBAGE = 39
-DHCP_OPT_NISDOMAIN = 40
-DHCP_OPT_NISSERVERS = 41
-DHCP_OPT_NISTIMESERV = 42
-DHCP_OPT_VENDSPECIFIC = 43
-DHCP_OPT_NBNS = 44
-DHCP_OPT_NBDD = 45
-DHCP_OPT_NBTCPIP = 46
-DHCP_OPT_NBTCPSCOPE = 47
-DHCP_OPT_XFONT = 48
-DHCP_OPT_XDISPLAYMGR = 49
-DHCP_OPT_REQ_IP = 50 # I: IP address
-DHCP_OPT_LEASE_SEC = 51 # I: lease seconds
-DHCP_OPT_OPTIONOVERLOAD = 52
-DHCP_OPT_MSGTYPE = 53 # B: message type
-DHCP_OPT_SERVER_ID = 54 # I: server IP address
-DHCP_OPT_PARAM_REQ = 55 # s: list of option codes
-DHCP_OPT_MESSAGE = 56
-DHCP_OPT_MAXMSGSIZE = 57
-DHCP_OPT_RENEWTIME = 58
-DHCP_OPT_REBINDTIME = 59
-DHCP_OPT_VENDOR_ID = 60 # s: vendor class id
-DHCP_OPT_CLIENT_ID = 61 # Bs: idtype, id (idtype 0: FQDN, idtype 1: MAC)
-DHCP_OPT_NISPLUSDOMAIN = 64
-DHCP_OPT_NISPLUSSERVERS = 65
-DHCP_OPT_MOBILEIPAGENT = 68
-DHCP_OPT_SMTPSERVER = 69
-DHCP_OPT_POP3SERVER = 70
-DHCP_OPT_NNTPSERVER = 71
-DHCP_OPT_WWWSERVER = 72
-DHCP_OPT_FINGERSERVER = 73
-DHCP_OPT_IRCSERVER = 74
-DHCP_OPT_STSERVER = 75
-DHCP_OPT_STDASERVER = 76
-
-# DHCP message type values
-DHCPDISCOVER = 1
-DHCPOFFER = 2
-DHCPREQUEST = 3
-DHCPDECLINE = 4
-DHCPACK = 5
-DHCPNAK = 6
-DHCPRELEASE = 7
-DHCPINFORM = 8
-
-class DHCP(dpkt.Packet):
- __hdr__ = (
- ('op', 'B', DHCP_OP_REQUEST),
- ('hrd', 'B', arp.ARP_HRD_ETH), # just like ARP.hrd
- ('hln', 'B', 6), # and ARP.hln
- ('hops', 'B', 0),
- ('xid', 'I', 0xdeadbeefL),
- ('secs', 'H', 0),
- ('flags', 'H', 0),
- ('ciaddr', 'I', 0),
- ('yiaddr', 'I', 0),
- ('siaddr', 'I', 0),
- ('giaddr', 'I', 0),
- ('chaddr', '16s', 16 * '\x00'),
- ('sname', '64s', 64 * '\x00'),
- ('file', '128s', 128 * '\x00'),
- ('magic', 'I', DHCP_MAGIC),
- )
- opts = (
- (DHCP_OPT_MSGTYPE, chr(DHCPDISCOVER)),
- (DHCP_OPT_PARAM_REQ, ''.join(map(chr, (DHCP_OPT_REQ_IP,
- DHCP_OPT_ROUTER,
- DHCP_OPT_NETMASK,
- DHCP_OPT_DNS_SVRS))))
- ) # list of (type, data) tuples
-
- def __len__(self):
- return self.__hdr_len__ + \
- sum([ 2 + len(o[1]) for o in self.opts ]) + 1 + len(self.data)
-
- def __str__(self):
- return self.pack_hdr() + self.pack_opts() + str(self.data)
-
- def pack_opts(self):
- """Return packed options string."""
- if not self.opts:
- return ''
- l = []
- for t, data in self.opts:
- l.append('%s%s%s' % (chr(t), chr(len(data)), data))
- l.append('\xff')
- return ''.join(l)
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.chaddr = self.chaddr[:self.hln]
- buf = self.data
- l = []
- while buf:
- t = ord(buf[0])
- if t == 0xff:
- buf = buf[1:]
- break
- elif t == 0:
- buf = buf[1:]
- else:
- n = ord(buf[1])
- l.append((t, buf[2:2+n]))
- buf = buf[2+n:]
- self.opts = l
- self.data = buf
-
-if __name__ == '__main__':
- import unittest
-
- class DHCPTestCast(unittest.TestCase):
- def test_DHCP(self):
- s = '\x01\x01\x06\x00\xadS\xc8c\xb8\x87\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02U\x82\xf3\xa6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00c\x82Sc5\x01\x01\xfb\x01\x01=\x07\x01\x00\x02U\x82\xf3\xa62\x04\n\x00\x01e\x0c\tGuinevere<\x08MSFT 5.07\n\x01\x0f\x03\x06,./\x1f!+\xff\x00\x00\x00\x00\x00'
- dhcp = DHCP(s)
- self.failUnless(s == str(dhcp))
-
- unittest.main()
-
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/diameter.py b/scripts/external_libs/dpkt-1.8.6/dpkt/diameter.py
deleted file mode 100644
index 505eccd0..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/diameter.py
+++ /dev/null
@@ -1,181 +0,0 @@
-# $Id: diameter.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Diameter."""
-
-import struct
-import dpkt
-
-# Diameter Base Protocol - RFC 3588
-# http://tools.ietf.org/html/rfc3588
-
-# Request/Answer Command Codes
-ABORT_SESSION = 274
-ACCOUTING = 271
-CAPABILITIES_EXCHANGE = 257
-DEVICE_WATCHDOG = 280
-DISCONNECT_PEER = 282
-RE_AUTH = 258
-SESSION_TERMINATION = 275
-
-class Diameter(dpkt.Packet):
- __hdr__ = (
- ('v', 'B', 1),
- ('len', '3s', 0),
- ('flags', 'B', 0),
- ('cmd', '3s', 0),
- ('app_id', 'I', 0),
- ('hop_id', 'I', 0),
- ('end_id', 'I', 0)
- )
-
- def _get_r(self):
- return (self.flags >> 7) & 0x1
- def _set_r(self, r):
- self.flags = (self.flags & ~0x80) | ((r & 0x1) << 7)
- request_flag = property(_get_r, _set_r)
-
- def _get_p(self):
- return (self.flags >> 6) & 0x1
- def _set_p(self, p):
- self.flags = (self.flags & ~0x40) | ((p & 0x1) << 6)
- proxiable_flag = property(_get_p, _set_p)
-
- def _get_e(self):
- return (self.flags >> 5) & 0x1
- def _set_e(self, e):
- self.flags = (self.flags & ~0x20) | ((e & 0x1) << 5)
- error_flag = property(_get_e, _set_e)
-
- def _get_t(self):
- return (self.flags >> 4) & 0x1
- def _set_t(self, t):
- self.flags = (self.flags & ~0x10) | ((t & 0x1) << 4)
- retransmit_flag = property(_get_t, _set_t)
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.cmd = (ord(self.cmd[0]) << 16) | \
- (ord(self.cmd[1]) << 8) | \
- ord(self.cmd[2])
- self.len = (ord(self.len[0]) << 16) | \
- (ord(self.len[1]) << 8) | \
- ord(self.len[2])
- self.data = self.data[:self.len - self.__hdr_len__]
-
- l = []
- while self.data:
- avp = AVP(self.data)
- l.append(avp)
- self.data = self.data[len(avp):]
- self.data = self.avps = l
-
- def pack_hdr(self):
- self.len = chr((self.len >> 16) & 0xff) + \
- chr((self.len >> 8) & 0xff) + \
- chr(self.len & 0xff)
- self.cmd = chr((self.cmd >> 16) & 0xff) + \
- chr((self.cmd >> 8) & 0xff) + \
- chr(self.cmd & 0xff)
- return dpkt.Packet.pack_hdr(self)
-
- def __len__(self):
- return self.__hdr_len__ + \
- sum(map(len, self.data))
-
- def __str__(self):
- return self.pack_hdr() + \
- ''.join(map(str, self.data))
-
-class AVP(dpkt.Packet):
- __hdr__ = (
- ('code', 'I', 0),
- ('flags', 'B', 0),
- ('len', '3s', 0),
- )
-
- def _get_v(self):
- return (self.flags >> 7) & 0x1
- def _set_v(self, v):
- self.flags = (self.flags & ~0x80) | ((v & 0x1) << 7)
- vendor_flag = property(_get_v, _set_v)
-
- def _get_m(self):
- return (self.flags >> 6) & 0x1
- def _set_m(self, m):
- self.flags = (self.flags & ~0x40) | ((m & 0x1) << 6)
- mandatory_flag = property(_get_m, _set_m)
-
- def _get_p(self):
- return (self.flags >> 5) & 0x1
- def _set_p(self, p):
- self.flags = (self.flags & ~0x20) | ((p & 0x1) << 5)
- protected_flag = property(_get_p, _set_p)
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.len = (ord(self.len[0]) << 16) | \
- (ord(self.len[1]) << 8) | \
- ord(self.len[2])
-
- if self.vendor_flag:
- self.vendor = struct.unpack('>I', self.data[:4])[0]
- self.data = self.data[4:self.len - self.__hdr_len__]
- else:
- self.data = self.data[:self.len - self.__hdr_len__]
-
- def pack_hdr(self):
- self.len = chr((self.len >> 16) & 0xff) + \
- chr((self.len >> 8) & 0xff) + \
- chr(self.len & 0xff)
- data = dpkt.Packet.pack_hdr(self)
- if self.vendor_flag:
- data += struct.pack('>I', self.vendor)
- return data
-
- def __len__(self):
- length = self.__hdr_len__ + \
- sum(map(len, self.data))
- if self.vendor_flag:
- length += 4
- return length
-
-
-if __name__ == '__main__':
- import unittest
-
- class DiameterTestCase(unittest.TestCase):
- def testPack(self):
- d = Diameter(self.s)
- self.failUnless(self.s == str(d))
- d = Diameter(self.t)
- self.failUnless(self.t == str(d))
-
- def testUnpack(self):
- d = Diameter(self.s)
- self.failUnless(d.len == 40)
- #self.failUnless(d.cmd == DEVICE_WATCHDOG_REQUEST)
- self.failUnless(d.request_flag == 1)
- self.failUnless(d.error_flag == 0)
- self.failUnless(len(d.avps) == 2)
-
- avp = d.avps[0]
- #self.failUnless(avp.code == ORIGIN_HOST)
- self.failUnless(avp.mandatory_flag == 1)
- self.failUnless(avp.vendor_flag == 0)
- self.failUnless(avp.len == 12)
- self.failUnless(len(avp) == 12)
- self.failUnless(avp.data == '\x68\x30\x30\x32')
-
- # also test the optional vendor id support
- d = Diameter(self.t)
- self.failUnless(d.len == 44)
- avp = d.avps[0]
- self.failUnless(avp.vendor_flag == 1)
- self.failUnless(avp.len == 16)
- self.failUnless(len(avp) == 16)
- self.failUnless(avp.vendor == 3735928559)
- self.failUnless(avp.data == '\x68\x30\x30\x32')
-
- s = '\x01\x00\x00\x28\x80\x00\x01\x18\x00\x00\x00\x00\x00\x00\x41\xc8\x00\x00\x00\x0c\x00\x00\x01\x08\x40\x00\x00\x0c\x68\x30\x30\x32\x00\x00\x01\x28\x40\x00\x00\x08'
- t = '\x01\x00\x00\x2c\x80\x00\x01\x18\x00\x00\x00\x00\x00\x00\x41\xc8\x00\x00\x00\x0c\x00\x00\x01\x08\xc0\x00\x00\x10\xde\xad\xbe\xef\x68\x30\x30\x32\x00\x00\x01\x28\x40\x00\x00\x08'
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/dns.py b/scripts/external_libs/dpkt-1.8.6/dpkt/dns.py
deleted file mode 100644
index 24ca1bd6..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/dns.py
+++ /dev/null
@@ -1,342 +0,0 @@
-# $Id: dns.py 27 2006-11-21 01:22:52Z dahelder $
-
-"""Domain Name System."""
-
-import struct
-import dpkt
-
-DNS_Q = 0
-DNS_R = 1
-
-# Opcodes
-DNS_QUERY = 0
-DNS_IQUERY = 1
-DNS_STATUS = 2
-DNS_NOTIFY = 4
-DNS_UPDATE = 5
-
-# Flags
-DNS_CD = 0x0010 # checking disabled
-DNS_AD = 0x0020 # authenticated data
-DNS_Z = 0x0040 # unused
-DNS_RA = 0x0080 # recursion available
-DNS_RD = 0x0100 # recursion desired
-DNS_TC = 0x0200 # truncated
-DNS_AA = 0x0400 # authoritative answer
-DNS_QR = 0x8000 # response ( query / response )
-
-# Response codes
-DNS_RCODE_NOERR = 0
-DNS_RCODE_FORMERR = 1
-DNS_RCODE_SERVFAIL = 2
-DNS_RCODE_NXDOMAIN = 3
-DNS_RCODE_NOTIMP = 4
-DNS_RCODE_REFUSED = 5
-DNS_RCODE_YXDOMAIN = 6
-DNS_RCODE_YXRRSET = 7
-DNS_RCODE_NXRRSET = 8
-DNS_RCODE_NOTAUTH = 9
-DNS_RCODE_NOTZONE = 10
-
-# RR types
-DNS_A = 1
-DNS_NS = 2
-DNS_CNAME = 5
-DNS_SOA = 6
-DNS_PTR = 12
-DNS_HINFO = 13
-DNS_MX = 15
-DNS_TXT = 16
-DNS_AAAA = 28
-DNS_SRV = 33
-
-# RR classes
-DNS_IN = 1
-DNS_CHAOS = 3
-DNS_HESIOD = 4
-DNS_ANY = 255
-
-def pack_name(name, off, label_ptrs):
- if name:
- labels = name.split('.')
- else:
- labels = []
- labels.append('')
- buf = ''
- for i, label in enumerate(labels):
- key = '.'.join(labels[i:]).upper()
- ptr = label_ptrs.get(key)
- if not ptr:
- if len(key) > 1:
- ptr = off + len(buf)
- if ptr < 0xc000:
- label_ptrs[key] = ptr
- i = len(label)
- buf += chr(i) + label
- else:
- buf += struct.pack('>H', (0xc000 | ptr))
- break
- return buf
-
-def unpack_name(buf, off):
- name = ''
- saved_off = 0
- for i in range(100): # XXX
- n = ord(buf[off])
- if n == 0:
- off += 1
- break
- elif (n & 0xc0) == 0xc0:
- ptr = struct.unpack('>H', buf[off:off+2])[0] & 0x3fff
- off += 2
- if not saved_off:
- saved_off = off
- # XXX - don't use recursion!@#$
- name = name + unpack_name(buf, ptr)[0] + '.'
- break
- else:
- off += 1
- name = name + buf[off:off+n] + '.'
- if len(name) > 255:
- raise dpkt.UnpackError('name longer than 255 bytes')
- off += n
- return name.strip('.'), off
-
-class DNS(dpkt.Packet):
- __hdr__ = (
- ('id', 'H', 0),
- ('op', 'H', DNS_RD), # recursive query
- # XXX - lists of query, RR objects
- ('qd', 'H', []),
- ('an', 'H', []),
- ('ns', 'H', []),
- ('ar', 'H', [])
- )
- def get_qr(self):
- return int((self.op & DNS_QR) == DNS_QR)
- def set_qr(self, v):
- if v: self.op |= DNS_QR
- else: self.op &= ~DNS_QR
- qr = property(get_qr, set_qr)
-
- def get_opcode(self):
- return (self.op >> 11) & 0xf
- def set_opcode(self, v):
- self.op = (self.op & ~0x7800) | ((v & 0xf) << 11)
- opcode = property(get_opcode, set_opcode)
-
- def get_aa(self):
- return int((self.op & DNS_AA) == DNS_AA)
- def set_aa(self, v):
- if v: self.op |= DNS_AA
- else: self.op &= ~DNS_AA
- aa = property(get_aa, set_aa)
-
- def get_rd(self):
- return int((self.op & DNS_RD) == DNS_RD)
- def set_rd(self,v):
- if v: self.op |= DNS_RD
- else: self.op &= ~DNS_RD
- rd = property(get_rd, set_rd)
-
- def get_ra(self):
- return int((self.op & DNS_RA) == DNS_RA)
- def set_ra(self,v):
- if v: self.op |= DNS_RA
- else: self.op &= ~DNS_RA
- ra = property(get_ra, set_ra)
-
- def get_zero(self):
- return int((self.op & DNS_Z) == DNS_Z)
- def set_zero(self, v):
- if v: self.op |= DNS_Z
- else: self.op &= ~DNS_Z
- zero = property(get_zero, set_zero)
-
- def get_rcode(self):
- return self.op & 0xf
- def set_rcode(self, v):
- self.op = (self.op & ~0xf) | (v & 0xf)
- rcode = property(get_rcode, set_rcode)
-
- class Q(dpkt.Packet):
- """DNS question."""
- __hdr__ = (
- ('name', '1025s', ''),
- ('type', 'H', DNS_A),
- ('cls', 'H', DNS_IN)
- )
- # XXX - suk
- def __len__(self):
- raise NotImplementedError
- __str__ = __len__
- def unpack(self, buf):
- raise NotImplementedError
-
- class RR(Q):
- """DNS resource record."""
- __hdr__ = (
- ('name', '1025s', ''),
- ('type', 'H', DNS_A),
- ('cls', 'H', DNS_IN),
- ('ttl', 'I', 0),
- ('rlen', 'H', 4),
- ('rdata', 's', '')
- )
- def pack_rdata(self, off, label_ptrs):
- # XXX - yeah, this sux
- if self.rdata:
- return self.rdata
- if self.type == DNS_A:
- return self.ip
- elif self.type == DNS_NS:
- return pack_name(self.nsname, off, label_ptrs)
- elif self.type == DNS_CNAME:
- return pack_name(self.cname, off, label_ptrs)
- elif self.type == DNS_PTR:
- return pack_name(self.ptrname, off, label_ptrs)
- elif self.type == DNS_SOA:
- l = []
- l.append(pack_name(self.mname, off, label_ptrs))
- l.append(pack_name(self.rname, off + len(l[0]), label_ptrs))
- l.append(struct.pack('>IIIII', self.serial, self.refresh,
- self.retry, self.expire, self.minimum))
- return ''.join(l)
- elif self.type == DNS_MX:
- return struct.pack('>H', self.preference) + \
- pack_name(self.mxname, off + 2, label_ptrs)
- elif self.type == DNS_TXT or self.type == DNS_HINFO:
- return ''.join([ '%s%s' % (chr(len(x)), x)
- for x in self.text ])
- elif self.type == DNS_AAAA:
- return self.ip6
- elif self.type == DNS_SRV:
- return struct.pack('>HHH', self.priority, self.weight, self.port) + \
- pack_name(self.srvname, off + 6, label_ptrs)
-
- def unpack_rdata(self, buf, off):
- if self.type == DNS_A:
- self.ip = self.rdata
- elif self.type == DNS_NS:
- self.nsname, off = unpack_name(buf, off)
- elif self.type == DNS_CNAME:
- self.cname, off = unpack_name(buf, off)
- elif self.type == DNS_PTR:
- self.ptrname, off = unpack_name(buf, off)
- elif self.type == DNS_SOA:
- self.mname, off = unpack_name(buf, off)
- self.rname, off = unpack_name(buf, off)
- self.serial, self.refresh, self.retry, self.expire, \
- self.minimum = struct.unpack('>IIIII', buf[off:off+20])
- elif self.type == DNS_MX:
- self.preference = struct.unpack('>H', self.rdata[:2])
- self.mxname, off = unpack_name(buf, off+2)
- elif self.type == DNS_TXT or self.type == DNS_HINFO:
- self.text = []
- buf = self.rdata
- while buf:
- n = ord(buf[0])
- self.text.append(buf[1:1+n])
- buf = buf[1+n:]
- elif self.type == DNS_AAAA:
- self.ip6 = self.rdata
- elif self.type == DNS_SRV:
- self.priority, self.weight, self.port = \
- struct.unpack('>HHH', self.rdata[:6])
- self.srvname, off = unpack_name(buf, off+6)
-
- def pack_q(self, buf, q):
- """Append packed DNS question and return buf."""
- return buf + pack_name(q.name, len(buf), self.label_ptrs) + \
- struct.pack('>HH', q.type, q.cls)
-
- def unpack_q(self, buf, off):
- """Return DNS question and new offset."""
- q = self.Q()
- q.name, off = unpack_name(buf, off)
- q.type, q.cls = struct.unpack('>HH', buf[off:off+4])
- off += 4
- return q, off
-
- def pack_rr(self, buf, rr):
- """Append packed DNS RR and return buf."""
- name = pack_name(rr.name, len(buf), self.label_ptrs)
- rdata = rr.pack_rdata(len(buf) + len(name) + 10, self.label_ptrs)
- return buf + name + struct.pack('>HHIH', rr.type, rr.cls, rr.ttl,
- len(rdata)) + rdata
-
- def unpack_rr(self, buf, off):
- """Return DNS RR and new offset."""
- rr = self.RR()
- rr.name, off = unpack_name(buf, off)
- rr.type, rr.cls, rr.ttl, rdlen = struct.unpack('>HHIH', buf[off:off+10])
- off += 10
- rr.rdata = buf[off:off+rdlen]
- rr.unpack_rdata(buf, off)
- off += rdlen
- return rr, off
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- off = self.__hdr_len__
- cnt = self.qd
- self.qd = []
- for i in range(cnt):
- q, off = self.unpack_q(buf, off)
- self.qd.append(q)
- for x in ('an', 'ns', 'ar'):
- cnt = getattr(self, x, 0)
- setattr(self, x, [])
- for i in range(cnt):
- rr, off = self.unpack_rr(buf, off)
- getattr(self, x).append(rr)
- self.data = ''
-
- def __len__(self):
- # XXX - cop out
- return len(str(self))
-
- def __str__(self):
- # XXX - compress names on the fly
- self.label_ptrs = {}
- buf = struct.pack(self.__hdr_fmt__, self.id, self.op, len(self.qd),
- len(self.an), len(self.ns), len(self.ar))
- for q in self.qd:
- buf = self.pack_q(buf, q)
- for x in ('an', 'ns', 'ar'):
- for rr in getattr(self, x):
- buf = self.pack_rr(buf, rr)
- del self.label_ptrs
- return buf
-
-if __name__ == '__main__':
- import unittest
- from ip import IP
-
- class DNSTestCase(unittest.TestCase):
- def test_basic(self):
- s = 'E\x00\x02\x08\xc15\x00\x00\x80\x11\x92aBk0\x01Bk0w\x005\xc07\x01\xf4\xda\xc2d\xd2\x81\x80\x00\x01\x00\x03\x00\x0b\x00\x0b\x03www\x06google\x03com\x00\x00\x01\x00\x01\xc0\x0c\x00\x05\x00\x01\x00\x00\x03V\x00\x17\x03www\x06google\x06akadns\x03net\x00\xc0,\x00\x01\x00\x01\x00\x00\x01\xa3\x00\x04@\xe9\xabh\xc0,\x00\x01\x00\x01\x00\x00\x01\xa3\x00\x04@\xe9\xabc\xc07\x00\x02\x00\x01\x00\x00KG\x00\x0c\x04usw5\x04akam\xc0>\xc07\x00\x02\x00\x01\x00\x00KG\x00\x07\x04usw6\xc0t\xc07\x00\x02\x00\x01\x00\x00KG\x00\x07\x04usw7\xc0t\xc07\x00\x02\x00\x01\x00\x00KG\x00\x08\x05asia3\xc0t\xc07\x00\x02\x00\x01\x00\x00KG\x00\x05\x02za\xc07\xc07\x00\x02\x00\x01\x00\x00KG\x00\x0f\x02zc\x06akadns\x03org\x00\xc07\x00\x02\x00\x01\x00\x00KG\x00\x05\x02zf\xc07\xc07\x00\x02\x00\x01\x00\x00KG\x00\x05\x02zh\xc0\xd5\xc07\x00\x02\x00\x01\x00\x00KG\x00\x07\x04eur3\xc0t\xc07\x00\x02\x00\x01\x00\x00KG\x00\x07\x04use2\xc0t\xc07\x00\x02\x00\x01\x00\x00KG\x00\x07\x04use4\xc0t\xc0\xc1\x00\x01\x00\x01\x00\x00\xfb4\x00\x04\xd0\xb9\x84\xb0\xc0\xd2\x00\x01\x00\x01\x00\x001\x0c\x00\x04?\xf1\xc76\xc0\xed\x00\x01\x00\x01\x00\x00\xfb4\x00\x04?\xd7\xc6S\xc0\xfe\x00\x01\x00\x01\x00\x001\x0c\x00\x04?\xd00.\xc1\x0f\x00\x01\x00\x01\x00\x00\n\xdf\x00\x04\xc1-\x01g\xc1"\x00\x01\x00\x01\x00\x00\x101\x00\x04?\xd1\xaa\x88\xc15\x00\x01\x00\x01\x00\x00\r\x1a\x00\x04PCC\xb6\xc0o\x00\x01\x00\x01\x00\x00\x10\x7f\x00\x04?\xf1I\xd6\xc0\x87\x00\x01\x00\x01\x00\x00\n\xdf\x00\x04\xce\x84dl\xc0\x9a\x00\x01\x00\x01\x00\x00\n\xdf\x00\x04A\xcb\xea\x1b\xc0\xad\x00\x01\x00\x01\x00\x00\x0b)\x00\x04\xc1l\x9a\t'
- ip = IP(s)
- dns = DNS(ip.udp.data)
- self.failUnless(dns.qd[0].name == 'www.google.com' and
- dns.an[1].name == 'www.google.akadns.net')
- s = '\x05\xf5\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x03www\x03cnn\x03com\x00\x00\x01\x00\x01'
- dns = DNS(s)
- self.failUnless(s == str(dns))
-
- def test_PTR(self):
- s = 'g\x02\x81\x80\x00\x01\x00\x01\x00\x03\x00\x00\x011\x011\x03211\x03141\x07in-addr\x04arpa\x00\x00\x0c\x00\x01\xc0\x0c\x00\x0c\x00\x01\x00\x00\r6\x00$\x07default\nv-umce-ifs\x05umnet\x05umich\x03edu\x00\xc0\x0e\x00\x02\x00\x01\x00\x00\r6\x00\r\x06shabby\x03ifs\xc0O\xc0\x0e\x00\x02\x00\x01\x00\x00\r6\x00\x0f\x0cfish-license\xc0m\xc0\x0e\x00\x02\x00\x01\x00\x00\r6\x00\x0b\x04dns2\x03itd\xc0O'
- dns = DNS(s)
- self.failUnless(dns.qd[0].name == '1.1.211.141.in-addr.arpa' and
- dns.an[0].ptrname == 'default.v-umce-ifs.umnet.umich.edu' and
- dns.ns[0].nsname == 'shabby.ifs.umich.edu' and
- dns.ns[1].ttl == 3382L and
- dns.ns[2].nsname == 'dns2.itd.umich.edu')
- self.failUnless(s == str(dns))
-
- def test_pack_name(self):
- # Empty name is \0
- x = pack_name('', 0, {})
- self.assertEqual(x, '\0')
-
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/dot1q.py b/scripts/external_libs/dpkt-1.8.6/dpkt/dot1q.py
deleted file mode 100644
index ac6eb185..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/dot1q.py
+++ /dev/null
@@ -1,1110 +0,0 @@
-
-
-
-<!DOCTYPE html>
-<html lang="en" class=" is-copy-enabled">
- <head prefix="og: http://ogp.me/ns# fb: http://ogp.me/ns/fb# object: http://ogp.me/ns/object# article: http://ogp.me/ns/article# profile: http://ogp.me/ns/profile#">
- <meta charset='utf-8'>
- <meta http-equiv="X-UA-Compatible" content="IE=edge">
- <meta http-equiv="Content-Language" content="en">
- <meta name="viewport" content="width=1020">
-
-
- <title>hexcap/dot1q.py at master · hexcap/hexcap</title>
- <link rel="search" type="application/opensearchdescription+xml" href="/opensearch.xml" title="GitHub">
- <link rel="fluid-icon" href="https://github.com/fluidicon.png" title="GitHub">
- <link rel="apple-touch-icon" sizes="57x57" href="/apple-touch-icon-114.png">
- <link rel="apple-touch-icon" sizes="114x114" href="/apple-touch-icon-114.png">
- <link rel="apple-touch-icon" sizes="72x72" href="/apple-touch-icon-144.png">
- <link rel="apple-touch-icon" sizes="144x144" href="/apple-touch-icon-144.png">
- <meta property="fb:app_id" content="1401488693436528">
-
- <meta content="@github" name="twitter:site" /><meta content="summary" name="twitter:card" /><meta content="hexcap/hexcap" name="twitter:title" /><meta content="hexcap - ncurses based hex editor for pcap files" name="twitter:description" /><meta content="https://avatars3.githubusercontent.com/u/5732830?v=3&amp;s=400" name="twitter:image:src" />
- <meta content="GitHub" property="og:site_name" /><meta content="object" property="og:type" /><meta content="https://avatars3.githubusercontent.com/u/5732830?v=3&amp;s=400" property="og:image" /><meta content="hexcap/hexcap" property="og:title" /><meta content="https://github.com/hexcap/hexcap" property="og:url" /><meta content="hexcap - ncurses based hex editor for pcap files" property="og:description" />
- <meta name="browser-stats-url" content="https://api.github.com/_private/browser/stats">
- <meta name="browser-errors-url" content="https://api.github.com/_private/browser/errors">
- <link rel="assets" href="https://assets-cdn.github.com/">
- <link rel="web-socket" href="wss://live.github.com/_sockets/NjE1MjA5Mjo4ZmY1YzBmYTBhNTQ0YzIzMTlkOWNlMDA3ZWQzNDM2Mzo1ZjcyZGY3ZTViMDQ2ODJlNjhjMDQzMDM5MjE3ZTIxNmI3ZTAwM2IyYmFhMDYyNjZlMTI0YmM1MDZlMzY2YTMw--521096dfee0071c38c7c0cbb37a19125d136bade">
- <meta name="pjax-timeout" content="1000">
- <link rel="sudo-modal" href="/sessions/sudo_modal">
-
- <meta name="msapplication-TileImage" content="/windows-tile.png">
- <meta name="msapplication-TileColor" content="#ffffff">
- <meta name="selected-link" value="repo_source" data-pjax-transient>
-
- <meta name="google-site-verification" content="KT5gs8h0wvaagLKAVWq8bbeNwnZZK1r1XQysX3xurLU">
- <meta name="google-analytics" content="UA-3769691-2">
-
- <meta content="collector.githubapp.com" name="octolytics-host" /><meta content="collector-cdn.github.com" name="octolytics-script-host" /><meta content="github" name="octolytics-app-id" /><meta content="C0764E39:0F18:5EE73AA:55ED75B0" name="octolytics-dimension-request_id" /><meta content="6152092" name="octolytics-actor-id" /><meta content="danklein10" name="octolytics-actor-login" /><meta content="a094d7c15626ff3dfc327b66cc85ebc7fb8bf99dd6502952c19c58263e0a4131" name="octolytics-actor-hash" />
-
- <meta content="Rails, view, blob#show" data-pjax-transient="true" name="analytics-event" />
- <meta class="js-ga-set" name="dimension1" content="Logged In">
- <meta class="js-ga-set" name="dimension4" content="Current repo nav">
- <meta name="is-dotcom" content="true">
- <meta name="hostname" content="github.com">
- <meta name="user-login" content="danklein10">
-
- <link rel="mask-icon" href="https://assets-cdn.github.com/pinned-octocat.svg" color="#4078c0">
- <link rel="icon" type="image/x-icon" href="https://assets-cdn.github.com/favicon.ico">
-
- <!-- </textarea> --><!-- '"` --><meta content="authenticity_token" name="csrf-param" />
-<meta content="38AGn6qyEzkQYOxGwzHrOFJlA8PVe1dGcw8fNEwl4eRgd01lt1QnIGHZA2qAEVIlvKBjuwlN1tMuWBkCq8W7Gg==" name="csrf-token" />
- <meta content="ac3d26b394d1e74b2cb512f7e309125427b6279a" name="form-nonce" />
-
- <link crossorigin="anonymous" href="https://assets-cdn.github.com/assets/github-20ef81825cb67c29f98949804b58cf91dbf3de37cb09ccaa59c93970272e0b35.css" media="all" rel="stylesheet" />
- <link crossorigin="anonymous" href="https://assets-cdn.github.com/assets/github2-726d0810d308c486e226fbd3d4392b84987fddf613a311c76e816dbaf2461c38.css" media="all" rel="stylesheet" />
-
-
-
-
- <meta http-equiv="x-pjax-version" content="e21519a1f589ffb51c1a9b6cfaa2bbfc">
-
-
- <meta name="description" content="hexcap - ncurses based hex editor for pcap files">
- <meta name="go-import" content="github.com/hexcap/hexcap git https://github.com/hexcap/hexcap.git">
-
- <meta content="5732830" name="octolytics-dimension-user_id" /><meta content="hexcap" name="octolytics-dimension-user_login" /><meta content="9747136" name="octolytics-dimension-repository_id" /><meta content="hexcap/hexcap" name="octolytics-dimension-repository_nwo" /><meta content="true" name="octolytics-dimension-repository_public" /><meta content="false" name="octolytics-dimension-repository_is_fork" /><meta content="9747136" name="octolytics-dimension-repository_network_root_id" /><meta content="hexcap/hexcap" name="octolytics-dimension-repository_network_root_nwo" />
- <link href="https://github.com/hexcap/hexcap/commits/master.atom" rel="alternate" title="Recent Commits to hexcap:master" type="application/atom+xml">
-
- </head>
-
-
- <body class="logged_in env-production windows vis-public page-blob">
- <a href="#start-of-content" tabindex="1" class="accessibility-aid js-skip-to-content">Skip to content</a>
-
-
-
-
-
-
-
- <div class="header header-logged-in true" role="banner">
- <div class="container clearfix">
-
- <a class="header-logo-invertocat" href="https://github.com/orgs/cisco-system-traffic-generator/dashboard" data-hotkey="g d" aria-label="Homepage" data-ga-click="Header, go to dashboard, icon:logo">
- <span class="mega-octicon octicon-mark-github"></span>
-</a>
-
-
- <div class="site-search repo-scope js-site-search" role="search">
- <!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/hexcap/hexcap/search" class="js-site-search-form" data-global-search-url="/search" data-repo-search-url="/hexcap/hexcap/search" method="get"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="&#x2713;" /></div>
- <label class="js-chromeless-input-container form-control">
- <div class="scope-badge">This repository</div>
- <input type="text"
- class="js-site-search-focus js-site-search-field is-clearable chromeless-input"
- data-hotkey="s"
- name="q"
- placeholder="Search"
- aria-label="Search this repository"
- data-global-scope-placeholder="Search GitHub"
- data-repo-scope-placeholder="Search"
- tabindex="1"
- autocapitalize="off">
- </label>
-</form>
- </div>
-
- <ul class="header-nav left" role="navigation">
- <li class="header-nav-item">
- <a href="/pulls" class="js-selected-navigation-item header-nav-link" data-ga-click="Header, click, Nav menu - item:pulls context:user" data-hotkey="g p" data-selected-links="/pulls /pulls/assigned /pulls/mentioned /pulls">
- Pull requests
-</a> </li>
- <li class="header-nav-item">
- <a href="/issues" class="js-selected-navigation-item header-nav-link" data-ga-click="Header, click, Nav menu - item:issues context:user" data-hotkey="g i" data-selected-links="/issues /issues/assigned /issues/mentioned /issues">
- Issues
-</a> </li>
- <li class="header-nav-item">
- <a class="header-nav-link" href="https://gist.github.com/" data-ga-click="Header, go to gist, text:gist">Gist</a>
- </li>
- </ul>
-
-
-<ul class="header-nav user-nav right" id="user-links">
- <li class="header-nav-item">
- <span class="js-socket-channel js-updatable-content"
- data-channel="notification-changed:danklein10"
- data-url="/notifications/header">
- <a href="/notifications" aria-label="You have no unread notifications" class="header-nav-link notification-indicator tooltipped tooltipped-s" data-ga-click="Header, go to notifications, icon:read" data-hotkey="g n">
- <span class="mail-status all-read"></span>
- <span class="octicon octicon-bell"></span>
-</a> </span>
-
- </li>
-
- <li class="header-nav-item dropdown js-menu-container">
- <a class="header-nav-link tooltipped tooltipped-s js-menu-target" href="/new"
- aria-label="Create new…"
- data-ga-click="Header, create new, icon:add">
- <span class="octicon octicon-plus left"></span>
- <span class="dropdown-caret"></span>
- </a>
-
- <div class="dropdown-menu-content js-menu-content">
- <ul class="dropdown-menu dropdown-menu-sw">
-
-<a class="dropdown-item" href="/new" data-ga-click="Header, create new repository">
- New repository
-</a>
-
-
- <a class="dropdown-item" href="/organizations/new" data-ga-click="Header, create new organization">
- New organization
- </a>
-
-
-
- <div class="dropdown-divider"></div>
- <div class="dropdown-header">
- <span title="hexcap/hexcap">This repository</span>
- </div>
- <a class="dropdown-item" href="/hexcap/hexcap/issues/new" data-ga-click="Header, create new issue">
- New issue
- </a>
-
- </ul>
- </div>
- </li>
-
- <li class="header-nav-item dropdown js-menu-container">
- <a class="header-nav-link name tooltipped tooltipped-s js-menu-target" href="/danklein10"
- aria-label="View profile and more"
- data-ga-click="Header, show menu, icon:avatar">
- <img alt="@danklein10" class="avatar" height="20" src="https://avatars1.githubusercontent.com/u/6152092?v=3&amp;s=40" width="20" />
- <span class="dropdown-caret"></span>
- </a>
-
- <div class="dropdown-menu-content js-menu-content">
- <div class="dropdown-menu dropdown-menu-sw">
- <div class="dropdown-header header-nav-current-user css-truncate">
- Signed in as <strong class="css-truncate-target">danklein10</strong>
- </div>
- <div class="dropdown-divider"></div>
-
- <a class="dropdown-item" href="/danklein10" data-ga-click="Header, go to profile, text:your profile">
- Your profile
- </a>
- <a class="dropdown-item" href="/stars" data-ga-click="Header, go to starred repos, text:your stars">
- Your stars
- </a>
- <a class="dropdown-item" href="/explore" data-ga-click="Header, go to explore, text:explore">
- Explore
- </a>
- <a class="dropdown-item" href="https://help.github.com" data-ga-click="Header, go to help, text:help">
- Help
- </a>
- <div class="dropdown-divider"></div>
-
- <a class="dropdown-item" href="/settings/profile" data-ga-click="Header, go to settings, icon:settings">
- Settings
- </a>
-
- <!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/logout" class="logout-form" data-form-nonce="ac3d26b394d1e74b2cb512f7e309125427b6279a" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="&#x2713;" /><input name="authenticity_token" type="hidden" value="N8m5Se0EOXw1Ui0v3MX+KEHzjZjgzzddbmLRz+Tv22u2OVzVM6Ztj/KT5sYvuJ3Krimi+7iKMVFWvkrAdu18aQ==" /></div>
- <button class="dropdown-item dropdown-signout" data-ga-click="Header, sign out, icon:logout">
- Sign out
- </button>
-</form> </div>
- </div>
- </li>
-</ul>
-
-
-
- </div>
-</div>
-
-
-
-
-
-
- <div id="start-of-content" class="accessibility-aid"></div>
-
- <div id="js-flash-container">
-</div>
-
-
- <div itemscope itemtype="http://schema.org/WebPage">
- <div class="pagehead repohead instapaper_ignore readability-menu">
- <div class="container">
-
- <div class="clearfix">
-
-<ul class="pagehead-actions">
-
- <li>
- <!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/notifications/subscribe" class="js-social-container" data-autosubmit="true" data-form-nonce="ac3d26b394d1e74b2cb512f7e309125427b6279a" data-remote="true" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="&#x2713;" /><input name="authenticity_token" type="hidden" value="oH+kDkXDGZ0VSv7TIzO+kQpjN+0KO7kmbf3Ltx0zg8M7XTbRwpsfJQpVrG2t3+0JACcc2FDlXzzNGCHk8MBhIA==" /></div> <input id="repository_id" name="repository_id" type="hidden" value="9747136" />
-
- <div class="select-menu js-menu-container js-select-menu">
- <a href="/hexcap/hexcap/subscription"
- class="btn btn-sm btn-with-count select-menu-button js-menu-target" role="button" tabindex="0" aria-haspopup="true"
- data-ga-click="Repository, click Watch settings, action:blob#show">
- <span class="js-select-button">
- <span class="octicon octicon-eye"></span>
- Watch
- </span>
- </a>
- <a class="social-count js-social-count" href="/hexcap/hexcap/watchers">
- 1
- </a>
-
- <div class="select-menu-modal-holder">
- <div class="select-menu-modal subscription-menu-modal js-menu-content" aria-hidden="true">
- <div class="select-menu-header">
- <span class="select-menu-title">Notifications</span>
- <span class="octicon octicon-x js-menu-close" role="button" aria-label="Close"></span>
- </div>
-
- <div class="select-menu-list js-navigation-container" role="menu">
-
- <div class="select-menu-item js-navigation-item selected" role="menuitem" tabindex="0">
- <span class="select-menu-item-icon octicon octicon-check"></span>
- <div class="select-menu-item-text">
- <input checked="checked" id="do_included" name="do" type="radio" value="included" />
- <span class="select-menu-item-heading">Not watching</span>
- <span class="description">Be notified when participating or @mentioned.</span>
- <span class="js-select-button-text hidden-select-button-text">
- <span class="octicon octicon-eye"></span>
- Watch
- </span>
- </div>
- </div>
-
- <div class="select-menu-item js-navigation-item " role="menuitem" tabindex="0">
- <span class="select-menu-item-icon octicon octicon octicon-check"></span>
- <div class="select-menu-item-text">
- <input id="do_subscribed" name="do" type="radio" value="subscribed" />
- <span class="select-menu-item-heading">Watching</span>
- <span class="description">Be notified of all conversations.</span>
- <span class="js-select-button-text hidden-select-button-text">
- <span class="octicon octicon-eye"></span>
- Unwatch
- </span>
- </div>
- </div>
-
- <div class="select-menu-item js-navigation-item " role="menuitem" tabindex="0">
- <span class="select-menu-item-icon octicon octicon-check"></span>
- <div class="select-menu-item-text">
- <input id="do_ignore" name="do" type="radio" value="ignore" />
- <span class="select-menu-item-heading">Ignoring</span>
- <span class="description">Never be notified.</span>
- <span class="js-select-button-text hidden-select-button-text">
- <span class="octicon octicon-mute"></span>
- Stop ignoring
- </span>
- </div>
- </div>
-
- </div>
-
- </div>
- </div>
- </div>
-</form>
- </li>
-
- <li>
-
- <div class="js-toggler-container js-social-container starring-container ">
-
- <!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/hexcap/hexcap/unstar" class="js-toggler-form starred js-unstar-button" data-form-nonce="ac3d26b394d1e74b2cb512f7e309125427b6279a" data-remote="true" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="&#x2713;" /><input name="authenticity_token" type="hidden" value="JgpGes1Y6iXFjyvt8la+hHwYGri9NzgLW6Fm/4Irc/gcJTASeXTPJc4DMFLoiG1jiPAke7zt61iMUnXyibSo/g==" /></div>
- <button
- class="btn btn-sm btn-with-count js-toggler-target"
- aria-label="Unstar this repository" title="Unstar hexcap/hexcap"
- data-ga-click="Repository, click unstar button, action:blob#show; text:Unstar">
- <span class="octicon octicon-star"></span>
- Unstar
- </button>
- <a class="social-count js-social-count" href="/hexcap/hexcap/stargazers">
- 9
- </a>
-</form>
- <!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/hexcap/hexcap/star" class="js-toggler-form unstarred js-star-button" data-form-nonce="ac3d26b394d1e74b2cb512f7e309125427b6279a" data-remote="true" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="&#x2713;" /><input name="authenticity_token" type="hidden" value="dWxeklRDfQhNmhahVOQuj1ONldm0pP66i9Wgz7xH3GeyuP9g6mXjnP/tSyp4qMR263/J9ch0SGGk5ownYVPsWg==" /></div>
- <button
- class="btn btn-sm btn-with-count js-toggler-target"
- aria-label="Star this repository" title="Star hexcap/hexcap"
- data-ga-click="Repository, click star button, action:blob#show; text:Star">
- <span class="octicon octicon-star"></span>
- Star
- </button>
- <a class="social-count js-social-count" href="/hexcap/hexcap/stargazers">
- 9
- </a>
-</form> </div>
-
- </li>
-
- <li>
- <a href="#fork-destination-box" class="btn btn-sm btn-with-count"
- title="Fork your own copy of hexcap/hexcap to your account"
- aria-label="Fork your own copy of hexcap/hexcap to your account"
- rel="facebox"
- data-ga-click="Repository, show fork modal, action:blob#show; text:Fork">
- <span class="octicon octicon-repo-forked"></span>
- Fork
- </a>
- <a href="/hexcap/hexcap/network" class="social-count">0</a>
-
- <div id="fork-destination-box" style="display: none;">
- <h2 class="facebox-header" data-facebox-id="facebox-header">Where should we fork this repository?</h2>
- <include-fragment src=""
- class="js-fork-select-fragment fork-select-fragment"
- data-url="/hexcap/hexcap/fork?fragment=1">
- <img alt="Loading" height="64" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-128.gif" width="64" />
- </include-fragment>
- </div>
- </li>
-
-</ul>
-
- <h1 itemscope itemtype="http://data-vocabulary.org/Breadcrumb" class="entry-title public ">
- <span class="mega-octicon octicon-repo"></span>
- <span class="author"><a href="/hexcap" class="url fn" itemprop="url" rel="author"><span itemprop="title">hexcap</span></a></span><!--
---><span class="path-divider">/</span><!--
---><strong><a href="/hexcap/hexcap" data-pjax="#js-repo-pjax-container">hexcap</a></strong>
-
- <span class="page-context-loader">
- <img alt="" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
- </span>
-
-</h1>
-
- </div>
- </div>
- </div>
-
- <div class="container">
- <div class="repository-with-sidebar repo-container new-discussion-timeline ">
- <div class="repository-sidebar clearfix">
-
-<nav class="sunken-menu repo-nav js-repo-nav js-sidenav-container-pjax js-octicon-loaders"
- role="navigation"
- data-pjax="#js-repo-pjax-container"
- data-issue-count-url="/hexcap/hexcap/issues/counts">
- <ul class="sunken-menu-group">
- <li class="tooltipped tooltipped-w" aria-label="Code">
- <a href="/hexcap/hexcap" aria-label="Code" aria-selected="true" class="js-selected-navigation-item selected sunken-menu-item" data-hotkey="g c" data-selected-links="repo_source repo_downloads repo_commits repo_releases repo_tags repo_branches /hexcap/hexcap">
- <span class="octicon octicon-code"></span> <span class="full-word">Code</span>
- <img alt="" class="mini-loader" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
-</a> </li>
-
- <li class="tooltipped tooltipped-w" aria-label="Issues">
- <a href="/hexcap/hexcap/issues" aria-label="Issues" class="js-selected-navigation-item sunken-menu-item" data-hotkey="g i" data-selected-links="repo_issues repo_labels repo_milestones /hexcap/hexcap/issues">
- <span class="octicon octicon-issue-opened"></span> <span class="full-word">Issues</span>
- <span class="js-issue-replace-counter"></span>
- <img alt="" class="mini-loader" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
-</a> </li>
-
- <li class="tooltipped tooltipped-w" aria-label="Pull requests">
- <a href="/hexcap/hexcap/pulls" aria-label="Pull requests" class="js-selected-navigation-item sunken-menu-item" data-hotkey="g p" data-selected-links="repo_pulls /hexcap/hexcap/pulls">
- <span class="octicon octicon-git-pull-request"></span> <span class="full-word">Pull requests</span>
- <span class="js-pull-replace-counter"></span>
- <img alt="" class="mini-loader" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
-</a> </li>
-
- <li class="tooltipped tooltipped-w" aria-label="Wiki">
- <a href="/hexcap/hexcap/wiki" aria-label="Wiki" class="js-selected-navigation-item sunken-menu-item" data-hotkey="g w" data-selected-links="repo_wiki /hexcap/hexcap/wiki">
- <span class="octicon octicon-book"></span> <span class="full-word">Wiki</span>
- <img alt="" class="mini-loader" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
-</a> </li>
- </ul>
- <div class="sunken-menu-separator"></div>
- <ul class="sunken-menu-group">
-
- <li class="tooltipped tooltipped-w" aria-label="Pulse">
- <a href="/hexcap/hexcap/pulse" aria-label="Pulse" class="js-selected-navigation-item sunken-menu-item" data-selected-links="pulse /hexcap/hexcap/pulse">
- <span class="octicon octicon-pulse"></span> <span class="full-word">Pulse</span>
- <img alt="" class="mini-loader" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
-</a> </li>
-
- <li class="tooltipped tooltipped-w" aria-label="Graphs">
- <a href="/hexcap/hexcap/graphs" aria-label="Graphs" class="js-selected-navigation-item sunken-menu-item" data-selected-links="repo_graphs repo_contributors /hexcap/hexcap/graphs">
- <span class="octicon octicon-graph"></span> <span class="full-word">Graphs</span>
- <img alt="" class="mini-loader" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
-</a> </li>
- </ul>
-
-
-</nav>
-
- <div class="only-with-full-nav">
-
-<div class="js-clone-url clone-url open"
- data-protocol-type="http">
- <h3><span class="text-emphasized">HTTPS</span> clone URL</h3>
- <div class="input-group js-zeroclipboard-container">
- <input type="text" class="input-mini input-monospace js-url-field js-zeroclipboard-target"
- value="https://github.com/hexcap/hexcap.git" readonly="readonly" aria-label="HTTPS clone URL">
- <span class="input-group-button">
- <button aria-label="Copy to clipboard" class="js-zeroclipboard btn btn-sm zeroclipboard-button tooltipped tooltipped-s" data-copied-hint="Copied!" type="button"><span class="octicon octicon-clippy"></span></button>
- </span>
- </div>
-</div>
-
-
-<div class="js-clone-url clone-url "
- data-protocol-type="ssh">
- <h3><span class="text-emphasized">SSH</span> clone URL</h3>
- <div class="input-group js-zeroclipboard-container">
- <input type="text" class="input-mini input-monospace js-url-field js-zeroclipboard-target"
- value="git@github.com:hexcap/hexcap.git" readonly="readonly" aria-label="SSH clone URL">
- <span class="input-group-button">
- <button aria-label="Copy to clipboard" class="js-zeroclipboard btn btn-sm zeroclipboard-button tooltipped tooltipped-s" data-copied-hint="Copied!" type="button"><span class="octicon octicon-clippy"></span></button>
- </span>
- </div>
-</div>
-
-
-<div class="js-clone-url clone-url "
- data-protocol-type="subversion">
- <h3><span class="text-emphasized">Subversion</span> checkout URL</h3>
- <div class="input-group js-zeroclipboard-container">
- <input type="text" class="input-mini input-monospace js-url-field js-zeroclipboard-target"
- value="https://github.com/hexcap/hexcap" readonly="readonly" aria-label="Subversion checkout URL">
- <span class="input-group-button">
- <button aria-label="Copy to clipboard" class="js-zeroclipboard btn btn-sm zeroclipboard-button tooltipped tooltipped-s" data-copied-hint="Copied!" type="button"><span class="octicon octicon-clippy"></span></button>
- </span>
- </div>
-</div>
-
-
-
- <div class="clone-options">You can clone with
- <!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/users/set_protocol?protocol_selector=http&amp;protocol_type=clone" class="inline-form js-clone-selector-form is-enabled" data-form-nonce="ac3d26b394d1e74b2cb512f7e309125427b6279a" data-remote="true" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="&#x2713;" /><input name="authenticity_token" type="hidden" value="T3BePf/8xkb6d6hQ1WxSRPwxd0cVHJD04Qcvw7XDgDf2KRpWMc1KDQXjof8YNA9SMwlC0J+/rLdH0uf+d3FtGw==" /></div><button class="btn-link js-clone-selector" data-protocol="http" type="submit">HTTPS</button></form>, <!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/users/set_protocol?protocol_selector=ssh&amp;protocol_type=clone" class="inline-form js-clone-selector-form is-enabled" data-form-nonce="ac3d26b394d1e74b2cb512f7e309125427b6279a" data-remote="true" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="&#x2713;" /><input name="authenticity_token" type="hidden" value="3ZZHZ1uyO2YyXJllsAEy6Uyl5IzKd9pnlMqcxOr6siiIUSr9yHihssjv9CD7IZ6VhlEbKvHuVODR80DN4nF2MQ==" /></div><button class="btn-link js-clone-selector" data-protocol="ssh" type="submit">SSH</button></form>, or <!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/users/set_protocol?protocol_selector=subversion&amp;protocol_type=clone" class="inline-form js-clone-selector-form is-enabled" data-form-nonce="ac3d26b394d1e74b2cb512f7e309125427b6279a" data-remote="true" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="&#x2713;" /><input name="authenticity_token" type="hidden" value="kwiF8pqkQsMfxWxA65L8G9PRBro/xPHPAN3GTPg9dbRP/w7eUuTJu4tyT6Kxk2iP8bRpg53eQ+KYY8UiaGq19A==" /></div><button class="btn-link js-clone-selector" data-protocol="subversion" type="submit">Subversion</button></form>.
- <a href="https://help.github.com/articles/which-remote-url-should-i-use" class="help tooltipped tooltipped-n" aria-label="Get help on which URL is right for you.">
- <span class="octicon octicon-question"></span>
- </a>
- </div>
- <a href="https://windows.github.com" class="btn btn-sm sidebar-button" title="Save hexcap/hexcap to your computer and use it in GitHub Desktop." aria-label="Save hexcap/hexcap to your computer and use it in GitHub Desktop.">
- <span class="octicon octicon-desktop-download"></span>
- Clone in Desktop
- </a>
-
- <a href="/hexcap/hexcap/archive/master.zip"
- class="btn btn-sm sidebar-button"
- aria-label="Download the contents of hexcap/hexcap as a zip file"
- title="Download the contents of hexcap/hexcap as a zip file"
- rel="nofollow">
- <span class="octicon octicon-cloud-download"></span>
- Download ZIP
- </a>
- </div>
- </div>
- <div id="js-repo-pjax-container" class="repository-content context-loader-container" data-pjax-container>
-
-
-
-<a href="/hexcap/hexcap/blob/ee8686e7c7a4cc4a1d836895fda01012fb930251/dpkt/dpkt/dot1q.py" class="hidden js-permalink-shortcut" data-hotkey="y">Permalink</a>
-
-<!-- blob contrib key: blob_contributors:v21:46bde8b0229c416e6f538e044b428839 -->
-
- <div class="file-navigation js-zeroclipboard-container">
-
-<div class="select-menu js-menu-container js-select-menu left">
- <span class="btn btn-sm select-menu-button js-menu-target css-truncate" data-hotkey="w"
- data-ref="master"
- title="master"
- role="button" aria-label="Switch branches or tags" tabindex="0" aria-haspopup="true">
- <i>Branch:</i>
- <span class="js-select-button css-truncate-target">master</span>
- </span>
-
- <div class="select-menu-modal-holder js-menu-content js-navigation-container" data-pjax aria-hidden="true">
-
- <div class="select-menu-modal">
- <div class="select-menu-header">
- <span class="select-menu-title">Switch branches/tags</span>
- <span class="octicon octicon-x js-menu-close" role="button" aria-label="Close"></span>
- </div>
-
- <div class="select-menu-filters">
- <div class="select-menu-text-filter">
- <input type="text" aria-label="Filter branches/tags" id="context-commitish-filter-field" class="js-filterable-field js-navigation-enable" placeholder="Filter branches/tags">
- </div>
- <div class="select-menu-tabs">
- <ul>
- <li class="select-menu-tab">
- <a href="#" data-tab-filter="branches" data-filter-placeholder="Filter branches/tags" class="js-select-menu-tab" role="tab">Branches</a>
- </li>
- <li class="select-menu-tab">
- <a href="#" data-tab-filter="tags" data-filter-placeholder="Find a tag…" class="js-select-menu-tab" role="tab">Tags</a>
- </li>
- </ul>
- </div>
- </div>
-
- <div class="select-menu-list select-menu-tab-bucket js-select-menu-tab-bucket" data-tab-filter="branches" role="menu">
-
- <div data-filterable-for="context-commitish-filter-field" data-filterable-type="substring">
-
-
- <a class="select-menu-item js-navigation-item js-navigation-open selected"
- href="/hexcap/hexcap/blob/master/dpkt/dpkt/dot1q.py"
- data-name="master"
- data-skip-pjax="true"
- rel="nofollow">
- <span class="select-menu-item-icon octicon octicon-check"></span>
- <span class="select-menu-item-text css-truncate-target" title="master">
- master
- </span>
- </a>
- </div>
-
- <div class="select-menu-no-results">Nothing to show</div>
- </div>
-
- <div class="select-menu-list select-menu-tab-bucket js-select-menu-tab-bucket" data-tab-filter="tags">
- <div data-filterable-for="context-commitish-filter-field" data-filterable-type="substring">
-
-
- </div>
-
- <div class="select-menu-no-results">Nothing to show</div>
- </div>
-
- </div>
- </div>
-</div>
-
- <div class="btn-group right">
- <a href="/hexcap/hexcap/find/master"
- class="js-show-file-finder btn btn-sm empty-icon tooltipped tooltipped-nw"
- data-pjax
- data-hotkey="t"
- aria-label="Quickly jump between files">
- <span class="octicon octicon-list-unordered"></span>
- </a>
- <button aria-label="Copy file path to clipboard" class="js-zeroclipboard btn btn-sm zeroclipboard-button tooltipped tooltipped-s" data-copied-hint="Copied!" type="button"><span class="octicon octicon-clippy"></span></button>
- </div>
-
- <div class="breadcrumb js-zeroclipboard-target">
- <span class="repo-root js-repo-root"><span itemscope="" itemtype="http://data-vocabulary.org/Breadcrumb"><a href="/hexcap/hexcap" class="" data-branch="master" data-pjax="true" itemscope="url"><span itemprop="title">hexcap</span></a></span></span><span class="separator">/</span><span itemscope="" itemtype="http://data-vocabulary.org/Breadcrumb"><a href="/hexcap/hexcap/tree/master/dpkt" class="" data-branch="master" data-pjax="true" itemscope="url"><span itemprop="title">dpkt</span></a></span><span class="separator">/</span><span itemscope="" itemtype="http://data-vocabulary.org/Breadcrumb"><a href="/hexcap/hexcap/tree/master/dpkt/dpkt" class="" data-branch="master" data-pjax="true" itemscope="url"><span itemprop="title">dpkt</span></a></span><span class="separator">/</span><strong class="final-path">dot1q.py</strong>
- </div>
- </div>
-
-
- <div class="commit file-history-tease">
- <div class="file-history-tease-header">
- <img alt="@smutt" class="avatar" height="24" src="https://avatars3.githubusercontent.com/u/1223204?v=3&amp;s=48" width="24" />
- <span class="author"><a href="/smutt" rel="contributor">smutt</a></span>
- <time datetime="2014-06-11T23:12:09Z" is="relative-time">Jun 11, 2014</time>
- <div class="commit-title">
- <a href="/hexcap/hexcap/commit/1e35c451e857e98e9a572700f88b89e3847e53aa" class="message" data-pjax="true" title="Renamed dpkt-read-only/ to dpkt/">Renamed dpkt-read-only/ to dpkt/</a>
- </div>
- </div>
-
- <div class="participation">
- <p class="quickstat">
- <a href="#blob_contributors_box" rel="facebox">
- <strong>1</strong>
- contributor
- </a>
- </p>
-
- </div>
- <div id="blob_contributors_box" style="display:none">
- <h2 class="facebox-header" data-facebox-id="facebox-header">Users who have contributed to this file</h2>
- <ul class="facebox-user-list" data-facebox-id="facebox-description">
- <li class="facebox-user-list-item">
- <img alt="@smutt" height="24" src="https://avatars3.githubusercontent.com/u/1223204?v=3&amp;s=48" width="24" />
- <a href="/smutt">smutt</a>
- </li>
- </ul>
- </div>
- </div>
-
-<div class="file">
- <div class="file-header">
- <div class="file-actions">
-
- <div class="btn-group">
- <a href="/hexcap/hexcap/raw/master/dpkt/dpkt/dot1q.py" class="btn btn-sm " id="raw-url">Raw</a>
- <a href="/hexcap/hexcap/blame/master/dpkt/dpkt/dot1q.py" class="btn btn-sm js-update-url-with-hash">Blame</a>
- <a href="/hexcap/hexcap/commits/master/dpkt/dpkt/dot1q.py" class="btn btn-sm " rel="nofollow">History</a>
- </div>
-
- <a class="octicon-btn tooltipped tooltipped-nw"
- href="https://windows.github.com"
- aria-label="Open this file in GitHub Desktop"
- data-ga-click="Repository, open with desktop, type:windows">
- <span class="octicon octicon-device-desktop"></span>
- </a>
-
- <!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/hexcap/hexcap/edit/master/dpkt/dpkt/dot1q.py" class="inline-form" data-form-nonce="ac3d26b394d1e74b2cb512f7e309125427b6279a" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="&#x2713;" /><input name="authenticity_token" type="hidden" value="SU0n03ayv7838ksqISz81DL+rsSEUTJ8VWf2X1NBBMo9p5gPaM9YXCNa6Sezt7xjvJJLjZP1eBMl7fduX9rrpw==" /></div>
- <button class="octicon-btn tooltipped tooltipped-n" type="submit" aria-label="Fork this project and edit the file" data-hotkey="e" data-disable-with>
- <span class="octicon octicon-pencil"></span>
- </button>
-</form>
- <!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/hexcap/hexcap/delete/master/dpkt/dpkt/dot1q.py" class="inline-form" data-form-nonce="ac3d26b394d1e74b2cb512f7e309125427b6279a" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="&#x2713;" /><input name="authenticity_token" type="hidden" value="e3j1i6vBsJAzzNVXfe8zggdfxap3rh2bJPDs6A9KfsXTcxVc8/fqgnyjGgf61qd9qj5NUp6K+wZ7/IU756EzQA==" /></div>
- <button class="octicon-btn octicon-btn-danger tooltipped tooltipped-n" type="submit" aria-label="Fork this project and delete this file" data-disable-with>
- <span class="octicon octicon-trashcan"></span>
- </button>
-</form> </div>
-
- <div class="file-info">
- <span class="file-mode" title="File mode">executable file</span>
- <span class="file-info-divider"></span>
- 93 lines (79 sloc)
- <span class="file-info-divider"></span>
- 3.071 kB
- </div>
- </div>
-
-
- <div class="blob-wrapper data type-python">
- <table class="highlight tab-size js-file-line-container" data-tab-size="8">
- <tr>
- <td id="L1" class="blob-num js-line-number" data-line-number="1"></td>
- <td id="LC1" class="blob-code blob-code-inner js-file-line"><span class="pl-s"><span class="pl-pds">&quot;&quot;&quot;</span>IEEE 802.1q<span class="pl-pds">&quot;&quot;&quot;</span></span></td>
- </tr>
- <tr>
- <td id="L2" class="blob-num js-line-number" data-line-number="2"></td>
- <td id="LC2" class="blob-code blob-code-inner js-file-line">
-</td>
- </tr>
- <tr>
- <td id="L3" class="blob-num js-line-number" data-line-number="3"></td>
- <td id="LC3" class="blob-code blob-code-inner js-file-line"><span class="pl-k">import</span> struct</td>
- </tr>
- <tr>
- <td id="L4" class="blob-num js-line-number" data-line-number="4"></td>
- <td id="LC4" class="blob-code blob-code-inner js-file-line"><span class="pl-k">import</span> dpkt</td>
- </tr>
- <tr>
- <td id="L5" class="blob-num js-line-number" data-line-number="5"></td>
- <td id="LC5" class="blob-code blob-code-inner js-file-line">
-</td>
- </tr>
- <tr>
- <td id="L6" class="blob-num js-line-number" data-line-number="6"></td>
- <td id="LC6" class="blob-code blob-code-inner js-file-line"><span class="pl-c"># Ethernet payload types - http://standards.ieee.org/regauth/ethertype</span></td>
- </tr>
- <tr>
- <td id="L7" class="blob-num js-line-number" data-line-number="7"></td>
- <td id="LC7" class="blob-code blob-code-inner js-file-line">ETH_TYPE_PUP <span class="pl-k">=</span> <span class="pl-c1">0x</span>0200 <span class="pl-c"># PUP protocol</span></td>
- </tr>
- <tr>
- <td id="L8" class="blob-num js-line-number" data-line-number="8"></td>
- <td id="LC8" class="blob-code blob-code-inner js-file-line">ETH_TYPE_IP <span class="pl-k">=</span> <span class="pl-c1">0x</span>0800 <span class="pl-c"># IP protocol</span></td>
- </tr>
- <tr>
- <td id="L9" class="blob-num js-line-number" data-line-number="9"></td>
- <td id="LC9" class="blob-code blob-code-inner js-file-line">ETH_TYPE_ARP <span class="pl-k">=</span> <span class="pl-c1">0x</span>0806 <span class="pl-c"># address resolution protocol</span></td>
- </tr>
- <tr>
- <td id="L10" class="blob-num js-line-number" data-line-number="10"></td>
- <td id="LC10" class="blob-code blob-code-inner js-file-line">ETH_TYPE_CDP <span class="pl-k">=</span> <span class="pl-c1">0x</span>2000 <span class="pl-c"># Cisco Discovery Protocol</span></td>
- </tr>
- <tr>
- <td id="L11" class="blob-num js-line-number" data-line-number="11"></td>
- <td id="LC11" class="blob-code blob-code-inner js-file-line">ETH_TYPE_DTP <span class="pl-k">=</span> <span class="pl-c1">0x</span>2004 <span class="pl-c"># Cisco Dynamic Trunking Protocol</span></td>
- </tr>
- <tr>
- <td id="L12" class="blob-num js-line-number" data-line-number="12"></td>
- <td id="LC12" class="blob-code blob-code-inner js-file-line">ETH_TYPE_REVARP <span class="pl-k">=</span> <span class="pl-c1">0x</span>8035 <span class="pl-c"># reverse addr resolution protocol</span></td>
- </tr>
- <tr>
- <td id="L13" class="blob-num js-line-number" data-line-number="13"></td>
- <td id="LC13" class="blob-code blob-code-inner js-file-line">ETH_TYPE_DOT1Q <span class="pl-k">=</span> <span class="pl-c1">0x</span>8100 <span class="pl-c"># IEEE 802.1Q VLAN tagging</span></td>
- </tr>
- <tr>
- <td id="L14" class="blob-num js-line-number" data-line-number="14"></td>
- <td id="LC14" class="blob-code blob-code-inner js-file-line">ETH_TYPE_IPX <span class="pl-k">=</span> <span class="pl-c1">0x</span>8137 <span class="pl-c"># Internetwork Packet Exchange</span></td>
- </tr>
- <tr>
- <td id="L15" class="blob-num js-line-number" data-line-number="15"></td>
- <td id="LC15" class="blob-code blob-code-inner js-file-line">ETH_TYPE_IP6 <span class="pl-k">=</span> <span class="pl-c1">0x</span>86DD <span class="pl-c"># IPv6 protocol</span></td>
- </tr>
- <tr>
- <td id="L16" class="blob-num js-line-number" data-line-number="16"></td>
- <td id="LC16" class="blob-code blob-code-inner js-file-line">ETH_TYPE_PPP <span class="pl-k">=</span> <span class="pl-c1">0x</span>880B <span class="pl-c"># PPP</span></td>
- </tr>
- <tr>
- <td id="L17" class="blob-num js-line-number" data-line-number="17"></td>
- <td id="LC17" class="blob-code blob-code-inner js-file-line">ETH_TYPE_MPLS <span class="pl-k">=</span> <span class="pl-c1">0x</span>8847 <span class="pl-c"># MPLS</span></td>
- </tr>
- <tr>
- <td id="L18" class="blob-num js-line-number" data-line-number="18"></td>
- <td id="LC18" class="blob-code blob-code-inner js-file-line">ETH_TYPE_MPLS_MCAST <span class="pl-k">=</span> <span class="pl-c1">0x</span>8848 <span class="pl-c"># MPLS Multicast</span></td>
- </tr>
- <tr>
- <td id="L19" class="blob-num js-line-number" data-line-number="19"></td>
- <td id="LC19" class="blob-code blob-code-inner js-file-line">ETH_TYPE_PPPoE_DISC <span class="pl-k">=</span> <span class="pl-c1">0x</span>8863 <span class="pl-c"># PPP Over Ethernet Discovery Stage</span></td>
- </tr>
- <tr>
- <td id="L20" class="blob-num js-line-number" data-line-number="20"></td>
- <td id="LC20" class="blob-code blob-code-inner js-file-line">ETH_TYPE_PPPoE <span class="pl-k">=</span> <span class="pl-c1">0x</span>8864 <span class="pl-c"># PPP Over Ethernet Session Stage</span></td>
- </tr>
- <tr>
- <td id="L21" class="blob-num js-line-number" data-line-number="21"></td>
- <td id="LC21" class="blob-code blob-code-inner js-file-line">
-</td>
- </tr>
- <tr>
- <td id="L22" class="blob-num js-line-number" data-line-number="22"></td>
- <td id="LC22" class="blob-code blob-code-inner js-file-line"><span class="pl-k">class</span> <span class="pl-en">DOT1Q</span>(<span class="pl-e">dpkt.Packet</span>):</td>
- </tr>
- <tr>
- <td id="L23" class="blob-num js-line-number" data-line-number="23"></td>
- <td id="LC23" class="blob-code blob-code-inner js-file-line"> __hdr__ <span class="pl-k">=</span> (</td>
- </tr>
- <tr>
- <td id="L24" class="blob-num js-line-number" data-line-number="24"></td>
- <td id="LC24" class="blob-code blob-code-inner js-file-line"> (<span class="pl-s"><span class="pl-pds">&#39;</span>x2<span class="pl-pds">&#39;</span></span>, <span class="pl-s"><span class="pl-pds">&#39;</span>H<span class="pl-pds">&#39;</span></span>, <span class="pl-c1">0</span>),</td>
- </tr>
- <tr>
- <td id="L25" class="blob-num js-line-number" data-line-number="25"></td>
- <td id="LC25" class="blob-code blob-code-inner js-file-line"> (<span class="pl-s"><span class="pl-pds">&#39;</span>type<span class="pl-pds">&#39;</span></span>, <span class="pl-s"><span class="pl-pds">&#39;</span>H<span class="pl-pds">&#39;</span></span>, <span class="pl-c1">0</span>)</td>
- </tr>
- <tr>
- <td id="L26" class="blob-num js-line-number" data-line-number="26"></td>
- <td id="LC26" class="blob-code blob-code-inner js-file-line"> )</td>
- </tr>
- <tr>
- <td id="L27" class="blob-num js-line-number" data-line-number="27"></td>
- <td id="LC27" class="blob-code blob-code-inner js-file-line"> _typesw <span class="pl-k">=</span> {}</td>
- </tr>
- <tr>
- <td id="L28" class="blob-num js-line-number" data-line-number="28"></td>
- <td id="LC28" class="blob-code blob-code-inner js-file-line">
-</td>
- </tr>
- <tr>
- <td id="L29" class="blob-num js-line-number" data-line-number="29"></td>
- <td id="LC29" class="blob-code blob-code-inner js-file-line"> <span class="pl-c"># pcp == Priority Code Point(802.1p)</span></td>
- </tr>
- <tr>
- <td id="L30" class="blob-num js-line-number" data-line-number="30"></td>
- <td id="LC30" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">_get_pcp</span>(<span class="pl-smi">self</span>): <span class="pl-k">return</span> <span class="pl-v">self</span>.x2 <span class="pl-k">&gt;&gt;</span> <span class="pl-c1">13</span></td>
- </tr>
- <tr>
- <td id="L31" class="blob-num js-line-number" data-line-number="31"></td>
- <td id="LC31" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">_set_pcp</span>(<span class="pl-smi">self</span>, <span class="pl-smi">pcp</span>): <span class="pl-v">self</span>.x2 <span class="pl-k">=</span> (<span class="pl-v">self</span>.x2 <span class="pl-k">&amp;</span> <span class="pl-c1">0x</span>1fff) <span class="pl-k">|</span> (pcp <span class="pl-k">&lt;&lt;</span> <span class="pl-c1">13</span>)</td>
- </tr>
- <tr>
- <td id="L32" class="blob-num js-line-number" data-line-number="32"></td>
- <td id="LC32" class="blob-code blob-code-inner js-file-line"> pcp <span class="pl-k">=</span> <span class="pl-c1">property</span>(_get_pcp, _set_pcp)</td>
- </tr>
- <tr>
- <td id="L33" class="blob-num js-line-number" data-line-number="33"></td>
- <td id="LC33" class="blob-code blob-code-inner js-file-line">
-</td>
- </tr>
- <tr>
- <td id="L34" class="blob-num js-line-number" data-line-number="34"></td>
- <td id="LC34" class="blob-code blob-code-inner js-file-line"> <span class="pl-c"># dei == Drop Eligible Indicator(almost never actually used)</span></td>
- </tr>
- <tr>
- <td id="L35" class="blob-num js-line-number" data-line-number="35"></td>
- <td id="LC35" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">_get_dei</span>(<span class="pl-smi">self</span>): <span class="pl-k">return</span> (<span class="pl-v">self</span>.x2 <span class="pl-k">&gt;&gt;</span> <span class="pl-c1">12</span>) <span class="pl-k">&amp;</span> <span class="pl-c1">1</span> </td>
- </tr>
- <tr>
- <td id="L36" class="blob-num js-line-number" data-line-number="36"></td>
- <td id="LC36" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">_set_dei</span>(<span class="pl-smi">self</span>, <span class="pl-smi">dei</span>): <span class="pl-v">self</span>.x2 <span class="pl-k">=</span> (<span class="pl-v">self</span>.x2 <span class="pl-k">&amp;</span> <span class="pl-c1">61439</span>) <span class="pl-k">|</span> (dei <span class="pl-k">&lt;&lt;</span> <span class="pl-c1">12</span>)</td>
- </tr>
- <tr>
- <td id="L37" class="blob-num js-line-number" data-line-number="37"></td>
- <td id="LC37" class="blob-code blob-code-inner js-file-line"> dei <span class="pl-k">=</span> <span class="pl-c1">property</span>(_get_dei, _set_dei)</td>
- </tr>
- <tr>
- <td id="L38" class="blob-num js-line-number" data-line-number="38"></td>
- <td id="LC38" class="blob-code blob-code-inner js-file-line">
-</td>
- </tr>
- <tr>
- <td id="L39" class="blob-num js-line-number" data-line-number="39"></td>
- <td id="LC39" class="blob-code blob-code-inner js-file-line"> <span class="pl-c"># tag == vlan tag</span></td>
- </tr>
- <tr>
- <td id="L40" class="blob-num js-line-number" data-line-number="40"></td>
- <td id="LC40" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">_get_tag</span>(<span class="pl-smi">self</span>): <span class="pl-k">return</span> <span class="pl-v">self</span>.x2 <span class="pl-k">&amp;</span> (<span class="pl-c1">65535</span> <span class="pl-k">&gt;&gt;</span> <span class="pl-c1">4</span>)</td>
- </tr>
- <tr>
- <td id="L41" class="blob-num js-line-number" data-line-number="41"></td>
- <td id="LC41" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">_set_tag</span>(<span class="pl-smi">self</span>, <span class="pl-smi">tag</span>): <span class="pl-v">self</span>.x2 <span class="pl-k">=</span> (<span class="pl-v">self</span>.x2 <span class="pl-k">&amp;</span> <span class="pl-c1">0x</span>fff) <span class="pl-k">|</span> tag</td>
- </tr>
- <tr>
- <td id="L42" class="blob-num js-line-number" data-line-number="42"></td>
- <td id="LC42" class="blob-code blob-code-inner js-file-line"> tag <span class="pl-k">=</span> <span class="pl-c1">property</span>(_get_tag, _set_tag)</td>
- </tr>
- <tr>
- <td id="L43" class="blob-num js-line-number" data-line-number="43"></td>
- <td id="LC43" class="blob-code blob-code-inner js-file-line">
-</td>
- </tr>
- <tr>
- <td id="L44" class="blob-num js-line-number" data-line-number="44"></td>
- <td id="LC44" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">set_type</span>(<span class="pl-smi">cls</span>, <span class="pl-smi">t</span>, <span class="pl-smi">pktclass</span>):</td>
- </tr>
- <tr>
- <td id="L45" class="blob-num js-line-number" data-line-number="45"></td>
- <td id="LC45" class="blob-code blob-code-inner js-file-line"> <span class="pl-v">cls</span>._typesw[t] <span class="pl-k">=</span> pktclass</td>
- </tr>
- <tr>
- <td id="L46" class="blob-num js-line-number" data-line-number="46"></td>
- <td id="LC46" class="blob-code blob-code-inner js-file-line"> set_type <span class="pl-k">=</span> <span class="pl-c1">classmethod</span>(set_type)</td>
- </tr>
- <tr>
- <td id="L47" class="blob-num js-line-number" data-line-number="47"></td>
- <td id="LC47" class="blob-code blob-code-inner js-file-line">
-</td>
- </tr>
- <tr>
- <td id="L48" class="blob-num js-line-number" data-line-number="48"></td>
- <td id="LC48" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">get_type</span>(<span class="pl-smi">cls</span>, <span class="pl-smi">t</span>):</td>
- </tr>
- <tr>
- <td id="L49" class="blob-num js-line-number" data-line-number="49"></td>
- <td id="LC49" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> <span class="pl-v">cls</span>._typesw[t]</td>
- </tr>
- <tr>
- <td id="L50" class="blob-num js-line-number" data-line-number="50"></td>
- <td id="LC50" class="blob-code blob-code-inner js-file-line"> get_type <span class="pl-k">=</span> <span class="pl-c1">classmethod</span>(get_type)</td>
- </tr>
- <tr>
- <td id="L51" class="blob-num js-line-number" data-line-number="51"></td>
- <td id="LC51" class="blob-code blob-code-inner js-file-line">
-</td>
- </tr>
- <tr>
- <td id="L52" class="blob-num js-line-number" data-line-number="52"></td>
- <td id="LC52" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">_unpack_data</span>(<span class="pl-smi">self</span>, <span class="pl-smi">buf</span>):</td>
- </tr>
- <tr>
- <td id="L53" class="blob-num js-line-number" data-line-number="53"></td>
- <td id="LC53" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-v">self</span>.type <span class="pl-k">==</span> ETH_TYPE_MPLS <span class="pl-k">or</span> \</td>
- </tr>
- <tr>
- <td id="L54" class="blob-num js-line-number" data-line-number="54"></td>
- <td id="LC54" class="blob-code blob-code-inner js-file-line"> <span class="pl-v">self</span>.type <span class="pl-k">==</span> ETH_TYPE_MPLS_MCAST:</td>
- </tr>
- <tr>
- <td id="L55" class="blob-num js-line-number" data-line-number="55"></td>
- <td id="LC55" class="blob-code blob-code-inner js-file-line"> <span class="pl-c"># XXX - skip labels (max # of labels is undefined, just use 24)</span></td>
- </tr>
- <tr>
- <td id="L56" class="blob-num js-line-number" data-line-number="56"></td>
- <td id="LC56" class="blob-code blob-code-inner js-file-line"> <span class="pl-v">self</span>.labels <span class="pl-k">=</span> []</td>
- </tr>
- <tr>
- <td id="L57" class="blob-num js-line-number" data-line-number="57"></td>
- <td id="LC57" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> i <span class="pl-k">in</span> <span class="pl-c1">range</span>(<span class="pl-c1">24</span>):</td>
- </tr>
- <tr>
- <td id="L58" class="blob-num js-line-number" data-line-number="58"></td>
- <td id="LC58" class="blob-code blob-code-inner js-file-line"> entry <span class="pl-k">=</span> struct.unpack(<span class="pl-s"><span class="pl-pds">&#39;</span>&gt;I<span class="pl-pds">&#39;</span></span>, buf[i<span class="pl-k">*</span><span class="pl-c1">4</span>:i<span class="pl-k">*</span><span class="pl-c1">4</span><span class="pl-k">+</span><span class="pl-c1">4</span>])[<span class="pl-c1">0</span>]</td>
- </tr>
- <tr>
- <td id="L59" class="blob-num js-line-number" data-line-number="59"></td>
- <td id="LC59" class="blob-code blob-code-inner js-file-line"> label <span class="pl-k">=</span> ((entry <span class="pl-k">&amp;</span> MPLS_LABEL_MASK) <span class="pl-k">&gt;&gt;</span> MPLS_LABEL_SHIFT, \</td>
- </tr>
- <tr>
- <td id="L60" class="blob-num js-line-number" data-line-number="60"></td>
- <td id="LC60" class="blob-code blob-code-inner js-file-line"> (entry <span class="pl-k">&amp;</span> MPLS_QOS_MASK) <span class="pl-k">&gt;&gt;</span> MPLS_QOS_SHIFT, \</td>
- </tr>
- <tr>
- <td id="L61" class="blob-num js-line-number" data-line-number="61"></td>
- <td id="LC61" class="blob-code blob-code-inner js-file-line"> (entry <span class="pl-k">&amp;</span> MPLS_TTL_MASK) <span class="pl-k">&gt;&gt;</span> MPLS_TTL_SHIFT)</td>
- </tr>
- <tr>
- <td id="L62" class="blob-num js-line-number" data-line-number="62"></td>
- <td id="LC62" class="blob-code blob-code-inner js-file-line"> <span class="pl-v">self</span>.labels.append(label)</td>
- </tr>
- <tr>
- <td id="L63" class="blob-num js-line-number" data-line-number="63"></td>
- <td id="LC63" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> entry <span class="pl-k">&amp;</span> MPLS_STACK_BOTTOM:</td>
- </tr>
- <tr>
- <td id="L64" class="blob-num js-line-number" data-line-number="64"></td>
- <td id="LC64" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">break</span></td>
- </tr>
- <tr>
- <td id="L65" class="blob-num js-line-number" data-line-number="65"></td>
- <td id="LC65" class="blob-code blob-code-inner js-file-line"> <span class="pl-v">self</span>.type <span class="pl-k">=</span> ETH_TYPE_IP</td>
- </tr>
- <tr>
- <td id="L66" class="blob-num js-line-number" data-line-number="66"></td>
- <td id="LC66" class="blob-code blob-code-inner js-file-line"> buf <span class="pl-k">=</span> buf[(i <span class="pl-k">+</span> <span class="pl-c1">1</span>) <span class="pl-k">*</span> <span class="pl-c1">4</span>:]</td>
- </tr>
- <tr>
- <td id="L67" class="blob-num js-line-number" data-line-number="67"></td>
- <td id="LC67" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">try</span>:</td>
- </tr>
- <tr>
- <td id="L68" class="blob-num js-line-number" data-line-number="68"></td>
- <td id="LC68" class="blob-code blob-code-inner js-file-line"> <span class="pl-v">self</span>.data <span class="pl-k">=</span> <span class="pl-v">self</span>._typesw[<span class="pl-v">self</span>.type](buf)</td>
- </tr>
- <tr>
- <td id="L69" class="blob-num js-line-number" data-line-number="69"></td>
- <td id="LC69" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">setattr</span>(<span class="pl-v">self</span>, <span class="pl-v">self</span>.data.<span class="pl-c1">__class__</span>.<span class="pl-c1">__name__</span>.lower(), <span class="pl-v">self</span>.data)</td>
- </tr>
- <tr>
- <td id="L70" class="blob-num js-line-number" data-line-number="70"></td>
- <td id="LC70" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">except</span> (<span class="pl-c1">KeyError</span>, dpkt.UnpackError):</td>
- </tr>
- <tr>
- <td id="L71" class="blob-num js-line-number" data-line-number="71"></td>
- <td id="LC71" class="blob-code blob-code-inner js-file-line"> <span class="pl-v">self</span>.data <span class="pl-k">=</span> buf</td>
- </tr>
- <tr>
- <td id="L72" class="blob-num js-line-number" data-line-number="72"></td>
- <td id="LC72" class="blob-code blob-code-inner js-file-line">
-</td>
- </tr>
- <tr>
- <td id="L73" class="blob-num js-line-number" data-line-number="73"></td>
- <td id="LC73" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">unpack</span>(<span class="pl-smi">self</span>, <span class="pl-smi">buf</span>):</td>
- </tr>
- <tr>
- <td id="L74" class="blob-num js-line-number" data-line-number="74"></td>
- <td id="LC74" class="blob-code blob-code-inner js-file-line"> dpkt.Packet.unpack(<span class="pl-v">self</span>, buf)</td>
- </tr>
- <tr>
- <td id="L75" class="blob-num js-line-number" data-line-number="75"></td>
- <td id="LC75" class="blob-code blob-code-inner js-file-line"> <span class="pl-v">self</span>._unpack_data(<span class="pl-v">self</span>.data)</td>
- </tr>
- <tr>
- <td id="L76" class="blob-num js-line-number" data-line-number="76"></td>
- <td id="LC76" class="blob-code blob-code-inner js-file-line">
-</td>
- </tr>
- <tr>
- <td id="L77" class="blob-num js-line-number" data-line-number="77"></td>
- <td id="LC77" class="blob-code blob-code-inner js-file-line">
-</td>
- </tr>
- <tr>
- <td id="L78" class="blob-num js-line-number" data-line-number="78"></td>
- <td id="LC78" class="blob-code blob-code-inner js-file-line"><span class="pl-c"># XXX - auto-load Ethernet dispatch table from ETH_TYPE_* definitions</span></td>
- </tr>
- <tr>
- <td id="L79" class="blob-num js-line-number" data-line-number="79"></td>
- <td id="LC79" class="blob-code blob-code-inner js-file-line"><span class="pl-k">def</span> <span class="pl-en">__load_types</span>():</td>
- </tr>
- <tr>
- <td id="L80" class="blob-num js-line-number" data-line-number="80"></td>
- <td id="LC80" class="blob-code blob-code-inner js-file-line"> g <span class="pl-k">=</span> <span class="pl-c1">globals</span>()</td>
- </tr>
- <tr>
- <td id="L81" class="blob-num js-line-number" data-line-number="81"></td>
- <td id="LC81" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> k, v <span class="pl-k">in</span> g.iteritems():</td>
- </tr>
- <tr>
- <td id="L82" class="blob-num js-line-number" data-line-number="82"></td>
- <td id="LC82" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> k.startswith(<span class="pl-s"><span class="pl-pds">&#39;</span>ETH_TYPE_<span class="pl-pds">&#39;</span></span>):</td>
- </tr>
- <tr>
- <td id="L83" class="blob-num js-line-number" data-line-number="83"></td>
- <td id="LC83" class="blob-code blob-code-inner js-file-line"> name <span class="pl-k">=</span> k[<span class="pl-c1">9</span>:]</td>
- </tr>
- <tr>
- <td id="L84" class="blob-num js-line-number" data-line-number="84"></td>
- <td id="LC84" class="blob-code blob-code-inner js-file-line"> modname <span class="pl-k">=</span> name.lower()</td>
- </tr>
- <tr>
- <td id="L85" class="blob-num js-line-number" data-line-number="85"></td>
- <td id="LC85" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">try</span>:</td>
- </tr>
- <tr>
- <td id="L86" class="blob-num js-line-number" data-line-number="86"></td>
- <td id="LC86" class="blob-code blob-code-inner js-file-line"> mod <span class="pl-k">=</span> <span class="pl-c1">__import__</span>(modname, g)</td>
- </tr>
- <tr>
- <td id="L87" class="blob-num js-line-number" data-line-number="87"></td>
- <td id="LC87" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">except</span> <span class="pl-c1">ImportError</span>:</td>
- </tr>
- <tr>
- <td id="L88" class="blob-num js-line-number" data-line-number="88"></td>
- <td id="LC88" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">continue</span></td>
- </tr>
- <tr>
- <td id="L89" class="blob-num js-line-number" data-line-number="89"></td>
- <td id="LC89" class="blob-code blob-code-inner js-file-line"> DOT1Q.set_type(v, <span class="pl-c1">getattr</span>(mod, name))</td>
- </tr>
- <tr>
- <td id="L90" class="blob-num js-line-number" data-line-number="90"></td>
- <td id="LC90" class="blob-code blob-code-inner js-file-line">
-</td>
- </tr>
- <tr>
- <td id="L91" class="blob-num js-line-number" data-line-number="91"></td>
- <td id="LC91" class="blob-code blob-code-inner js-file-line"><span class="pl-k">if</span> <span class="pl-k">not</span> DOT1Q._typesw:</td>
- </tr>
- <tr>
- <td id="L92" class="blob-num js-line-number" data-line-number="92"></td>
- <td id="LC92" class="blob-code blob-code-inner js-file-line"> __load_types()</td>
- </tr>
-</table>
-
- </div>
-
-</div>
-
-<a href="#jump-to-line" rel="facebox[.linejump]" data-hotkey="l" style="display:none">Jump to Line</a>
-<div id="jump-to-line" style="display:none">
- <!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="" class="js-jump-to-line-form" method="get"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="&#x2713;" /></div>
- <input class="linejump-input js-jump-to-line-field" type="text" placeholder="Jump to line&hellip;" aria-label="Jump to line" autofocus>
- <button type="submit" class="btn">Go</button>
-</form></div>
-
- </div>
- </div>
- <div class="modal-backdrop"></div>
- </div>
- </div>
-
-
-
- <div class="container">
- <div class="site-footer" role="contentinfo">
- <ul class="site-footer-links right">
- <li><a href="https://status.github.com/" data-ga-click="Footer, go to status, text:status">Status</a></li>
- <li><a href="https://developer.github.com" data-ga-click="Footer, go to api, text:api">API</a></li>
- <li><a href="https://training.github.com" data-ga-click="Footer, go to training, text:training">Training</a></li>
- <li><a href="https://shop.github.com" data-ga-click="Footer, go to shop, text:shop">Shop</a></li>
- <li><a href="https://github.com/blog" data-ga-click="Footer, go to blog, text:blog">Blog</a></li>
- <li><a href="https://github.com/about" data-ga-click="Footer, go to about, text:about">About</a></li>
- <li><a href="https://github.com/pricing" data-ga-click="Footer, go to pricing, text:pricing">Pricing</a></li>
-
- </ul>
-
- <a href="https://github.com" aria-label="Homepage">
- <span class="mega-octicon octicon-mark-github" title="GitHub"></span>
-</a>
- <ul class="site-footer-links">
- <li>&copy; 2015 <span title="0.09612s from github-fe127-cp1-prd.iad.github.net">GitHub</span>, Inc.</li>
- <li><a href="https://github.com/site/terms" data-ga-click="Footer, go to terms, text:terms">Terms</a></li>
- <li><a href="https://github.com/site/privacy" data-ga-click="Footer, go to privacy, text:privacy">Privacy</a></li>
- <li><a href="https://github.com/security" data-ga-click="Footer, go to security, text:security">Security</a></li>
- <li><a href="https://github.com/contact" data-ga-click="Footer, go to contact, text:contact">Contact</a></li>
- <li><a href="https://help.github.com" data-ga-click="Footer, go to help, text:help">Help</a></li>
- </ul>
- </div>
-</div>
-
-
-
-
-
-
- <div id="ajax-error-message" class="flash flash-error">
- <span class="octicon octicon-alert"></span>
- <button type="button" class="flash-close js-flash-close js-ajax-error-dismiss" aria-label="Dismiss error">
- <span class="octicon octicon-x"></span>
- </button>
- Something went wrong with that request. Please try again.
- </div>
-
-
- <script crossorigin="anonymous" src="https://assets-cdn.github.com/assets/frameworks-06e65f5639cc52d1aaada53115a54614b60fa90ab446a673e3e1818df167663b.js"></script>
- <script async="async" crossorigin="anonymous" src="https://assets-cdn.github.com/assets/github-435b0c380a8b91b3f42654ca3dbe8b623eebe6dfa2314a80f961d364de7e3f42.js"></script>
-
-
- <div class="js-stale-session-flash stale-session-flash flash flash-warn flash-banner hidden">
- <span class="octicon octicon-alert"></span>
- <span class="signed-in-tab-flash">You signed in with another tab or window. <a href="">Reload</a> to refresh your session.</span>
- <span class="signed-out-tab-flash">You signed out in another tab or window. <a href="">Reload</a> to refresh your session.</span>
- </div>
- </body>
-</html>
-
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/dpkt.py b/scripts/external_libs/dpkt-1.8.6/dpkt/dpkt.py
deleted file mode 100644
index e14d46bd..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/dpkt.py
+++ /dev/null
@@ -1,168 +0,0 @@
-
-# $Id: dpkt.py 43 2007-08-02 22:42:59Z jon.oberheide $
-
-"""Simple packet creation and parsing."""
-
-import copy, itertools, socket, struct
-
-class Error(Exception): pass
-class UnpackError(Error): pass
-class NeedData(UnpackError): pass
-class PackError(Error): pass
-
-class _MetaPacket(type):
- def __new__(cls, clsname, clsbases, clsdict):
- t = type.__new__(cls, clsname, clsbases, clsdict)
- st = getattr(t, '__hdr__', None)
- if st is not None:
- # XXX - __slots__ only created in __new__()
- clsdict['__slots__'] = [ x[0] for x in st ] + [ 'data' ]
- t = type.__new__(cls, clsname, clsbases, clsdict)
- t.__hdr_fields__ = [ x[0] for x in st ]
- t.__hdr_fmt__ = getattr(t, '__byte_order__', '>') + \
- ''.join([ x[1] for x in st ])
- t.__hdr_len__ = struct.calcsize(t.__hdr_fmt__)
- t.__hdr_defaults__ = dict(zip(
- t.__hdr_fields__, [ x[2] for x in st ]))
- return t
-
-class Packet(object):
- """Base packet class, with metaclass magic to generate members from
- self.__hdr__.
-
- __hdr__ should be defined as a list of (name, structfmt, default) tuples
- __byte_order__ can be set to override the default ('>')
-
- Example::
-
- >>> class Foo(Packet):
- ... __hdr__ = (('foo', 'I', 1), ('bar', 'H', 2), ('baz', '4s', 'quux'))
- ...
- >>> foo = Foo(bar=3)
- >>> foo
- Foo(bar=3)
- >>> str(foo)
- '\x00\x00\x00\x01\x00\x03quux'
- >>> foo.bar
- 3
- >>> foo.baz
- 'quux'
- >>> foo.foo = 7
- >>> foo.baz = 'whee'
- >>> foo
- Foo(baz='whee', foo=7, bar=3)
- >>> Foo('hello, world!')
- Foo(baz=' wor', foo=1751477356L, bar=28460, data='ld!')
- """
- __metaclass__ = _MetaPacket
-
- def __init__(self, *args, **kwargs):
- """Packet constructor with ([buf], [field=val,...]) prototype.
-
- Arguments:
-
- buf -- optional packet buffer to unpack
-
- Optional keyword arguments correspond to members to set
- (matching fields in self.__hdr__, or 'data').
- """
- self.data = ''
- if args:
- try:
- self.unpack(args[0])
- except struct.error:
- if len(args[0]) < self.__hdr_len__:
- raise NeedData
- raise UnpackError('invalid %s: %r' %
- (self.__class__.__name__, args[0]))
- else:
- for k in self.__hdr_fields__:
- setattr(self, k, copy.copy(self.__hdr_defaults__[k]))
- for k, v in kwargs.iteritems():
- setattr(self, k, v)
-
- def __len__(self):
- return self.__hdr_len__ + len(self.data)
-
- def __getitem__(self, k):
- try: return getattr(self, k)
- except AttributeError: raise KeyError
-
- def __repr__(self):
- l = [ '%s=%r' % (k, getattr(self, k))
- for k in self.__hdr_defaults__
- if getattr(self, k) != self.__hdr_defaults__[k] ]
- if self.data:
- l.append('data=%r' % self.data)
- return '%s(%s)' % (self.__class__.__name__, ', '.join(l))
-
- def __str__(self):
- return self.pack_hdr() + str(self.data)
-
- def pack_hdr(self):
- """Return packed header string."""
- try:
- return struct.pack(self.__hdr_fmt__,
- *[ getattr(self, k) for k in self.__hdr_fields__ ])
- except struct.error:
- vals = []
- for k in self.__hdr_fields__:
- v = getattr(self, k)
- if isinstance(v, tuple):
- vals.extend(v)
- else:
- vals.append(v)
- try:
- return struct.pack(self.__hdr_fmt__, *vals)
- except struct.error, e:
- raise PackError(str(e))
-
- def pack(self):
- """Return packed header + self.data string."""
- return str(self)
-
- def unpack(self, buf):
- """Unpack packet header fields from buf, and set self.data."""
- for k, v in itertools.izip(self.__hdr_fields__,
- struct.unpack(self.__hdr_fmt__, buf[:self.__hdr_len__])):
- setattr(self, k, v)
- self.data = buf[self.__hdr_len__:]
-
-# XXX - ''.join([(len(`chr(x)`)==3) and chr(x) or '.' for x in range(256)])
-__vis_filter = """................................ !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[.]^_`abcdefghijklmnopqrstuvwxyz{|}~................................................................................................................................."""
-
-def hexdump(buf, length=16):
- """Return a hexdump output string of the given buffer."""
- n = 0
- res = []
- while buf:
- line, buf = buf[:length], buf[length:]
- hexa = ' '.join(['%02x' % ord(x) for x in line])
- line = line.translate(__vis_filter)
- res.append(' %04d: %-*s %s' % (n, length * 3, hexa, line))
- n += length
- return '\n'.join(res)
-
-try:
- import dnet2
- def in_cksum_add(s, buf):
- return dnet.ip_cksum_add(buf, s)
- def in_cksum_done(s):
- return socket.ntohs(dnet.ip_cksum_carry(s))
-except ImportError:
- import array
- def in_cksum_add(s, buf):
- n = len(buf)
- cnt = (n / 2) * 2
- a = array.array('H', buf[:cnt])
- if cnt != n:
- a.append(struct.unpack('H', buf[-1] + '\x00')[0])
- return s + sum(a)
- def in_cksum_done(s):
- s = (s >> 16) + (s & 0xffff)
- s += (s >> 16)
- return socket.ntohs(~s & 0xffff)
-
-def in_cksum(buf):
- """Return computed Internet checksum."""
- return in_cksum_done(in_cksum_add(0, buf))
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/dtp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/dtp.py
deleted file mode 100644
index 9ceec387..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/dtp.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# $Id: dtp.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Dynamic Trunking Protocol."""
-
-import struct
-import dpkt
-
-class DTP(dpkt.Packet):
- __hdr__ = (
- ('v', 'B', 0),
- ) # rest is TLVs
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- buf = self.data
- tvs = []
- while buf:
- t, l = struct.unpack('>HH', buf[:4])
- v, buf = buf[4:4+l], buf[4+l:]
- tvs.append((t, v))
- self.data = tvs
-
-TRUNK_NAME = 0x01
-MAC_ADDR = 0x04
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/esp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/esp.py
deleted file mode 100644
index 890482ea..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/esp.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# $Id: esp.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Encapsulated Security Protocol."""
-
-import dpkt
-
-class ESP(dpkt.Packet):
- __hdr__ = (
- ('spi', 'I', 0),
- ('seq', 'I', 0)
- )
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/ethernet.py b/scripts/external_libs/dpkt-1.8.6/dpkt/ethernet.py
deleted file mode 100644
index eca04868..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/ethernet.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# $Id: ethernet.py 65 2010-03-26 02:53:51Z dugsong $
-
-"""Ethernet II, LLC (802.3+802.2), LLC/SNAP, and Novell raw 802.3,
-with automatic 802.1q, MPLS, PPPoE, and Cisco ISL decapsulation."""
-
-import struct
-import dpkt, stp
-
-ETH_CRC_LEN = 4
-ETH_HDR_LEN = 14
-
-ETH_LEN_MIN = 64 # minimum frame length with CRC
-ETH_LEN_MAX = 1518 # maximum frame length with CRC
-
-ETH_MTU = (ETH_LEN_MAX - ETH_HDR_LEN - ETH_CRC_LEN)
-ETH_MIN = (ETH_LEN_MIN - ETH_HDR_LEN - ETH_CRC_LEN)
-
-# Ethernet payload types - http://standards.ieee.org/regauth/ethertype
-ETH_TYPE_PUP = 0x0200 # PUP protocol
-ETH_TYPE_IP = 0x0800 # IP protocol
-ETH_TYPE_ARP = 0x0806 # address resolution protocol
-ETH_TYPE_AOE = 0x88a2 # AoE protocol
-ETH_TYPE_CDP = 0x2000 # Cisco Discovery Protocol
-ETH_TYPE_DTP = 0x2004 # Cisco Dynamic Trunking Protocol
-ETH_TYPE_REVARP = 0x8035 # reverse addr resolution protocol
-ETH_TYPE_8021Q = 0x8100 # IEEE 802.1Q VLAN tagging
-ETH_TYPE_IPX = 0x8137 # Internetwork Packet Exchange
-ETH_TYPE_IP6 = 0x86DD # IPv6 protocol
-ETH_TYPE_PPP = 0x880B # PPP
-ETH_TYPE_MPLS = 0x8847 # MPLS
-ETH_TYPE_MPLS_MCAST = 0x8848 # MPLS Multicast
-ETH_TYPE_PPPoE_DISC = 0x8863 # PPP Over Ethernet Discovery Stage
-ETH_TYPE_PPPoE = 0x8864 # PPP Over Ethernet Session Stage
-ETH_TYPE_LLDP = 0x88CC #Link Layer Discovery Protocol
-
-# MPLS label stack fields
-MPLS_LABEL_MASK = 0xfffff000
-MPLS_QOS_MASK = 0x00000e00
-MPLS_TTL_MASK = 0x000000ff
-MPLS_LABEL_SHIFT= 12
-MPLS_QOS_SHIFT = 9
-MPLS_TTL_SHIFT = 0
-MPLS_STACK_BOTTOM=0x0100
-
-class Ethernet(dpkt.Packet):
- __hdr__ = (
- ('dst', '6s', ''),
- ('src', '6s', ''),
- ('type', 'H', ETH_TYPE_IP)
- )
- _typesw = {}
-
- def _unpack_data(self, buf):
- if self.type == ETH_TYPE_8021Q:
- self.tag, self.type = struct.unpack('>HH', buf[:4])
- buf = buf[4:]
- elif self.type == ETH_TYPE_MPLS or \
- self.type == ETH_TYPE_MPLS_MCAST:
- # XXX - skip labels (max # of labels is undefined, just use 24)
- self.labels = []
- for i in range(24):
- entry = struct.unpack('>I', buf[i*4:i*4+4])[0]
- label = ((entry & MPLS_LABEL_MASK) >> MPLS_LABEL_SHIFT, \
- (entry & MPLS_QOS_MASK) >> MPLS_QOS_SHIFT, \
- (entry & MPLS_TTL_MASK) >> MPLS_TTL_SHIFT)
- self.labels.append(label)
- if entry & MPLS_STACK_BOTTOM:
- break
- self.type = ETH_TYPE_IP
- buf = buf[(i + 1) * 4:]
- try:
- self.data = self._typesw[self.type](buf)
- setattr(self, self.data.__class__.__name__.lower(), self.data)
- except (KeyError, dpkt.UnpackError):
- self.data = buf
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- if self.type > 1500:
- # Ethernet II
- self._unpack_data(self.data)
- elif self.dst.startswith('\x01\x00\x0c\x00\x00') or \
- self.dst.startswith('\x03\x00\x0c\x00\x00'):
- # Cisco ISL
- self.vlan = struct.unpack('>H', self.data[6:8])[0]
- self.unpack(self.data[12:])
- elif self.data.startswith('\xff\xff'):
- # Novell "raw" 802.3
- self.type = ETH_TYPE_IPX
- self.data = self.ipx = self._typesw[ETH_TYPE_IPX](self.data[2:])
- else:
- # 802.2 LLC
- self.dsap, self.ssap, self.ctl = struct.unpack('BBB', self.data[:3])
- if self.data.startswith('\xaa\xaa'):
- # SNAP
- self.type = struct.unpack('>H', self.data[6:8])[0]
- self._unpack_data(self.data[8:])
- else:
- # non-SNAP
- dsap = ord(self.data[0])
- if dsap == 0x06: # SAP_IP
- self.data = self.ip = self._typesw[ETH_TYPE_IP](self.data[3:])
- elif dsap == 0x10 or dsap == 0xe0: # SAP_NETWARE{1,2}
- self.data = self.ipx = self._typesw[ETH_TYPE_IPX](self.data[3:])
- elif dsap == 0x42: # SAP_STP
- self.data = self.stp = stp.STP(self.data[3:])
-
- def set_type(cls, t, pktclass):
- cls._typesw[t] = pktclass
- set_type = classmethod(set_type)
-
- def get_type(cls, t):
- return cls._typesw[t]
- get_type = classmethod(get_type)
-
-# XXX - auto-load Ethernet dispatch table from ETH_TYPE_* definitions
-def __load_types():
- g = globals()
- for k, v in g.iteritems():
- if k.startswith('ETH_TYPE_'):
- name = k[9:]
- modname = name.lower()
- try:
- mod = __import__(modname, g)
- except ImportError:
- continue
- Ethernet.set_type(v, getattr(mod, name))
-
-if not Ethernet._typesw:
- __load_types()
-
-if __name__ == '__main__':
- import unittest
-
- class EthTestCase(unittest.TestCase):
- def test_eth(self):
- s = '\x00\xb0\xd0\xe1\x80r\x00\x11$\x8c\x11\xde\x86\xdd`\x00\x00\x00\x00(\x06@\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11$\xff\xfe\x8c\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80r\xcd\xd3\x00\x16\xffP\xd7\x13\x00\x00\x00\x00\xa0\x02\xff\xffg\xd3\x00\x00\x02\x04\x05\xa0\x01\x03\x03\x00\x01\x01\x08\n}\x18:a\x00\x00\x00\x00'
- eth = Ethernet(s)
-
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/gre.py b/scripts/external_libs/dpkt-1.8.6/dpkt/gre.py
deleted file mode 100644
index 4d462edc..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/gre.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# $Id: gre.py 75 2010-08-03 14:42:19Z jon.oberheide $
-
-"""Generic Routing Encapsulation."""
-
-import struct
-import dpkt
-
-GRE_CP = 0x8000 # Checksum Present
-GRE_RP = 0x4000 # Routing Present
-GRE_KP = 0x2000 # Key Present
-GRE_SP = 0x1000 # Sequence Present
-GRE_SS = 0x0800 # Strict Source Route
-GRE_AP = 0x0080 # Acknowledgment Present
-
-GRE_opt_fields = (
- (GRE_CP|GRE_RP, 'sum', 'H'), (GRE_CP|GRE_RP, 'off', 'H'),
- (GRE_KP, 'key', 'I'), (GRE_SP, 'seq', 'I'), (GRE_AP, 'ack', 'I')
- )
-class GRE(dpkt.Packet):
- __hdr__ = (
- ('flags', 'H', 0),
- ('p', 'H', 0x0800), # ETH_TYPE_IP
- )
- _protosw = {}
- sre = ()
- def get_v(self):
- return self.flags & 0x7
- def set_v(self, v):
- self.flags = (self.flags & ~0x7) | (v & 0x7)
- v = property(get_v, set_v)
-
- def get_recur(self):
- return (self.flags >> 5) & 0x7
- def set_recur(self, v):
- self.flags = (self.flags & ~0xe0) | ((v & 0x7) << 5)
- recur = property(get_recur, set_recur)
-
- class SRE(dpkt.Packet):
- __hdr__ = [
- ('family', 'H', 0),
- ('off', 'B', 0),
- ('len', 'B', 0)
- ]
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.data = self.data[:self.len]
-
- def opt_fields_fmts(self):
- if self.v == 0:
- fields, fmts = [], []
- opt_fields = GRE_opt_fields
- else:
- fields, fmts = [ 'len', 'callid' ], [ 'H', 'H' ]
- opt_fields = GRE_opt_fields[-2:]
- for flags, field, fmt in opt_fields:
- if self.flags & flags:
- fields.append(field)
- fmts.append(fmt)
- return fields, fmts
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- fields, fmts = self.opt_fields_fmts()
- if fields:
- fmt = ''.join(fmts)
- fmtlen = struct.calcsize(fmt)
- vals = struct.unpack(fmt, self.data[:fmtlen])
- self.data = self.data[fmtlen:]
- self.__dict__.update(dict(zip(fields, vals)))
- if self.flags & GRE_RP:
- l = []
- while True:
- sre = self.SRE(self.data)
- self.data = self.data[len(sre):]
- l.append(sre)
- if not sre.len:
- break
- self.sre = l
- self.data = ethernet.Ethernet._typesw[self.p](self.data)
- setattr(self, self.data.__class__.__name__.lower(), self.data)
-
- def __len__(self):
- opt_fmtlen = struct.calcsize(''.join(self.opt_fields_fmts()[1]))
- return self.__hdr_len__ + opt_fmtlen + \
- sum(map(len, self.sre)) + len(self.data)
-
- # XXX - need to fix up repr to display optional fields...
-
- def __str__(self):
- fields, fmts = self.opt_fields_fmts()
- if fields:
- vals = []
- for f in fields:
- vals.append(getattr(self, f))
- opt_s = struct.pack(''.join(fmts), *vals)
- else:
- opt_s = ''
- return self.pack_hdr() + opt_s + ''.join(map(str, self.sre)) + \
- str(self.data)
-
-# XXX - auto-load GRE dispatch table from Ethernet dispatch table
-import ethernet
-GRE._protosw.update(ethernet.Ethernet._typesw)
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/gzip.py b/scripts/external_libs/dpkt-1.8.6/dpkt/gzip.py
deleted file mode 100644
index e0bc619f..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/gzip.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# $Id: gzip.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""GNU zip."""
-
-import struct, zlib
-import dpkt
-
-# RFC 1952
-GZIP_MAGIC = '\x1f\x8b'
-
-# Compression methods
-GZIP_MSTORED = 0
-GZIP_MCOMPRESS = 1
-GZIP_MPACKED = 2
-GZIP_MLZHED = 3
-GZIP_MDEFLATE = 8
-
-# Flags
-GZIP_FTEXT = 0x01
-GZIP_FHCRC = 0x02
-GZIP_FEXTRA = 0x04
-GZIP_FNAME = 0x08
-GZIP_FCOMMENT = 0x10
-GZIP_FENCRYPT = 0x20
-GZIP_FRESERVED = 0xC0
-
-# OS
-GZIP_OS_MSDOS = 0
-GZIP_OS_AMIGA = 1
-GZIP_OS_VMS = 2
-GZIP_OS_UNIX = 3
-GZIP_OS_VMCMS = 4
-GZIP_OS_ATARI = 5
-GZIP_OS_OS2 = 6
-GZIP_OS_MACOS = 7
-GZIP_OS_ZSYSTEM = 8
-GZIP_OS_CPM = 9
-GZIP_OS_TOPS20 = 10
-GZIP_OS_WIN32 = 11
-GZIP_OS_QDOS = 12
-GZIP_OS_RISCOS = 13
-GZIP_OS_UNKNOWN = 255
-
-GZIP_FENCRYPT_LEN = 12
-
-class GzipExtra(dpkt.Packet):
- __hdr__ = (
- ('id', '2s', ''),
- ('len', 'H', 0)
- )
-
-class Gzip(dpkt.Packet):
- __hdr__ = (
- ('magic', '2s', GZIP_MAGIC),
- ('method', 'B', GZIP_MDEFLATE),
- ('flags', 'B', 0),
- ('mtime', 'I', 0),
- ('xflags', 'B', 0),
- ('os', 'B', GZIP_OS_UNIX),
-
- ('extra', '0s', ''), # XXX - GZIP_FEXTRA
- ('filename', '0s', ''), # XXX - GZIP_FNAME
- ('comment', '0s', '') # XXX - GZIP_FCOMMENT
- )
-
- def unpack(self, buf):
- super(Gzip, self).unpack(buf)
- if self.flags & GZIP_FEXTRA:
- n = struct.unpack(self.data[:2], '>H')[0]
- self.extra = GzipExtra(self.data[2:2+n])
- self.data = self.data[2+n:]
- if self.flags & GZIP_FNAME:
- n = self.data.find('\x00')
- self.filename = self.data[:n]
- self.data = self.data[n + 1:]
- if self.flags & GZIP_FCOMMENT:
- n = self.data.find('\x00')
- self.comment = self.data[:n]
- self.data = self.data[n + 1:]
- if self.flags & GZIP_FENCRYPT:
- self.data = self.data[GZIP_FENCRYPT_LEN:] # XXX - skip
- if self.flags & GZIP_FHCRC:
- self.data = self.data[2:] # XXX - skip
-
- def pack_hdr(self):
- l = []
- if self.extra:
- self.flags |= GZIP_FEXTRA
- s = str(self.extra)
- l.append(struct.pack('>H', len(s)))
- l.append(s)
- if self.filename:
- self.flags |= GZIP_FNAME
- l.append(self.filename)
- l.append('\x00')
- if self.comment:
- self.flags |= GZIP_FCOMMENT
- l.append(self.comment)
- l.append('\x00')
- l.insert(0, super(Gzip, self).pack_hdr())
- return ''.join(l)
-
- def compress(self):
- """Compress self.data."""
- c = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS,
- zlib.DEF_MEM_LEVEL, 0)
- self.data = c.compress(self.data)
-
- def decompress(self):
- """Return decompressed payload."""
- d = zlib.decompressobj(-zlib.MAX_WBITS)
- return d.decompress(self.data)
-
-if __name__ == '__main__':
- import sys
- gz = Gzip(open(sys.argv[1]).read())
- print `gz`, `gz.decompress()`
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/h225.py b/scripts/external_libs/dpkt-1.8.6/dpkt/h225.py
deleted file mode 100644
index a8fe2cbe..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/h225.py
+++ /dev/null
@@ -1,217 +0,0 @@
-# $Id: h225.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""ITU-T H.225.0 Call Signaling."""
-
-import dpkt, tpkt
-import struct
-
-# H225 Call Signaling
-#
-# Call messages and information elements (IEs) are defined by Q.931:
-# http://cvsup.de.openbsd.org/historic/comp/doc/standards/itu/Q/Q.931.ps.gz
-#
-# The User-to-User IEs of H225 are encoded by PER of ASN.1.
-
-# Call Establishment Messages
-ALERTING = 1
-CALL_PROCEEDING = 2
-CONNECT = 7
-CONNECT_ACKNOWLEDGE = 15
-PROGRESS = 3
-SETUP = 5
-SETUP_ACKNOWLEDGE = 13
-
-# Call Information Phase Messages
-RESUME = 38
-RESUME_ACKNOWLEDGE = 46
-RESUME_REJECT = 34
-SUSPEND = 37
-SUSPEND_ACKNOWLEDGE = 45
-SUSPEND_REJECT = 33
-USER_INFORMATION = 32
-
-# Call Clearing Messages
-DISCONNECT = 69
-RELEASE = 77
-RELEASE_COMPLETE = 90
-RESTART = 70
-RESTART_ACKNOWLEDGE = 78
-
-# Miscellaneous Messages
-SEGMENT = 96
-CONGESTION_CONTROL = 121
-INFORMATION = 123
-NOTIFY = 110
-STATUS = 125
-STATUS_ENQUIRY = 117
-
-# Type 1 Single Octet Information Element IDs
-RESERVED = 128
-SHIFT = 144
-CONGESTION_LEVEL = 176
-REPEAT_INDICATOR = 208
-
-# Type 2 Single Octet Information Element IDs
-MORE_DATA = 160
-SENDING_COMPLETE = 161
-
-# Variable Length Information Element IDs
-SEGMENTED_MESSAGE = 0
-BEARER_CAPABILITY = 4
-CAUSE = 8
-CALL_IDENTITY = 16
-CALL_STATE = 20
-CHANNEL_IDENTIFICATION = 24
-PROGRESS_INDICATOR = 30
-NETWORK_SPECIFIC_FACILITIES = 32
-NOTIFICATION_INDICATOR = 39
-DISPLAY = 40
-DATE_TIME = 41
-KEYPAD_FACILITY = 44
-SIGNAL = 52
-INFORMATION_RATE = 64
-END_TO_END_TRANSIT_DELAY = 66
-TRANSIT_DELAY_SELECTION_AND_INDICATION = 67
-PACKET_LAYER_BINARY_PARAMETERS = 68
-PACKET_LAYER_WINDOW_SIZE = 69
-PACKET_SIZE = 70
-CLOSED_USER_GROUP = 71
-REVERSE_CHARGE_INDICATION = 74
-CALLING_PARTY_NUMBER = 108
-CALLING_PARTY_SUBADDRESS = 109
-CALLED_PARTY_NUMBER = 112
-CALLED_PARTY_SUBADDRESS = 113
-REDIRECTING_NUMBER = 116
-TRANSIT_NETWORK_SELECTION = 120
-RESTART_INDICATOR = 121
-LOW_LAYER_COMPATIBILITY = 124
-HIGH_LAYER_COMPATIBILITY = 125
-USER_TO_USER = 126
-ESCAPE_FOR_EXTENSION = 127
-
-class H225(dpkt.Packet):
- __hdr__ = (
- ('proto', 'B', 8),
- ('ref_len', 'B', 2)
- )
-
- def unpack(self, buf):
- # TPKT header
- self.tpkt = tpkt.TPKT(buf)
- if self.tpkt.v != 3:
- raise dpkt.UnpackError('invalid TPKT version')
- if self.tpkt.rsvd != 0:
- raise dpkt.UnpackError('invalid TPKT reserved value')
- n = self.tpkt.len - self.tpkt.__hdr_len__
- if n > len(self.tpkt.data):
- raise dpkt.UnpackError('invalid TPKT length')
- buf = self.tpkt.data
-
- # Q.931 payload
- dpkt.Packet.unpack(self, buf)
- buf = buf[self.__hdr_len__:]
- self.ref_val = buf[:self.ref_len]
- buf = buf[self.ref_len:]
- self.type = struct.unpack('B', buf[:1])[0]
- buf = buf[1:]
-
- # Information Elements
- l = []
- while buf:
- ie = self.IE(buf)
- l.append(ie)
- buf = buf[len(ie):]
- self.data = l
-
- def __len__(self):
- return self.tpkt.__hdr_len__ + \
- self.__hdr_len__ + \
- sum(map(len, self.data))
-
- def __str__(self):
- return self.tpkt.pack_hdr() + \
- self.pack_hdr() + \
- self.ref_val + \
- struct.pack('B', self.type) + \
- ''.join(map(str, self.data))
-
- class IE(dpkt.Packet):
- __hdr__ = (
- ('type', 'B', 0),
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- buf = buf[self.__hdr_len__:]
-
- # single-byte IE
- if self.type & 0x80:
- self.len = 0
- self.data = None
- # multi-byte IE
- else:
- # special PER-encoded UUIE
- if self.type == USER_TO_USER:
- self.len = struct.unpack('>H', buf[:2])[0]
- buf = buf[2:]
- # normal TLV-like IE
- else:
- self.len = struct.unpack('B', buf[:1])[0]
- buf = buf[1:]
- self.data = buf[:self.len]
-
- def __len__(self):
- if self.type & 0x80:
- n = 0
- else:
- if self.type == USER_TO_USER:
- n = 2
- else:
- n = 1
- return self.__hdr_len__ + \
- self.len \
- + n
-
- def __str__(self):
- if self.type & 0x80:
- length_str = None
- else:
- if self.type == USER_TO_USER:
- length_str = struct.pack('>H', self.len)
- else:
- length_str = struct.pack('B', self.len)
- return struct.pack('B', self.type) + \
- length_str + \
- self.data
-
-
-if __name__ == '__main__':
- import unittest
-
- class H225TestCase(unittest.TestCase):
- def testPack(self):
- h = H225(self.s)
- self.failUnless(self.s == str(h))
-
- def testUnpack(self):
- h = H225(self.s)
- self.failUnless(h.tpkt.v == 3)
- self.failUnless(h.tpkt.rsvd == 0)
- self.failUnless(h.tpkt.len == 1041)
- self.failUnless(h.proto == 8)
- self.failUnless(h.type == SETUP)
- self.failUnless(len(h.data) == 3)
-
- ie = h.data[0]
- self.failUnless(ie.type == BEARER_CAPABILITY)
- self.failUnless(ie.len == 3)
- ie = h.data[1]
- self.failUnless(ie.type == DISPLAY)
- self.failUnless(ie.len == 14)
- ie = h.data[2]
- self.failUnless(ie.type == USER_TO_USER)
- self.failUnless(ie.len == 1008)
-
- s = '\x03\x00\x04\x11\x08\x02\x54\x2b\x05\x04\x03\x88\x93\xa5\x28\x0e\x4a\x6f\x6e\x20\x4f\x62\x65\x72\x68\x65\x69\x64\x65\x00\x7e\x03\xf0\x05\x20\xb8\x06\x00\x08\x91\x4a\x00\x04\x01\x40\x0c\x00\x4a\x00\x6f\x00\x6e\x00\x20\x00\x4f\x00\x62\x00\x65\x00\x72\x00\x68\x00\x65\x00\x69\x00\x64\x00\x65\x22\xc0\x09\x00\x00\x3d\x06\x65\x6b\x69\x67\x61\x00\x00\x14\x32\x2e\x30\x2e\x32\x20\x28\x4f\x50\x41\x4c\x20\x76\x32\x2e\x32\x2e\x32\x29\x00\x00\x00\x01\x40\x15\x00\x74\x00\x63\x00\x70\x00\x24\x00\x68\x00\x33\x00\x32\x00\x33\x00\x2e\x00\x76\x00\x6f\x00\x78\x00\x67\x00\x72\x00\x61\x00\x74\x00\x69\x00\x61\x00\x2e\x00\x6f\x00\x72\x00\x67\x00\x42\x87\x23\x2c\x06\xb8\x00\x6a\x8b\x1d\x0c\xb7\x06\xdb\x11\x9e\xca\x00\x10\xa4\x89\x6d\x6a\x00\xc5\x1d\x80\x04\x07\x00\x0a\x00\x01\x7a\x75\x30\x11\x00\x5e\x88\x1d\x0c\xb7\x06\xdb\x11\x9e\xca\x00\x10\xa4\x89\x6d\x6a\x82\x2b\x0e\x30\x40\x00\x00\x06\x04\x01\x00\x4c\x10\x09\x00\x00\x3d\x0f\x53\x70\x65\x65\x78\x20\x62\x73\x34\x20\x57\x69\x64\x65\x36\x80\x11\x1c\x00\x01\x00\x98\xa0\x26\x41\x13\x8a\x00\x98\xa0\x26\x41\x13\x8b\x26\x00\x00\x64\x0c\x10\x09\x00\x00\x3d\x0f\x53\x70\x65\x65\x78\x20\x62\x73\x34\x20\x57\x69\x64\x65\x36\x80\x0b\x0d\x00\x01\x00\x98\xa0\x26\x41\x13\x8b\x00\x2a\x40\x00\x00\x06\x04\x01\x00\x4c\x10\x09\x00\x00\x3d\x09\x69\x4c\x42\x43\x2d\x31\x33\x6b\x33\x80\x11\x1c\x00\x01\x00\x98\xa0\x26\x41\x13\x8a\x00\x98\xa0\x26\x41\x13\x8b\x20\x00\x00\x65\x0c\x10\x09\x00\x00\x3d\x09\x69\x4c\x42\x43\x2d\x31\x33\x6b\x33\x80\x0b\x0d\x00\x01\x00\x98\xa0\x26\x41\x13\x8b\x00\x20\x40\x00\x00\x06\x04\x01\x00\x4e\x0c\x03\x00\x83\x00\x80\x11\x1c\x00\x01\x00\x98\xa0\x26\x41\x13\x8a\x00\x98\xa0\x26\x41\x13\x8b\x16\x00\x00\x66\x0e\x0c\x03\x00\x83\x00\x80\x0b\x0d\x00\x01\x00\x98\xa0\x26\x41\x13\x8b\x00\x4b\x40\x00\x00\x06\x04\x01\x00\x4c\x10\xb5\x00\x53\x4c\x2a\x02\x00\x00\x00\x00\x00\x40\x01\x00\x00\x40\x01\x02\x00\x08\x00\x00\x00\x00\x00\x31\x00\x01\x00\x40\x1f\x00\x00\x59\x06\x00\x00\x41\x00\x00\x00\x02\x00\x40\x01\x00\x00\x80\x11\x1c\x00\x01\x00\x98\
xa0\x26\x41\x13\x8a\x00\x98\xa0\x26\x41\x13\x8b\x41\x00\x00\x67\x0c\x10\xb5\x00\x53\x4c\x2a\x02\x00\x00\x00\x00\x00\x40\x01\x00\x00\x40\x01\x02\x00\x08\x00\x00\x00\x00\x00\x31\x00\x01\x00\x40\x1f\x00\x00\x59\x06\x00\x00\x41\x00\x00\x00\x02\x00\x40\x01\x00\x00\x80\x0b\x0d\x00\x01\x00\x98\xa0\x26\x41\x13\x8b\x00\x32\x40\x00\x00\x06\x04\x01\x00\x4c\x10\x09\x00\x00\x3d\x11\x53\x70\x65\x65\x78\x20\x62\x73\x34\x20\x4e\x61\x72\x72\x6f\x77\x33\x80\x11\x1c\x00\x01\x00\x98\xa0\x26\x41\x13\x8a\x00\x98\xa0\x26\x41\x13\x8b\x28\x00\x00\x68\x0c\x10\x09\x00\x00\x3d\x11\x53\x70\x65\x65\x78\x20\x62\x73\x34\x20\x4e\x61\x72\x72\x6f\x77\x33\x80\x0b\x0d\x00\x01\x00\x98\xa0\x26\x41\x13\x8b\x00\x1d\x40\x00\x00\x06\x04\x01\x00\x4c\x60\x1d\x80\x11\x1c\x00\x01\x00\x98\xa0\x26\x41\x13\x8a\x00\x98\xa0\x26\x41\x13\x8b\x13\x00\x00\x69\x0c\x60\x1d\x80\x0b\x0d\x00\x01\x00\x98\xa0\x26\x41\x13\x8b\x00\x1d\x40\x00\x00\x06\x04\x01\x00\x4c\x20\x1d\x80\x11\x1c\x00\x01\x00\x98\xa0\x26\x41\x13\x8a\x00\x98\xa0\x26\x41\x13\x8b\x13\x00\x00\x6a\x0c\x20\x1d\x80\x0b\x0d\x00\x01\x00\x98\xa0\x26\x41\x13\x8b\x00\x01\x00\x01\x00\x01\x00\x01\x00\x81\x03\x02\x80\xf8\x02\x70\x01\x06\x00\x08\x81\x75\x00\x0b\x80\x13\x80\x01\xf4\x00\x01\x00\x00\x01\x00\x00\x01\x00\x00\x0c\xc0\x01\x00\x01\x80\x0b\x80\x00\x00\x20\x20\x09\x00\x00\x3d\x0f\x53\x70\x65\x65\x78\x20\x62\x73\x34\x20\x57\x69\x64\x65\x36\x80\x00\x01\x20\x20\x09\x00\x00\x3d\x09\x69\x4c\x42\x43\x2d\x31\x33\x6b\x33\x80\x00\x02\x24\x18\x03\x00\xe6\x00\x80\x00\x03\x20\x20\xb5\x00\x53\x4c\x2a\x02\x00\x00\x00\x00\x00\x40\x01\x00\x00\x40\x01\x02\x00\x08\x00\x00\x00\x00\x00\x31\x00\x01\x00\x40\x1f\x00\x00\x59\x06\x00\x00\x41\x00\x00\x00\x02\x00\x40\x01\x00\x00\x80\x00\x04\x20\x20\x09\x00\x00\x3d\x11\x53\x70\x65\x65\x78\x20\x62\x73\x34\x20\x4e\x61\x72\x72\x6f\x77\x33\x80\x00\x05\x20\xc0\xef\x80\x00\x06\x20\x40\xef\x80\x00\x07\x08\xe0\x03\x51\x00\x80\x01\x00\x80\x00\x08\x08\xd0\x03\x51\x00\x80\x01\x00\x80\x00\x09\x83\x01\x50\x80\x00\x0a\x83\x01\x10\x80\x00\x0b\x83\x01\x40\x00\
x80\x01\x03\x06\x00\x00\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x01\x00\x07\x00\x08\x00\x00\x09\x01\x00\x0a\x00\x0b\x07\x01\x00\x32\x80\xa6\xff\x4c\x02\x80\x01\x80'
-
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/hsrp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/hsrp.py
deleted file mode 100644
index 9a082a3d..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/hsrp.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# $Id: hsrp.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Cisco Hot Standby Router Protocol."""
-
-import dpkt
-
-# Opcodes
-HELLO = 0
-COUP = 1
-RESIGN = 2
-
-# States
-INITIAL = 0x00
-LEARN = 0x01
-LISTEN = 0x02
-SPEAK = 0x04
-STANDBY = 0x08
-ACTIVE = 0x10
-
-class HSRP(dpkt.Packet):
- __hdr__ = (
- ('version', 'B', 0),
- ('opcode', 'B', 0),
- ('state', 'B', 0),
- ('hello', 'B', 0),
- ('hold', 'B', 0),
- ('priority', 'B', 0),
- ('group', 'B', 0),
- ('rsvd', 'B', 0),
- ('auth', '8s', 'cisco'),
- ('vip', '4s', '')
- )
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/http.py b/scripts/external_libs/dpkt-1.8.6/dpkt/http.py
deleted file mode 100644
index ce0ddc64..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/http.py
+++ /dev/null
@@ -1,237 +0,0 @@
-# $Id: http.py 86 2013-03-05 19:25:19Z andrewflnr@gmail.com $
-
-"""Hypertext Transfer Protocol."""
-
-import cStringIO
-import dpkt
-
-def parse_headers(f):
- """Return dict of HTTP headers parsed from a file object."""
- d = {}
- while 1:
- line = f.readline()
- if not line:
- raise dpkt.NeedData('premature end of headers')
- line = line.strip()
- if not line:
- break
- l = line.split(':', 1)
- if len(l[0].split()) != 1:
- raise dpkt.UnpackError('invalid header: %r' % line)
- k = l[0].lower()
- v = len(l) != 1 and l[1].lstrip() or ''
- if k in d:
- if not type(d[k]) is list:
- d[k] = [d[k]]
- d[k].append(v)
- else:
- d[k] = v
- return d
-
-def parse_body(f, headers):
- """Return HTTP body parsed from a file object, given HTTP header dict."""
- if headers.get('transfer-encoding', '').lower() == 'chunked':
- l = []
- found_end = False
- while 1:
- try:
- sz = f.readline().split(None, 1)[0]
- except IndexError:
- raise dpkt.UnpackError('missing chunk size')
- n = int(sz, 16)
- if n == 0:
- found_end = True
- buf = f.read(n)
- if f.readline().strip():
- break
- if n and len(buf) == n:
- l.append(buf)
- else:
- break
- if not found_end:
- raise dpkt.NeedData('premature end of chunked body')
- body = ''.join(l)
- elif 'content-length' in headers:
- n = int(headers['content-length'])
- body = f.read(n)
- if len(body) != n:
- raise dpkt.NeedData('short body (missing %d bytes)' % (n - len(body)))
- elif 'content-type' in headers:
- body = f.read()
- else:
- # XXX - need to handle HTTP/0.9
- body = ''
- return body
-
-class Message(dpkt.Packet):
- """Hypertext Transfer Protocol headers + body."""
- __metaclass__ = type
- __hdr_defaults__ = {}
- headers = None
- body = None
-
- def __init__(self, *args, **kwargs):
- if args:
- self.unpack(args[0])
- else:
- self.headers = {}
- self.body = ''
- for k, v in self.__hdr_defaults__.iteritems():
- setattr(self, k, v)
- for k, v in kwargs.iteritems():
- setattr(self, k, v)
-
- def unpack(self, buf):
- f = cStringIO.StringIO(buf)
- # Parse headers
- self.headers = parse_headers(f)
- # Parse body
- self.body = parse_body(f, self.headers)
- # Save the rest
- self.data = f.read()
-
- def pack_hdr(self):
- return ''.join([ '%s: %s\r\n' % t for t in self.headers.iteritems() ])
-
- def __len__(self):
- return len(str(self))
-
- def __str__(self):
- return '%s\r\n%s' % (self.pack_hdr(), self.body)
-
-class Request(Message):
- """Hypertext Transfer Protocol Request."""
- __hdr_defaults__ = {
- 'method':'GET',
- 'uri':'/',
- 'version':'1.0',
- }
- __methods = dict.fromkeys((
- 'GET', 'PUT', 'ICY',
- 'COPY', 'HEAD', 'LOCK', 'MOVE', 'POLL', 'POST',
- 'BCOPY', 'BMOVE', 'MKCOL', 'TRACE', 'LABEL', 'MERGE',
- 'DELETE', 'SEARCH', 'UNLOCK', 'REPORT', 'UPDATE', 'NOTIFY',
- 'BDELETE', 'CONNECT', 'OPTIONS', 'CHECKIN',
- 'PROPFIND', 'CHECKOUT', 'CCM_POST',
- 'SUBSCRIBE', 'PROPPATCH', 'BPROPFIND',
- 'BPROPPATCH', 'UNCHECKOUT', 'MKACTIVITY',
- 'MKWORKSPACE', 'UNSUBSCRIBE', 'RPC_CONNECT',
- 'VERSION-CONTROL',
- 'BASELINE-CONTROL'
- ))
- __proto = 'HTTP'
-
- def unpack(self, buf):
- f = cStringIO.StringIO(buf)
- line = f.readline()
- l = line.strip().split()
- if len(l) < 2:
- raise dpkt.UnpackError('invalid request: %r' % line)
- if l[0] not in self.__methods:
- raise dpkt.UnpackError('invalid http method: %r' % l[0])
- if len(l) == 2:
- # HTTP/0.9 does not specify a version in the request line
- self.version = '0.9'
- else:
- if not l[2].startswith(self.__proto):
- raise dpkt.UnpackError('invalid http version: %r' % l[2])
- self.version = l[2][len(self.__proto)+1:]
- self.method = l[0]
- self.uri = l[1]
- Message.unpack(self, f.read())
-
- def __str__(self):
- return '%s %s %s/%s\r\n' % (self.method, self.uri, self.__proto,
- self.version) + Message.__str__(self)
-
-class Response(Message):
- """Hypertext Transfer Protocol Response."""
- __hdr_defaults__ = {
- 'version':'1.0',
- 'status':'200',
- 'reason':'OK'
- }
- __proto = 'HTTP'
-
- def unpack(self, buf):
- f = cStringIO.StringIO(buf)
- line = f.readline()
- l = line.strip().split(None, 2)
- if len(l) < 2 or not l[0].startswith(self.__proto) or not l[1].isdigit():
- raise dpkt.UnpackError('invalid response: %r' % line)
- self.version = l[0][len(self.__proto)+1:]
- self.status = l[1]
- self.reason = l[2]
- Message.unpack(self, f.read())
-
- def __str__(self):
- return '%s/%s %s %s\r\n' % (self.__proto, self.version, self.status,
- self.reason) + Message.__str__(self)
-
-if __name__ == '__main__':
- import unittest
-
- class HTTPTest(unittest.TestCase):
- def test_parse_request(self):
- s = """POST /main/redirect/ab/1,295,,00.html HTTP/1.0\r\nReferer: http://www.email.com/login/snap/login.jhtml\r\nConnection: Keep-Alive\r\nUser-Agent: Mozilla/4.75 [en] (X11; U; OpenBSD 2.8 i386; Nav)\r\nHost: ltd.snap.com\r\nAccept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, image/png, */*\r\nAccept-Encoding: gzip\r\nAccept-Language: en\r\nAccept-Charset: iso-8859-1,*,utf-8\r\nContent-type: application/x-www-form-urlencoded\r\nContent-length: 61\r\n\r\nsn=em&mn=dtest4&pw=this+is+atest&fr=true&login=Sign+in&od=www"""
- r = Request(s)
- assert r.method == 'POST'
- assert r.uri == '/main/redirect/ab/1,295,,00.html'
- assert r.body == 'sn=em&mn=dtest4&pw=this+is+atest&fr=true&login=Sign+in&od=www'
- assert r.headers['content-type'] == 'application/x-www-form-urlencoded'
- try:
- r = Request(s[:60])
- assert 'invalid headers parsed!'
- except dpkt.UnpackError:
- pass
-
- def test_format_request(self):
- r = Request()
- assert str(r) == 'GET / HTTP/1.0\r\n\r\n'
- r.method = 'POST'
- r.uri = '/foo/bar/baz.html'
- r.headers['content-type'] = 'text/plain'
- r.headers['content-length'] = '5'
- r.body = 'hello'
- s = str(r)
- assert s.startswith('POST /foo/bar/baz.html HTTP/1.0\r\n')
- assert s.endswith('\r\n\r\nhello')
- assert '\r\ncontent-length: 5\r\n' in s
- assert '\r\ncontent-type: text/plain\r\n' in s
- r = Request(str(r))
- assert str(r) == s
-
- def test_chunked_response(self):
- s = """HTTP/1.1 200 OK\r\nCache-control: no-cache\r\nPragma: no-cache\r\nContent-Type: text/javascript; charset=utf-8\r\nContent-Encoding: gzip\r\nTransfer-Encoding: chunked\r\nSet-Cookie: S=gmail=agg:gmail_yj=v2s:gmproxy=JkU; Domain=.google.com; Path=/\r\nServer: GFE/1.3\r\nDate: Mon, 12 Dec 2005 22:33:23 GMT\r\n\r\na\r\n\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x00\r\n152\r\nm\x91MO\xc4 \x10\x86\xef\xfe\n\x82\xc9\x9eXJK\xe9\xb6\xee\xc1\xe8\x1e6\x9e4\xf1\xe0a5\x86R\xda\x12Yh\x80\xba\xfa\xef\x85\xee\x1a/\xf21\x99\x0c\xef0<\xc3\x81\xa0\xc3\x01\xe6\x10\xc1<\xa7eYT5\xa1\xa4\xac\xe1\xdb\x15:\xa4\x9d\x0c\xfa5K\x00\xf6.\xaa\xeb\x86\xd5y\xcdHY\x954\x8e\xbc*h\x8c\x8e!L7Y\xe6\'\xeb\x82WZ\xcf>8\x1ed\x87\x851X\xd8c\xe6\xbc\x17Z\x89\x8f\xac \x84e\xde\n!]\x96\x17i\xb5\x02{{\xc2z0\x1e\x0f#7\x9cw3v\x992\x9d\xfc\xc2c8\xea[/EP\xd6\xbc\xce\x84\xd0\xce\xab\xf7`\'\x1f\xacS\xd2\xc7\xd2\xfb\x94\x02N\xdc\x04\x0f\xee\xba\x19X\x03TtW\xd7\xb4\xd9\x92\n\xbcX\xa7;\xb0\x9b\'\x10$?F\xfd\xf3CzPt\x8aU\xef\xb8\xc8\x8b-\x18\xed\xec<\xe0\x83\x85\x08!\xf8"[\xb0\xd3j\x82h\x93\xb8\xcf\xd8\x9b\xba\xda\xd0\x92\x14\xa4a\rc\reM\xfd\x87=X;h\xd9j;\xe0db\x17\xc2\x02\xbd\xb0F\xc2in#\xfb:\xb6\xc4x\x15\xd6\x9f\x8a\xaf\xcf)\x0b^\xbc\xe7i\x11\x80\x8b\x00D\x01\xd8/\x82x\xf6\xd8\xf7J(\xae/\x11p\x1f+\xc4p\t:\xfe\xfd\xdf\xa3Y\xfa\xae4\x7f\x00\xc5\xa5\x95\xa1\xe2\x01\x00\x00\r\n0\r\n\r\n"""
- r = Response(s)
- assert r.version == '1.1'
- assert r.status == '200'
- assert r.reason == 'OK'
-
- def test_multicookie_response(self):
- s = """HTTP/1.x 200 OK\r\nSet-Cookie: first_cookie=cookie1; path=/; domain=.example.com\r\nSet-Cookie: second_cookie=cookie2; path=/; domain=.example.com\r\nContent-Length: 0\r\n\r\n"""
- r = Response(s)
- assert type(r.headers['set-cookie']) is list
- assert len(r.headers['set-cookie']) == 2
-
- def test_request_version(self):
- s = """GET / HTTP/1.0\r\n\r\n"""
- r = Request(s)
- assert r.method == 'GET'
- assert r.uri == '/'
- assert r.version == '1.0'
-
- s = """GET /\r\n\r\n"""
- r = Request(s)
- assert r.method == 'GET'
- assert r.uri == '/'
- assert r.version == '0.9'
-
- s = """GET / CHEESE/1.0\r\n\r\n"""
- try:
- r = Request(s)
- assert "invalid protocol version parsed!"
- except:
- pass
-
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/icmp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/icmp.py
deleted file mode 100644
index 37cdec05..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/icmp.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# $Id: icmp.py 45 2007-08-03 00:05:22Z jon.oberheide $
-
-"""Internet Control Message Protocol."""
-
-import dpkt, ip
-
-# Types (icmp_type) and codes (icmp_code) -
-# http://www.iana.org/assignments/icmp-parameters
-
-ICMP_CODE_NONE = 0 # for types without codes
-ICMP_ECHOREPLY = 0 # echo reply
-ICMP_UNREACH = 3 # dest unreachable, codes:
-ICMP_UNREACH_NET = 0 # bad net
-ICMP_UNREACH_HOST = 1 # bad host
-ICMP_UNREACH_PROTO = 2 # bad protocol
-ICMP_UNREACH_PORT = 3 # bad port
-ICMP_UNREACH_NEEDFRAG = 4 # IP_DF caused drop
-ICMP_UNREACH_SRCFAIL = 5 # src route failed
-ICMP_UNREACH_NET_UNKNOWN = 6 # unknown net
-ICMP_UNREACH_HOST_UNKNOWN = 7 # unknown host
-ICMP_UNREACH_ISOLATED = 8 # src host isolated
-ICMP_UNREACH_NET_PROHIB = 9 # for crypto devs
-ICMP_UNREACH_HOST_PROHIB = 10 # ditto
-ICMP_UNREACH_TOSNET = 11 # bad tos for net
-ICMP_UNREACH_TOSHOST = 12 # bad tos for host
-ICMP_UNREACH_FILTER_PROHIB = 13 # prohibited access
-ICMP_UNREACH_HOST_PRECEDENCE = 14 # precedence error
-ICMP_UNREACH_PRECEDENCE_CUTOFF = 15 # precedence cutoff
-ICMP_SRCQUENCH = 4 # packet lost, slow down
-ICMP_REDIRECT = 5 # shorter route, codes:
-ICMP_REDIRECT_NET = 0 # for network
-ICMP_REDIRECT_HOST = 1 # for host
-ICMP_REDIRECT_TOSNET = 2 # for tos and net
-ICMP_REDIRECT_TOSHOST = 3 # for tos and host
-ICMP_ALTHOSTADDR = 6 # alternate host address
-ICMP_ECHO = 8 # echo service
-ICMP_RTRADVERT = 9 # router advertise, codes:
-ICMP_RTRADVERT_NORMAL = 0 # normal
-ICMP_RTRADVERT_NOROUTE_COMMON = 16 # selective routing
-ICMP_RTRSOLICIT = 10 # router solicitation
-ICMP_TIMEXCEED = 11 # time exceeded, code:
-ICMP_TIMEXCEED_INTRANS = 0 # ttl==0 in transit
-ICMP_TIMEXCEED_REASS = 1 # ttl==0 in reass
-ICMP_PARAMPROB = 12 # ip header bad
-ICMP_PARAMPROB_ERRATPTR = 0 # req. opt. absent
-ICMP_PARAMPROB_OPTABSENT = 1 # req. opt. absent
-ICMP_PARAMPROB_LENGTH = 2 # bad length
-ICMP_TSTAMP = 13 # timestamp request
-ICMP_TSTAMPREPLY = 14 # timestamp reply
-ICMP_INFO = 15 # information request
-ICMP_INFOREPLY = 16 # information reply
-ICMP_MASK = 17 # address mask request
-ICMP_MASKREPLY = 18 # address mask reply
-ICMP_TRACEROUTE = 30 # traceroute
-ICMP_DATACONVERR = 31 # data conversion error
-ICMP_MOBILE_REDIRECT = 32 # mobile host redirect
-ICMP_IP6_WHEREAREYOU = 33 # IPv6 where-are-you
-ICMP_IP6_IAMHERE = 34 # IPv6 i-am-here
-ICMP_MOBILE_REG = 35 # mobile registration req
-ICMP_MOBILE_REGREPLY = 36 # mobile registration reply
-ICMP_DNS = 37 # domain name request
-ICMP_DNSREPLY = 38 # domain name reply
-ICMP_SKIP = 39 # SKIP
-ICMP_PHOTURIS = 40 # Photuris
-ICMP_PHOTURIS_UNKNOWN_INDEX = 0 # unknown sec index
-ICMP_PHOTURIS_AUTH_FAILED = 1 # auth failed
-ICMP_PHOTURIS_DECOMPRESS_FAILED = 2 # decompress failed
-ICMP_PHOTURIS_DECRYPT_FAILED = 3 # decrypt failed
-ICMP_PHOTURIS_NEED_AUTHN = 4 # no authentication
-ICMP_PHOTURIS_NEED_AUTHZ = 5 # no authorization
-ICMP_TYPE_MAX = 40
-
-class ICMP(dpkt.Packet):
- __hdr__ = (
- ('type', 'B', 8),
- ('code', 'B', 0),
- ('sum', 'H', 0)
- )
- class Echo(dpkt.Packet):
- __hdr__ = (('id', 'H', 0), ('seq', 'H', 0))
- class Quote(dpkt.Packet):
- __hdr__ = (('pad', 'I', 0),)
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.data = self.ip = ip.IP(self.data)
- class Unreach(Quote):
- __hdr__ = (('pad', 'H', 0), ('mtu', 'H', 0))
- class Quench(Quote):
- pass
- class Redirect(Quote):
- __hdr__ = (('gw', 'I', 0),)
- class ParamProbe(Quote):
- __hdr__ = (('ptr', 'B', 0), ('pad1', 'B', 0), ('pad2', 'H', 0))
- class TimeExceed(Quote):
- pass
-
- _typesw = { 0:Echo, 3:Unreach, 4:Quench, 5:Redirect, 8:Echo,
- 11:TimeExceed }
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- try:
- self.data = self._typesw[self.type](self.data)
- setattr(self, self.data.__class__.__name__.lower(), self.data)
- except (KeyError, dpkt.UnpackError):
- pass
-
- def __str__(self):
- if not self.sum:
- self.sum = dpkt.in_cksum(dpkt.Packet.__str__(self))
- return dpkt.Packet.__str__(self)
-
-if __name__ == '__main__':
- import unittest
-
- class ICMPTestCase(unittest.TestCase):
- def test_ICMP(self):
- s = '\x03\x0a\x6b\x19\x00\x00\x00\x00\x45\x00\x00\x28\x94\x1f\x00\x00\xe3\x06\x99\xb4\x23\x2b\x24\x00\xde\x8e\x84\x42\xab\xd1\x00\x50\x00\x35\xe1\x29\x20\xd9\x00\x00\x00\x22\x9b\xf0\xe2\x04\x65\x6b'
- icmp = ICMP(s)
- self.failUnless(str(icmp) == s)
-
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/icmp6.py b/scripts/external_libs/dpkt-1.8.6/dpkt/icmp6.py
deleted file mode 100644
index 5f1d4dae..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/icmp6.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# $Id: icmp6.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Internet Control Message Protocol for IPv6."""
-
-import dpkt, ip6
-
-ICMP6_DST_UNREACH = 1 # dest unreachable, codes:
-ICMP6_PACKET_TOO_BIG = 2 # packet too big
-ICMP6_TIME_EXCEEDED = 3 # time exceeded, code:
-ICMP6_PARAM_PROB = 4 # ip6 header bad
-
-ICMP6_ECHO_REQUEST = 128 # echo service
-ICMP6_ECHO_REPLY = 129 # echo reply
-MLD_LISTENER_QUERY = 130 # multicast listener query
-MLD_LISTENER_REPORT = 131 # multicast listener report
-MLD_LISTENER_DONE = 132 # multicast listener done
-
-# RFC2292 decls
-ICMP6_MEMBERSHIP_QUERY = 130 # group membership query
-ICMP6_MEMBERSHIP_REPORT = 131 # group membership report
-ICMP6_MEMBERSHIP_REDUCTION = 132 # group membership termination
-
-ND_ROUTER_SOLICIT = 133 # router solicitation
-ND_ROUTER_ADVERT = 134 # router advertisment
-ND_NEIGHBOR_SOLICIT = 135 # neighbor solicitation
-ND_NEIGHBOR_ADVERT = 136 # neighbor advertisment
-ND_REDIRECT = 137 # redirect
-
-ICMP6_ROUTER_RENUMBERING = 138 # router renumbering
-
-ICMP6_WRUREQUEST = 139 # who are you request
-ICMP6_WRUREPLY = 140 # who are you reply
-ICMP6_FQDN_QUERY = 139 # FQDN query
-ICMP6_FQDN_REPLY = 140 # FQDN reply
-ICMP6_NI_QUERY = 139 # node information request
-ICMP6_NI_REPLY = 140 # node information reply
-
-ICMP6_MAXTYPE = 201
-
-class ICMP6(dpkt.Packet):
- __hdr__ = (
- ('type', 'B', 0),
- ('code', 'B', 0),
- ('sum', 'H', 0)
- )
- class Error(dpkt.Packet):
- __hdr__ = (('pad', 'I', 0), )
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.data = self.ip6 = ip6.IP6(self.data)
- class Unreach(Error):
- pass
- class TooBig(Error):
- __hdr__ = (('mtu', 'I', 1232), )
- class TimeExceed(Error):
- pass
- class ParamProb(Error):
- __hdr__ = (('ptr', 'I', 0), )
-
- class Echo(dpkt.Packet):
- __hdr__ = (('id', 'H', 0), ('seq', 'H', 0))
-
- _typesw = { 1:Unreach, 2:TooBig, 3:TimeExceed, 4:ParamProb,
- 128:Echo, 129:Echo }
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- try:
- self.data = self._typesw[self.type](self.data)
- setattr(self, self.data.__class__.__name__.lower(), self.data)
- except (KeyError, dpkt.UnpackError):
- self.data = buf
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/ieee80211.py b/scripts/external_libs/dpkt-1.8.6/dpkt/ieee80211.py
deleted file mode 100644
index 8f41e0ac..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/ieee80211.py
+++ /dev/null
@@ -1,706 +0,0 @@
-# $Id: 80211.py 53 2008-12-18 01:22:57Z jon.oberheide $
-
-"""IEEE 802.11."""
-
-import dpkt, socket, struct
-
-# Frame Types
-MGMT_TYPE = 0
-CTL_TYPE = 1
-DATA_TYPE = 2
-
-# Frame Sub-Types
-M_ASSOC_REQ = 0
-M_ASSOC_RESP = 1
-M_REASSOC_REQ = 2
-M_REASSOC_RESP = 3
-M_PROBE_REQ = 4
-M_PROBE_RESP = 5
-M_BEACON = 8
-M_ATIM = 9
-M_DISASSOC = 10
-M_AUTH = 11
-M_DEAUTH = 12
-M_ACTION = 13
-C_BLOCK_ACK_REQ = 8
-C_BLOCK_ACK = 9
-C_PS_POLL = 10
-C_RTS = 11
-C_CTS = 12
-C_ACK = 13
-C_CF_END = 14
-C_CF_END_ACK = 15
-D_DATA = 0
-D_DATA_CF_ACK = 1
-D_DATA_CF_POLL = 2
-D_DATA_CF_ACK_POLL = 3
-D_NULL = 4
-D_CF_ACK = 5
-D_CF_POLL = 6
-D_CF_ACK_POLL = 7
-D_QOS_DATA = 8
-D_QOS_CF_ACK = 9
-D_QOS_CF_POLL = 10
-D_QOS_CF_ACK_POLL = 11
-D_QOS_NULL = 12
-D_QOS_CF_POLL_EMPTY = 14
-
-TO_DS_FLAG = 10
-FROM_DS_FLAG = 1
-INTER_DS_FLAG = 11
-
-# Bitshifts for Frame Control
-_VERSION_MASK = 0x0300
-_TYPE_MASK = 0x0c00
-_SUBTYPE_MASK = 0xf000
-_TO_DS_MASK = 0x0001
-_FROM_DS_MASK = 0x0002
-_MORE_FRAG_MASK = 0x0004
-_RETRY_MASK = 0x0008
-_PWR_MGT_MASK = 0x0010
-_MORE_DATA_MASK = 0x0020
-_WEP_MASK = 0x0040
-_ORDER_MASK = 0x0080
-_VERSION_SHIFT = 8
-_TYPE_SHIFT = 10
-_SUBTYPE_SHIFT = 12
-_TO_DS_SHIFT = 0
-_FROM_DS_SHIFT = 1
-_MORE_FRAG_SHIFT = 2
-_RETRY_SHIFT = 3
-_PWR_MGT_SHIFT = 4
-_MORE_DATA_SHIFT = 5
-_WEP_SHIFT = 6
-_ORDER_SHIFT = 7
-
-# IEs
-IE_SSID = 0
-IE_RATES = 1
-IE_FH = 2
-IE_DS = 3
-IE_CF = 4
-IE_TIM = 5
-IE_IBSS = 6
-IE_HT_CAPA = 45
-IE_ESR = 50
-IE_HT_INFO = 61
-
-FCS_LENGTH = 4
-
-FRAMES_WITH_CAPABILITY = [ M_BEACON,
- M_ASSOC_RESP,
- M_ASSOC_REQ,
- M_REASSOC_REQ,
- ]
-
-# Block Ack control constants
-_ACK_POLICY_SHIFT = 0
-_MULTI_TID_SHIFT = 1
-_COMPRESSED_SHIFT = 2
-_TID_SHIFT = 12
-
-_ACK_POLICY_MASK = 0x0001
-_MULTI_TID_MASK = 0x0002
-_COMPRESSED_MASK = 0x0004
-_TID_MASK = 0xf000
-
-_COMPRESSED_BMP_LENGTH = 8
-_BMP_LENGTH = 128
-
-# Action frame categories
-BLOCK_ACK = 3
-
-# Block ack category action codes
-BLOCK_ACK_CODE_REQUEST = 0
-BLOCK_ACK_CODE_RESPONSE = 1
-
-class IEEE80211(dpkt.Packet):
- __hdr__ = (
- ('framectl', 'H', 0),
- ('duration', 'H', 0)
- )
-
- def _get_version(self): return (self.framectl & _VERSION_MASK) >> _VERSION_SHIFT
- def _set_version(self, val): self.framectl = (val << _VERSION_SHIFT) | (self.framectl & ~_VERSION_MASK)
- def _get_type(self): return (self.framectl & _TYPE_MASK) >> _TYPE_SHIFT
- def _set_type(self, val): self.framectl = (val << _TYPE_SHIFT) | (self.framectl & ~_TYPE_MASK)
- def _get_subtype(self): return (self.framectl & _SUBTYPE_MASK) >> _SUBTYPE_SHIFT
- def _set_subtype(self, val): self.framectl = (val << _SUBTYPE_SHIFT) | (self.framectl & ~_SUBTYPE_MASK)
- def _get_to_ds(self): return (self.framectl & _TO_DS_MASK) >> _TO_DS_SHIFT
- def _set_to_ds(self, val): self.framectl = (val << _TO_DS_SHIFT) | (self.framectl & ~_TO_DS_MASK)
- def _get_from_ds(self): return (self.framectl & _FROM_DS_MASK) >> _FROM_DS_SHIFT
- def _set_from_ds(self, val): self.framectl = (val << _FROM_DS_SHIFT) | (self.framectl & ~_FROM_DS_MASK)
- def _get_more_frag(self): return (self.framectl & _MORE_FRAG_MASK) >> _MORE_FRAG_SHIFT
- def _set_more_frag(self, val): self.framectl = (val << _MORE_FRAG_SHIFT) | (self.framectl & ~_MORE_FRAG_MASK)
- def _get_retry(self): return (self.framectl & _RETRY_MASK) >> _RETRY_SHIFT
- def _set_retry(self, val): self.framectl = (val << _RETRY_SHIFT) | (self.framectl & ~_RETRY_MASK)
- def _get_pwr_mgt(self): return (self.framectl & _PWR_MGT_MASK) >> _PWR_MGT_SHIFT
- def _set_pwr_mgt(self, val): self.framectl = (val << _PWR_MGT_SHIFT) | (self.framectl & ~_PWR_MGT_MASK)
- def _get_more_data(self): return (self.framectl & _MORE_DATA_MASK) >> _MORE_DATA_SHIFT
- def _set_more_data(self, val): self.framectl = (val << _MORE_DATA_SHIFT) | (self.framectl & ~_MORE_DATA_MASK)
- def _get_wep(self): return (self.framectl & _WEP_MASK) >> _WEP_SHIFT
- def _set_wep(self, val): self.framectl = (val << _WEP_SHIFT) | (self.framectl & ~_WEP_MASK)
- def _get_order(self): return (self.framectl & _ORDER_MASK) >> _ORDER_SHIFT
- def _set_order(self, val): self.framectl = (val << _ORDER_SHIFT) | (self.framectl & ~_ORDER_MASK)
-
- version = property(_get_version, _set_version)
- type = property(_get_type, _set_type)
- subtype = property(_get_subtype, _set_subtype)
- to_ds = property(_get_to_ds, _set_to_ds)
- from_ds = property(_get_from_ds, _set_from_ds)
- more_frag = property(_get_more_frag, _set_more_frag)
- retry = property(_get_retry, _set_retry)
- pwr_mgt = property(_get_pwr_mgt, _set_pwr_mgt)
- more_data = property(_get_more_data, _set_more_data)
- wep = property(_get_wep, _set_wep)
- order = property(_get_order, _set_order)
-
- def unpack_ies(self, buf):
- self.ies = []
-
- ie_decoder = {
- IE_SSID: ('ssid', self.IE),
- IE_RATES: ('rate', self.IE),
- IE_FH: ('fh', self.FH),
- IE_DS: ('ds', self.DS),
- IE_CF: ('cf', self.CF),
- IE_TIM: ('tim', self.TIM),
- IE_IBSS: ('ibss', self.IBSS),
- IE_HT_CAPA: ('ht_capa', self.IE),
- IE_ESR: ('esr', self.IE),
- IE_HT_INFO: ('ht_info', self.IE)
- }
-
- # each IE starts with an ID and a length
- while len(buf) > FCS_LENGTH:
- ie_id = struct.unpack('B',(buf[0]))[0]
- try:
- parser = ie_decoder[ie_id][1]
- name = ie_decoder[ie_id][0]
- except KeyError:
- parser = self.IE
- name = 'ie_' + str(ie_id)
- ie = parser(buf)
-
- ie.data = buf[2:2+ie.len]
- setattr(self, name, ie)
- self.ies.append(ie)
- buf = buf[2+ie.len:]
-
- class Capability:
- def __init__(self, field):
- self.ess = field & 1
- self.ibss = (field >> 1) & 1
- self.cf_poll = (field >> 2) & 1
- self.cf_poll_req = (field >> 3) & 1
- self.privacy = (field >> 4) & 1
- self.short_preamble = (field >> 5) & 1
- self.pbcc = (field >> 6) & 1
- self.hopping = (field >> 7) & 1
- self.spec_mgmt = (field >> 8) & 1
- self.qos = (field >> 9) & 1
- self.short_slot = (field >> 10) & 1
- self.apsd = (field >> 11) & 1
- self.dsss = (field >> 13) & 1
- self.delayed_blk_ack = (field >> 14) & 1
- self.imm_blk_ack = (field >> 15) & 1
-
- def __init__(self, *args, **kwargs):
- if kwargs and 'fcs' in kwargs:
- self.fcs_present = kwargs.pop('fcs')
- else:
- self.fcs_present = False
-
- super(IEEE80211, self).__init__(*args, **kwargs)
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.data = buf[self.__hdr_len__:]
-
- m_decoder = {
- M_BEACON: ('beacon', self.Beacon),
- M_ASSOC_REQ: ('assoc_req', self.Assoc_Req),
- M_ASSOC_RESP: ('assoc_resp', self.Assoc_Resp),
- M_DISASSOC: ('diassoc', self.Disassoc),
- M_REASSOC_REQ: ('reassoc_req', self.Reassoc_Req),
- M_REASSOC_RESP: ('reassoc_resp',self.Assoc_Resp),
- M_AUTH: ('auth', self.Auth),
- M_PROBE_RESP: ('probe_resp', self.Beacon),
- M_DEAUTH: ('deauth', self.Deauth),
- M_ACTION: ('action', self.Action)
- }
-
- c_decoder = {
- C_RTS: ('rts', self.RTS),
- C_CTS: ('cts', self.CTS),
- C_ACK: ('ack', self.ACK),
- C_BLOCK_ACK_REQ:('bar', self.BlockAckReq),
- C_BLOCK_ACK: ('back', self.BlockAck),
- C_CF_END: ('cf_end', self.CFEnd),
- }
-
- d_dsData = {
- 0 : self.Data,
- FROM_DS_FLAG : self.DataFromDS,
- TO_DS_FLAG : self.DataToDS,
- INTER_DS_FLAG : self.DataInterDS
- }
-
-
- # For now decode everything with DATA. Haven't checked about other QoS
- # additions
- d_decoder = {
- # modified the decoder to consider the ToDS and FromDS flags
- # Omitting the 11 case for now
- D_DATA: ('data_frame', d_dsData),
- D_NULL: ('data_frame', d_dsData),
- D_QOS_DATA: ('data_frame', d_dsData),
- D_QOS_NULL: ('data_frame', d_dsData)
- }
-
- decoder = {
- MGMT_TYPE:m_decoder,
- CTL_TYPE:c_decoder,
- DATA_TYPE:d_decoder
- }
-
- # Strip off the FCS field
- if self.fcs_present:
- self.fcs = struct.unpack('I', self.data[-1 * FCS_LENGTH:])[0]
- self.data = self.data[0: -1 * FCS_LENGTH]
-
- if self.type == MGMT_TYPE:
- self.mgmt = self.MGMT_Frame(self.data)
- self.data = self.mgmt.data
- if self.subtype == M_PROBE_REQ:
- self.unpack_ies(self.data)
- return
- if self.subtype == M_ATIM:
- return
-
- try:
- parser = decoder[self.type][self.subtype][1]
- name = decoder[self.type][self.subtype][0]
- except KeyError:
- print "Key error:", self.type, self.subtype
- return
-
- if self.type == DATA_TYPE:
- # need to grab the ToDS/FromDS info
- parser = parser[self.to_ds*10+self.from_ds]
-
- if self.type == MGMT_TYPE:
- field = parser(self.mgmt.data)
- else:
- field = parser(self.data)
- self.data = field
-
- setattr(self, name, field)
-
- if self.type == MGMT_TYPE:
- self.ies = self.unpack_ies(field.data)
- if self.subtype in FRAMES_WITH_CAPABILITY:
- self.capability = self.Capability(socket.ntohs(field.capability))
-
- if self.type == DATA_TYPE and self.subtype == D_QOS_DATA:
- self.qos_data = self.QoS_Data(field.data)
- field.data = self.qos_data.data
-
- self.data = field.data
-
- class BlockAckReq(dpkt.Packet):
- __hdr__ = (
- ('dst', '6s', '\x00' * 6),
- ('src', '6s', '\x00' *6),
- ('ctl', 'H', 0),
- ('seq', 'H', 0),
- )
-
- class BlockAck(dpkt.Packet):
- __hdr__ = (
- ('dst', '6s', '\x00' * 6),
- ('src', '6s', '\x00' * 6),
- ('ctl', 'H', 0),
- ('seq', 'H', 0),
- )
-
- def _get_compressed(self): return (self.ctl & _COMPRESSED_MASK) >> _COMPRESSED_SHIFT
- def _set_compressed(self, val): self.ctl = (val << _COMPRESSED_SHIFT) | (self.ctl & ~_COMPRESSED_MASK)
-
- def _get_ack_policy(self): return (self.ctl & _ACK_POLICY_MASK) >> _ACK_POLICY_SHIFT
- def _set_ack_policy(self, val): self.ctl = (val << _ACK_POLICY_SHIFT) | (self.ctl & ~_ACK_POLICY_MASK)
-
- def _get_multi_tid(self): return (self.ctl & _MULTI_TID_MASK) >> _MULTI_TID_SHIFT
- def _set_multi_tid(self, val): self.ctl = (val << _MULTI_TID_SHIFT) | (self.ctl & ~_MULTI_TID_MASK)
-
- def _get_tid(self): return (self.ctl & _TID_MASK) >> _TID_SHIFT
- def _set_tid(self, val): self.ctl = (val << _TID_SHIFT) | (self.ctl & ~_TID_MASK)
-
- compressed = property(_get_compressed, _set_compressed)
- ack_policy = property(_get_ack_policy, _set_ack_policy)
- multi_tid = property(_get_multi_tid, _set_multi_tid)
- tid = property(_get_tid, _set_tid)
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.data = buf[self.__hdr_len__:]
- self.ctl = socket.ntohs(self.ctl)
-
- if self.compressed:
- self.bmp = struct.unpack('8s', self.data[0:_COMPRESSED_BMP_LENGTH])[0]
- else:
- self.bmp = struct.unpack('128s', self.data[0:_BMP_LENGTH])[0]
- self.data = self.data[len(self.__hdr__) + len(self.bmp):]
-
- class RTS(dpkt.Packet):
- __hdr__ = (
- ('dst', '6s', '\x00' * 6),
- ('src', '6s', '\x00' * 6)
- )
-
- class CTS(dpkt.Packet):
- __hdr__ = (
- ('dst', '6s', '\x00' * 6),
- )
-
- class ACK(dpkt.Packet):
- __hdr__ = (
- ('dst', '6s', '\x00' * 6),
- )
-
- class CFEnd(dpkt.Packet):
- __hdr__ = (
- ('dst', '6s', '\x00' *6),
- ('src', '6s', '\x00' *6),
- )
-
- class MGMT_Frame(dpkt.Packet):
- __hdr__ = (
- ('dst', '6s', '\x00' *6),
- ('src', '6s', '\x00' *6),
- ('bssid', '6s', '\x00' *6),
- ('frag_seq', 'H', 0)
- )
-
- class Beacon(dpkt.Packet):
- __hdr__ = (
- ('timestamp', 'Q', 0),
- ('interval', 'H', 0),
- ('capability', 'H', 0)
- )
-
- class Disassoc(dpkt.Packet):
- __hdr__ = (
- ('reason', 'H', 0),
- )
-
- class Assoc_Req(dpkt.Packet):
- __hdr__ = (
- ('capability', 'H', 0),
- ('interval', 'H', 0)
- )
-
- class Assoc_Resp(dpkt.Packet):
- __hdr__ = (
- ('capability', 'H', 0),
- ('status', 'H', 0),
- ('aid', 'H', 0)
- )
-
- class Reassoc_Req(dpkt.Packet):
- __hdr__ = (
- ('capability', 'H', 0),
- ('interval', 'H', 0),
- ('current_ap', '6s', '\x00'*6)
- )
-
- # This obviously doesn't support any of AUTH frames that use encryption
- class Auth(dpkt.Packet):
- __hdr__ = (
- ('algorithm', 'H', 0),
- ('auth_seq', 'H', 0),
- )
-
- class Deauth(dpkt.Packet):
- __hdr__ = (
- ('reason', 'H', 0),
- )
-
- class Action(dpkt.Packet):
- __hdr__ = (
- ('category', 'B', 0),
- ('code', 'B', 0),
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
-
- action_parser = {
- BLOCK_ACK: { BLOCK_ACK_CODE_REQUEST: ('block_ack_request', IEEE80211.BlockAckActionRequest),
- BLOCK_ACK_CODE_RESPONSE: ('block_ack_response', IEEE80211.BlockAckActionResponse),
- },
- }
-
- decoder = action_parser[self.category][self.code][1]
- field_name = action_parser[self.category][self.code][0]
- field = decoder(self.data)
- setattr(self, field_name, field)
- self.data = field.data
-
- class BlockAckActionRequest(dpkt.Packet):
- __hdr__ = (
- ('dialog', 'B', 0),
- ('parameters', 'H', 0),
- ('timeout', 'H', 0),
- ('starting_seq', 'H', 0),
- )
-
- class BlockAckActionResponse(dpkt.Packet):
- __hdr__ = (
- ('dialog', 'B', 0),
- ('status_code', 'H', 0),
- ('parameters', 'H', 0),
- ('timeout', 'H', 0),
- )
-
- class Data(dpkt.Packet):
- __hdr__ = (
- ('dst', '6s', '\x00'*6),
- ('src', '6s', '\x00'*6),
- ('bssid', '6s', '\x00'*6),
- ('frag_seq', 'H', 0)
- )
-
-
- class DataFromDS(dpkt.Packet):
- __hdr__ = (
- ('dst', '6s', '\x00'*6),
- ('bssid', '6s', '\x00'*6),
- ('src', '6s', '\x00'*6),
- ('frag_seq', 'H', 0)
- )
-
-
- class DataToDS(dpkt.Packet):
- __hdr__ = (
- ('bssid', '6s', '\x00'*6),
- ('src', '6s', '\x00'*6),
- ('dst', '6s', '\x00'*6),
- ('frag_seq', 'H', 0)
- )
-
- class DataInterDS(dpkt.Packet):
- __hdr__ = (
- ('dst', '6s', '\x00'*6),
- ('src', '6s', '\x00'*6),
- ('da', '6s', '\x00'*6),
- ('frag_seq', 'H', 0),
- ('sa', '6s', '\x00'*6)
- )
-
- class QoS_Data(dpkt.Packet):
- __hdr__ = (
- ('control', 'H', 0),
- )
-
- class IE(dpkt.Packet):
- __hdr__ = (
- ('id', 'B', 0),
- ('len', 'B', 0)
- )
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.info = buf[2:self.len+ 2]
-
- class FH(dpkt.Packet):
- __hdr__ = (
- ('id', 'B', 0),
- ('len', 'B', 0),
- ('tu', 'H', 0),
- ('hopset', 'B', 0),
- ('hoppattern', 'B', 0),
- ('hopindex', 'B', 0)
- )
-
- class DS(dpkt.Packet):
- __hdr__ = (
- ('id', 'B', 0),
- ('len', 'B', 0),
- ('ch', 'B', 0)
- )
-
- class CF(dpkt.Packet):
- __hdr__ = (
- ('id', 'B', 0),
- ('len', 'B', 0),
- ('count', 'B', 0),
- ('period', 'B', 0),
- ('max', 'H', 0),
- ('dur', 'H', 0)
- )
-
- class TIM(dpkt.Packet):
- __hdr__ = (
- ('id', 'B', 0),
- ('len', 'B', 0),
- ('count', 'B', 0),
- ('period', 'B', 0),
- ('ctrl', 'H', 0)
- )
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.bitmap = buf[5:self.len+ 2]
-
- class IBSS(dpkt.Packet):
- __hdr__ = (
- ('id', 'B', 0),
- ('len', 'B', 0),
- ('atim', 'H', 0)
- )
-
-
-
-if __name__ == '__main__':
- import unittest
-
- class IEEE80211TestCase(unittest.TestCase):
- def test_802211_ack(self):
- s = '\xd4\x00\x00\x00\x00\x12\xf0\xb6\x1c\xa4\xff\xff\xff\xff'
- ieee = IEEE80211(s, fcs = True)
- self.failUnless(ieee.version == 0)
- self.failUnless(ieee.type == CTL_TYPE)
- self.failUnless(ieee.subtype == C_ACK)
- self.failUnless(ieee.to_ds == 0)
- self.failUnless(ieee.from_ds == 0)
- self.failUnless(ieee.pwr_mgt == 0)
- self.failUnless(ieee.more_data == 0)
- self.failUnless(ieee.wep == 0)
- self.failUnless(ieee.order == 0)
- self.failUnless(ieee.ack.dst == '\x00\x12\xf0\xb6\x1c\xa4')
- fcs = struct.unpack('I', s[-4:])[0]
- self.failUnless(ieee.fcs == fcs)
-
- def test_80211_beacon(self):
- s = '\x80\x00\x00\x00\xff\xff\xff\xff\xff\xff\x00\x26\xcb\x18\x6a\x30\x00\x26\xcb\x18\x6a\x30\xa0\xd0\x77\x09\x32\x03\x8f\x00\x00\x00\x66\x00\x31\x04\x00\x04\x43\x41\x45\x4e\x01\x08\x82\x84\x8b\x0c\x12\x96\x18\x24\x03\x01\x01\x05\x04\x00\x01\x00\x00\x07\x06\x55\x53\x20\x01\x0b\x1a\x0b\x05\x00\x00\x6e\x00\x00\x2a\x01\x02\x2d\x1a\x6e\x18\x1b\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x30\x14\x01\x00\x00\x0f\xac\x04\x01\x00\x00\x0f\xac\x04\x01\x00\x00\x0f\xac\x01\x28\x00\x32\x04\x30\x48\x60\x6c\x36\x03\x51\x63\x03\x3d\x16\x01\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x85\x1e\x05\x00\x8f\x00\x0f\x00\xff\x03\x59\x00\x63\x73\x65\x2d\x33\x39\x31\x32\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x36\x96\x06\x00\x40\x96\x00\x14\x00\xdd\x18\x00\x50\xf2\x02\x01\x01\x80\x00\x03\xa4\x00\x00\x27\xa4\x00\x00\x42\x43\x5e\x00\x62\x32\x2f\x00\xdd\x06\x00\x40\x96\x01\x01\x04\xdd\x05\x00\x40\x96\x03\x05\xdd\x05\x00\x40\x96\x0b\x09\xdd\x08\x00\x40\x96\x13\x01\x00\x34\x01\xdd\x05\x00\x40\x96\x14\x05'
- ieee = IEEE80211(s, fcs = True)
- self.failUnless(ieee.version == 0)
- self.failUnless(ieee.type == MGMT_TYPE)
- self.failUnless(ieee.subtype == M_BEACON)
- self.failUnless(ieee.to_ds == 0)
- self.failUnless(ieee.from_ds == 0)
- self.failUnless(ieee.pwr_mgt == 0)
- self.failUnless(ieee.more_data == 0)
- self.failUnless(ieee.wep == 0)
- self.failUnless(ieee.order == 0)
- self.failUnless(ieee.mgmt.dst == '\xff\xff\xff\xff\xff\xff')
- self.failUnless(ieee.mgmt.src == '\x00\x26\xcb\x18\x6a\x30')
- self.failUnless(ieee.beacon.capability == 0x3104)
- self.failUnless(ieee.capability.privacy == 1)
- self.failUnless(ieee.ssid.data == 'CAEN')
- self.failUnless(ieee.rate.data == '\x82\x84\x8b\x0c\x12\x96\x18\x24')
- self.failUnless(ieee.ds.data == '\x01')
- self.failUnless(ieee.tim.data == '\x00\x01\x00\x00')
- fcs = struct.unpack('I', s[-4:])[0]
- self.failUnless(ieee.fcs == fcs)
-
- def test_80211_data(self):
- s = '\x08\x09\x20\x00\x00\x26\xcb\x17\x3d\x91\x00\x16\x44\xb0\xae\xc6\x00\x02\xb3\xd6\x26\x3c\x80\x7e\xaa\xaa\x03\x00\x00\x00\x08\x00\x45\x00\x00\x28\x07\x27\x40\x00\x80\x06\x1d\x39\x8d\xd4\x37\x3d\x3f\xf5\xd1\x69\xc0\x5f\x01\xbb\xb2\xd6\xef\x23\x38\x2b\x4f\x08\x50\x10\x42\x04\xac\x17\x00\x00'
- ieee = IEEE80211(s, fcs = True)
- self.failUnless(ieee.type == DATA_TYPE)
- self.failUnless(ieee.subtype == D_DATA)
- self.failUnless(ieee.data_frame.dst == '\x00\x02\xb3\xd6\x26\x3c')
- self.failUnless(ieee.data_frame.src == '\x00\x16\x44\xb0\xae\xc6')
- self.failUnless(ieee.data_frame.frag_seq == 0x807e)
- self.failUnless(ieee.data == '\xaa\xaa\x03\x00\x00\x00\x08\x00\x45\x00\x00\x28\x07\x27\x40\x00\x80\x06\x1d\x39\x8d\xd4\x37\x3d\x3f\xf5\xd1\x69\xc0\x5f\x01\xbb\xb2\xd6\xef\x23\x38\x2b\x4f\x08\x50\x10\x42\x04')
- self.failUnless(ieee.fcs == struct.unpack('I', '\xac\x17\x00\x00')[0])
-
- import llc, ip
- llc_pkt = llc.LLC(ieee.data_frame.data)
- ip_pkt = ip.IP(llc_pkt.data)
- self.failUnless(ip_pkt.dst == '\x3f\xf5\xd1\x69')
-
- def test_80211_data_qos(self):
- s = '\x88\x01\x3a\x01\x00\x26\xcb\x17\x44\xf0\x00\x23\xdf\xc9\xc0\x93\x00\x26\xcb\x17\x44\xf0\x20\x7b\x00\x00\xaa\xaa\x03\x00\x00\x00\x88\x8e\x01\x00\x00\x74\x02\x02\x00\x74\x19\x80\x00\x00\x00\x6a\x16\x03\x01\x00\x65\x01\x00\x00\x61\x03\x01\x4b\x4c\xa7\x7e\x27\x61\x6f\x02\x7b\x3c\x72\x39\xe3\x7b\xd7\x43\x59\x91\x7f\xaa\x22\x47\x51\xb6\x88\x9f\x85\x90\x87\x5a\xd1\x13\x20\xe0\x07\x00\x00\x68\xbd\xa4\x13\xb0\xd5\x82\x7e\xc7\xfb\xe7\xcc\xab\x6e\x5d\x5a\x51\x50\xd4\x45\xc5\xa1\x65\x53\xad\xb5\x88\x5b\x00\x1a\x00\x2f\x00\x05\x00\x04\x00\x35\x00\x0a\x00\x09\x00\x03\x00\x08\x00\x33\x00\x39\x00\x16\x00\x15\x00\x14\x01\x00\xff\xff\xff\xff'
- ieee = IEEE80211(s, fcs = True)
- self.failUnless(ieee.type == DATA_TYPE)
- self.failUnless(ieee.subtype == D_QOS_DATA)
- self.failUnless(ieee.data_frame.dst == '\x00\x26\xcb\x17\x44\xf0')
- self.failUnless(ieee.data_frame.src == '\x00\x23\xdf\xc9\xc0\x93')
- self.failUnless(ieee.data_frame.frag_seq == 0x207b)
- self.failUnless(ieee.data == '\xaa\xaa\x03\x00\x00\x00\x88\x8e\x01\x00\x00\x74\x02\x02\x00\x74\x19\x80\x00\x00\x00\x6a\x16\x03\x01\x00\x65\x01\x00\x00\x61\x03\x01\x4b\x4c\xa7\x7e\x27\x61\x6f\x02\x7b\x3c\x72\x39\xe3\x7b\xd7\x43\x59\x91\x7f\xaa\x22\x47\x51\xb6\x88\x9f\x85\x90\x87\x5a\xd1\x13\x20\xe0\x07\x00\x00\x68\xbd\xa4\x13\xb0\xd5\x82\x7e\xc7\xfb\xe7\xcc\xab\x6e\x5d\x5a\x51\x50\xd4\x45\xc5\xa1\x65\x53\xad\xb5\x88\x5b\x00\x1a\x00\x2f\x00\x05\x00\x04\x00\x35\x00\x0a\x00\x09\x00\x03\x00\x08\x00\x33\x00\x39\x00\x16\x00\x15\x00\x14\x01\x00')
- self.failUnless(ieee.qos_data.control == 0x0)
- self.failUnless(ieee.fcs == struct.unpack('I', '\xff\xff\xff\xff')[0])
-
- def test_bug(self):
- s = '\x88\x41\x2c\x00\x00\x26\xcb\x17\x44\xf0\x00\x1e\x52\x97\x14\x11\x00\x1f\x6d\xe8\x18\x00\xd0\x07\x00\x00\x6f\x00\x00\x20\x00\x00\x00\x00'
- ieee = IEEE80211(s)
- self.failUnless(ieee.wep == 1)
-
- def test_data_ds(self):
- # verifying the ToDS and FromDS fields and that we're getting the
- # correct values
-
- s = '\x08\x03\x00\x00\x01\x0b\x85\x00\x00\x00\x00\x26\xcb\x18\x73\x50\x01\x0b\x85\x00\x00\x00\x00\x89\x00\x26\xcb\x18\x73\x50'
- ieee = IEEE80211(s)
- self.failUnless(ieee.type == DATA_TYPE)
- self.failUnless(ieee.to_ds == 1)
- self.failUnless(ieee.from_ds == 1)
- self.failUnless(ieee.data_frame.sa == '\x00\x26\xcb\x18\x73\x50')
- self.failUnless(ieee.data_frame.src == '\x00\x26\xcb\x18\x73\x50')
- self.failUnless(ieee.data_frame.dst == '\x01\x0b\x85\x00\x00\x00')
- self.failUnless(ieee.data_frame.da == '\x01\x0b\x85\x00\x00\x00')
-
- s = '\x88\x41\x50\x01\x00\x26\xcb\x17\x48\xc1\x00\x24\x2c\xe7\xfe\x8a\xff\xff\xff\xff\xff\xff\x80\xa0\x00\x00\x09\x1a\x00\x20\x00\x00\x00\x00'
- ieee = IEEE80211(s)
- self.failUnless(ieee.type == DATA_TYPE)
- self.failUnless(ieee.to_ds == 1)
- self.failUnless(ieee.from_ds == 0)
- self.failUnless(ieee.data_frame.bssid == '\x00\x26\xcb\x17\x48\xc1')
- self.failUnless(ieee.data_frame.src == '\x00\x24\x2c\xe7\xfe\x8a')
- self.failUnless(ieee.data_frame.dst == '\xff\xff\xff\xff\xff\xff')
-
- s = '\x08\x02\x02\x01\x00\x02\x44\xac\x27\x70\x00\x1f\x33\x39\x75\x44\x00\x1f\x33\x39\x75\x44\x90\xa4'
- ieee = IEEE80211(s)
- self.failUnless(ieee.type == DATA_TYPE)
- self.failUnless(ieee.to_ds == 0)
- self.failUnless(ieee.from_ds == 1)
- self.failUnless(ieee.data_frame.bssid == '\x00\x1f\x33\x39\x75\x44')
- self.failUnless(ieee.data_frame.src == '\x00\x1f\x33\x39\x75\x44')
- self.failUnless(ieee.data_frame.dst == '\x00\x02\x44\xac\x27\x70')
-
- def test_compressed_block_ack(self):
- s = '\x94\x00\x00\x00\x34\xc0\x59\xd6\x3f\x62\xb4\x75\x0e\x46\x83\xc1\x05\x50\x80\xee\x03\x00\x00\x00\x00\x00\x00\x00\xa2\xe4\x98\x45'
- ieee = IEEE80211(s, fcs = True)
- self.failUnless(ieee.type == CTL_TYPE)
- self.failUnless(ieee.subtype == C_BLOCK_ACK)
- self.failUnless(ieee.back.dst == '\x34\xc0\x59\xd6\x3f\x62')
- self.failUnless(ieee.back.src == '\xb4\x75\x0e\x46\x83\xc1')
- self.failUnless(ieee.back.compressed == 1)
- self.failUnless(len(ieee.back.bmp) == 8)
- self.failUnless(ieee.back.ack_policy == 1)
- self.failUnless(ieee.back.tid == 5)
-
- def test_action_block_ack_request(self):
- s = '\xd0\x00\x3a\x01\x00\x23\x14\x36\x52\x30\xb4\x75\x0e\x46\x83\xc1\xb4\x75\x0e\x46\x83\xc1\x70\x14\x03\x00\x0d\x02\x10\x00\x00\x40\x29\x06\x50\x33\x9e'
- ieee = IEEE80211(s, fcs = True)
- self.failUnless(ieee.type == MGMT_TYPE)
- self.failUnless(ieee.subtype == M_ACTION)
- self.failUnless(ieee.action.category == BLOCK_ACK)
- self.failUnless(ieee.action.code == BLOCK_ACK_CODE_REQUEST)
- self.failUnless(ieee.action.block_ack_request.timeout == 0)
- parameters = struct.unpack('H', '\x10\x02')[0]
- self.failUnless(ieee.action.block_ack_request.parameters == parameters)
-
- def test_action_block_ack_response(self):
- s = '\xd0\x00\x3c\x00\xb4\x75\x0e\x46\x83\xc1\x00\x23\x14\x36\x52\x30\xb4\x75\x0e\x46\x83\xc1\xd0\x68\x03\x01\x0d\x00\x00\x02\x10\x88\x13\x9f\xc0\x0b\x75'
- ieee = IEEE80211(s, fcs = True)
- self.failUnless(ieee.type == MGMT_TYPE)
- self.failUnless(ieee.subtype == M_ACTION)
- self.failUnless(ieee.action.category == BLOCK_ACK)
- self.failUnless(ieee.action.code == BLOCK_ACK_CODE_RESPONSE)
- timeout = struct.unpack('H', '\x13\x88')[0]
- self.failUnless(ieee.action.block_ack_response.timeout == timeout)
- parameters = struct.unpack('H', '\x10\x02')[0]
- self.failUnless(ieee.action.block_ack_response.parameters == parameters)
-
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/igmp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/igmp.py
deleted file mode 100644
index 8031fa72..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/igmp.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# $Id: igmp.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Internet Group Management Protocol."""
-
-import dpkt
-
-class IGMP(dpkt.Packet):
- __hdr__ = (
- ('type', 'B', 0),
- ('maxresp', 'B', 0),
- ('sum', 'H', 0),
- ('group', 'I', 0)
- )
- def __str__(self):
- if not self.sum:
- self.sum = dpkt.in_cksum(dpkt.Packet.__str__(self))
- return dpkt.Packet.__str__(self)
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/ip.py b/scripts/external_libs/dpkt-1.8.6/dpkt/ip.py
deleted file mode 100644
index a8f9bd96..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/ip.py
+++ /dev/null
@@ -1,301 +0,0 @@
-# $Id: ip.py 87 2013-03-05 19:41:04Z andrewflnr@gmail.com $
-
-"""Internet Protocol."""
-
-import dpkt
-
-class IP(dpkt.Packet):
- __hdr__ = (
- ('v_hl', 'B', (4 << 4) | (20 >> 2)),
- ('tos', 'B', 0),
- ('len', 'H', 20),
- ('id', 'H', 0),
- ('off', 'H', 0),
- ('ttl', 'B', 64),
- ('p', 'B', 0),
- ('sum', 'H', 0),
- ('src', '4s', '\x00' * 4),
- ('dst', '4s', '\x00' * 4)
- )
- _protosw = {}
- opts = ''
-
- def _get_v(self): return self.v_hl >> 4
- def _set_v(self, v): self.v_hl = (v << 4) | (self.v_hl & 0xf)
- v = property(_get_v, _set_v)
-
- def _get_hl(self): return self.v_hl & 0xf
- def _set_hl(self, hl): self.v_hl = (self.v_hl & 0xf0) | hl
- hl = property(_get_hl, _set_hl)
-
- def __len__(self):
- return self.__hdr_len__ + len(self.opts) + len(self.data)
-
- def __str__(self):
- if self.sum == 0:
- self.sum = dpkt.in_cksum(self.pack_hdr() + self.opts)
- if (self.p == 6 or self.p == 17) and \
- (self.off & (IP_MF|IP_OFFMASK)) == 0 and \
- isinstance(self.data, dpkt.Packet) and self.data.sum == 0:
- # Set zeroed TCP and UDP checksums for non-fragments.
- p = str(self.data)
- s = dpkt.struct.pack('>4s4sxBH', self.src, self.dst,
- self.p, len(p))
- s = dpkt.in_cksum_add(0, s)
- s = dpkt.in_cksum_add(s, p)
- self.data.sum = dpkt.in_cksum_done(s)
- if self.p == 17 and self.data.sum == 0:
- self.data.sum = 0xffff # RFC 768
- # XXX - skip transports which don't need the pseudoheader
- return self.pack_hdr() + self.opts + str(self.data)
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- ol = ((self.v_hl & 0xf) << 2) - self.__hdr_len__
- if ol < 0:
- raise dpkt.UnpackError, 'invalid header length'
- self.opts = buf[self.__hdr_len__:self.__hdr_len__ + ol]
- if self.len:
- buf = buf[self.__hdr_len__ + ol:self.len]
- else: # very likely due to TCP segmentation offload
- buf = buf[self.__hdr_len__ + ol:]
- try:
- self.data = self._protosw[self.p](buf)
- setattr(self, self.data.__class__.__name__.lower(), self.data)
- except (KeyError, dpkt.UnpackError):
- self.data = buf
-
- def set_proto(cls, p, pktclass):
- cls._protosw[p] = pktclass
- set_proto = classmethod(set_proto)
-
- def get_proto(cls, p):
- return cls._protosw[p]
- get_proto = classmethod(get_proto)
-
-# Type of service (ip_tos), RFC 1349 ("obsoleted by RFC 2474")
-IP_TOS_DEFAULT = 0x00 # default
-IP_TOS_LOWDELAY = 0x10 # low delay
-IP_TOS_THROUGHPUT = 0x08 # high throughput
-IP_TOS_RELIABILITY = 0x04 # high reliability
-IP_TOS_LOWCOST = 0x02 # low monetary cost - XXX
-IP_TOS_ECT = 0x02 # ECN-capable transport
-IP_TOS_CE = 0x01 # congestion experienced
-
-# IP precedence (high 3 bits of ip_tos), hopefully unused
-IP_TOS_PREC_ROUTINE = 0x00
-IP_TOS_PREC_PRIORITY = 0x20
-IP_TOS_PREC_IMMEDIATE = 0x40
-IP_TOS_PREC_FLASH = 0x60
-IP_TOS_PREC_FLASHOVERRIDE = 0x80
-IP_TOS_PREC_CRITIC_ECP = 0xa0
-IP_TOS_PREC_INTERNETCONTROL = 0xc0
-IP_TOS_PREC_NETCONTROL = 0xe0
-
-# Fragmentation flags (ip_off)
-IP_RF = 0x8000 # reserved
-IP_DF = 0x4000 # don't fragment
-IP_MF = 0x2000 # more fragments (not last frag)
-IP_OFFMASK = 0x1fff # mask for fragment offset
-
-# Time-to-live (ip_ttl), seconds
-IP_TTL_DEFAULT = 64 # default ttl, RFC 1122, RFC 1340
-IP_TTL_MAX = 255 # maximum ttl
-
-# Protocol (ip_p) - http://www.iana.org/assignments/protocol-numbers
-IP_PROTO_IP = 0 # dummy for IP
-IP_PROTO_HOPOPTS = IP_PROTO_IP # IPv6 hop-by-hop options
-IP_PROTO_ICMP = 1 # ICMP
-IP_PROTO_IGMP = 2 # IGMP
-IP_PROTO_GGP = 3 # gateway-gateway protocol
-IP_PROTO_IPIP = 4 # IP in IP
-IP_PROTO_ST = 5 # ST datagram mode
-IP_PROTO_TCP = 6 # TCP
-IP_PROTO_CBT = 7 # CBT
-IP_PROTO_EGP = 8 # exterior gateway protocol
-IP_PROTO_IGP = 9 # interior gateway protocol
-IP_PROTO_BBNRCC = 10 # BBN RCC monitoring
-IP_PROTO_NVP = 11 # Network Voice Protocol
-IP_PROTO_PUP = 12 # PARC universal packet
-IP_PROTO_ARGUS = 13 # ARGUS
-IP_PROTO_EMCON = 14 # EMCON
-IP_PROTO_XNET = 15 # Cross Net Debugger
-IP_PROTO_CHAOS = 16 # Chaos
-IP_PROTO_UDP = 17 # UDP
-IP_PROTO_MUX = 18 # multiplexing
-IP_PROTO_DCNMEAS = 19 # DCN measurement
-IP_PROTO_HMP = 20 # Host Monitoring Protocol
-IP_PROTO_PRM = 21 # Packet Radio Measurement
-IP_PROTO_IDP = 22 # Xerox NS IDP
-IP_PROTO_TRUNK1 = 23 # Trunk-1
-IP_PROTO_TRUNK2 = 24 # Trunk-2
-IP_PROTO_LEAF1 = 25 # Leaf-1
-IP_PROTO_LEAF2 = 26 # Leaf-2
-IP_PROTO_RDP = 27 # "Reliable Datagram" proto
-IP_PROTO_IRTP = 28 # Inet Reliable Transaction
-IP_PROTO_TP = 29 # ISO TP class 4
-IP_PROTO_NETBLT = 30 # Bulk Data Transfer
-IP_PROTO_MFPNSP = 31 # MFE Network Services
-IP_PROTO_MERITINP = 32 # Merit Internodal Protocol
-IP_PROTO_SEP = 33 # Sequential Exchange proto
-IP_PROTO_3PC = 34 # Third Party Connect proto
-IP_PROTO_IDPR = 35 # Interdomain Policy Route
-IP_PROTO_XTP = 36 # Xpress Transfer Protocol
-IP_PROTO_DDP = 37 # Datagram Delivery Proto
-IP_PROTO_CMTP = 38 # IDPR Ctrl Message Trans
-IP_PROTO_TPPP = 39 # TP++ Transport Protocol
-IP_PROTO_IL = 40 # IL Transport Protocol
-IP_PROTO_IP6 = 41 # IPv6
-IP_PROTO_SDRP = 42 # Source Demand Routing
-IP_PROTO_ROUTING = 43 # IPv6 routing header
-IP_PROTO_FRAGMENT = 44 # IPv6 fragmentation header
-IP_PROTO_RSVP = 46 # Reservation protocol
-IP_PROTO_GRE = 47 # General Routing Encap
-IP_PROTO_MHRP = 48 # Mobile Host Routing
-IP_PROTO_ENA = 49 # ENA
-IP_PROTO_ESP = 50 # Encap Security Payload
-IP_PROTO_AH = 51 # Authentication Header
-IP_PROTO_INLSP = 52 # Integated Net Layer Sec
-IP_PROTO_SWIPE = 53 # SWIPE
-IP_PROTO_NARP = 54 # NBMA Address Resolution
-IP_PROTO_MOBILE = 55 # Mobile IP, RFC 2004
-IP_PROTO_TLSP = 56 # Transport Layer Security
-IP_PROTO_SKIP = 57 # SKIP
-IP_PROTO_ICMP6 = 58 # ICMP for IPv6
-IP_PROTO_NONE = 59 # IPv6 no next header
-IP_PROTO_DSTOPTS = 60 # IPv6 destination options
-IP_PROTO_ANYHOST = 61 # any host internal proto
-IP_PROTO_CFTP = 62 # CFTP
-IP_PROTO_ANYNET = 63 # any local network
-IP_PROTO_EXPAK = 64 # SATNET and Backroom EXPAK
-IP_PROTO_KRYPTOLAN = 65 # Kryptolan
-IP_PROTO_RVD = 66 # MIT Remote Virtual Disk
-IP_PROTO_IPPC = 67 # Inet Pluribus Packet Core
-IP_PROTO_DISTFS = 68 # any distributed fs
-IP_PROTO_SATMON = 69 # SATNET Monitoring
-IP_PROTO_VISA = 70 # VISA Protocol
-IP_PROTO_IPCV = 71 # Inet Packet Core Utility
-IP_PROTO_CPNX = 72 # Comp Proto Net Executive
-IP_PROTO_CPHB = 73 # Comp Protocol Heart Beat
-IP_PROTO_WSN = 74 # Wang Span Network
-IP_PROTO_PVP = 75 # Packet Video Protocol
-IP_PROTO_BRSATMON = 76 # Backroom SATNET Monitor
-IP_PROTO_SUNND = 77 # SUN ND Protocol
-IP_PROTO_WBMON = 78 # WIDEBAND Monitoring
-IP_PROTO_WBEXPAK = 79 # WIDEBAND EXPAK
-IP_PROTO_EON = 80 # ISO CNLP
-IP_PROTO_VMTP = 81 # Versatile Msg Transport
-IP_PROTO_SVMTP = 82 # Secure VMTP
-IP_PROTO_VINES = 83 # VINES
-IP_PROTO_TTP = 84 # TTP
-IP_PROTO_NSFIGP = 85 # NSFNET-IGP
-IP_PROTO_DGP = 86 # Dissimilar Gateway Proto
-IP_PROTO_TCF = 87 # TCF
-IP_PROTO_EIGRP = 88 # EIGRP
-IP_PROTO_OSPF = 89 # Open Shortest Path First
-IP_PROTO_SPRITERPC = 90 # Sprite RPC Protocol
-IP_PROTO_LARP = 91 # Locus Address Resolution
-IP_PROTO_MTP = 92 # Multicast Transport Proto
-IP_PROTO_AX25 = 93 # AX.25 Frames
-IP_PROTO_IPIPENCAP = 94 # yet-another IP encap
-IP_PROTO_MICP = 95 # Mobile Internet Ctrl
-IP_PROTO_SCCSP = 96 # Semaphore Comm Sec Proto
-IP_PROTO_ETHERIP = 97 # Ethernet in IPv4
-IP_PROTO_ENCAP = 98 # encapsulation header
-IP_PROTO_ANYENC = 99 # private encryption scheme
-IP_PROTO_GMTP = 100 # GMTP
-IP_PROTO_IFMP = 101 # Ipsilon Flow Mgmt Proto
-IP_PROTO_PNNI = 102 # PNNI over IP
-IP_PROTO_PIM = 103 # Protocol Indep Multicast
-IP_PROTO_ARIS = 104 # ARIS
-IP_PROTO_SCPS = 105 # SCPS
-IP_PROTO_QNX = 106 # QNX
-IP_PROTO_AN = 107 # Active Networks
-IP_PROTO_IPCOMP = 108 # IP Payload Compression
-IP_PROTO_SNP = 109 # Sitara Networks Protocol
-IP_PROTO_COMPAQPEER = 110 # Compaq Peer Protocol
-IP_PROTO_IPXIP = 111 # IPX in IP
-IP_PROTO_VRRP = 112 # Virtual Router Redundancy
-IP_PROTO_PGM = 113 # PGM Reliable Transport
-IP_PROTO_ANY0HOP = 114 # 0-hop protocol
-IP_PROTO_L2TP = 115 # Layer 2 Tunneling Proto
-IP_PROTO_DDX = 116 # D-II Data Exchange (DDX)
-IP_PROTO_IATP = 117 # Interactive Agent Xfer
-IP_PROTO_STP = 118 # Schedule Transfer Proto
-IP_PROTO_SRP = 119 # SpectraLink Radio Proto
-IP_PROTO_UTI = 120 # UTI
-IP_PROTO_SMP = 121 # Simple Message Protocol
-IP_PROTO_SM = 122 # SM
-IP_PROTO_PTP = 123 # Performance Transparency
-IP_PROTO_ISIS = 124 # ISIS over IPv4
-IP_PROTO_FIRE = 125 # FIRE
-IP_PROTO_CRTP = 126 # Combat Radio Transport
-IP_PROTO_CRUDP = 127 # Combat Radio UDP
-IP_PROTO_SSCOPMCE = 128 # SSCOPMCE
-IP_PROTO_IPLT = 129 # IPLT
-IP_PROTO_SPS = 130 # Secure Packet Shield
-IP_PROTO_PIPE = 131 # Private IP Encap in IP
-IP_PROTO_SCTP = 132 # Stream Ctrl Transmission
-IP_PROTO_FC = 133 # Fibre Channel
-IP_PROTO_RSVPIGN = 134 # RSVP-E2E-IGNORE
-IP_PROTO_RAW = 255 # Raw IP packets
-IP_PROTO_RESERVED = IP_PROTO_RAW # Reserved
-IP_PROTO_MAX = 255
-
-# XXX - auto-load IP dispatch table from IP_PROTO_* definitions
-def __load_protos():
- g = globals()
- for k, v in g.iteritems():
- if k.startswith('IP_PROTO_'):
- name = k[9:].lower()
- try:
- mod = __import__(name, g)
- except ImportError:
- continue
- IP.set_proto(v, getattr(mod, name.upper()))
-
-if not IP._protosw:
- __load_protos()
-
-if __name__ == '__main__':
- import unittest
-
- class IPTestCase(unittest.TestCase):
- def test_IP(self):
- import udp
- s = 'E\x00\x00"\x00\x00\x00\x00@\x11r\xc0\x01\x02\x03\x04\x01\x02\x03\x04\x00o\x00\xde\x00\x0e\xbf5foobar'
- ip = IP(id=0, src='\x01\x02\x03\x04', dst='\x01\x02\x03\x04', p=17)
- u = udp.UDP(sport=111, dport=222)
- u.data = 'foobar'
- u.ulen += len(u.data)
- ip.data = u
- ip.len += len(u)
- self.failUnless(str(ip) == s)
-
- ip = IP(s)
- self.failUnless(str(ip) == s)
- self.failUnless(ip.udp.sport == 111)
- self.failUnless(ip.udp.data == 'foobar')
-
- def test_hl(self):
- s = 'BB\x03\x00\x00\x00\x00\x00\x00\x00\xd0\x00\xec\xbc\xa5\x00\x00\x00\x03\x80\x00\x00\xd0\x01\xf2\xac\xa5"0\x01\x00\x14\x00\x02\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- try:
- ip = IP(s)
- except dpkt.UnpackError:
- pass
-
- def test_opt(self):
- s = '\x4f\x00\x00\x50\xae\x08\x00\x00\x40\x06\x17\xfc\xc0\xa8\x0a\x26\xc0\xa8\x0a\x01\x07\x27\x08\x01\x02\x03\x04\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ip = IP(s)
- ip.sum = 0
- self.failUnless(str(ip) == s)
-
- def test_zerolen(self):
- import tcp
- d = 'X' * 2048
- s = 'E\x00\x00\x004\xce@\x00\x80\x06\x00\x00\x7f\x00\x00\x01\x7f\x00\x00\x01\xccN\x0c8`\xff\xc6N_\x8a\x12\x98P\x18@):\xa3\x00\x00' + d
- ip = IP(s)
- self.failUnless(isinstance(ip.data, tcp.TCP))
- self.failUnless(ip.tcp.data == d)
-
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/ip6.py b/scripts/external_libs/dpkt-1.8.6/dpkt/ip6.py
deleted file mode 100644
index 38002fa6..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/ip6.py
+++ /dev/null
@@ -1,307 +0,0 @@
-# $Id: ip6.py 87 2013-03-05 19:41:04Z andrewflnr@gmail.com $
-
-"""Internet Protocol, version 6."""
-
-import dpkt
-
-class IP6(dpkt.Packet):
- __hdr__ = (
- ('v_fc_flow', 'I', 0x60000000L),
- ('plen', 'H', 0), # payload length (not including header)
- ('nxt', 'B', 0), # next header protocol
- ('hlim', 'B', 0), # hop limit
- ('src', '16s', ''),
- ('dst', '16s', '')
- )
-
- # XXX - to be shared with IP. We cannot refer to the ip module
- # right now because ip.__load_protos() expects the IP6 class to be
- # defined.
- _protosw = None
-
- def _get_v(self):
- return self.v_fc_flow >> 28
- def _set_v(self, v):
- self.v_fc_flow = (self.v_fc_flow & ~0xf0000000L) | (v << 28)
- v = property(_get_v, _set_v)
-
- def _get_fc(self):
- return (self.v_fc_flow >> 20) & 0xff
- def _set_fc(self, v):
- self.v_fc_flow = (self.v_fc_flow & ~0xff00000L) | (v << 20)
- fc = property(_get_fc, _set_fc)
-
- def _get_flow(self):
- return self.v_fc_flow & 0xfffff
- def _set_flow(self, v):
- self.v_fc_flow = (self.v_fc_flow & ~0xfffff) | (v & 0xfffff)
- flow = property(_get_flow, _set_flow)
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.extension_hdrs = dict(((i, None) for i in ext_hdrs))
-
- if self.plen:
- buf = self.data[:self.plen]
- else: # due to jumbo payload or TSO
- buf = self.data
-
- next = self.nxt
-
- while (next in ext_hdrs):
- ext = ext_hdrs_cls[next](buf)
- self.extension_hdrs[next] = ext
- buf = buf[ext.length:]
- next = ext.nxt
-
- # set the payload protocol id
- setattr(self, 'p', next)
-
- try:
- self.data = self._protosw[next](buf)
- setattr(self, self.data.__class__.__name__.lower(), self.data)
- except (KeyError, dpkt.UnpackError):
- self.data = buf
-
- def headers_str(self):
- """
- Output extension headers in order defined in RFC1883 (except dest opts)
- """
-
- header_str = ""
-
- for hdr in ext_hdrs:
- if not self.extension_hdrs[hdr] is None:
- header_str += str(self.extension_hdrs[hdr])
- return header_str
-
-
- def __str__(self):
- if (self.nxt == 6 or self.nxt == 17 or self.nxt == 58) and \
- not self.data.sum:
- # XXX - set TCP, UDP, and ICMPv6 checksums
- p = str(self.data)
- s = dpkt.struct.pack('>16s16sxBH', self.src, self.dst, self.nxt, len(p))
- s = dpkt.in_cksum_add(0, s)
- s = dpkt.in_cksum_add(s, p)
- try:
- self.data.sum = dpkt.in_cksum_done(s)
- except AttributeError:
- pass
- return self.pack_hdr() + self.headers_str() + str(self.data)
-
- def set_proto(cls, p, pktclass):
- cls._protosw[p] = pktclass
- set_proto = classmethod(set_proto)
-
- def get_proto(cls, p):
- return cls._protosw[p]
- get_proto = classmethod(get_proto)
-
-import ip
-# We are most likely still in the middle of ip.__load_protos() which
-# implicitly loads this module through __import__(), so the content of
-# ip.IP._protosw is still incomplete at the moment. By sharing the
-# same dictionary by reference as opposed to making a copy, when
-# ip.__load_protos() finishes, we will also automatically get the most
-# up-to-date dictionary.
-IP6._protosw = ip.IP._protosw
-
-class IP6ExtensionHeader(dpkt.Packet):
- """
- An extension header is very similar to a 'sub-packet'.
- We just want to re-use all the hdr unpacking etc.
- """
- pass
-
-class IP6OptsHeader(IP6ExtensionHeader):
- __hdr__ = (
- ('nxt', 'B', 0), # next extension header protocol
- ('len', 'B', 0) # option data length in 8 octect units (ignoring first 8 octets) so, len 0 == 64bit header
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- setattr(self, 'length', (self.len + 1) * 8)
- options = []
-
- index = 0
-
- while (index < self.length - 2):
- opt_type = ord(self.data[index])
-
- # PAD1 option
- if opt_type == 0:
- index += 1
- continue;
-
- opt_length = ord(self.data[index + 1])
-
- if opt_type == 1: # PADN option
- # PADN uses opt_length bytes in total
- index += opt_length + 2
- continue
-
- options.append({'type': opt_type, 'opt_length': opt_length, 'data': self.data[index + 2:index + 2 + opt_length]})
-
- # add the two chars and the option_length, to move to the next option
- index += opt_length + 2
-
- setattr(self, 'options', options)
-
-class IP6HopOptsHeader(IP6OptsHeader): pass
-
-class IP6DstOptsHeader(IP6OptsHeader): pass
-
-class IP6RoutingHeader(IP6ExtensionHeader):
- __hdr__ = (
- ('nxt', 'B', 0), # next extension header protocol
- ('len', 'B', 0), # extension data length in 8 octect units (ignoring first 8 octets) (<= 46 for type 0)
- ('type', 'B', 0), # routing type (currently, only 0 is used)
- ('segs_left', 'B', 0), # remaining segments in route, until destination (<= 23)
- ('rsvd_sl_bits', 'I', 0), # reserved (1 byte), strict/loose bitmap for addresses
- )
-
- def _get_sl_bits(self):
- return self.rsvd_sl_bits & 0xffffff
- def _set_sl_bits(self, v):
- self.rsvd_sl_bits = (self.rsvd_sl_bits & ~0xfffff) | (v & 0xfffff)
- sl_bits = property(_get_sl_bits, _set_sl_bits)
-
- def unpack(self, buf):
- hdr_size = 8
- addr_size = 16
-
- dpkt.Packet.unpack(self, buf)
-
- addresses = []
- num_addresses = self.len / 2
- buf = buf[hdr_size:hdr_size + num_addresses * addr_size]
-
- for i in range(num_addresses):
- addresses.append(buf[i * addr_size: i * addr_size + addr_size])
-
- self.data = buf
- setattr(self, 'addresses', addresses)
- setattr(self, 'length', self.len * 8 + 8)
-
-class IP6FragmentHeader(IP6ExtensionHeader):
- __hdr__ = (
- ('nxt', 'B', 0), # next extension header protocol
- ('resv', 'B', 0), # reserved, set to 0
- ('frag_off_resv_m', 'H', 0), # frag offset (13 bits), reserved zero (2 bits), More frags flag
- ('id', 'I', 0) # fragments id
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- setattr(self, 'length', self.__hdr_len__)
-
- def _get_frag_off(self):
- return self.frag_off_resv_m >> 3
- def _set_frag_off(self, v):
- self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfff8) | (v << 3)
- frag_off = property(_get_frag_off, _set_frag_off)
-
- def _get_m_flag(self):
- return self.frag_off_resv_m & 1
- def _set_m_flag(self, v):
- self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfffe) | v
- m_flag = property(_get_m_flag, _set_m_flag)
-
-class IP6AHHeader(IP6ExtensionHeader):
- __hdr__ = (
- ('nxt', 'B', 0), # next extension header protocol
- ('len', 'B', 0), # length of header in 4 octet units (ignoring first 2 units)
- ('resv', 'H', 0), # reserved, 2 bytes of 0
- ('spi', 'I', 0), # SPI security parameter index
- ('seq', 'I', 0) # sequence no.
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- setattr(self, 'length', (self.len + 2) * 4)
- setattr(self, 'auth_data', self.data[:(self.len - 1) * 4])
-
-
-class IP6ESPHeader(IP6ExtensionHeader):
- def unpack(self, buf):
- raise NotImplementedError("ESP extension headers are not supported.")
-
-
-ext_hdrs = [ip.IP_PROTO_HOPOPTS, ip.IP_PROTO_ROUTING, ip.IP_PROTO_FRAGMENT, ip.IP_PROTO_AH, ip.IP_PROTO_ESP, ip.IP_PROTO_DSTOPTS]
-ext_hdrs_cls = {ip.IP_PROTO_HOPOPTS: IP6HopOptsHeader,
- ip.IP_PROTO_ROUTING: IP6RoutingHeader,
- ip.IP_PROTO_FRAGMENT: IP6FragmentHeader,
- ip.IP_PROTO_ESP: IP6ESPHeader,
- ip.IP_PROTO_AH: IP6AHHeader,
- ip.IP_PROTO_DSTOPTS: IP6DstOptsHeader}
-
-if __name__ == '__main__':
- import unittest
-
- class IP6TestCase(unittest.TestCase):
-
- def test_IP6(self):
- s = '`\x00\x00\x00\x00(\x06@\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11$\xff\xfe\x8c\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80r\xcd\xca\x00\x16\x04\x84F\xd5\x00\x00\x00\x00\xa0\x02\xff\xff\xf8\t\x00\x00\x02\x04\x05\xa0\x01\x03\x03\x00\x01\x01\x08\n}\x185?\x00\x00\x00\x00'
- ip = IP6(s)
- #print `ip`
- ip.data.sum = 0
- s2 = str(ip)
- ip2 = IP6(s)
- #print `ip2`
- assert(s == s2)
-
- def test_IP6RoutingHeader(self):
- s = '`\x00\x00\x00\x00<+@ H\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca G\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02\x00\x00\x00\x00 \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00P\x00\x00\x00\x00\x00\x00\x00\x00P\x02 \x00\x91\x7f\x00\x00'
- ip = IP6(s)
- s2 = str(ip)
- # 43 is Routing header id
- assert(len(ip.extension_hdrs[43].addresses) == 2)
- assert(ip.tcp)
- assert(s == s2)
-
-
- def test_IP6FragmentHeader(self):
- s = '\x06\xee\xff\xfb\x00\x00\xff\xff'
- fh = IP6FragmentHeader(s)
- s2 = str(fh)
- assert(fh.nxt == 6)
- assert(fh.id == 65535)
- assert(fh.frag_off == 8191)
- assert(fh.m_flag == 1)
-
- def test_IP6OptionsHeader(self):
- s = ';\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00'
- options = IP6OptsHeader(s).options
- assert(len(options) == 3)
-
- def test_IP6AHHeader(self):
- s = ';\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
- ah = IP6AHHeader(s)
- assert(ah.length == 24)
- assert(ah.auth_data == 'xxxxxxxx')
- assert(ah.spi == 0x2020202)
- assert(ah.seq == 0x1010101)
-
- def test_IP6ExtensionHeaders(self):
- p = '`\x00\x00\x00\x00<+@ H\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca G\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02\x00\x00\x00\x00 \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00P\x00\x00\x00\x00\x00\x00\x00\x00P\x02 \x00\x91\x7f\x00\x00'
- ip = IP6(p)
-
- o = ';\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00'
- options = IP6HopOptsHeader(o)
-
- ip.extension_hdrs[0] = options
-
- fh = '\x06\xee\xff\xfb\x00\x00\xff\xff'
- ip.extension_hdrs[44] = IP6FragmentHeader(fh)
-
- ah = ';\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
- ip.extension_hdrs[51] = IP6AHHeader(ah)
-
- do = ';\x02\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ip.extension_hdrs[60] = IP6DstOptsHeader(do)
-
- assert(len([k for k in ip.extension_hdrs if (not ip.extension_hdrs[k] is None)]) == 5)
-
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/ipx.py b/scripts/external_libs/dpkt-1.8.6/dpkt/ipx.py
deleted file mode 100644
index 74e7982a..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/ipx.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# $Id: ipx.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Internetwork Packet Exchange."""
-
-import dpkt
-
-IPX_HDR_LEN = 30
-
-class IPX(dpkt.Packet):
- __hdr__ = (
- ('sum', 'H', 0xffff),
- ('len', 'H', IPX_HDR_LEN),
- ('tc', 'B', 0),
- ('pt', 'B', 0),
- ('dst', '12s', ''),
- ('src', '12s', '')
- )
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/llc.py b/scripts/external_libs/dpkt-1.8.6/dpkt/llc.py
deleted file mode 100644
index 28246131..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/llc.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import struct
-import dpkt, stp, ethernet
-
-class LLC(dpkt.Packet):
- _typesw = {}
-
- def _unpack_data(self, buf):
- if self.type == ethernet.ETH_TYPE_8021Q:
- self.tag, self.type = struct.unpack('>HH', buf[:4])
- buf = buf[4:]
- elif self.type == ethernet.ETH_TYPE_MPLS or \
- self.type == ethernet.ETH_TYPE_MPLS_MCAST:
- # XXX - skip labels
- for i in range(24):
- if struct.unpack('>I', buf[i:i+4])[0] & 0x0100: # MPLS_STACK_BOTTOM
- break
- self.type = ethernet.ETH_TYPE_IP
- buf = buf[(i + 1) * 4:]
- try:
- self.data = self._typesw[self.type](buf)
- setattr(self, self.data.__class__.__name__.lower(), self.data)
- except (KeyError, dpkt.UnpackError):
- self.data = buf
-
- def unpack(self, buf):
- self.data = buf
- if self.data.startswith('\xaa\xaa'):
- # SNAP
- self.type = struct.unpack('>H', self.data[6:8])[0]
- self._unpack_data(self.data[8:])
- else:
- # non-SNAP
- dsap = ord(self.data[0])
- if dsap == 0x06: # SAP_IP
- self.data = self.ip = self._typesw[ethernet.ETH_TYPE_IP](self.data[3:])
- elif dsap == 0x10 or dsap == 0xe0: # SAP_NETWARE{1,2}
- self.data = self.ipx = self._typesw[ethernet.ETH_TYPE_IPX](self.data[3:])
- elif dsap == 0x42: # SAP_STP
- self.data = self.stp = stp.STP(self.data[3:])
-
-if __name__ == '__main__':
- import unittest
-
- class LLCTestCase(unittest.TestCase):
-
- def test_llc(self):
- s = '\xaa\xaa\x03\x00\x00\x00\x08\x00\x45\x00\x00\x28\x07\x27\x40\x00\x80\x06\x1d\x39\x8d\xd4\x37\x3d\x3f\xf5\xd1\x69\xc0\x5f\x01\xbb\xb2\xd6\xef\x23\x38\x2b\x4f\x08\x50\x10\x42\x04\xac\x17\x00\x00'
-
- import ip
- llc_pkt = LLC(s)
- ip_pkt = ip.IP(llc_pkt.data)
- self.failUnless(llc_pkt.type == ethernet.ETH_TYPE_IP)
- self.failUnless(ip_pkt.dst == '\x3f\xf5\xd1\x69')
-
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/loopback.py b/scripts/external_libs/dpkt-1.8.6/dpkt/loopback.py
deleted file mode 100644
index 25992b31..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/loopback.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# $Id: loopback.py 38 2007-03-17 03:33:16Z dugsong $
-
-"""Platform-dependent loopback header."""
-
-import dpkt, ethernet, ip, ip6
-
-class Loopback(dpkt.Packet):
- __hdr__ = (('family', 'I', 0), )
- __byte_order__ = '@'
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- if self.family == 2:
- self.data = ip.IP(self.data)
- elif self.family == 0x02000000:
- self.family = 2
- self.data = ip.IP(self.data)
- elif self.family in (24, 28, 30):
- self.data = ip6.IP6(self.data)
- elif self.family > 1500:
- self.data = ethernet.Ethernet(self.data)
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/mrt.py b/scripts/external_libs/dpkt-1.8.6/dpkt/mrt.py
deleted file mode 100644
index 9f3c719c..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/mrt.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# $Id: mrt.py 29 2007-01-26 02:29:07Z jon.oberheide $
-
-"""Multi-threaded Routing Toolkit."""
-
-import dpkt
-import bgp
-
-# Multi-threaded Routing Toolkit
-# http://www.ietf.org/internet-drafts/draft-ietf-grow-mrt-03.txt
-
-# MRT Types
-NULL = 0
-START = 1
-DIE = 2
-I_AM_DEAD = 3
-PEER_DOWN = 4
-BGP = 5 # Deprecated by BGP4MP
-RIP = 6
-IDRP = 7
-RIPNG = 8
-BGP4PLUS = 9 # Deprecated by BGP4MP
-BGP4PLUS_01 = 10 # Deprecated by BGP4MP
-OSPF = 11
-TABLE_DUMP = 12
-BGP4MP = 16
-BGP4MP_ET = 17
-ISIS = 32
-ISIS_ET = 33
-OSPF_ET = 64
-
-# BGP4MP Subtypes
-BGP4MP_STATE_CHANGE = 0
-BGP4MP_MESSAGE = 1
-BGP4MP_ENTRY = 2
-BGP4MP_SNAPSHOT = 3
-BGP4MP_MESSAGE_32BIT_AS = 4
-
-# Address Family Types
-AFI_IPv4 = 1
-AFI_IPv6 = 2
-
-class MRTHeader(dpkt.Packet):
- __hdr__ = (
- ('ts', 'I', 0),
- ('type', 'H', 0),
- ('subtype', 'H', 0),
- ('len', 'I', 0)
- )
-
-class TableDump(dpkt.Packet):
- __hdr__ = (
- ('view', 'H', 0),
- ('seq', 'H', 0),
- ('prefix', 'I', 0),
- ('prefix_len', 'B', 0),
- ('status', 'B', 1),
- ('originated_ts', 'I', 0),
- ('peer_ip', 'I', 0),
- ('peer_as', 'H', 0),
- ('attr_len', 'H', 0)
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- plen = self.attr_len
- l = []
- while plen > 0:
- attr = bgp.BGP.Update.Attribute(self.data)
- self.data = self.data[len(attr):]
- plen -= len(attr)
- l.append(attr)
- self.attributes = l
-
-class BGP4MPMessage(dpkt.Packet):
- __hdr__ = (
- ('src_as', 'H', 0),
- ('dst_as', 'H', 0),
- ('intf', 'H', 0),
- ('family', 'H', AFI_IPv4),
- ('src_ip', 'I', 0),
- ('dst_ip', 'I', 0)
- )
-
-class BGP4MPMessage_32(dpkt.Packet):
- __hdr__ = (
- ('src_as', 'I', 0),
- ('dst_as', 'I', 0),
- ('intf', 'H', 0),
- ('family', 'H', AFI_IPv4),
- ('src_ip', 'I', 0),
- ('dst_ip', 'I', 0)
- )
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/netbios.py b/scripts/external_libs/dpkt-1.8.6/dpkt/netbios.py
deleted file mode 100644
index e535ad04..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/netbios.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# $Id: netbios.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Network Basic Input/Output System."""
-
-import struct
-import dpkt, dns
-
-def encode_name(name):
- """Return the NetBIOS first-level encoded name."""
- l = []
- for c in struct.pack('16s', name):
- c = ord(c)
- l.append(chr((c >> 4) + 0x41))
- l.append(chr((c & 0xf) + 0x41))
- return ''.join(l)
-
-def decode_name(nbname):
- """Return the NetBIOS first-level decoded nbname."""
- if len(nbname) != 32:
- return nbname
- l = []
- for i in range(0, 32, 2):
- l.append(chr(((ord(nbname[i]) - 0x41) << 4) |
- ((ord(nbname[i+1]) - 0x41) & 0xf)))
- return ''.join(l).split('\x00', 1)[0]
-
-# RR types
-NS_A = 0x01 # IP address
-NS_NS = 0x02 # Name Server
-NS_NULL = 0x0A # NULL
-NS_NB = 0x20 # NetBIOS general Name Service
-NS_NBSTAT = 0x21 # NetBIOS NODE STATUS
-
-# RR classes
-NS_IN = 1
-
-# NBSTAT name flags
-NS_NAME_G = 0x8000 # group name (as opposed to unique)
-NS_NAME_DRG = 0x1000 # deregister
-NS_NAME_CNF = 0x0800 # conflict
-NS_NAME_ACT = 0x0400 # active
-NS_NAME_PRM = 0x0200 # permanent
-
-# NBSTAT service names
-nbstat_svcs = {
- # (service, unique): list of ordered (name prefix, service name) tuples
- (0x00, 0):[ ('', 'Domain Name') ],
- (0x00, 1):[ ('IS~', 'IIS'), ('', 'Workstation Service') ],
- (0x01, 0):[ ('__MSBROWSE__', 'Master Browser') ],
- (0x01, 1):[ ('', 'Messenger Service') ],
- (0x03, 1):[ ('', 'Messenger Service') ],
- (0x06, 1):[ ('', 'RAS Server Service') ],
- (0x1B, 1):[ ('', 'Domain Master Browser') ],
- (0x1C, 0):[ ('INet~Services', 'IIS'), ('', 'Domain Controllers') ],
- (0x1D, 1):[ ('', 'Master Browser') ],
- (0x1E, 0):[ ('', 'Browser Service Elections') ],
- (0x1F, 1):[ ('', 'NetDDE Service') ],
- (0x20, 1):[ ('Forte_$ND800ZA', 'DCA IrmaLan Gateway Server Service'),
- ('', 'File Server Service') ],
- (0x21, 1):[ ('', 'RAS Client Service') ],
- (0x22, 1):[ ('', 'Microsoft Exchange Interchange(MSMail Connector)') ],
- (0x23, 1):[ ('', 'Microsoft Exchange Store') ],
- (0x24, 1):[ ('', 'Microsoft Exchange Directory') ],
- (0x2B, 1):[ ('', 'Lotus Notes Server Service') ],
- (0x2F, 0):[ ('IRISMULTICAST', 'Lotus Notes') ],
- (0x30, 1):[ ('', 'Modem Sharing Server Service') ],
- (0x31, 1):[ ('', 'Modem Sharing Client Service') ],
- (0x33, 0):[ ('IRISNAMESERVER', 'Lotus Notes') ],
- (0x43, 1):[ ('', 'SMS Clients Remote Control') ],
- (0x44, 1):[ ('', 'SMS Administrators Remote Control Tool') ],
- (0x45, 1):[ ('', 'SMS Clients Remote Chat') ],
- (0x46, 1):[ ('', 'SMS Clients Remote Transfer') ],
- (0x4C, 1):[ ('', 'DEC Pathworks TCPIP service on Windows NT') ],
- (0x52, 1):[ ('', 'DEC Pathworks TCPIP service on Windows NT') ],
- (0x87, 1):[ ('', 'Microsoft Exchange MTA') ],
- (0x6A, 1):[ ('', 'Microsoft Exchange IMC') ],
- (0xBE, 1):[ ('', 'Network Monitor Agent') ],
- (0xBF, 1):[ ('', 'Network Monitor Application') ]
- }
-def node_to_service_name((name, service, flags)):
- try:
- unique = int(flags & NS_NAME_G == 0)
- for namepfx, svcname in nbstat_svcs[(service, unique)]:
- if name.startswith(namepfx):
- return svcname
- except KeyError:
- pass
- return ''
-
-class NS(dns.DNS):
- """NetBIOS Name Service."""
- class Q(dns.DNS.Q):
- pass
-
- class RR(dns.DNS.RR):
- """NetBIOS resource record."""
- def unpack_rdata(self, buf, off):
- if self.type == NS_A:
- self.ip = self.rdata
- elif self.type == NS_NBSTAT:
- num = ord(self.rdata[0])
- off = 1
- l = []
- for i in range(num):
- name = self.rdata[off:off+15].split(None, 1)[0].split('\x00', 1)[0]
- service = ord(self.rdata[off+15])
- off += 16
- flags = struct.unpack('>H', self.rdata[off:off+2])[0]
- off += 2
- l.append((name, service, flags))
- self.nodenames = l
- # XXX - skip stats
-
- def pack_name(self, buf, name):
- return dns.DNS.pack_name(self, buf, encode_name(name))
-
- def unpack_name(self, buf, off):
- name, off = dns.DNS.unpack_name(self, buf, off)
- return decode_name(name), off
-
-class Session(dpkt.Packet):
- """NetBIOS Session Service."""
- __hdr__ = (
- ('type', 'B', 0),
- ('flags', 'B', 0),
- ('len', 'H', 0)
- )
-
-SSN_MESSAGE = 0
-SSN_REQUEST = 1
-SSN_POSITIVE = 2
-SSN_NEGATIVE = 3
-SSN_RETARGET = 4
-SSN_KEEPALIVE = 5
-
-class Datagram(dpkt.Packet):
- """NetBIOS Datagram Service."""
- __hdr__ = (
- ('type', 'B', 0),
- ('flags', 'B', 0),
- ('id', 'H', 0),
- ('src', 'I', 0),
- ('sport', 'H', 0),
- ('len', 'H', 0),
- ('off', 'H', 0)
- )
-
-DGRAM_UNIQUE = 0x10
-DGRAM_GROUP = 0x11
-DGRAM_BROADCAST = 0x12
-DGRAM_ERROR = 0x13
-DGRAM_QUERY = 0x14
-DGRAM_POSITIVE = 0x15
-DGRAM_NEGATIVE = 0x16
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/netflow.py b/scripts/external_libs/dpkt-1.8.6/dpkt/netflow.py
deleted file mode 100644
index 103b04f1..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/netflow.py
+++ /dev/null
@@ -1,214 +0,0 @@
-# $Id: netflow.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Cisco Netflow."""
-
-import itertools, struct
-import dpkt
-
-class NetflowBase(dpkt.Packet):
- """Base class for Cisco Netflow packets."""
-
- __hdr__ = (
- ('version', 'H', 1),
- ('count', 'H', 0),
- ('sys_uptime', 'I', 0),
- ('unix_sec', 'I', 0),
- ('unix_nsec', 'I', 0)
- )
-
- def __len__(self):
- return self.__hdr_len__ + (len(self.data[0]) * self.count)
-
- def __str__(self):
- # for now, don't try to enforce any size limits
- self.count = len(self.data)
- return self.pack_hdr() + ''.join(map(str, self.data))
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- buf = self.data
- l = []
- while buf:
- flow = self.NetflowRecord(buf)
- l.append(flow)
- buf = buf[len(flow):]
- self.data = l
-
- class NetflowRecordBase(dpkt.Packet):
- """Base class for netflow v1-v7 netflow records."""
-
- # performance optimizations
- def __len__(self):
- # don't bother with data
- return self.__hdr_len__
-
- def __str__(self):
- # don't bother with data
- return self.pack_hdr()
-
- def unpack(self, buf):
- # don't bother with data
- for k, v in itertools.izip(self.__hdr_fields__,
- struct.unpack(self.__hdr_fmt__, buf[:self.__hdr_len__])):
- setattr(self, k, v)
- self.data = ""
-
-
-class Netflow1(NetflowBase):
- """Netflow Version 1."""
-
- class NetflowRecord(NetflowBase.NetflowRecordBase):
- """Netflow v1 flow record."""
- __hdr__ = (
- ('src_addr', 'I', 0),
- ('dst_addr', 'I', 0),
- ('next_hop', 'I', 0),
- ('input_iface', 'H', 0),
- ('output_iface', 'H', 0),
- ('pkts_sent', 'I', 0),
- ('bytes_sent', 'I', 0),
- ('start_time', 'I', 0),
- ('end_time', 'I', 0),
- ('src_port', 'H', 0),
- ('dst_port', 'H', 0),
- ('pad1', 'H', 0),
- ('ip_proto', 'B', 0),
- ('tos', 'B', 0),
- ('tcp_flags', 'B', 0),
- ('pad2', 'B', 0),
- ('pad3', 'H', 0),
- ('reserved', 'I', 0)
- )
-
-# FYI, versions 2-4 don't appear to have ever seen the light of day.
-
-class Netflow5(NetflowBase):
- """Netflow Version 5."""
- __hdr__ = NetflowBase.__hdr__ + (
- ('flow_sequence', 'I', 0),
- ('engine_type', 'B', 0),
- ('engine_id', 'B', 0),
- ('reserved', 'H', 0),
- )
-
- class NetflowRecord(NetflowBase.NetflowRecordBase):
- """Netflow v5 flow record."""
- __hdr__ = (
- ('src_addr', 'I', 0),
- ('dst_addr', 'I', 0),
- ('next_hop', 'I', 0),
- ('input_iface', 'H', 0),
- ('output_iface', 'H', 0),
- ('pkts_sent', 'I', 0),
- ('bytes_sent', 'I', 0),
- ('start_time', 'I', 0),
- ('end_time', 'I', 0),
- ('src_port', 'H', 0),
- ('dst_port', 'H', 0),
- ('pad1', 'B', 0),
- ('tcp_flags', 'B', 0),
- ('ip_proto', 'B', 0),
- ('tos', 'B', 0),
- ('src_as', 'H', 0),
- ('dst_as', 'H', 0),
- ('src_mask', 'B', 0),
- ('dst_mask', 'B', 0),
- ('pad2', 'H', 0),
- )
-
-class Netflow6(NetflowBase):
- """Netflow Version 6.
- XXX - unsupported by Cisco, but may be found in the field.
- """
- __hdr__ = Netflow5.__hdr__
-
- class NetflowRecord(NetflowBase.NetflowRecordBase):
- """Netflow v6 flow record."""
- __hdr__ = (
- ('src_addr', 'I', 0),
- ('dst_addr', 'I', 0),
- ('next_hop', 'I', 0),
- ('input_iface', 'H', 0),
- ('output_iface', 'H', 0),
- ('pkts_sent', 'I', 0),
- ('bytes_sent', 'I', 0),
- ('start_time', 'I', 0),
- ('end_time', 'I', 0),
- ('src_port', 'H', 0),
- ('dst_port', 'H', 0),
- ('pad1', 'B', 0),
- ('tcp_flags', 'B', 0),
- ('ip_proto', 'B', 0),
- ('tos', 'B', 0),
- ('src_as', 'H', 0),
- ('dst_as', 'H', 0),
- ('src_mask', 'B', 0),
- ('dst_mask', 'B', 0),
- ('in_encaps', 'B', 0),
- ('out_encaps', 'B', 0),
- ('peer_nexthop', 'I', 0),
- )
-
-class Netflow7(NetflowBase):
- """Netflow Version 7."""
- __hdr__ = NetflowBase.__hdr__ + (
- ('flow_sequence', 'I', 0),
- ('reserved', 'I', 0),
- )
-
- class NetflowRecord(NetflowBase.NetflowRecordBase):
- """Netflow v7 flow record."""
- __hdr__ = (
- ('src_addr', 'I', 0),
- ('dst_addr', 'I', 0),
- ('next_hop', 'I', 0),
- ('input_iface', 'H', 0),
- ('output_iface', 'H', 0),
- ('pkts_sent', 'I', 0),
- ('bytes_sent', 'I', 0),
- ('start_time', 'I', 0),
- ('end_time', 'I', 0),
- ('src_port', 'H', 0),
- ('dst_port', 'H', 0),
- ('flags', 'B', 0),
- ('tcp_flags', 'B', 0),
- ('ip_proto', 'B', 0),
- ('tos', 'B', 0),
- ('src_as', 'H', 0),
- ('dst_as', 'H', 0),
- ('src_mask', 'B', 0),
- ('dst_mask', 'B', 0),
- ('pad2', 'H', 0),
- ('router_sc', 'I', 0),
- )
-
-# No support for v8 or v9 yet.
-
-if __name__ == '__main__':
- import unittest
-
- class NetflowV1TestCase(unittest.TestCase):
- sample_v1 = "\x00\x01\x00\x18gza<B\x00\xfc\x1c$\x93\x08p\xac\x01 W\xc0\xa8c\xf7\n\x00\x02\x01\x00\x03\x00\n\x00\x00\x00\x01\x00\x00\x02(gz7,gz7,\\\x1b\x00P\xac\x01\x11,\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01\x18S\xac\x18\xd9\xaa\xc0\xa82\x02\x00\x03\x00\x19\x00\x00\x00\x01\x00\x00\x05\xdcgz7|gz7|\xd8\xe3\x00P\xac\x01\x06,\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01\x14\x18\xac\x18\x8d\xcd\xc0\xa82f\x00\x03\x00\x07\x00\x00\x00\x01\x00\x00\x05\xdcgz7\x90gz7\x90\x8a\x81\x17o\xac\x01\x066\x10\x00\x00\x00\x00\x04\x00\x03\xac\x0f'$\xac\x01\xe5\x1d\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x02(gz:8gz:8\xa3Q\x126\xac)\x06\xfd\x18\x00\x00\x00\x00\x04\x00\x1b\xac\x01\x16E\xac#\x17\x8e\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x02(gz:Lgz:L\xc9\xff\x00P\xac\x1f\x06\x86\x02\x00\x00\x00\x00\x03\x00\x1b\xac\r\t\xff\xac\x01\x99\x95\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:Xgz:X\xee9\x00\x17\xac\x01\x06\xde\x10\x00\x00\x00\x00\x04\x00\x03\xac\x0eJ\xd8\xac\x01\xae/\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:hgz:h\xb3n\x00\x15\xac\x01\x06\x81\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01#8\xac\x01\xd9*\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:tgz:t\x00\x00\x83P\xac!\x01\xab\x10\x00\x00\x00\x00\x03\x00\x1b\xac\n`7\xac*\x93J\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:tgz:t\x00\x00\x00\x00\xac\x012\xa9\x10\x00\x00\x00\x00\x04\x00\x07\xac\nG\x1f\xac\x01\xfdJ\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x00(gz:\x88gz:\x88!\x99i\x87\xac\x1e\x06~\x02\x00\x00\x00\x00\x03\x00\x1b\xac\x01(\xc9\xac\x01B\xc4\xc0\xa82\x02\x00\x03\x00\x19\x00\x00\x00\x01\x00\x00\x00(gz:\x88gz:\x88}6\x00P\xac\x01\x06\xfe\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x0b\x08\xe8\xac\x01F\xe2\xc0\xa82\x02\x00\x04\x00\x19\x00\x00\x00\x01\x00\x00\x05\xdcgz:\x9cgz:\x9c`ii\x87\xac\x01\x06;\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01\x1d$\xac<\xf0\xc3\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:\x9cgz:\x9cF2\x00\
x14\xac\x01\x06s\x18\x00\x00\x00\x00\x04\x00\x03\xac\x0b\x11Q\xac\x01\xde\x06\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:\xb0gz:\xb0\xef#\x1a+\xac)\x06\xe9\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x0cR\xd9\xac\x01o\xe8\xc0\xa82\x02\x00\x04\x00\x19\x00\x00\x00\x01\x00\x00\x05\xdcgz:\xc4gz:\xc4\x13n\x00n\xac\x19\x06\xa8\x10\x00\x00\x00\x00\x03\x00\x19\xac\x01=\xdd\xac\x01}\xee\xc0\xa82f\x00\x03\x00\x07\x00\x00\x00\x01\x00\x00\x00(gz:\xc4gz:\xc4\x00\x00\xdc\xbb\xac\x01\x01\xd3\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x0f(\xd1\xac\x01\xcc\xa5\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:\xd8gz:\xd8\xc5s\x17o\xac\x19\x06#\x18\x00\x00\x00\x00\x03\x00\x07\xac\n\x85[\xc0\xa8cn\n\x00\x02\x01\x00\x04\x00\n\x00\x00\x00\x01\x00\x00\x05\xdcgz:\xe4gz:\xe4\xbfl\x00P\xac\x01\x06\xcf\x10\x00\x00\x00\x00\x04\x00\x07\xac\x010\x1f\xac\x18!E\xc0\xa82f\x00\x03\x00\x07\x00\x00\x00\x01\x00\x00\x05\xdcgz;\x00gz;\x00\x11\x95\x04\xbe\xc0\xa8\x06\xea\x10\x00\x00\x00\x00\x03\x00\n\xac\x010\xb6\xac\x1e\xf4\xaa\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz;4gz;4\x88d\x00\x17\xac\x01\x06\x1f\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01#_\xac\x1e\xb0\t\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz;Hgz;H\x81S\x00P\xac \x06N\x10\x00\x00\x00\x00\x03\x00\x1b\xac\x01\x04\xd9\xac\x01\x94c\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x02(gz;\\gz;\\U\x10\x00P\xac\x01\x06P\x18\x00\x00\x00\x00\x04\x00\x1b\xac\x01<\xae\xac*\xac!\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x00\xfagz;\x84gz;\x84\x0c\xe7\x00P\xac\x01\x11\xfd\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01\x1f\x1f\xac\x17\xedi\xc0\xa82\x02\x00\x03\x00\x19\x00\x00\x00\x01\x00\x00\x05\xdcgz;\x98gz;\x98\xba\x17\x00\x16\xac\x01\x06|\x10\x00\x00\x00\x00\x03\x00\x07"
-
- def testPack(self):
- pass
-
- def testUnpack(self):
- nf = Netflow1(self.sample_v1)
- assert len(nf.data) == 24
- #print repr(nfv1)
-
- class NetflowV5TestCase(unittest.TestCase):
- sample_v5 = '\x00\x05\x00\x1d\xb5\xfa\xc9\xd0:\x0bAB&Vw\xde\x9bsv1\x00\x01\x00\x00\xac\n\x86\xa6\xac\x01\xaa\xf7\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x02(\xb5\xfa\x81\x14\xb5\xfa\x81\x1452\x00P\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x91D\xac\x14C\xe4\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x00(\xb5\xfa\x9b\xbd\xb5\xfa\x9b\xbd\x00P\x85\xd7\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x17\xe2\xd7\xac\x01\x8cV\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfao\xb8\xb5\xfao\xb8v\xe8\x17o\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x0e\xf2\xe5\xac\x01\x91\xb2\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x00\xfa\xb5\xfa\x81\xee\xb5\xfa\x81\xee\xd0\xeb\x00\x15\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\nCj\xac)\xa7\t\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x02(\xb5\xfa\x85\x92\xb5\xfa\x85\x92\x8c\xb0\x005\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x96=\xac\x15\x1a\xa8\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x86\xe0\xb5\xfa\x86\xe0\xb4\xe7\x00\xc2\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01V\xd1\xac\x01\x86\x15\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa}:\xb5\xfa}:[Q\x00P\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac2\xf1\xb1\xac)\x19\xca\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x83\xc3\xb5\xfa\x83\xc3\x16,\x00\x15\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x0cA4\xac\x01\x9az\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x8d\xa7\xb5\xfa\x8d\xa7\x173\x00\x15\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x1e\xd2\x84\xac)\xd8\xd2\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x8e\x97\xb5\xfa\x8e\x977*\x17o\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x85J\xac 
\x11\xfc\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x02(\xb5\xfa\x884\xb5\xfa\x884\xf5\xdd\x00\x8f\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x04\x80\xac<[n\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x9dr\xb5\xfa\x9drs$\x00\x16\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\xb9J\xac"\xc9\xd7\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x00(\xb5\xfa\x90r\xb5\xfa\x90r\x0f\x8d\x00\xc2\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac*\xa3\x10\xac\x01\xb4\x19\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x00(\xb5\xfa\x92\x03\xb5\xfa\x92\x03pf\x00\x15\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\xabo\xac\x1e\x7fi\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x93\x7f\xb5\xfa\x93\x7f\x00P\x0b\x98\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x0c\n\xea\xac\x01\xa1\x15\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfay\xcf\xb5\xfay\xcf[3\x17\xe0\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\xbb\xb3\xac)u\x8c\n\x00\x02\x01\x00i\x00\xdb\x00\x00\x00\x01\x00\x00\x00\xfa\xb5\xfa\x943\xb5\xfa\x943\x00P\x1e\xca\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x0fJ`\xac\x01\xab\x94\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x02(\xb5\xfa\x87[\xb5\xfa\x87[\x9a\xd6/\xab\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac*\x0f\x93\xac\x01\xb8\xa3\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x00(\xb5\xfa\x89\xbb\xb5\xfa\x89\xbbn\xe1\x00P\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x93\xa1\xac\x16\x80\x0c\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x00(\xb5\xfa\x87&\xb5\xfa\x87&\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x83Z\xac\x1fR\xcd\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x90\r\xb5\xfa\x90\r\xf7*\x00\x8a\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x0c\xe0\xad\xac\x01\xa8V\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x9c\xf6\xb5\xfa\x9c\xf6\xe5|\x1a+\x00\x00\x06\x00\x00\x00\x00\x00\x00\x0
0\x00\x00\xac\x1e\xccT\xac<x&\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x80\xea\xb5\xfa\x80\xea\x00\x00\x00\x00\x00\x00/\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\xbb\x18\xac\x01|z\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x00\xfa\xb5\xfa\x88p\xb5\xfa\x88p\x00P\x0b}\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x17\x0er\xac\x01\x8f\xdd\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x02(\xb5\xfa\x89\xf7\xb5\xfa\x89\xf7\r\xf7\x00\x8a\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\n\xbb\x04\xac<\xb0\x15\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x90\xa9\xb5\xfa\x90\xa9\x9c\xd0\x00\x8f\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\nz?\xac)\x03\xc8\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfaue\xb5\xfaue\xee\xa6\x00P\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\xb5\x05\xc0\xa8c\x9f\n\x00\x02\x01\x00i\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa{\xc7\xb5\xfa{\xc7\x00P\x86\xa9\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac2\xa5\x1b\xac)0\xbf\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x00\xfa\xb5\xfa\x9bZ\xb5\xfa\x9bZC\xf9\x17\xe0\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-
- def testPack(self):
- pass
-
- def testUnpack(self):
- nf = Netflow5(self.sample_v5)
- assert len(nf.data) == 29
- #print repr(nfv5)
-
- unittest.main()
-
-
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/ntp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/ntp.py
deleted file mode 100644
index 7012af2a..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/ntp.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# $Id: ntp.py 48 2008-05-27 17:31:15Z yardley $
-
-"""Network Time Protocol."""
-
-import dpkt
-
-# NTP v4
-
-# Leap Indicator (LI) Codes
-NO_WARNING = 0
-LAST_MINUTE_61_SECONDS = 1
-LAST_MINUTE_59_SECONDS = 2
-ALARM_CONDITION = 3
-
-# Mode Codes
-RESERVED = 0
-SYMMETRIC_ACTIVE = 1
-SYMMETRIC_PASSIVE = 2
-CLIENT = 3
-SERVER = 4
-BROADCAST = 5
-CONTROL_MESSAGE = 6
-PRIVATE = 7
-
-class NTP(dpkt.Packet):
- __hdr__ = (
- ('flags', 'B', 0),
- ('stratum', 'B', 0),
- ('interval', 'B', 0),
- ('precision', 'B', 0),
- ('delay', 'I', 0),
- ('dispersion', 'I', 0),
- ('id', '4s', 0),
- ('update_time', '8s', 0),
- ('originate_time', '8s', 0),
- ('receive_time', '8s', 0),
- ('transmit_time', '8s', 0)
- )
-
- def _get_v(self):
- return (self.flags >> 3) & 0x7
- def _set_v(self, v):
- self.flags = (self.flags & ~0x38) | ((v & 0x7) << 3)
- v = property(_get_v, _set_v)
-
- def _get_li(self):
- return (self.flags >> 6) & 0x3
- def _set_li(self, li):
- self.flags = (self.flags & ~0xc0) | ((li & 0x3) << 6)
- li = property(_get_li, _set_li)
-
- def _get_mode(self):
- return (self.flags & 0x7)
- def _set_mode(self, mode):
- self.flags = (self.flags & ~0x7) | (mode & 0x7)
- mode = property(_get_mode, _set_mode)
-
-if __name__ == '__main__':
- import unittest
-
- class NTPTestCase(unittest.TestCase):
- def testPack(self):
- n = NTP(self.s)
- self.failUnless(self.s == str(n))
-
- def testUnpack(self):
- n = NTP(self.s)
- self.failUnless(n.li == NO_WARNING)
- self.failUnless(n.v == 4)
- self.failUnless(n.mode == SERVER)
- self.failUnless(n.stratum == 2)
- self.failUnless(n.id == '\xc1\x02\x04\x02')
-
- # test get/set functions
- n.li = ALARM_CONDITION
- n.v = 3
- n.mode = CLIENT
- self.failUnless(n.li == ALARM_CONDITION)
- self.failUnless(n.v == 3)
- self.failUnless(n.mode == CLIENT)
-
- s = '\x24\x02\x04\xef\x00\x00\x00\x84\x00\x00\x33\x27\xc1\x02\x04\x02\xc8\x90\xec\x11\x22\xae\x07\xe5\xc8\x90\xf9\xd9\xc0\x7e\x8c\xcd\xc8\x90\xf9\xd9\xda\xc5\xb0\x78\xc8\x90\xf9\xd9\xda\xc6\x8a\x93'
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/ospf.py b/scripts/external_libs/dpkt-1.8.6/dpkt/ospf.py
deleted file mode 100644
index 0ea0b3ca..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/ospf.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# $Id: ospf.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Open Shortest Path First."""
-
-import dpkt
-
-AUTH_NONE = 0
-AUTH_PASSWORD = 1
-AUTH_CRYPTO = 2
-
-class OSPF(dpkt.Packet):
- __hdr__ = (
- ('v', 'B', 0),
- ('type', 'B', 0),
- ('len', 'H', 0),
- ('router', 'I', 0),
- ('area', 'I', 0),
- ('sum', 'H', 0),
- ('atype', 'H', 0),
- ('auth', '8s', '')
- )
- def __str__(self):
- if not self.sum:
- self.sum = dpkt.in_cksum(dpkt.Packet.__str__(self))
- return dpkt.Packet.__str__(self)
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/pcap.py b/scripts/external_libs/dpkt-1.8.6/dpkt/pcap.py
deleted file mode 100644
index 45aca3b1..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/pcap.py
+++ /dev/null
@@ -1,164 +0,0 @@
-# $Id: pcap.py 77 2011-01-06 15:59:38Z dugsong $
-
-"""Libpcap file format."""
-
-import sys, time
-import dpkt
-
-TCPDUMP_MAGIC = 0xa1b2c3d4L
-PMUDPCT_MAGIC = 0xd4c3b2a1L
-
-PCAP_VERSION_MAJOR = 2
-PCAP_VERSION_MINOR = 4
-
-DLT_NULL = 0
-DLT_EN10MB = 1
-DLT_EN3MB = 2
-DLT_AX25 = 3
-DLT_PRONET = 4
-DLT_CHAOS = 5
-DLT_IEEE802 = 6
-DLT_ARCNET = 7
-DLT_SLIP = 8
-DLT_PPP = 9
-DLT_FDDI = 10
-DLT_PFSYNC = 18
-DLT_IEEE802_11 = 105
-DLT_LINUX_SLL = 113
-DLT_PFLOG = 117
-DLT_IEEE802_11_RADIO = 127
-
-if sys.platform.find('openbsd') != -1:
- DLT_LOOP = 12
- DLT_RAW = 14
-else:
- DLT_LOOP = 108
- DLT_RAW = 12
-
-dltoff = { DLT_NULL:4, DLT_EN10MB:14, DLT_IEEE802:22, DLT_ARCNET:6,
- DLT_SLIP:16, DLT_PPP:4, DLT_FDDI:21, DLT_PFLOG:48, DLT_PFSYNC:4,
- DLT_LOOP:4, DLT_LINUX_SLL:16 }
-
-class PktHdr(dpkt.Packet):
- """pcap packet header."""
- __hdr__ = (
- ('tv_sec', 'I', 0),
- ('tv_usec', 'I', 0),
- ('caplen', 'I', 0),
- ('len', 'I', 0),
- )
-
-class LEPktHdr(PktHdr):
- __byte_order__ = '<'
-
-class FileHdr(dpkt.Packet):
- """pcap file header."""
- __hdr__ = (
- ('magic', 'I', TCPDUMP_MAGIC),
- ('v_major', 'H', PCAP_VERSION_MAJOR),
- ('v_minor', 'H', PCAP_VERSION_MINOR),
- ('thiszone', 'I', 0),
- ('sigfigs', 'I', 0),
- ('snaplen', 'I', 1500),
- ('linktype', 'I', 1),
- )
-
-class LEFileHdr(FileHdr):
- __byte_order__ = '<'
-
-class Writer(object):
- """Simple pcap dumpfile writer."""
- def __init__(self, fileobj, snaplen=1500, linktype=DLT_EN10MB):
- self.__f = fileobj
- if sys.byteorder == 'little':
- fh = LEFileHdr(snaplen=snaplen, linktype=linktype)
- else:
- fh = FileHdr(snaplen=snaplen, linktype=linktype)
- self.__f.write(str(fh))
-
- def writepkt(self, pkt, ts=None):
- if ts is None:
- ts = time.time()
- s = str(pkt)
- n = len(s)
- if sys.byteorder == 'little':
- ph = LEPktHdr(tv_sec=int(ts),
- tv_usec=int((float(ts) - int(ts)) * 1000000.0),
- caplen=n, len=n)
- else:
- ph = PktHdr(tv_sec=int(ts),
- tv_usec=int((float(ts) - int(ts)) * 1000000.0),
- caplen=n, len=n)
- self.__f.write(str(ph))
- self.__f.write(s)
-
- def close(self):
- self.__f.close()
-
-class Reader(object):
- """Simple pypcap-compatible pcap file reader."""
-
- def __init__(self, fileobj):
- self.name = fileobj.name
- self.fd = fileobj.fileno()
- self.__f = fileobj
- buf = self.__f.read(FileHdr.__hdr_len__)
- self.__fh = FileHdr(buf)
- self.__ph = PktHdr
- if self.__fh.magic == PMUDPCT_MAGIC:
- self.__fh = LEFileHdr(buf)
- self.__ph = LEPktHdr
- elif self.__fh.magic != TCPDUMP_MAGIC:
- raise ValueError, 'invalid tcpdump header'
- if self.__fh.linktype in dltoff:
- self.dloff = dltoff[self.__fh.linktype]
- else:
- self.dloff = 0
- self.snaplen = self.__fh.snaplen
- self.filter = ''
-
- def fileno(self):
- return self.fd
-
- def datalink(self):
- return self.__fh.linktype
-
- def setfilter(self, value, optimize=1):
- return NotImplementedError
-
- def readpkts(self):
- return list(self)
-
- def dispatch(self, cnt, callback, *args):
- if cnt > 0:
- for i in range(cnt):
- ts, pkt = self.next()
- callback(ts, pkt, *args)
- else:
- for ts, pkt in self:
- callback(ts, pkt, *args)
-
- def loop(self, callback, *args):
- self.dispatch(0, callback, *args)
-
- def __iter__(self):
- self.__f.seek(FileHdr.__hdr_len__)
- while 1:
- buf = self.__f.read(PktHdr.__hdr_len__)
- if not buf: break
- hdr = self.__ph(buf)
- buf = self.__f.read(hdr.caplen)
- yield (hdr.tv_sec + (hdr.tv_usec / 1000000.0), buf)
-
-if __name__ == '__main__':
- import unittest
-
- class PcapTestCase(unittest.TestCase):
- def test_endian(self):
- be = '\xa1\xb2\xc3\xd4\x00\x02\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x60\x00\x00\x00\x01'
- le = '\xd4\xc3\xb2\xa1\x02\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x60\x00\x00\x00\x01\x00\x00\x00'
- befh = FileHdr(be)
- lefh = LEFileHdr(le)
- self.failUnless(befh.linktype == lefh.linktype)
-
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/pim.py b/scripts/external_libs/dpkt-1.8.6/dpkt/pim.py
deleted file mode 100644
index fa340dd0..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/pim.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# $Id: pim.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Protocol Independent Multicast."""
-
-import dpkt
-
-class PIM(dpkt.Packet):
- __hdr__ = (
- ('v_type', 'B', 0x20),
- ('rsvd', 'B', 0),
- ('sum', 'H', 0)
- )
- def _get_v(self): return self.v_type >> 4
- def _set_v(self, v): self.v_type = (v << 4) | (self.v_type & 0xf)
- v = property(_get_v, _set_v)
-
- def _get_type(self): return self.v_type & 0xf
- def _set_type(self, type): self.v_type = (self.v_type & 0xf0) | type
- type = property(_get_type, _set_type)
-
- def __str__(self):
- if not self.sum:
- self.sum = dpkt.in_cksum(dpkt.Packet.__str__(self))
- return dpkt.Packet.__str__(self)
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/pmap.py b/scripts/external_libs/dpkt-1.8.6/dpkt/pmap.py
deleted file mode 100644
index 53185b22..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/pmap.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# $Id: pmap.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Portmap / rpcbind."""
-
-import dpkt
-
-PMAP_PROG = 100000L
-PMAP_PROCDUMP = 4
-PMAP_VERS = 2
-
-class Pmap(dpkt.Packet):
- __hdr__ = (
- ('prog', 'I', 0),
- ('vers', 'I', 0),
- ('prot', 'I', 0),
- ('port', 'I', 0),
- )
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/ppp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/ppp.py
deleted file mode 100644
index f4898b95..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/ppp.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# $Id: ppp.py 65 2010-03-26 02:53:51Z dugsong $
-
-"""Point-to-Point Protocol."""
-
-import struct
-import dpkt
-
-# XXX - finish later
-
-# http://www.iana.org/assignments/ppp-numbers
-PPP_IP = 0x21 # Internet Protocol
-PPP_IP6 = 0x57 # Internet Protocol v6
-
-# Protocol field compression
-PFC_BIT = 0x01
-
-class PPP(dpkt.Packet):
- __hdr__ = (
- ('p', 'B', PPP_IP),
- )
- _protosw = {}
-
- def set_p(cls, p, pktclass):
- cls._protosw[p] = pktclass
- set_p = classmethod(set_p)
-
- def get_p(cls, p):
- return cls._protosw[p]
- get_p = classmethod(get_p)
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- if self.p & PFC_BIT == 0:
- self.p = struct.unpack('>H', buf[:2])[0]
- self.data = self.data[1:]
- try:
- self.data = self._protosw[self.p](self.data)
- setattr(self, self.data.__class__.__name__.lower(), self.data)
- except (KeyError, struct.error, dpkt.UnpackError):
- pass
-
- def pack_hdr(self):
- try:
- if self.p > 0xff:
- return struct.pack('>H', self.p)
- return dpkt.Packet.pack_hdr(self)
- except struct.error, e:
- raise dpkt.PackError(str(e))
-
-def __load_protos():
- g = globals()
- for k, v in g.iteritems():
- if k.startswith('PPP_'):
- name = k[4:]
- modname = name.lower()
- try:
- mod = __import__(modname, g)
- except ImportError:
- continue
- PPP.set_p(v, getattr(mod, name))
-
-if not PPP._protosw:
- __load_protos()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/pppoe.py b/scripts/external_libs/dpkt-1.8.6/dpkt/pppoe.py
deleted file mode 100644
index 8b4c9e71..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/pppoe.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# $Id: pppoe.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""PPP-over-Ethernet."""
-
-import dpkt, ppp
-
-# RFC 2516 codes
-PPPoE_PADI = 0x09
-PPPoE_PADO = 0x07
-PPPoE_PADR = 0x19
-PPPoE_PADS = 0x65
-PPPoE_PADT = 0xA7
-PPPoE_SESSION = 0x00
-
-class PPPoE(dpkt.Packet):
- __hdr__ = (
- ('v_type', 'B', 0x11),
- ('code', 'B', 0),
- ('session', 'H', 0),
- ('len', 'H', 0) # payload length
- )
- def _get_v(self): return self.v_type >> 4
- def _set_v(self, v): self.v_type = (v << 4) | (self.v_type & 0xf)
- v = property(_get_v, _set_v)
-
- def _get_type(self): return self.v_type & 0xf
- def _set_type(self, t): self.v_type = (self.v_type & 0xf0) | t
- type = property(_get_type, _set_type)
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- try:
- if self.code == 0:
- self.data = self.ppp = ppp.PPP(self.data)
- except dpkt.UnpackError:
- pass
-
-# XXX - TODO TLVs, etc.
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/qq.py b/scripts/external_libs/dpkt-1.8.6/dpkt/qq.py
deleted file mode 100644
index 4720c69b..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/qq.py
+++ /dev/null
@@ -1,224 +0,0 @@
-# $Id: qq.py 48 2008-05-27 17:31:15Z yardley $
-
-from dpkt import Packet
-
-# header_type
-QQ_HEADER_BASIC_FAMILY = 0x02
-QQ_HEADER_P2P_FAMILY = 0x00
-QQ_HEADER_03_FAMILY = 0x03
-QQ_HEADER_04_FAMILY = 0x04
-QQ_HEADER_05_FAMILY = 0x05
-
-header_type_str = [
- "QQ_HEADER_P2P_FAMILY",
- "Unknown Type",
- "QQ_HEADER_03_FAMILY",
- "QQ_HEADER_04_FAMILY",
- "QQ_HEADER_05_FAMILY",
-]
-
-# command
-QQ_CMD_LOGOUT = 0x0001
-QQ_CMD_KEEP_ALIVE = 0x0002
-QQ_CMD_MODIFY_INFO = 0x0004
-QQ_CMD_SEARCH_USER = 0x0005
-QQ_CMD_GET_USER_INFO = 0x0006
-QQ_CMD_ADD_FRIEND = 0x0009
-QQ_CMD_DELETE_FRIEND = 0x000A
-QQ_CMD_ADD_FRIEND_AUTH = 0x000B
-QQ_CMD_CHANGE_STATUS = 0x000D
-QQ_CMD_ACK_SYS_MSG = 0x0012
-QQ_CMD_SEND_IM = 0x0016
-QQ_CMD_RECV_IM = 0x0017
-QQ_CMD_REMOVE_SELF = 0x001C
-QQ_CMD_REQUEST_KEY = 0x001D
-QQ_CMD_LOGIN = 0x0022
-QQ_CMD_GET_FRIEND_LIST = 0x0026
-QQ_CMD_GET_ONLINE_OP = 0x0027
-QQ_CMD_SEND_SMS = 0x002D
-QQ_CMD_CLUSTER_CMD = 0x0030
-QQ_CMD_TEST = 0x0031
-QQ_CMD_GROUP_DATA_OP = 0x003C
-QQ_CMD_UPLOAD_GROUP_FRIEND = 0x003D
-QQ_CMD_FRIEND_DATA_OP = 0x003E
-QQ_CMD_DOWNLOAD_GROUP_FRIEND = 0x0058
-QQ_CMD_FRIEND_LEVEL_OP = 0x005C
-QQ_CMD_PRIVACY_DATA_OP = 0x005E
-QQ_CMD_CLUSTER_DATA_OP = 0x005F
-QQ_CMD_ADVANCED_SEARCH = 0x0061
-QQ_CMD_REQUEST_LOGIN_TOKEN = 0x0062
-QQ_CMD_USER_PROPERTY_OP = 0x0065
-QQ_CMD_TEMP_SESSION_OP = 0x0066
-QQ_CMD_SIGNATURE_OP = 0x0067
-QQ_CMD_RECV_MSG_SYS = 0x0080
-QQ_CMD_RECV_MSG_FRIEND_CHANGE_STATUS = 0x0081
-QQ_CMD_WEATHER_OP = 0x00A6
-QQ_CMD_ADD_FRIEND_EX = 0x00A7
-QQ_CMD_AUTHORIZE = 0X00A8
-QQ_CMD_UNKNOWN = 0xFFFF
-QQ_SUB_CMD_SEARCH_ME_BY_QQ_ONLY = 0x03
-QQ_SUB_CMD_SHARE_GEOGRAPHY = 0x04
-QQ_SUB_CMD_GET_FRIEND_LEVEL = 0x02
-QQ_SUB_CMD_GET_CLUSTER_ONLINE_MEMBER = 0x01
-QQ_05_CMD_REQUEST_AGENT = 0x0021
-QQ_05_CMD_REQUEST_FACE = 0x0022
-QQ_05_CMD_TRANSFER = 0x0023
-QQ_05_CMD_REQUEST_BEGIN = 0x0026
-QQ_CLUSTER_CMD_CREATE_CLUSTER= 0x01
-QQ_CLUSTER_CMD_MODIFY_MEMBER= 0x02
-QQ_CLUSTER_CMD_MODIFY_CLUSTER_INFO= 0x03
-QQ_CLUSTER_CMD_GET_CLUSTER_INFO= 0x04
-QQ_CLUSTER_CMD_ACTIVATE_CLUSTER= 0x05
-QQ_CLUSTER_CMD_SEARCH_CLUSTER= 0x06
-QQ_CLUSTER_CMD_JOIN_CLUSTER= 0x07
-QQ_CLUSTER_CMD_JOIN_CLUSTER_AUTH= 0x08
-QQ_CLUSTER_CMD_EXIT_CLUSTER= 0x09
-QQ_CLUSTER_CMD_SEND_IM= 0x0A
-QQ_CLUSTER_CMD_GET_ONLINE_MEMBER= 0x0B
-QQ_CLUSTER_CMD_GET_MEMBER_INFO= 0x0C
-QQ_CLUSTER_CMD_MODIFY_CARD = 0x0E
-QQ_CLUSTER_CMD_GET_CARD_BATCH= 0x0F
-QQ_CLUSTER_CMD_GET_CARD = 0x10
-QQ_CLUSTER_CMD_COMMIT_ORGANIZATION = 0x11
-QQ_CLUSTER_CMD_UPDATE_ORGANIZATION= 0x12
-QQ_CLUSTER_CMD_COMMIT_MEMBER_ORGANIZATION = 0x13
-QQ_CLUSTER_CMD_GET_VERSION_ID= 0x19
-QQ_CLUSTER_CMD_SEND_IM_EX = 0x1A
-QQ_CLUSTER_CMD_SET_ROLE = 0x1B
-QQ_CLUSTER_CMD_TRANSFER_ROLE = 0x1C
-QQ_CLUSTER_CMD_CREATE_TEMP = 0x30
-QQ_CLUSTER_CMD_MODIFY_TEMP_MEMBER = 0x31
-QQ_CLUSTER_CMD_EXIT_TEMP = 0x32
-QQ_CLUSTER_CMD_GET_TEMP_INFO = 0x33
-QQ_CLUSTER_CMD_MODIFY_TEMP_INFO = 0x34
-QQ_CLUSTER_CMD_SEND_TEMP_IM = 0x35
-QQ_CLUSTER_CMD_SUB_CLUSTER_OP = 0x36
-QQ_CLUSTER_CMD_ACTIVATE_TEMP = 0x37
-
-QQ_CLUSTER_SUB_CMD_ADD_MEMBER = 0x01
-QQ_CLUSTER_SUB_CMD_REMOVE_MEMBER = 0x02
-QQ_CLUSTER_SUB_CMD_GET_SUBJECT_LIST = 0x02
-QQ_CLUSTER_SUB_CMD_GET_DIALOG_LIST = 0x01
-
-QQ_SUB_CMD_GET_ONLINE_FRIEND = 0x2
-QQ_SUB_CMD_GET_ONLINE_SERVICE = 0x3
-QQ_SUB_CMD_UPLOAD_GROUP_NAME = 0x2
-QQ_SUB_CMD_DOWNLOAD_GROUP_NAME = 0x1
-QQ_SUB_CMD_SEND_TEMP_SESSION_IM = 0x01
-QQ_SUB_CMD_BATCH_DOWNLOAD_FRIEND_REMARK = 0x0
-QQ_SUB_CMD_UPLOAD_FRIEND_REMARK = 0x1
-QQ_SUB_CMD_REMOVE_FRIEND_FROM_LIST = 0x2
-QQ_SUB_CMD_DOWNLOAD_FRIEND_REMARK = 0x3
-QQ_SUB_CMD_MODIFY_SIGNATURE = 0x01
-QQ_SUB_CMD_DELETE_SIGNATURE = 0x02
-QQ_SUB_CMD_GET_SIGNATURE = 0x03
-QQ_SUB_CMD_GET_USER_PROPERTY = 0x01
-QQ_SUB_CMD_GET_WEATHER = 0x01
-
-QQ_FILE_CMD_HEART_BEAT = 0x0001
-QQ_FILE_CMD_HEART_BEAT_ACK = 0x0002
-QQ_FILE_CMD_TRANSFER_FINISHED = 0x0003
-QQ_FILE_CMD_FILE_OP = 0x0007
-QQ_FILE_CMD_FILE_OP_ACK = 0x0008
-QQ_FILE_CMD_SENDER_SAY_HELLO = 0x0031
-QQ_FILE_CMD_SENDER_SAY_HELLO_ACK = 0x0032
-QQ_FILE_CMD_RECEIVER_SAY_HELLO = 0x0033
-QQ_FILE_CMD_RECEIVER_SAY_HELLO_ACK = 0x0034
-QQ_FILE_CMD_NOTIFY_IP_ACK = 0x003C
-QQ_FILE_CMD_PING = 0x003D
-QQ_FILE_CMD_PONG = 0x003E
-QQ_FILE_CMD_YES_I_AM_BEHIND_FIREWALL = 0x0040
-QQ_FILE_CMD_REQUEST_AGENT = 0x0001
-QQ_FILE_CMD_CHECK_IN = 0x0002
-QQ_FILE_CMD_FORWARD = 0x0003
-QQ_FILE_CMD_FORWARD_FINISHED = 0x0004
-QQ_FILE_CMD_IT_IS_TIME = 0x0005
-QQ_FILE_CMD_I_AM_READY = 0x0006
-
-command_str = {
- 0x0001: "QQ_CMD_LOGOUT",
- 0x0002: "QQ_CMD_KEEP_ALIVE",
- 0x0004: "QQ_CMD_MODIFY_INFO",
- 0x0005: "QQ_CMD_SEARCH_USER",
- 0x0006: "QQ_CMD_GET_USER_INFO",
- 0x0009: "QQ_CMD_ADD_FRIEND",
- 0x000A: "QQ_CMD_DELETE_FRIEND",
- 0x000B: "QQ_CMD_ADD_FRIEND_AUTH",
- 0x000D: "QQ_CMD_CHANGE_STATUS",
- 0x0012: "QQ_CMD_ACK_SYS_MSG",
- 0x0016: "QQ_CMD_SEND_IM",
- 0x0017: "QQ_CMD_RECV_IM",
- 0x001C: "QQ_CMD_REMOVE_SELF",
- 0x001D: "QQ_CMD_REQUEST_KEY",
- 0x0022: "QQ_CMD_LOGIN",
- 0x0026: "QQ_CMD_GET_FRIEND_LIST",
- 0x0027: "QQ_CMD_GET_ONLINE_OP",
- 0x002D: "QQ_CMD_SEND_SMS",
- 0x0030: "QQ_CMD_CLUSTER_CMD",
- 0x0031: "QQ_CMD_TEST",
- 0x003C: "QQ_CMD_GROUP_DATA_OP",
- 0x003D: "QQ_CMD_UPLOAD_GROUP_FRIEND",
- 0x003E: "QQ_CMD_FRIEND_DATA_OP",
- 0x0058: "QQ_CMD_DOWNLOAD_GROUP_FRIEND",
- 0x005C: "QQ_CMD_FRIEND_LEVEL_OP",
- 0x005E: "QQ_CMD_PRIVACY_DATA_OP",
- 0x005F: "QQ_CMD_CLUSTER_DATA_OP",
- 0x0061: "QQ_CMD_ADVANCED_SEARCH",
- 0x0062: "QQ_CMD_REQUEST_LOGIN_TOKEN",
- 0x0065: "QQ_CMD_USER_PROPERTY_OP",
- 0x0066: "QQ_CMD_TEMP_SESSION_OP",
- 0x0067: "QQ_CMD_SIGNATURE_OP",
- 0x0080: "QQ_CMD_RECV_MSG_SYS",
- 0x0081: "QQ_CMD_RECV_MSG_FRIEND_CHANGE_STATUS",
- 0x00A6: "QQ_CMD_WEATHER_OP",
- 0x00A7: "QQ_CMD_ADD_FRIEND_EX",
- 0x00A8: "QQ_CMD_AUTHORIZE",
- 0xFFFF: "QQ_CMD_UNKNOWN",
- 0x0021: "_CMD_REQUEST_AGENT",
- 0x0022: "_CMD_REQUEST_FACE",
- 0x0023: "_CMD_TRANSFER",
- 0x0026: "_CMD_REQUEST_BEGIN",
-}
-
-
-class QQBasicPacket(Packet):
- __hdr__ = (
- ('header_type', 'B', 2),
- ('source', 'H', 0),
- ('command', 'H', 0),
- ('sequence', 'H', 0),
- ('qqNum', 'L', 0),
- )
-
-
-class QQ3Packet(Packet):
- __hdr__ = (
- ('header_type', 'B', 3),
- ('command', 'B', 0),
- ('sequence', 'H', 0),
- ('unknown1', 'L', 0),
- ('unknown2', 'L', 0),
- ('unknown3', 'L', 0),
- ('unknown4', 'L', 0),
- ('unknown5', 'L', 0),
- ('unknown6', 'L', 0),
- ('unknown7', 'L', 0),
- ('unknown8', 'L', 0),
- ('unknown9', 'L', 0),
- ('unknown10', 'B', 1),
- ('unknown11', 'B', 0),
- ('unknown12', 'B', 0),
- ('source', 'H', 0),
- ('unknown13', 'B', 0),
- )
-
-
-class QQ5Packet(Packet):
- __hdr__ = (
- ('header_type', 'B', 5),
- ('source', 'H', 0),
- ('unknown', 'H', 0),
- ('command', 'H', 0),
- ('sequence', 'H', 0),
- ('qqNum', 'L', 0),
- )
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/radiotap.py b/scripts/external_libs/dpkt-1.8.6/dpkt/radiotap.py
deleted file mode 100644
index 318be6cc..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/radiotap.py
+++ /dev/null
@@ -1,292 +0,0 @@
-'''Radiotap'''
-
-import dpkt
-import ieee80211
-import socket
-
-# Ref: http://www.radiotap.org
-# Fields Ref: http://www.radiotap.org/defined-fields/all
-
-# Present flags
-_TSFT_MASK = 0x1000000
-_FLAGS_MASK = 0x2000000
-_RATE_MASK = 0x4000000
-_CHANNEL_MASK = 0x8000000
-_FHSS_MASK = 0x10000000
-_ANT_SIG_MASK = 0x20000000
-_ANT_NOISE_MASK = 0x40000000
-_LOCK_QUAL_MASK = 0x80000000
-_TX_ATTN_MASK = 0x10000
-_DB_TX_ATTN_MASK = 0x20000
-_DBM_TX_POWER_MASK = 0x40000
-_ANTENNA_MASK = 0x80000
-_DB_ANT_SIG_MASK = 0x100000
-_DB_ANT_NOISE_MASK = 0x200000
-_RX_FLAGS_MASK = 0x400000
-_CHANNELPLUS_MASK = 0x200
-_EXT_MASK = 0x1
-
-_TSFT_SHIFT = 24
-_FLAGS_SHIFT = 25
-_RATE_SHIFT = 26
-_CHANNEL_SHIFT = 27
-_FHSS_SHIFT = 28
-_ANT_SIG_SHIFT = 29
-_ANT_NOISE_SHIFT = 30
-_LOCK_QUAL_SHIFT = 31
-_TX_ATTN_SHIFT = 16
-_DB_TX_ATTN_SHIFT = 17
-_DBM_TX_POWER_SHIFT = 18
-_ANTENNA_SHIFT = 19
-_DB_ANT_SIG_SHIFT = 20
-_DB_ANT_NOISE_SHIFT = 21
-_RX_FLAGS_SHIFT = 22
-_CHANNELPLUS_SHIFT = 10
-_EXT_SHIFT = 0
-
-# Flags elements
-_FLAGS_SIZE = 2
-_CFP_FLAG_SHIFT = 0
-_PREAMBLE_SHIFT = 1
-_WEP_SHIFT = 2
-_FRAG_SHIFT = 3
-_FCS_SHIFT = 4
-_DATA_PAD_SHIFT = 5
-_BAD_FCS_SHIFT = 6
-_SHORT_GI_SHIFT = 7
-
-# Channel type
-_CHAN_TYPE_SIZE = 4
-_CHANNEL_TYPE_SHIFT = 4
-_CCK_SHIFT = 5
-_OFDM_SHIFT = 6
-_TWO_GHZ_SHIFT = 7
-_FIVE_GHZ_SHIFT = 8
-_PASSIVE_SHIFT = 9
-_DYN_CCK_OFDM_SHIFT = 10
-_GFSK_SHIFT = 11
-_GSM_SHIFT = 12
-_STATIC_TURBO_SHIFT = 13
-_HALF_RATE_SHIFT = 14
-_QUARTER_RATE_SHIFT = 15
-
-# Flags offsets and masks
-_FCS_SHIFT = 4
-_FCS_MASK = 0x10
-
-class Radiotap(dpkt.Packet):
- __hdr__ = (
- ('version', 'B', 0),
- ('pad', 'B', 0),
- ('length', 'H', 0),
- ('present_flags', 'I', 0)
- )
-
- def _get_tsft_present(self): return (self.present_flags & _TSFT_MASK) >> _TSFT_SHIFT
- def _set_tsft_present(self, val): self.present_flags = self.present_flags | (val << _TSFT_SHIFT)
- def _get_flags_present(self): return (self.present_flags & _FLAGS_MASK) >> _FLAGS_SHIFT
- def _set_flags_present(self, val): self.present_flags = self.present_flags | (val << _FLAGS_SHIFT)
- def _get_rate_present(self): return (self.present_flags & _RATE_MASK) >> _RATE_SHIFT
- def _set_rate_present(self, val): self.present_flags = self.present_flags | (val << _RATE_SHIFT)
- def _get_channel_present(self): return (self.present_flags & _CHANNEL_MASK) >> _CHANNEL_SHIFT
- def _set_channel_present(self, val): self.present_flags = self.present_flags | (val << _CHANNEL_SHIFT)
- def _get_fhss_present(self): return (self.present_flags & _FHSS_MASK) >> _FHSS_SHIFT
- def _set_fhss_present(self, val): self.present_flags = self.present_flags | (val << _FHSS_SHIFT)
- def _get_ant_sig_present(self): return (self.present_flags & _ANT_SIG_MASK) >> _ANT_SIG_SHIFT
- def _set_ant_sig_present(self, val): self.present_flags = self.present_flags | (val << _ANT_SIG_SHIFT)
- def _get_ant_noise_present(self): return (self.present_flags & _ANT_NOISE_MASK) >> _ANT_NOISE_SHIFT
- def _set_ant_noise_present(self, val): self.present_flags = self.present_flags | (val << _ANT_NOISE_SHIFT)
- def _get_lock_qual_present(self): return (self.present_flags & _LOCK_QUAL_MASK) >> _LOCK_QUAL_SHIFT
- def _set_lock_qual_present(self, val): self.present_flags = self.present_flags | (val << _LOCK_QUAL_SHIFT)
- def _get_tx_attn_present(self): return (self.present_flags & _TX_ATTN_MASK) >> _TX_ATTN_SHIFT
- def _set_tx_attn_present(self, val): self.present_flags = self.present_flags | (val << _TX_ATTN_SHIFT)
- def _get_db_tx_attn_present(self): return (self.present_flags & _DB_TX_ATTN_MASK) >> _DB_TX_ATTN_SHIFT
- def _set_db_tx_attn_present(self, val): self.present_flags = self.present_flags | (val << _DB_TX_ATTN_SHIFT)
- def _get_dbm_power_present(self): return (self.present_flags & _DBM_TX_POWER_MASK) >> _DBM_TX_POWER_SHIFT
- def _set_dbm_power_present(self, val): self.present_flags = self.present_flags | (val << _DBM_TX_POWER_SHIFT)
- def _get_ant_present(self): return (self.present_flags & _ANTENNA_MASK) >> _ANTENNA_SHIFT
- def _set_ant_present(self, val): self.present_flags = self.present_flags | (val << _ANTENNA_SHIFT)
- def _get_db_ant_sig_present(self): return (self.present_flags & _DB_ANT_SIG_MASK) >> _DB_ANT_SIG_SHIFT
- def _set_db_ant_sig_present(self, val): self.present_flags = self.present_flags | (val << _DB_ANT_SIG_SHIFT)
- def _get_db_ant_noise_present(self): return (self.present_flags & _DB_ANT_NOISE_MASK) >> _DB_ANT_NOISE_SHIFT
- def _set_db_ant_noise_present(self, val): self.present_flags = self.present_flags | (val << _DB_ANT_NOISE_SHIFT)
- def _get_rx_flags_present(self): return (self.present_flags & _RX_FLAGS_MASK) >> _RX_FLAGS_SHIFT
- def _set_rx_flags_present(self, val): self.present_flags = self.present_flags | (val << _RX_FLAGS_SHIFT)
- def _get_chanplus_present(self): return (self.present_flags & _CHANNELPLUS_MASK) >> _CHANNELPLUS_SHIFT
- def _set_chanplus_present(self, val): self.present_flags = self.present_flags | (val << _CHANNELPLUS_SHIFT)
- def _get_ext_present(self): return (self.present_flags & _EXT_MASK) >> _EXT_SHIFT
- def _set_ext_present(self, val): self.present_flags = self.present_flags | (val << _EXT_SHIFT)
-
- tsft_present = property(_get_tsft_present, _set_tsft_present)
- flags_present = property(_get_flags_present, _set_flags_present)
- rate_present = property(_get_rate_present, _set_rate_present)
- channel_present = property(_get_channel_present, _set_channel_present)
- fhss_present = property(_get_fhss_present, _set_fhss_present)
- ant_sig_present = property(_get_ant_sig_present, _set_ant_sig_present)
- ant_noise_present = property(_get_ant_noise_present, _set_ant_noise_present)
- lock_qual_present = property(_get_lock_qual_present, _set_lock_qual_present)
- tx_attn_present = property(_get_tx_attn_present, _set_tx_attn_present)
- db_tx_attn_present = property(_get_db_tx_attn_present, _set_db_tx_attn_present)
- dbm_tx_power_present = property(_get_dbm_power_present, _set_dbm_power_present)
- ant_present = property(_get_ant_present, _set_ant_present)
- db_ant_sig_present = property(_get_db_ant_sig_present, _set_db_ant_sig_present)
- db_ant_noise_present = property(_get_db_ant_noise_present, _set_db_ant_noise_present)
- rx_flags_present = property(_get_rx_flags_present, _set_rx_flags_present)
- chanplus_present = property(_get_chanplus_present, _set_chanplus_present)
- ext_present = property(_get_ext_present, _set_ext_present)
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.data = buf[socket.ntohs(self.length):]
-
- self.fields = []
- buf = buf[self.__hdr_len__:]
-
- # decode each field into self.<name> (eg. self.tsft) as well as append it self.fields list
- field_decoder = [
- ('tsft', self.tsft_present, self.TSFT),
- ('flags', self.flags_present, self.Flags),
- ('rate', self.rate_present, self.Rate),
- ('channel', self.channel_present, self.Channel),
- ('fhss', self.fhss_present, self.FHSS),
- ('ant_sig', self.ant_sig_present, self.AntennaSignal),
- ('ant_noise', self.ant_noise_present, self.AntennaNoise),
- ('lock_qual', self.lock_qual_present, self.LockQuality),
- ('tx_attn', self.tx_attn_present, self.TxAttenuation),
- ('db_tx_attn', self.db_tx_attn_present, self.DbTxAttenuation),
- ('dbm_tx_power', self.dbm_tx_power_present, self.DbmTxPower),
- ('ant', self.ant_present, self.Antenna),
- ('db_ant_sig', self.db_ant_sig_present, self.DbAntennaSignal),
- ('db_ant_noise', self.db_ant_noise_present, self.DbAntennaNoise),
- ('rx_flags', self.rx_flags_present, self.RxFlags)
- ]
- for name, present_bit, parser in field_decoder:
- if present_bit:
- field = parser(buf)
- field.data = ''
- setattr(self, name, field)
- self.fields.append(field)
- buf = buf[len(field):]
-
- if len(self.data) > 0:
- if self.flags_present and self.flags.fcs:
- self.data = ieee80211.IEEE80211(self.data, fcs = self.flags.fcs)
- else:
- self.data = ieee80211.IEEE80211(self.data)
-
- class Antenna(dpkt.Packet):
- __hdr__ = (
- ('index', 'B', 0),
- )
-
- class AntennaNoise(dpkt.Packet):
- __hdr__ = (
- ('db', 'B', 0),
- )
-
- class AntennaSignal(dpkt.Packet):
- __hdr__ = (
- ('db', 'B', 0),
- )
-
- class Channel(dpkt.Packet):
- __hdr__ = (
- ('freq', 'H', 0),
- ('flags', 'H', 0),
- )
-
- class FHSS(dpkt.Packet):
- __hdr__ = (
- ('set', 'B', 0),
- ('pattern', 'B', 0),
- )
-
- class Flags(dpkt.Packet):
- __hdr__ = (
- ('val', 'B', 0),
- )
-
- def _get_fcs_present(self): return (self.val & _FCS_MASK) >> _FCS_SHIFT
-
- def _set_fcs_present(self, v): (v << _FCS_SHIFT) | (self.val & ~_FCS_MASK)
- fcs = property(_get_fcs_present, _set_fcs_present)
-
- class LockQuality(dpkt.Packet):
- __hdr__ = (
- ('val', 'H', 0),
- )
-
- class RxFlags(dpkt.Packet):
- __hdr__ = (
- ('val', 'H', 0),
- )
-
- class Rate(dpkt.Packet):
- __hdr__ = (
- ('val', 'B', 0),
- )
-
- class TSFT(dpkt.Packet):
- __hdr__ = (
- ('usecs', 'Q', 0),
- )
-
- class TxAttenuation(dpkt.Packet):
- __hdr__ = (
- ('val', 'H', 0),
- )
-
- class DbTxAttenuation(dpkt.Packet):
- __hdr__ = (
- ('db', 'H', 0),
- )
-
- class DbAntennaNoise(dpkt.Packet):
- __hdr__ = (
- ('db', 'B', 0),
- )
-
- class DbAntennaSignal(dpkt.Packet):
- __hdr__ = (
- ('db', 'B', 0),
- )
-
- class DbmTxPower(dpkt.Packet):
- __hdr__ = (
- ('dbm', 'B', 0),
- )
-
-if __name__ == '__main__':
- import unittest
-
- class RadiotapTestCase(unittest.TestCase):
- def test_Radiotap(self):
- s = '\x00\x00\x00\x18\x6e\x48\x00\x00\x00\x02\x6c\x09\xa0\x00\xa8\x81\x02\x00\x00\x00\x00\x00\x00\x00'
- rad = Radiotap(s)
- self.failUnless(rad.version == 0)
- self.failUnless(rad.present_flags == 0x6e480000)
- self.failUnless(rad.tsft_present == 0)
- self.failUnless(rad.flags_present == 1)
- self.failUnless(rad.rate_present == 1)
- self.failUnless(rad.channel_present == 1)
- self.failUnless(rad.fhss_present == 0)
- self.failUnless(rad.ant_sig_present == 1)
- self.failUnless(rad.ant_noise_present == 1)
- self.failUnless(rad.lock_qual_present == 0)
- self.failUnless(rad.db_tx_attn_present == 0)
- self.failUnless(rad.dbm_tx_power_present == 0)
- self.failUnless(rad.ant_present == 1)
- self.failUnless(rad.db_ant_sig_present == 0)
- self.failUnless(rad.db_ant_noise_present == 0)
- self.failUnless(rad.rx_flags_present == 1)
- self.failUnless(rad.channel.freq == 0x6c09)
- self.failUnless(rad.channel.flags == 0xa000)
- self.failUnless(len(rad.fields) == 7)
-
- def test_fcs(self):
- s = '\x00\x00\x1a\x00\x2f\x48\x00\x00\x34\x8f\x71\x09\x00\x00\x00\x00\x10\x0c\x85\x09\xc0\x00\xcc\x01\x00\x00'
- rt = Radiotap(s)
- self.failUnless(rt.flags_present == 1)
- self.failUnless(rt.flags.fcs == 1)
-
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/radius.py b/scripts/external_libs/dpkt-1.8.6/dpkt/radius.py
deleted file mode 100644
index 440f86f6..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/radius.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# $Id: radius.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Remote Authentication Dial-In User Service."""
-
-import dpkt
-
-# http://www.untruth.org/~josh/security/radius/radius-auth.html
-# RFC 2865
-
-class RADIUS(dpkt.Packet):
- __hdr__ = (
- ('code', 'B', 0),
- ('id', 'B', 0),
- ('len', 'H', 4),
- ('auth', '16s', '')
- )
- attrs = ''
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.attrs = parse_attrs(self.data)
- self.data = ''
-
-def parse_attrs(buf):
- """Parse attributes buffer into a list of (type, data) tuples."""
- attrs = []
- while buf:
- t = ord(buf[0])
- l = ord(buf[1])
- if l < 2:
- break
- d, buf = buf[2:l], buf[l:]
- attrs.append((t, d))
- return attrs
-
-# Codes
-RADIUS_ACCESS_REQUEST = 1
-RADIUS_ACCESS_ACCEPT = 2
-RADIUS_ACCESS_REJECT = 3
-RADIUS_ACCT_REQUEST = 4
-RADIUS_ACCT_RESPONSE = 5
-RADIUS_ACCT_STATUS = 6
-RADIUS_ACCESS_CHALLENGE = 11
-
-# Attributes
-RADIUS_USER_NAME = 1
-RADIUS_USER_PASSWORD = 2
-RADIUS_CHAP_PASSWORD = 3
-RADIUS_NAS_IP_ADDR = 4
-RADIUS_NAS_PORT = 5
-RADIUS_SERVICE_TYPE = 6
-RADIUS_FRAMED_PROTOCOL = 7
-RADIUS_FRAMED_IP_ADDR = 8
-RADIUS_FRAMED_IP_NETMASK = 9
-RADIUS_FRAMED_ROUTING = 10
-RADIUS_FILTER_ID = 11
-RADIUS_FRAMED_MTU = 12
-RADIUS_FRAMED_COMPRESSION = 13
-RADIUS_LOGIN_IP_HOST = 14
-RADIUS_LOGIN_SERVICE = 15
-RADIUS_LOGIN_TCP_PORT = 16
-# unassigned
-RADIUS_REPLY_MESSAGE = 18
-RADIUS_CALLBACK_NUMBER = 19
-RADIUS_CALLBACK_ID = 20
-# unassigned
-RADIUS_FRAMED_ROUTE = 22
-RADIUS_FRAMED_IPX_NETWORK = 23
-RADIUS_STATE = 24
-RADIUS_CLASS = 25
-RADIUS_VENDOR_SPECIFIC = 26
-RADIUS_SESSION_TIMEOUT = 27
-RADIUS_IDLE_TIMEOUT = 28
-RADIUS_TERMINATION_ACTION = 29
-RADIUS_CALLED_STATION_ID = 30
-RADIUS_CALLING_STATION_ID = 31
-RADIUS_NAS_ID = 32
-RADIUS_PROXY_STATE = 33
-RADIUS_LOGIN_LAT_SERVICE = 34
-RADIUS_LOGIN_LAT_NODE = 35
-RADIUS_LOGIN_LAT_GROUP = 36
-RADIUS_FRAMED_ATALK_LINK = 37
-RADIUS_FRAMED_ATALK_NETWORK = 38
-RADIUS_FRAMED_ATALK_ZONE = 39
-# 40-59 reserved for accounting
-RADIUS_CHAP_CHALLENGE = 60
-RADIUS_NAS_PORT_TYPE = 61
-RADIUS_PORT_LIMIT = 62
-RADIUS_LOGIN_LAT_PORT = 63
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/rfb.py b/scripts/external_libs/dpkt-1.8.6/dpkt/rfb.py
deleted file mode 100644
index f6d2a5d5..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/rfb.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# $Id: rfb.py 47 2008-05-27 02:10:00Z jon.oberheide $
-
-"""Remote Framebuffer Protocol."""
-
-import dpkt
-
-# Remote Framebuffer Protocol
-# http://www.realvnc.com/docs/rfbproto.pdf
-
-# Client to Server Messages
-CLIENT_SET_PIXEL_FORMAT = 0
-CLIENT_SET_ENCODINGS = 2
-CLIENT_FRAMEBUFFER_UPDATE_REQUEST = 3
-CLIENT_KEY_EVENT = 4
-CLIENT_POINTER_EVENT = 5
-CLIENT_CUT_TEXT = 6
-
-# Server to Client Messages
-SERVER_FRAMEBUFFER_UPDATE = 0
-SERVER_SET_COLOUR_MAP_ENTRIES = 1
-SERVER_BELL = 2
-SERVER_CUT_TEXT = 3
-
-class RFB(dpkt.Packet):
- __hdr__ = (
- ('type', 'B', 0),
- )
-
-class SetPixelFormat(dpkt.Packet):
- __hdr__ = (
- ('pad', '3s', ''),
- ('pixel_fmt', '16s', '')
- )
-
-class SetEncodings(dpkt.Packet):
- __hdr__ = (
- ('pad', '1s', ''),
- ('num_encodings', 'H', 0)
- )
-
-class FramebufferUpdateRequest(dpkt.Packet):
- __hdr__ = (
- ('incremental', 'B', 0),
- ('x_position', 'H', 0),
- ('y_position', 'H', 0),
- ('width', 'H', 0),
- ('height', 'H', 0)
- )
-
-class KeyEvent(dpkt.Packet):
- __hdr__ = (
- ('down_flag', 'B', 0),
- ('pad', '2s', ''),
- ('key', 'I', 0)
- )
-
-class PointerEvent(dpkt.Packet):
- __hdr__ = (
- ('button_mask', 'B', 0),
- ('x_position', 'H', 0),
- ('y_position', 'H', 0)
- )
-
-class FramebufferUpdate(dpkt.Packet):
- __hdr__ = (
- ('pad', '1s', ''),
- ('num_rects', 'H', 0)
- )
-
-class SetColourMapEntries(dpkt.Packet):
- __hdr__ = (
- ('pad', '1s', ''),
- ('first_colour', 'H', 0),
- ('num_colours', 'H', 0)
- )
-
-class CutText(dpkt.Packet):
- __hdr__ = (
- ('pad', '3s', ''),
- ('length', 'I', 0)
- )
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/rip.py b/scripts/external_libs/dpkt-1.8.6/dpkt/rip.py
deleted file mode 100644
index 7542cb3d..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/rip.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# $Id: rip.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Routing Information Protocol."""
-
-import dpkt
-
-# RIP v2 - RFC 2453
-# http://tools.ietf.org/html/rfc2453
-
-REQUEST = 1
-RESPONSE = 2
-
-class RIP(dpkt.Packet):
- __hdr__ = (
- ('cmd', 'B', REQUEST),
- ('v', 'B', 2),
- ('rsvd', 'H', 0)
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- l = []
- self.auth = None
- while self.data:
- rte = RTE(self.data[:20])
- if rte.family == 0xFFFF:
- self.auth = Auth(self.data[:20])
- else:
- l.append(rte)
- self.data = self.data[20:]
- self.data = self.rtes = l
-
- def __len__(self):
- len = self.__hdr_len__
- if self.auth:
- len += len(self.auth)
- len += sum(map(len, self.rtes))
- return len
-
- def __str__(self):
- auth = ''
- if self.auth:
- auth = str(self.auth)
- return self.pack_hdr() + \
- auth + \
- ''.join(map(str, self.rtes))
-
-class RTE(dpkt.Packet):
- __hdr__ = (
- ('family', 'H', 2),
- ('route_tag', 'H', 0),
- ('addr', 'I', 0),
- ('subnet', 'I', 0),
- ('next_hop', 'I', 0),
- ('metric', 'I', 1)
- )
-
-class Auth(dpkt.Packet):
- __hdr__ = (
- ('rsvd', 'H', 0xFFFF),
- ('type', 'H', 2),
- ('auth', '16s', 0)
- )
-
-if __name__ == '__main__':
- import unittest
-
- class RIPTestCase(unittest.TestCase):
- def testPack(self):
- r = RIP(self.s)
- self.failUnless(self.s == str(r))
-
- def testUnpack(self):
- r = RIP(self.s)
- self.failUnless(r.auth == None)
- self.failUnless(len(r.rtes) == 2)
-
- rte = r.rtes[1]
- self.failUnless(rte.family == 2)
- self.failUnless(rte.route_tag == 0)
- self.failUnless(rte.metric == 1)
-
- s = '\x02\x02\x00\x00\x00\x02\x00\x00\x01\x02\x03\x00\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x02\x00\x00\xc0\xa8\x01\x08\xff\xff\xff\xfc\x00\x00\x00\x00\x00\x00\x00\x01'
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/rpc.py b/scripts/external_libs/dpkt-1.8.6/dpkt/rpc.py
deleted file mode 100644
index 19281581..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/rpc.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# $Id: rpc.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Remote Procedure Call."""
-
-import struct
-import dpkt
-
-# RPC.dir
-CALL = 0
-REPLY = 1
-
-# RPC.Auth.flavor
-AUTH_NONE = AUTH_NULL = 0
-AUTH_UNIX = 1
-AUTH_SHORT = 2
-AUTH_DES = 3
-
-# RPC.Reply.stat
-MSG_ACCEPTED = 0
-MSG_DENIED = 1
-
-# RPC.Reply.Accept.stat
-SUCCESS = 0
-PROG_UNAVAIL = 1
-PROG_MISMATCH = 2
-PROC_UNAVAIL = 3
-GARBAGE_ARGS = 4
-SYSTEM_ERR = 5
-
-# RPC.Reply.Reject.stat
-RPC_MISMATCH = 0
-AUTH_ERROR = 1
-
-class RPC(dpkt.Packet):
- __hdr__ = (
- ('xid', 'I', 0),
- ('dir', 'I', CALL)
- )
- class Auth(dpkt.Packet):
- __hdr__ = (('flavor', 'I', AUTH_NONE), )
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- n = struct.unpack('>I', self.data[:4])[0]
- self.data = self.data[4:4+n]
- def __len__(self):
- return 8 + len(self.data)
- def __str__(self):
- return self.pack_hdr() + struct.pack('>I', len(self.data)) + \
- str(self.data)
-
- class Call(dpkt.Packet):
- __hdr__ = (
- ('rpcvers', 'I', 2),
- ('prog', 'I', 0),
- ('vers', 'I', 0),
- ('proc', 'I', 0)
- )
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.cred = RPC.Auth(self.data)
- self.verf = RPC.Auth(self.data[len(self.cred):])
- self.data = self.data[len(self.cred) + len(self.verf):]
- def __len__(self):
- return len(str(self)) # XXX
- def __str__(self):
- return dpkt.Packet.__str__(self) + \
- str(getattr(self, 'cred', RPC.Auth())) + \
- str(getattr(self, 'verf', RPC.Auth())) + \
- str(self.data)
-
- class Reply(dpkt.Packet):
- __hdr__ = (('stat', 'I', MSG_ACCEPTED), )
-
- class Accept(dpkt.Packet):
- __hdr__ = (('stat', 'I', SUCCESS), )
- def unpack(self, buf):
- self.verf = RPC.Auth(buf)
- buf = buf[len(self.verf):]
- self.stat = struct.unpack('>I', buf[:4])[0]
- if self.stat == SUCCESS:
- self.data = buf[4:]
- elif self.stat == PROG_MISMATCH:
- self.low, self.high = struct.unpack('>II', buf[4:12])
- self.data = buf[12:]
- def __len__(self):
- if self.stat == PROG_MISMATCH: n = 8
- else: n = 0
- return len(self.verf) + 4 + n + len(self.data)
- def __str__(self):
- if self.stat == PROG_MISMATCH:
- return str(self.verf) + struct.pack('>III', self.stat,
- self.low, self.high) + self.data
- return str(self.verf) + dpkt.Packet.__str__(self)
-
- class Reject(dpkt.Packet):
- __hdr__ = (('stat', 'I', AUTH_ERROR), )
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- if self.stat == RPC_MISMATCH:
- self.low, self.high = struct.unpack('>II', self.data[:8])
- self.data = self.data[8:]
- elif self.stat == AUTH_ERROR:
- self.why = struct.unpack('>I', self.data[:4])[0]
- self.data = self.data[4:]
- def __len__(self):
- if self.stat == RPC_MISMATCH: n = 8
- elif self.stat == AUTH_ERROR: n =4
- else: n = 0
- return 4 + n + len(self.data)
- def __str__(self):
- if self.stat == RPC_MISMATCH:
- return struct.pack('>III', self.stat, self.low,
- self.high) + self.data
- elif self.stat == AUTH_ERROR:
- return struct.pack('>II', self.stat, self.why) + self.data
- return dpkt.Packet.__str__(self)
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- if self.stat == MSG_ACCEPTED:
- self.data = self.accept = self.Accept(self.data)
- elif self.status == MSG_DENIED:
- self.data = self.reject = self.Reject(self.data)
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- if self.dir == CALL:
- self.data = self.call = self.Call(self.data)
- elif self.dir == REPLY:
- self.data = self.reply = self.Reply(self.data)
-
-def unpack_xdrlist(cls, buf):
- l = []
- while buf:
- if buf.startswith('\x00\x00\x00\x01'):
- p = cls(buf[4:])
- l.append(p)
- buf = p.data
- elif buf.startswith('\x00\x00\x00\x00'):
- break
- else:
- raise dpkt.UnpackError, 'invalid XDR list'
- return l
-
-def pack_xdrlist(*args):
- return '\x00\x00\x00\x01'.join(map(str, args)) + '\x00\x00\x00\x00'
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/rtp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/rtp.py
deleted file mode 100644
index 65fd0b98..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/rtp.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# $Id: rtp.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Real-Time Transport Protocol"""
-
-from dpkt import Packet
-
-# version 1100 0000 0000 0000 ! 0xC000 14
-# p 0010 0000 0000 0000 ! 0x2000 13
-# x 0001 0000 0000 0000 ! 0x1000 12
-# cc 0000 1111 0000 0000 ! 0x0F00 8
-# m 0000 0000 1000 0000 ! 0x0080 7
-# pt 0000 0000 0111 1111 ! 0x007F 0
-#
-
-_VERSION_MASK= 0xC000
-_P_MASK = 0x2000
-_X_MASK = 0x1000
-_CC_MASK = 0x0F00
-_M_MASK = 0x0080
-_PT_MASK = 0x007F
-_VERSION_SHIFT=14
-_P_SHIFT = 13
-_X_SHIFT = 12
-_CC_SHIFT = 8
-_M_SHIFT = 7
-_PT_SHIFT = 0
-
-VERSION = 2
-
-class RTP(Packet):
- __hdr__ = (
- ('_type', 'H', 0x8000),
- ('seq', 'H', 0),
- ('ts', 'I', 0),
- ('ssrc', 'I', 0),
- )
- csrc = ''
-
- def _get_version(self): return (self._type&_VERSION_MASK)>>_VERSION_SHIFT
- def _set_version(self, ver):
- self._type = (ver << _VERSION_SHIFT) | (self._type & ~_VERSION_MASK)
- def _get_p(self): return (self._type & _P_MASK) >> _P_SHIFT
- def _set_p(self, p): self._type = (p << _P_SHIFT) | (self._type & ~_P_MASK)
- def _get_x(self): return (self._type & _X_MASK) >> _X_SHIFT
- def _set_x(self, x): self._type = (x << _X_SHIFT) | (self._type & ~_X_MASK)
- def _get_cc(self): return (self._type & _CC_MASK) >> _CC_SHIFT
- def _set_cc(self, cc): self._type = (cc<<_CC_SHIFT)|(self._type&~_CC_MASK)
- def _get_m(self): return (self._type & _M_MASK) >> _M_SHIFT
- def _set_m(self, m): self._type = (m << _M_SHIFT) | (self._type & ~_M_MASK)
- def _get_pt(self): return (self._type & _PT_MASK) >> _PT_SHIFT
- def _set_pt(self, m): self._type = (m << _PT_SHIFT)|(self._type&~_PT_MASK)
-
- version = property(_get_version, _set_version)
- p = property(_get_p, _set_p)
- x = property(_get_x, _set_x)
- cc = property(_get_cc, _set_cc)
- m = property(_get_m, _set_m)
- pt = property(_get_pt, _set_pt)
-
- def __len__(self):
- return self.__hdr_len__ + len(self.csrc) + len(self.data)
-
- def __str__(self):
- return self.pack_hdr() + self.csrc + str(self.data)
-
- def unpack(self, buf):
- super(RTP, self).unpack(buf)
- self.csrc = buf[self.__hdr_len__:self.__hdr_len__ + self.cc * 4]
- self.data = buf[self.__hdr_len__ + self.cc * 4:]
-
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/rx.py b/scripts/external_libs/dpkt-1.8.6/dpkt/rx.py
deleted file mode 100644
index 5b898efc..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/rx.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# $Id: rx.py 23 2006-11-08 15:45:33Z jonojono $
-
-"""Rx Protocol."""
-
-import dpkt
-
-# Types
-DATA = 0x01
-ACK = 0x02
-BUSY = 0x03
-ABORT = 0x04
-ACKALL = 0x05
-CHALLENGE = 0x06
-RESPONSE = 0x07
-DEBUG = 0x08
-
-# Flags
-CLIENT_INITIATED = 0x01
-REQUEST_ACK = 0x02
-LAST_PACKET = 0x04
-MORE_PACKETS = 0x08
-SLOW_START_OK = 0x20
-JUMBO_PACKET = 0x20
-
-# Security
-SEC_NONE = 0x00
-SEC_BCRYPT = 0x01
-SEC_RXKAD = 0x02
-SEC_RXKAD_ENC = 0x03
-
-class Rx(dpkt.Packet):
- __hdr__ = (
- ('epoch', 'I', 0),
- ('cid', 'I', 0),
- ('call', 'I', 1),
- ('seq', 'I', 0),
- ('serial', 'I', 1),
- ('type', 'B', 0),
- ('flags', 'B', CLIENT_INITIATED),
- ('status', 'B', 0),
- ('security', 'B', 0),
- ('sum', 'H', 0),
- ('service', 'H', 0)
- )
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/sccp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/sccp.py
deleted file mode 100644
index 7a4ac084..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/sccp.py
+++ /dev/null
@@ -1,196 +0,0 @@
-# $Id: sccp.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Cisco Skinny Client Control Protocol."""
-
-import dpkt
-
-KEYPAD_BUTTON = 0x00000003
-OFF_HOOK = 0x00000006
-ON_HOOK = 0x00000007
-OPEN_RECEIVE_CHANNEL_ACK= 0x00000022
-START_TONE = 0x00000082
-STOP_TONE = 0x00000083
-SET_LAMP = 0x00000086
-SET_SPEAKER_MODE = 0x00000088
-START_MEDIA_TRANSMIT = 0x0000008A
-STOP_MEDIA_TRANSMIT = 0x0000008B
-CALL_INFO = 0x0000008F
-DEFINE_TIME_DATE = 0x00000094
-DISPLAY_TEXT = 0x00000099
-OPEN_RECEIVE_CHANNEL = 0x00000105
-CLOSE_RECEIVE_CHANNEL = 0x00000106
-SELECT_SOFTKEYS = 0x00000110
-CALL_STATE = 0x00000111
-DISPLAY_PROMPT_STATUS = 0x00000112
-CLEAR_PROMPT_STATUS = 0x00000113
-ACTIVATE_CALL_PLANE = 0x00000116
-
-class ActivateCallPlane(dpkt.Packet):
- __byte_order__ = '<'
- __hdr__ = (
- ('line_instance', 'I', 0),
- )
-
-class CallInfo(dpkt.Packet):
- __byte_order__ = '<'
- __hdr__ = (
- ('calling_party_name', '40s', ''),
- ('calling_party', '24s', ''),
- ('called_party_name', '40s', ''),
- ('called_party', '24s', ''),
- ('line_instance', 'I', 0),
- ('call_id', 'I', 0),
- ('call_type', 'I', 0),
- ('orig_called_party_name', '40s', ''),
- ('orig_called_party', '24s', '')
- )
-
-class CallState(dpkt.Packet):
- __byte_order__ = '<'
- __hdr__ = (
- ('call_state', 'I', 12), # 12: Proceed, 15: Connected
- ('line_instance', 'I', 1),
- ('call_id', 'I', 0)
- )
-
-class ClearPromptStatus(dpkt.Packet):
- __byte_order__ = '<'
- __hdr__ = (
- ('line_instance', 'I', 1),
- ('call_id', 'I', 0)
- )
-
-class CloseReceiveChannel(dpkt.Packet):
- __byte_order__ = '<'
- __hdr__ = (
- ('conference_id', 'I', 0),
- ('passthruparty_id', 'I', 0),
- )
-
-class DisplayPromptStatus(dpkt.Packet):
- __byte_order__ = '<'
- __hdr__ = (
- ('msg_timeout', 'I', 0),
- ('display_msg', '32s', ''),
- ('line_instance', 'I', 1),
- ('call_id', 'I', 0)
- )
-
-class DisplayText(dpkt.Packet):
- __byte_order__ = '<'
- __hdr__ = (
- ('display_msg', '36s', ''),
- )
-
-class KeypadButton(dpkt.Packet):
- __byte_order__ = '<'
- __hdr__ = (
- ('button', 'I', 0),
- )
-
-class OpenReceiveChannel(dpkt.Packet):
- __byte_order__ = '<'
- __hdr__ = (
- ('conference_id', 'I', 0),
- ('passthruparty_id', 'I', 0),
- ('ms_packet', 'I', 0),
- ('payload_capability', 'I', 4), # 4: G.711 u-law 64k
- ('echo_cancel_type', 'I', 4),
- ('g723_bitrate', 'I', 0),
- )
-
-class OpenReceiveChannelAck(dpkt.Packet):
- __byte_order__ = '<'
- __hdr__ = (
- ('channel_status', 'I', 0),
- ('ip', '4s', ''),
- ('port', 'I', 0),
- ('passthruparty_id', 'I', 0),
- )
-
-class SelectStartKeys(dpkt.Packet):
- __byte_order__ = '<'
- __hdr__ = (
- ('line_id', 'I', 1),
- ('call_id', 'I', 0),
- ('softkey_set', 'I', 8),
- ('softkey_map', 'I', 0xffffffffL)
- )
-
-class SetLamp(dpkt.Packet):
- __byte_order__ = '<'
- __hdr__ = (
- ('stimulus', 'I', 9), # 9: Line
- ('stimulus_instance', 'I', 1),
- ('lamp_mode', 'I', 1),
- )
-
-class SetSpeakerMode(dpkt.Packet):
- __byte_order__ = '<'
- __hdr__ = (
- ('speaker', 'I', 2), # 2: SpeakerOff
- )
-
-class StartMediaTransmission(dpkt.Packet):
- __byte_order__ = '<'
- __hdr__ = (
- ('conference_id', 'I', 0),
- ('passthruparty_id', 'I', 0),
- ('remote_ip', '4s', ''),
- ('remote_port', 'I', 0),
- ('ms_packet', 'I', 0),
- ('payload_capability', 'I', 4), # 4: G.711 u-law 64k
- ('precedence', 'I', 0),
- ('silence_suppression', 'I', 0),
- ('max_frames_per_pkt', 'I', 1),
- ('g723_bitrate', 'I', 0),
- )
-
-class StartTone(dpkt.Packet):
- __byte_order__ = '<'
- __hdr__ = (
- ('tone', 'I', 0x24), # 0x24: AlertingTone
- )
-
-class StopMediaTransmission(dpkt.Packet):
- __byte_order__ = '<'
- __hdr__ = (
- ('conference_id', 'I', 0),
- ('passthruparty_id', 'I', 0),
- )
-
-class SCCP(dpkt.Packet):
- __byte_order__ = '<'
- __hdr__ = (
- ('len', 'I', 0),
- ('rsvd', 'I', 0),
- ('msgid', 'I', 0),
- ('msg', '0s', ''),
- )
- _msgsw = {
- KEYPAD_BUTTON:KeypadButton,
- OPEN_RECEIVE_CHANNEL_ACK:OpenReceiveChannelAck,
- START_TONE:StartTone,
- SET_LAMP:SetLamp,
- START_MEDIA_TRANSMIT:StartMediaTransmission,
- STOP_MEDIA_TRANSMIT:StopMediaTransmission,
- CALL_INFO:CallInfo,
- DISPLAY_TEXT:DisplayText,
- OPEN_RECEIVE_CHANNEL:OpenReceiveChannel,
- CLOSE_RECEIVE_CHANNEL:CloseReceiveChannel,
- CALL_STATE:CallState,
- DISPLAY_PROMPT_STATUS:DisplayPromptStatus,
- CLEAR_PROMPT_STATUS:ClearPromptStatus,
- ACTIVATE_CALL_PLANE:ActivateCallPlane,
- }
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- n = self.len - 4
- if n > len(self.data):
- raise dpkt.NeedData('not enough data')
- self.msg, self.data = self.data[:n], self.data[n:]
- try:
- p = self._msgsw[self.msgid](self.msg)
- setattr(self, p.__class__.__name__.lower(), p)
- except (KeyError, dpkt.UnpackError):
- pass
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/sctp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/sctp.py
deleted file mode 100644
index 31b13e69..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/sctp.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# $Id: sctp.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Stream Control Transmission Protocol."""
-
-import dpkt, crc32c
-
-# Stream Control Transmission Protocol
-# http://tools.ietf.org/html/rfc2960
-
-# Chunk Types
-DATA = 0
-INIT = 1
-INIT_ACK = 2
-SACK = 3
-HEARTBEAT = 4
-HEARTBEAT_ACK = 5
-ABORT = 6
-SHUTDOWN = 7
-SHUTDOWN_ACK = 8
-ERROR = 9
-COOKIE_ECHO = 10
-COOKIE_ACK = 11
-ECNE = 12
-CWR = 13
-SHUTDOWN_COMPLETE = 14
-
-class SCTP(dpkt.Packet):
- __hdr__ = (
- ('sport', 'H', 0),
- ('dport', 'H', 0),
- ('vtag', 'I', 0),
- ('sum', 'I', 0)
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- l = []
- while self.data:
- chunk = Chunk(self.data)
- l.append(chunk)
- self.data = self.data[len(chunk):]
- self.data = self.chunks = l
-
- def __len__(self):
- return self.__hdr_len__ + \
- sum(map(len, self.data))
-
- def __str__(self):
- l = [ str(x) for x in self.data ]
- if self.sum == 0:
- s = crc32c.add(0xffffffffL, self.pack_hdr())
- for x in l:
- s = crc32c.add(s, x)
- self.sum = crc32c.done(s)
- return self.pack_hdr() + ''.join(l)
-
-class Chunk(dpkt.Packet):
- __hdr__ = (
- ('type', 'B', INIT),
- ('flags', 'B', 0),
- ('len', 'H', 0)
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- self.data = self.data[:self.len - self.__hdr_len__]
-
-if __name__ == '__main__':
- import unittest
-
- class SCTPTestCase(unittest.TestCase):
- def testPack(self):
- sctp = SCTP(self.s)
- self.failUnless(self.s == str(sctp))
- sctp.sum = 0
- self.failUnless(self.s == str(sctp))
-
- def testUnpack(self):
- sctp = SCTP(self.s)
- self.failUnless(sctp.sport == 32836)
- self.failUnless(sctp.dport == 80)
- self.failUnless(len(sctp.chunks) == 1)
- self.failUnless(len(sctp) == 72)
-
- chunk = sctp.chunks[0]
- self.failUnless(chunk.type == INIT)
- self.failUnless(chunk.len == 60)
-
- s = '\x80\x44\x00\x50\x00\x00\x00\x00\x30\xba\xef\x54\x01\x00\x00\x3c\x3b\xb9\x9c\x46\x00\x01\xa0\x00\x00\x0a\xff\xff\x2b\x2d\x7e\xb2\x00\x05\x00\x08\x9b\xe6\x18\x9b\x00\x05\x00\x08\x9b\xe6\x18\x9c\x00\x0c\x00\x06\x00\x05\x00\x00\x80\x00\x00\x04\xc0\x00\x00\x04\xc0\x06\x00\x08\x00\x00\x00\x00'
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/sip.py b/scripts/external_libs/dpkt-1.8.6/dpkt/sip.py
deleted file mode 100644
index 398eab8a..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/sip.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# $Id: sip.py 48 2008-05-27 17:31:15Z yardley $
-
-"""Session Initiation Protocol."""
-
-import http
-
-class Request(http.Request):
- """SIP request."""
- __hdr_defaults__ = {
- 'method':'INVITE',
- 'uri':'sip:user@example.com',
- 'version':'2.0',
- 'headers':{ 'To':'', 'From':'', 'Call-ID':'', 'CSeq':'', 'Contact':'' }
- }
- __methods = dict.fromkeys((
- 'ACK', 'BYE', 'CANCEL', 'INFO', 'INVITE', 'MESSAGE', 'NOTIFY',
- 'OPTIONS', 'PRACK', 'PUBLISH', 'REFER', 'REGISTER', 'SUBSCRIBE',
- 'UPDATE'
- ))
- __proto = 'SIP'
-
-class Response(http.Response):
- """SIP response."""
- __hdr_defaults__ = {
- 'version':'2.0',
- 'status':'200',
- 'reason':'OK',
- 'headers':{ 'To':'', 'From':'', 'Call-ID':'', 'CSeq':'', 'Contact':'' }
- }
- __proto = 'SIP'
-
-
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/sll.py b/scripts/external_libs/dpkt-1.8.6/dpkt/sll.py
deleted file mode 100644
index dbe866f8..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/sll.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# $Id: sll.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Linux libpcap "cooked" capture encapsulation."""
-
-import arp, dpkt, ethernet
-
-class SLL(dpkt.Packet):
- __hdr__ = (
- ('type', 'H', 0), # 0: to us, 1: bcast, 2: mcast, 3: other, 4: from us
- ('hrd', 'H', arp.ARP_HRD_ETH),
- ('hlen', 'H', 6), # hardware address length
- ('hdr', '8s', ''), # first 8 bytes of link-layer header
- ('ethtype', 'H', ethernet.ETH_TYPE_IP),
- )
- _typesw = ethernet.Ethernet._typesw
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- try:
- self.data = self._typesw[self.ethtype](self.data)
- setattr(self, self.data.__class__.__name__.lower(), self.data)
- except (KeyError, dpkt.UnpackError):
- pass
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/smb.py b/scripts/external_libs/dpkt-1.8.6/dpkt/smb.py
deleted file mode 100644
index 1964a535..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/smb.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# $Id: smb.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Server Message Block."""
-
-import dpkt
-
-class SMB(dpkt.Packet):
- __hdr__ = [
- ('proto', '4s', ''),
- ('cmd', 'B', 0),
- ('err', 'I', 0),
- ('flags1', 'B', 0),
- ('flags2', 'B', 0),
- ('pad', '6s', ''),
- ('tid', 'H', 0),
- ('pid', 'H', 0),
- ('uid', 'H', 0),
- ('mid', 'H', 0)
- ]
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/snoop.py b/scripts/external_libs/dpkt-1.8.6/dpkt/snoop.py
deleted file mode 100644
index 3374feb2..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/snoop.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# $Id$
-
-"""Snoop file format."""
-
-import sys, time
-import dpkt
-
-# RFC 1761
-
-SNOOP_MAGIC = 0x736E6F6F70000000L
-
-SNOOP_VERSION = 2
-
-SDL_8023 = 0
-SDL_8024 = 1
-SDL_8025 = 2
-SDL_8026 = 3
-SDL_ETHER = 4
-SDL_HDLC = 5
-SDL_CHSYNC = 6
-SDL_IBMCC = 7
-SDL_FDDI = 8
-SDL_OTHER = 9
-
-
-dltoff = { SDL_ETHER:14 }
-
-class PktHdr(dpkt.Packet):
- """snoop packet header."""
- __byte_order__ = '!'
- __hdr__ = (
- ('orig_len', 'I', 0),
- ('incl_len', 'I', 0),
- ('rec_len', 'I', 0),
- ('cum_drops', 'I', 0),
- ('ts_sec', 'I', 0),
- ('ts_usec', 'I', 0),
- )
-
-class FileHdr(dpkt.Packet):
- """snoop file header."""
- __byte_order__ = '!'
- __hdr__ = (
- ('magic', 'Q', SNOOP_MAGIC),
- ('v', 'I', SNOOP_VERSION),
- ('linktype', 'I', SDL_ETHER),
- )
-
-class Writer(object):
- """Simple snoop dumpfile writer."""
- def __init__(self, fileobj, linktype=SDL_ETHER):
- self.__f = fileobj
- fh = FileHdr(linktype=linktype)
- self.__f.write(str(fh))
-
- def writepkt(self, pkt, ts=None):
- if ts is None:
- ts = time.time()
- s = str(pkt)
- n = len(s)
- pad_len = 4 - n % 4 if n % 4 else 0
- ph = PktHdr(orig_len=n,incl_len=n,
- rec_len=PktHdr.__hdr_len__+n+pad_len,
- ts_sec=int(ts),
- ts_usec=int((int(ts) - float(ts)) * 1000000.0))
- self.__f.write(str(ph))
- self.__f.write(s + '\0' * pad_len)
-
- def close(self):
- self.__f.close()
-
-class Reader(object):
- """Simple pypcap-compatible snoop file reader."""
-
- def __init__(self, fileobj):
- self.name = fileobj.name
- self.fd = fileobj.fileno()
- self.__f = fileobj
- buf = self.__f.read(FileHdr.__hdr_len__)
- self.__fh = FileHdr(buf)
- self.__ph = PktHdr
- if self.__fh.magic != SNOOP_MAGIC:
- raise ValueError, 'invalid snoop header'
- self.dloff = dltoff[self.__fh.linktype]
- self.filter = ''
-
- def fileno(self):
- return self.fd
-
- def datalink(self):
- return self.__fh.linktype
-
- def setfilter(self, value, optimize=1):
- return NotImplementedError
-
- def readpkts(self):
- return list(self)
-
- def dispatch(self, cnt, callback, *args):
- if cnt > 0:
- for i in range(cnt):
- ts, pkt = self.next()
- callback(ts, pkt, *args)
- else:
- for ts, pkt in self:
- callback(ts, pkt, *args)
-
- def loop(self, callback, *args):
- self.dispatch(0, callback, *args)
-
- def __iter__(self):
- self.__f.seek(FileHdr.__hdr_len__)
- while 1:
- buf = self.__f.read(PktHdr.__hdr_len__)
- if not buf: break
- hdr = self.__ph(buf)
- buf = self.__f.read(hdr.rec_len - PktHdr.__hdr_len__)
- yield (hdr.ts_sec + (hdr.ts_usec / 1000000.0), buf[:hdr.incl_len])
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/ssl.py b/scripts/external_libs/dpkt-1.8.6/dpkt/ssl.py
deleted file mode 100644
index d741a99e..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/ssl.py
+++ /dev/null
@@ -1,560 +0,0 @@
-# $Id: ssl.py 90 2014-04-02 22:06:23Z andrewflnr@gmail.com $
-# Portion Copyright 2012 Google Inc. All rights reserved.
-
-"""Secure Sockets Layer / Transport Layer Security."""
-
-import dpkt
-import ssl_ciphersuites
-import struct
-import binascii
-import traceback
-import datetime
-
-#
-# Note from April 2011: cde...@gmail.com added code that parses SSL3/TLS messages more in depth.
-#
-# Jul 2012: afleenor@google.com modified and extended SSL support further.
-#
-
-
-class SSL2(dpkt.Packet):
- __hdr__ = (
- ('len', 'H', 0),
- ('msg', 's', ''),
- ('pad', 's', ''),
- )
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- if self.len & 0x8000:
- n = self.len = self.len & 0x7FFF
- self.msg, self.data = self.data[:n], self.data[n:]
- else:
- n = self.len = self.len & 0x3FFF
- padlen = ord(self.data[0])
- self.msg = self.data[1:1+n]
- self.pad = self.data[1+n:1+n+padlen]
- self.data = self.data[1+n+padlen:]
-
-
-# SSLv3/TLS versions
-SSL3_V = 0x0300
-TLS1_V = 0x0301
-TLS11_V = 0x0302
-TLS12_V = 0x0303
-
-ssl3_versions_str = {
- SSL3_V: 'SSL3',
- TLS1_V: 'TLS 1.0',
- TLS11_V: 'TLS 1.1',
- TLS12_V: 'TLS 1.2'
-}
-
-SSL3_VERSION_BYTES = set(('\x03\x00', '\x03\x01', '\x03\x02', '\x03\x03'))
-
-
-# Alert levels
-SSL3_AD_WARNING = 1
-SSL3_AD_FATAL = 2
-alert_level_str = {
- SSL3_AD_WARNING: 'SSL3_AD_WARNING',
- SSL3_AD_FATAL: 'SSL3_AD_FATAL'
-}
-
-# SSL3 alert descriptions
-SSL3_AD_CLOSE_NOTIFY = 0
-SSL3_AD_UNEXPECTED_MESSAGE = 10 # fatal
-SSL3_AD_BAD_RECORD_MAC = 20 # fatal
-SSL3_AD_DECOMPRESSION_FAILURE = 30 # fatal
-SSL3_AD_HANDSHAKE_FAILURE = 40 # fatal
-SSL3_AD_NO_CERTIFICATE = 41
-SSL3_AD_BAD_CERTIFICATE = 42
-SSL3_AD_UNSUPPORTED_CERTIFICATE = 43
-SSL3_AD_CERTIFICATE_REVOKED = 44
-SSL3_AD_CERTIFICATE_EXPIRED = 45
-SSL3_AD_CERTIFICATE_UNKNOWN = 46
-SSL3_AD_ILLEGAL_PARAMETER = 47 # fatal
-
-# TLS1 alert descriptions
-TLS1_AD_DECRYPTION_FAILED = 21
-TLS1_AD_RECORD_OVERFLOW = 22
-TLS1_AD_UNKNOWN_CA = 48 # fatal
-TLS1_AD_ACCESS_DENIED = 49 # fatal
-TLS1_AD_DECODE_ERROR = 50 # fatal
-TLS1_AD_DECRYPT_ERROR = 51
-TLS1_AD_EXPORT_RESTRICTION = 60 # fatal
-TLS1_AD_PROTOCOL_VERSION = 70 # fatal
-TLS1_AD_INSUFFICIENT_SECURITY = 71 # fatal
-TLS1_AD_INTERNAL_ERROR = 80 # fatal
-TLS1_AD_USER_CANCELLED = 90
-TLS1_AD_NO_RENEGOTIATION = 100
-#/* codes 110-114 are from RFC3546 */
-TLS1_AD_UNSUPPORTED_EXTENSION = 110
-TLS1_AD_CERTIFICATE_UNOBTAINABLE = 111
-TLS1_AD_UNRECOGNIZED_NAME = 112
-TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE = 113
-TLS1_AD_BAD_CERTIFICATE_HASH_VALUE = 114
-TLS1_AD_UNKNOWN_PSK_IDENTITY = 115 # fatal
-
-
-# Mapping alert types to strings
-alert_description_str = {
- SSL3_AD_CLOSE_NOTIFY: 'SSL3_AD_CLOSE_NOTIFY',
- SSL3_AD_UNEXPECTED_MESSAGE: 'SSL3_AD_UNEXPECTED_MESSAGE',
- SSL3_AD_BAD_RECORD_MAC: 'SSL3_AD_BAD_RECORD_MAC',
- SSL3_AD_DECOMPRESSION_FAILURE: 'SSL3_AD_DECOMPRESSION_FAILURE',
- SSL3_AD_HANDSHAKE_FAILURE: 'SSL3_AD_HANDSHAKE_FAILURE',
- SSL3_AD_NO_CERTIFICATE: 'SSL3_AD_NO_CERTIFICATE',
- SSL3_AD_BAD_CERTIFICATE: 'SSL3_AD_BAD_CERTIFICATE',
- SSL3_AD_UNSUPPORTED_CERTIFICATE: 'SSL3_AD_UNSUPPORTED_CERTIFICATE',
- SSL3_AD_CERTIFICATE_REVOKED: 'SSL3_AD_CERTIFICATE_REVOKED',
- SSL3_AD_CERTIFICATE_EXPIRED: 'SSL3_AD_CERTIFICATE_EXPIRED',
- SSL3_AD_CERTIFICATE_UNKNOWN: 'SSL3_AD_CERTIFICATE_UNKNOWN',
- SSL3_AD_ILLEGAL_PARAMETER: 'SSL3_AD_ILLEGAL_PARAMETER',
- TLS1_AD_DECRYPTION_FAILED: 'TLS1_AD_DECRYPTION_FAILED',
- TLS1_AD_RECORD_OVERFLOW: 'TLS1_AD_RECORD_OVERFLOW',
- TLS1_AD_UNKNOWN_CA: 'TLS1_AD_UNKNOWN_CA',
- TLS1_AD_ACCESS_DENIED: 'TLS1_AD_ACCESS_DENIED',
- TLS1_AD_DECODE_ERROR: 'TLS1_AD_DECODE_ERROR',
- TLS1_AD_DECRYPT_ERROR: 'TLS1_AD_DECRYPT_ERROR',
- TLS1_AD_EXPORT_RESTRICTION: 'TLS1_AD_EXPORT_RESTRICTION',
- TLS1_AD_PROTOCOL_VERSION: 'TLS1_AD_PROTOCOL_VERSION',
- TLS1_AD_INSUFFICIENT_SECURITY: 'TLS1_AD_INSUFFICIENT_SECURITY',
- TLS1_AD_INTERNAL_ERROR: 'TLS1_AD_INTERNAL_ERROR',
- TLS1_AD_USER_CANCELLED: 'TLS1_AD_USER_CANCELLED',
- TLS1_AD_NO_RENEGOTIATION: 'TLS1_AD_NO_RENEGOTIATION',
- TLS1_AD_UNSUPPORTED_EXTENSION: 'TLS1_AD_UNSUPPORTED_EXTENSION',
- TLS1_AD_CERTIFICATE_UNOBTAINABLE: 'TLS1_AD_CERTIFICATE_UNOBTAINABLE',
- TLS1_AD_UNRECOGNIZED_NAME: 'TLS1_AD_UNRECOGNIZED_NAME',
- TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE: 'TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE',
- TLS1_AD_BAD_CERTIFICATE_HASH_VALUE: 'TLS1_AD_BAD_CERTIFICATE_HASH_VALUE',
- TLS1_AD_UNKNOWN_PSK_IDENTITY: 'TLS1_AD_UNKNOWN_PSK_IDENTITY'
-}
-
-
-# struct format strings for parsing buffer lengths
-# don't forget, you have to pad a 3-byte value with \x00
-_SIZE_FORMATS = ['!B', '!H', '!I', '!I']
-
-def parse_variable_array(buf, lenbytes):
- """
- Parse an array described using the 'Type name<x..y>' syntax from the spec
-
- Read a length at the start of buf, and returns that many bytes
- after, in a tuple with the TOTAL bytes consumed (including the size). This
- does not check that the array is the right length for any given datatype.
- """
- # first have to figure out how to parse length
- assert lenbytes <= 4 # pretty sure 4 is impossible, too
- size_format = _SIZE_FORMATS[lenbytes - 1]
- padding = '\x00' if lenbytes == 3 else ''
- # read off the length
- size = struct.unpack(size_format, padding + buf[:lenbytes])[0]
- # read the actual data
- data = buf[lenbytes:lenbytes + size]
- # if len(data) != size: insufficient data
- return data, size + lenbytes
-
-
-class SSL3Exception(Exception):
- pass
-
-
-class TLSRecord(dpkt.Packet):
- """
- SSLv3 or TLSv1+ packet.
-
- In addition to the fields specified in the header, there are
- compressed and decrypted fields, indicating whether, in the language
- of the spec, this is a TLSPlaintext, TLSCompressed, or
- TLSCiphertext. The application will have to figure out when it's
- appropriate to change these values.
- """
-
- __hdr__ = (
- ('type', 'B', 0),
- ('version', 'H', 0),
- ('length', 'H', 0),
- )
-
- def __init__(self, *args, **kwargs):
- # assume plaintext unless specified otherwise in arguments
- self.compressed = kwargs.pop('compressed', False)
- self.encrypted = kwargs.pop('encrypted', False)
- # parent constructor
- dpkt.Packet.__init__(self, *args, **kwargs)
- # make sure length and data are consistent
- self.length = len(self.data)
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- header_length = self.__hdr_len__
- self.data = buf[header_length:header_length+self.length]
- # make sure buffer was long enough
- if len(self.data) != self.length:
- raise dpkt.NeedData('TLSRecord data was too short.')
- # assume compressed and encrypted when it's been parsed from
- # raw data
- self.compressed = True
- self.encrypted = True
-
-
-class TLSChangeCipherSpec(dpkt.Packet):
- """
- ChangeCipherSpec message is just a single byte with value 1
- """
- __hdr__ = (('type', 'B', 1),)
-
-
-class TLSAppData(str):
- """
- As far as TLSRecord is concerned, AppData is just an opaque blob.
- """
- pass
-
-
-class TLSAlert(dpkt.Packet):
-
- __hdr__ = (
- ('level', 'B', 1),
- ('description', 'B', 0),
- )
-
-
-class TLSHelloRequest(dpkt.Packet):
- __hdr__ = tuple()
-
-
-class TLSClientHello(dpkt.Packet):
- __hdr__ = (
- ('version', 'H', 0x0301),
- ('random', '32s', '\x00'*32),
- ) # the rest is variable-length and has to be done manually
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- # now session, cipher suites, extensions are in self.data
- self.session_id, pointer = parse_variable_array(self.data, 1)
-# print 'pointer',pointer
- # handle ciphersuites
- ciphersuites, parsed = parse_variable_array(self.data[pointer:], 2)
- pointer += parsed
- self.num_ciphersuites = len(ciphersuites) / 2
- # check len(ciphersuites) % 2 == 0 ?
- # compression methods
- compression_methods, parsed = parse_variable_array(
- self.data[pointer:], 1)
- pointer += parsed
- self.num_compression_methods = parsed - 1
- self.compression_methods = map(ord, compression_methods)
- # extensions
-
-
-class TLSServerHello(dpkt.Packet):
- __hdr__ = (
- ('version', 'H', '0x0301'),
- ('random', '32s', '\x00'*32),
- ) # session is variable, forcing rest to be manual
-
- def unpack(self, buf):
- try:
- dpkt.Packet.unpack(self, buf)
- self.session_id, pointer = parse_variable_array(self.data, 1)
- # single cipher suite
- self.cipher_suite = struct.unpack('!H', self.data[pointer:pointer+2])[0]
- pointer += 2
- # single compression method
- self.compression = struct.unpack('!B', self.data[pointer:pointer+1])[0]
- pointer += 1
- # ignore extensions for now
- except struct.error:
- # probably data too short
- raise dpkt.NeedData
-
-
-class TLSUnknownHandshake(dpkt.Packet):
- __hdr__ = tuple()
-
-TLSCertificate = TLSUnknownHandshake
-TLSServerKeyExchange = TLSUnknownHandshake
-TLSCertificateRequest = TLSUnknownHandshake
-TLSServerHelloDone = TLSUnknownHandshake
-TLSCertificateVerify = TLSUnknownHandshake
-TLSClientKeyExchange = TLSUnknownHandshake
-TLSFinished = TLSUnknownHandshake
-
-
-# mapping of handshake type ids to their names
-# and the classes that implement them
-HANDSHAKE_TYPES = {
- 0: ('HelloRequest', TLSHelloRequest),
- 1: ('ClientHello', TLSClientHello),
- 2: ('ServerHello', TLSServerHello),
- 11: ('Certificate', TLSCertificate),
- 12: ('ServerKeyExchange', TLSServerKeyExchange),
- 13: ('CertificateRequest', TLSCertificateRequest),
- 14: ('ServerHelloDone', TLSServerHelloDone),
- 15: ('CertificateVerify', TLSCertificateVerify),
- 16: ('ClientKeyExchange', TLSClientKeyExchange),
- 20: ('Finished', TLSFinished),
-}
-
-
-class TLSHandshake(dpkt.Packet):
- '''
- A TLS Handshake message
-
- This goes for all messages encapsulated in the Record layer, but especially
- important for handshakes and app data: A message may be spread across a
- number of TLSRecords, in addition to the possibility of there being more
- than one in a given Record. You have to put together the contents of
- TLSRecord's yourself.
- '''
-
- # struct.unpack can't handle the 3-byte int, so we parse it as bytes
- # (and store it as bytes so dpkt doesn't get confused), and turn it into
- # an int in a user-facing property
- __hdr__ = (
- ('type', 'B', 0),
- ('length_bytes', '3s', 0),
- )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- # Wait, might there be more than one message of self.type?
- embedded_type = HANDSHAKE_TYPES.get(self.type, None)
- if embedded_type is None:
- raise SSL3Exception('Unknown or invalid handshake type %d' %
- self.type)
- # only take the right number of bytes
- self.data = self.data[:self.length]
- if len(self.data) != self.length:
- raise dpkt.NeedData
- # get class out of embedded_type tuple
- self.data = embedded_type[1](self.data)
-
- @property
- def length(self):
- return struct.unpack('!I', '\x00' + self.length_bytes)[0]
-
-
-RECORD_TYPES = {
- 20: TLSChangeCipherSpec,
- 21: TLSAlert,
- 22: TLSHandshake,
- 23: TLSAppData,
-}
-
-
-class SSLFactory(object):
- def __new__(cls, buf):
- v = buf[1:3]
- if v in [ '\x03\x00', '\x03\x01', '\x03\x02' ]:
- return SSL3(buf)
- # SSL2 has no characteristic header or magic bytes, so we just assume
- # that the msg is an SSL2 msg if it is not detected as SSL3+
- return SSL2(buf)
-
-
-def TLSMultiFactory(buf):
- '''
- Attempt to parse one or more TLSRecord's out of buf
-
- Args:
- buf: string containing SSL/TLS messages. May have an incomplete record
- on the end
-
- Returns:
- [TLSRecord]
- int, total bytes consumed, != len(buf) if an incomplete record was left at
- the end.
-
- Raises SSL3Exception.
- '''
- i, n = 0, len(buf)
- msgs = []
- while i < n:
- v = buf[i+1:i+3]
- if v in SSL3_VERSION_BYTES:
- try:
- msg = TLSRecord(buf[i:])
- msgs.append(msg)
- except dpkt.NeedData:
- break
- else:
- raise SSL3Exception('Bad TLS version in buf: %r' % buf[i:i+5])
- i += len(msg)
- return msgs, i
-
-
-import unittest
-
-
-_hexdecode = binascii.a2b_hex
-
-
-class TLSRecordTest(unittest.TestCase):
- """
- Test basic TLSRecord functionality
-
- For this test, the contents of the record doesn't matter, since we're not
- parsing the next layer.
- """
- def setUp(self):
- # add some extra data, to make sure length is parsed correctly
- self.p = TLSRecord('\x17\x03\x01\x00\x08abcdefghzzzzzzzzzzz')
- def testContentType(self):
- self.assertEqual(self.p.type, 23)
- def testVersion(self):
- self.assertEqual(self.p.version, 0x0301)
- def testLength(self):
- self.assertEqual(self.p.length, 8)
- def testData(self):
- self.assertEqual(self.p.data, 'abcdefgh')
- def testInitialFlags(self):
- self.assertTrue(self.p.compressed)
- self.assertTrue(self.p.encrypted)
- def testRepack(self):
- p2 = TLSRecord(type=23, version=0x0301, data='abcdefgh')
- self.assertEqual(p2.type, 23)
- self.assertEqual(p2.version, 0x0301)
- self.assertEqual(p2.length, 8)
- self.assertEqual(p2.data, 'abcdefgh')
- self.assertEqual(p2.pack(), self.p.pack())
- def testTotalLength(self):
- # that len(p) includes header
- self.assertEqual(len(self.p), 13)
- def testRaisesNeedDataWhenBufIsShort(self):
- self.assertRaises(
- dpkt.NeedData,
- TLSRecord,
- '\x16\x03\x01\x00\x10abc')
-
-
-class TLSChangeCipherSpecTest(unittest.TestCase):
- "It's just a byte. This will be quick, I promise"
- def setUp(self):
- self.p = TLSChangeCipherSpec('\x01')
- def testParses(self):
- self.assertEqual(self.p.type, 1)
- def testTotalLength(self):
- self.assertEqual(len(self.p), 1)
-
-
-class TLSAppDataTest(unittest.TestCase):
- "AppData is basically just a string"
- def testValue(self):
- d = TLSAppData('abcdefgh')
- self.assertEqual(d, 'abcdefgh')
-
-
-class TLSHandshakeTest(unittest.TestCase):
- def setUp(self):
- self.h = TLSHandshake('\x00\x00\x00\x01\xff')
- def testCreatedInsideMessage(self):
- self.assertTrue(isinstance(self.h.data, TLSHelloRequest))
- def testLength(self):
- self.assertEqual(self.h.length, 0x01)
- def testRaisesNeedData(self):
- self.assertRaises(dpkt.NeedData, TLSHandshake, '\x00\x00\x01\x01')
-
-
-class ClientHelloTest(unittest.TestCase):
- 'This data is extracted from and verified by Wireshark'
-
- def setUp(self):
- self.data = _hexdecode(
- "01000199" # handshake header
- "0301" # version
- "5008220ce5e0e78b6891afe204498c9363feffbe03235a2d9e05b7d990eb708d" # rand
- "2009bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed1" # session id
- # cipher suites
- "005400ffc00ac0140088008700390038c00fc00500840035c007c009c011c0130045004400330032c00cc00ec002c0040096004100050004002fc008c01200160013c00dc003feff000ac006c010c00bc00100020001"
- "0100" # compresssion methods
- # extensions
- "00fc0000000e000c0000096c6f63616c686f7374000a00080006001700180019000b00020100002300d0a50b2e9f618a9ea9bf493ef49b421835cd2f6b05bbe1179d8edf70d58c33d656e8696d36d7e7e0b9d3ecc0e4de339552fa06c64c0fcb550a334bc43944e2739ca342d15a9ebbe981ac87a0d38160507d47af09bdc16c5f0ee4cdceea551539382333226048a026d3a90a0535f4a64236467db8fee22b041af986ad0f253bc369137cd8d8cd061925461d7f4d7895ca9a4181ab554dad50360ac31860e971483877c9335ac1300c5e78f3e56f3b8e0fc16358fcaceefd5c8d8aaae7b35be116f8832856ca61144fcdd95e071b94d0cf7233740000"
- "FFFFFFFFFFFFFFFF") # random garbage
- self.p = TLSHandshake(self.data)
-
- def testClientHelloConstructed(self):
- 'Make sure the correct class was constructed'
- #print self.p
- self.assertTrue(isinstance(self.p.data, TLSClientHello))
-
-# def testClientDateCorrect(self):
-# self.assertEqual(self.p.random_unixtime, 1342710284)
-
- def testClientRandomCorrect(self):
- self.assertEqual(self.p.data.random,
- _hexdecode('5008220ce5e0e78b6891afe204498c9363feffbe03235a2d9e05b7d990eb708d'))
-
- def testCipherSuiteLength(self):
- # we won't bother testing the identity of each cipher suite in the list.
- self.assertEqual(self.p.data.num_ciphersuites, 42)
- #self.assertEqual(len(self.p.ciphersuites), 42)
-
- def testSessionId(self):
- self.assertEqual(self.p.data.session_id,
- _hexdecode('09bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed1'))
-
- def testCompressionMethods(self):
- self.assertEqual(self.p.data.num_compression_methods, 1)
-
- def testTotalLength(self):
- self.assertEqual(len(self.p), 413)
-
-
-class ServerHelloTest(unittest.TestCase):
- 'Again, from Wireshark'
-
- def setUp(self):
- self.data = _hexdecode('0200004d03015008220c8ec43c5462315a7c99f5d5b6bff009ad285b51dc18485f352e9fdecd2009bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed10002000005ff01000100')
- self.p = TLSHandshake(self.data)
-
- def testConstructed(self):
- self.assertTrue(isinstance(self.p.data, TLSServerHello))
-
-# def testDateCorrect(self):
-# self.assertEqual(self.p.random_unixtime, 1342710284)
-
- def testRandomCorrect(self):
- self.assertEqual(self.p.data.random,
- _hexdecode('5008220c8ec43c5462315a7c99f5d5b6bff009ad285b51dc18485f352e9fdecd'))
-
- def testCipherSuite(self):
- self.assertEqual(
- ssl_ciphersuites.BY_CODE[self.p.data.cipher_suite].name,
- 'TLS_RSA_WITH_NULL_SHA')
-
- def testTotalLength(self):
- self.assertEqual(len(self.p), 81)
-
-
-class TLSMultiFactoryTest(unittest.TestCase):
- "Made up test data"
-
- def setUp(self):
- self.data = _hexdecode('1703010010' # header 1
- 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' # data 1
- '1703010010' # header 2
- 'BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB' # data 2
- '1703010010' # header 3
- 'CCCCCCCC') # data 3 (incomplete)
- self.msgs, self.bytes_parsed = TLSMultiFactory(self.data)
-
- def testNumMessages(self):
- # only complete messages should be parsed, incomplete ones left
- # in buffer
- self.assertEqual(len(self.msgs), 2)
-
- def testBytesParsed(self):
- self.assertEqual(self.bytes_parsed, (5 + 16) * 2)
-
- def testFirstMsgData(self):
- self.assertEqual(self.msgs[0].data, _hexdecode('AA' * 16))
-
- def testSecondMsgData(self):
- self.assertEqual(self.msgs[1].data, _hexdecode('BB' * 16))
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/ssl_ciphersuites.py b/scripts/external_libs/dpkt-1.8.6/dpkt/ssl_ciphersuites.py
deleted file mode 100644
index 49148a34..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/ssl_ciphersuites.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2012 Google Inc. All rights reserved.
-
-"""
-Nicely formatted cipher suite definitions for TLS
-
-A list of cipher suites in the form of CipherSuite objects.
-These are supposed to be immutable; don't mess with them.
-"""
-
-
-class CipherSuite(object):
- """
- Encapsulates a cipher suite.
-
- Members/args:
- * code: two-byte ID code, as int
- * name: as in 'TLS_RSA_WITH_RC4_40_MD5'
- * kx: key exchange algorithm, string
- * auth: authentication algorithm, string
- * encoding: encoding algorithm
- * mac: message authentication code algorithm
- """
-
- def __init__(self, code, name, kx, auth, encoding, mac):
- self.code = code
- self.name = name
- self.kx = kx
- self.auth = auth
- self.encoding = encoding
- self.mac = mac
-
- def __repr__(self):
- return 'CipherSuite(%s)' % self.name
-
- MAC_SIZES = {
- 'MD5': 16,
- 'SHA': 20,
- 'SHA256': 32, # I guess
- }
-
- BLOCK_SIZES = {
- 'AES_256_CBC': 16,
- }
-
- @property
- def mac_size(self):
- """In bytes. Default to 0."""
- return self.MAC_SIZES.get(self.mac, 0)
-
- @property
- def block_size(self):
- """In bytes. Default to 1."""
- return self.BLOCK_SIZES.get(self.encoding, 1)
-
-
-# master list of CipherSuite Objects
-CIPHERSUITES = [
- # not a real cipher suite, can be ignored, see RFC5746
- CipherSuite(0xff, 'TLS_EMPTY_RENEGOTIATION_INFO',
- 'NULL', 'NULL', 'NULL', 'NULL'),
- CipherSuite(0x00, 'TLS_NULL_WITH_NULL_NULL',
- 'NULL', 'NULL', 'NULL', 'NULL'),
- CipherSuite(0x01, 'TLS_RSA_WITH_NULL_MD5', 'RSA', 'RSA', 'NULL', 'MD5'),
- CipherSuite(0x02, 'TLS_RSA_WITH_NULL_SHA', 'RSA', 'RSA', 'NULL', 'SHA'),
- CipherSuite(0x0039, 'TLS_DHE_RSA_WITH_AES_256_CBC_SHA',
- 'DHE', 'RSA', 'AES_256_CBC', 'SHA'), # not sure I got the kx/auth thing right.
- CipherSuite(0xffff, 'UNKNOWN_CIPHER', '', '', '', '')
-]
-
-BY_CODE = dict(
- (cipher.code, cipher) for cipher in CIPHERSUITES)
-
-BY_NAME = dict(
- (suite.name, suite) for suite in CIPHERSUITES)
-
-NULL_SUITE = BY_CODE[0x00]
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/stp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/stp.py
deleted file mode 100644
index 8fab28b3..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/stp.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# $Id: stp.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Spanning Tree Protocol."""
-
-import dpkt
-
-class STP(dpkt.Packet):
- __hdr__ = (
- ('proto_id', 'H', 0),
- ('v', 'B', 0),
- ('type', 'B', 0),
- ('flags', 'B', 0),
- ('root_id', '8s', ''),
- ('root_path', 'I', 0),
- ('bridge_id', '8s', ''),
- ('port_id', 'H', 0),
- ('age', 'H', 0),
- ('max_age', 'H', 0),
- ('hello', 'H', 0),
- ('fd', 'H', 0)
- )
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/stun.py b/scripts/external_libs/dpkt-1.8.6/dpkt/stun.py
deleted file mode 100644
index 5706f0ef..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/stun.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# $Id: stun.py 47 2008-05-27 02:10:00Z jon.oberheide $
-
-"""Simple Traversal of UDP through NAT."""
-
-import struct
-import dpkt
-
-# STUN - RFC 3489
-# http://tools.ietf.org/html/rfc3489
-# Each packet has a 20 byte header followed by 0 or more attribute TLVs.
-
-# Message Types
-BINDING_REQUEST = 0x0001
-BINDING_RESPONSE = 0x0101
-BINDING_ERROR_RESPONSE = 0x0111
-SHARED_SECRET_REQUEST = 0x0002
-SHARED_SECRET_RESPONSE = 0x0102
-SHARED_SECRET_ERROR_RESPONSE = 0x0112
-
-# Message Attributes
-MAPPED_ADDRESS = 0x0001
-RESPONSE_ADDRESS = 0x0002
-CHANGE_REQUEST = 0x0003
-SOURCE_ADDRESS = 0x0004
-CHANGED_ADDRESS = 0x0005
-USERNAME = 0x0006
-PASSWORD = 0x0007
-MESSAGE_INTEGRITY = 0x0008
-ERROR_CODE = 0x0009
-UNKNOWN_ATTRIBUTES = 0x000a
-REFLECTED_FROM = 0x000b
-
-class STUN(dpkt.Packet):
- __hdr__ = (
- ('type', 'H', 0),
- ('len', 'H', 0),
- ('xid', '16s', 0)
- )
-
-def tlv(buf):
- n = 4
- t, l = struct.unpack('>HH', buf[:n])
- v = buf[n:n+l]
- buf = buf[n+l:]
- return (t,l,v, buf)
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/tcp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/tcp.py
deleted file mode 100644
index eaf10e3d..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/tcp.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# $Id: tcp.py 42 2007-08-02 22:38:47Z jon.oberheide $
-
-"""Transmission Control Protocol."""
-
-import dpkt
-
-# TCP control flags
-TH_FIN = 0x01 # end of data
-TH_SYN = 0x02 # synchronize sequence numbers
-TH_RST = 0x04 # reset connection
-TH_PUSH = 0x08 # push
-TH_ACK = 0x10 # acknowledgment number set
-TH_URG = 0x20 # urgent pointer set
-TH_ECE = 0x40 # ECN echo, RFC 3168
-TH_CWR = 0x80 # congestion window reduced
-
-TCP_PORT_MAX = 65535 # maximum port
-TCP_WIN_MAX = 65535 # maximum (unscaled) window
-
-class TCP(dpkt.Packet):
- __hdr__ = (
- ('sport', 'H', 0xdead),
- ('dport', 'H', 0),
- ('seq', 'I', 0xdeadbeefL),
- ('ack', 'I', 0),
- ('off_x2', 'B', ((5 << 4) | 0)),
- ('flags', 'B', TH_SYN),
- ('win', 'H', TCP_WIN_MAX),
- ('sum', 'H', 0),
- ('urp', 'H', 0)
- )
- opts = ''
-
- def _get_off(self): return self.off_x2 >> 4
- def _set_off(self, off): self.off_x2 = (off << 4) | (self.off_x2 & 0xf)
- off = property(_get_off, _set_off)
-
- def __len__(self):
- return self.__hdr_len__ + len(self.opts) + len(self.data)
-
- def __str__(self):
- return self.pack_hdr() + self.opts + str(self.data)
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- ol = ((self.off_x2 >> 4) << 2) - self.__hdr_len__
- if ol < 0:
- raise dpkt.UnpackError, 'invalid header length'
- self.opts = buf[self.__hdr_len__:self.__hdr_len__ + ol]
- self.data = buf[self.__hdr_len__ + ol:]
-
-# Options (opt_type) - http://www.iana.org/assignments/tcp-parameters
-TCP_OPT_EOL = 0 # end of option list
-TCP_OPT_NOP = 1 # no operation
-TCP_OPT_MSS = 2 # maximum segment size
-TCP_OPT_WSCALE = 3 # window scale factor, RFC 1072
-TCP_OPT_SACKOK = 4 # SACK permitted, RFC 2018
-TCP_OPT_SACK = 5 # SACK, RFC 2018
-TCP_OPT_ECHO = 6 # echo (obsolete), RFC 1072
-TCP_OPT_ECHOREPLY = 7 # echo reply (obsolete), RFC 1072
-TCP_OPT_TIMESTAMP = 8 # timestamp, RFC 1323
-TCP_OPT_POCONN = 9 # partial order conn, RFC 1693
-TCP_OPT_POSVC = 10 # partial order service, RFC 1693
-TCP_OPT_CC = 11 # connection count, RFC 1644
-TCP_OPT_CCNEW = 12 # CC.NEW, RFC 1644
-TCP_OPT_CCECHO = 13 # CC.ECHO, RFC 1644
-TCP_OPT_ALTSUM = 14 # alt checksum request, RFC 1146
-TCP_OPT_ALTSUMDATA = 15 # alt checksum data, RFC 1146
-TCP_OPT_SKEETER = 16 # Skeeter
-TCP_OPT_BUBBA = 17 # Bubba
-TCP_OPT_TRAILSUM = 18 # trailer checksum
-TCP_OPT_MD5 = 19 # MD5 signature, RFC 2385
-TCP_OPT_SCPS = 20 # SCPS capabilities
-TCP_OPT_SNACK = 21 # selective negative acks
-TCP_OPT_REC = 22 # record boundaries
-TCP_OPT_CORRUPT = 23 # corruption experienced
-TCP_OPT_SNAP = 24 # SNAP
-TCP_OPT_TCPCOMP = 26 # TCP compression filter
-TCP_OPT_MAX = 27
-
-def parse_opts(buf):
- """Parse TCP option buffer into a list of (option, data) tuples."""
- opts = []
- while buf:
- o = ord(buf[0])
- if o > TCP_OPT_NOP:
- try:
- l = ord(buf[1])
- d, buf = buf[2:l], buf[l:]
- except ValueError:
- #print 'bad option', repr(str(buf))
- opts.append(None) # XXX
- break
- else:
- d, buf = '', buf[1:]
- opts.append((o,d))
- return opts
-
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/telnet.py b/scripts/external_libs/dpkt-1.8.6/dpkt/telnet.py
deleted file mode 100644
index 9e8194d3..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/telnet.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# $Id: telnet.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Telnet."""
-
-IAC = 255 # interpret as command:
-DONT = 254 # you are not to use option
-DO = 253 # please, you use option
-WONT = 252 # I won't use option
-WILL = 251 # I will use option
-SB = 250 # interpret as subnegotiation
-GA = 249 # you may reverse the line
-EL = 248 # erase the current line
-EC = 247 # erase the current character
-AYT = 246 # are you there
-AO = 245 # abort output--but let prog finish
-IP = 244 # interrupt process--permanently
-BREAK = 243 # break
-DM = 242 # data mark--for connect. cleaning
-NOP = 241 # nop
-SE = 240 # end sub negotiation
-EOR = 239 # end of record (transparent mode)
-ABORT = 238 # Abort process
-SUSP = 237 # Suspend process
-xEOF = 236 # End of file: EOF is already used...
-
-SYNCH = 242 # for telfunc calls
-
-def strip_options(buf):
- """Return a list of lines and dict of options from telnet data."""
- l = buf.split(chr(IAC))
- #print l
- b = []
- d = {}
- subopt = False
- for w in l:
- if not w:
- continue
- o = ord(w[0])
- if o > SB:
- #print 'WILL/WONT/DO/DONT/IAC', `w`
- w = w[2:]
- elif o == SE:
- #print 'SE', `w`
- w = w[1:]
- subopt = False
- elif o == SB:
- #print 'SB', `w`
- subopt = True
- for opt in ('USER', 'DISPLAY', 'TERM'):
- p = w.find(opt + '\x01')
- if p != -1:
- d[opt] = w[p+len(opt)+1:].split('\x00', 1)[0]
- w = None
- elif subopt:
- w = None
- if w:
- w = w.replace('\x00', '\n').splitlines()
- if not w[-1]: w.pop()
- b.extend(w)
- return b, d
-
-if __name__ == '__main__':
- import unittest
-
- class TelnetTestCase(unittest.TestCase):
- def test_telnet(self):
- l = []
- s = "\xff\xfb%\xff\xfa%\x00\x00\x00\xff\xf0\xff\xfd&\xff\xfa&\x05\xff\xf0\xff\xfa&\x01\x01\x02\xff\xf0\xff\xfb\x18\xff\xfb \xff\xfb#\xff\xfb'\xff\xfc$\xff\xfa \x0038400,38400\xff\xf0\xff\xfa#\x00doughboy.citi.umich.edu:0.0\xff\xf0\xff\xfa'\x00\x00DISPLAY\x01doughboy.citi.umich.edu:0.0\x00USER\x01dugsong\xff\xf0\xff\xfa\x18\x00XTERM\xff\xf0\xff\xfd\x03\xff\xfc\x01\xff\xfb\x1f\xff\xfa\x1f\x00P\x00(\xff\xf0\xff\xfd\x05\xff\xfb!\xff\xfd\x01fugly\r\x00yoda\r\x00bashtard\r\x00"
- l.append(s)
- s = '\xff\xfd\x01\xff\xfd\x03\xff\xfb\x18\xff\xfb\x1f\xff\xfa\x1f\x00X\x002\xff\xf0admin\r\x00\xff\xfa\x18\x00LINUX\xff\xf0foobar\r\x00enable\r\x00foobar\r\x00\r\x00show ip int Vlan 666\r\x00'
- l.append(s)
- s = '\xff\xfb%\xff\xfa%\x00\x00\x00\xff\xf0\xff\xfd&\xff\xfa&\x05\xff\xf0\xff\xfa&\x01\x01\x02\xff\xf0\xff\xfb&\xff\xfb\x18\xff\xfb \xff\xfb#\xff\xfb\'\xff\xfc$\xff\xfa \x0038400,38400\xff\xf0\xff\xfa#\x00doughboy.citi.umich.edu:0.0\xff\xf0\xff\xfa\'\x00\x00DISPLAY\x01doughboy.citi.umich.edu:0.0\x00USER\x01dugsong\xff\xf0\xff\xfa\x18\x00XTERM\xff\xf0\xff\xfd\x03\xff\xfc\x01\xff\xfb"\xff\xfa"\x03\x01\x03\x00\x03b\x03\x04\x02\x0f\x05\x00\xff\xff\x07b\x1c\x08\x02\x04\tB\x1a\n\x02\x7f\x0b\x02\x15\x0c\x02\x17\r\x02\x12\x0e\x02\x16\x0f\x02\x11\x10\x02\x13\x11\x00\xff\xff\x12\x00\xff\xff\xff\xf0\xff\xfb\x1f\xff\xfa\x1f\x00P\x00(\xff\xf0\xff\xfd\x05\xff\xfb!\xff\xfa"\x01\x0f\xff\xf0\xff\xfd\x01\xff\xfe\x01\xff\xfa"\x03\x01\x80\x00\xff\xf0\xff\xfd\x01werd\r\n\xff\xfe\x01yoda\r\n\xff\xfd\x01darthvader\r\n\xff\xfe\x01'
- l.append(s)
- exp = [ (['fugly', 'yoda', 'bashtard'], {'USER': 'dugsong', 'DISPLAY': 'doughboy.citi.umich.edu:0.0'}), (['admin', 'foobar', 'enable', 'foobar', '', 'show ip int Vlan 666'], {}), (['werd', 'yoda', 'darthvader'], {'USER': 'dugsong', 'DISPLAY': 'doughboy.citi.umich.edu:0.0'}) ]
- self.failUnless(map(strip_options, l) == exp)
-
- unittest.main()
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/tftp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/tftp.py
deleted file mode 100644
index 046ae8d2..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/tftp.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# $Id: tftp.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Trivial File Transfer Protocol."""
-
-import struct
-import dpkt
-
-# Opcodes
-OP_RRQ = 1 # read request
-OP_WRQ = 2 # write request
-OP_DATA = 3 # data packet
-OP_ACK = 4 # acknowledgment
-OP_ERR = 5 # error code
-
-# Error codes
-EUNDEF = 0 # not defined
-ENOTFOUND = 1 # file not found
-EACCESS = 2 # access violation
-ENOSPACE = 3 # disk full or allocation exceeded
-EBADOP = 4 # illegal TFTP operation
-EBADID = 5 # unknown transfer ID
-EEXISTS = 6 # file already exists
-ENOUSER = 7 # no such user
-
-class TFTP(dpkt.Packet):
- __hdr__ = (('opcode', 'H', 1), )
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- if self.opcode in (OP_RRQ, OP_WRQ):
- l = self.data.split('\x00')
- self.filename = l[0]
- self.mode = l[1]
- self.data = ''
- elif self.opcode in (OP_DATA, OP_ACK):
- self.block = struct.unpack('>H', self.data[:2])
- self.data = self.data[2:]
- elif self.opcode == OP_ERR:
- self.errcode = struct.unpack('>H', self.data[:2])
- self.errmsg = self.data[2:].split('\x00')[0]
- self.data = ''
-
- def __len__(self):
- return len(str(self))
-
- def __str__(self):
- if self.opcode in (OP_RRQ, OP_WRQ):
- s = '%s\x00%s\x00' % (self.filename, self.mode)
- elif self.opcode in (OP_DATA, OP_ACK):
- s = struct.pack('>H', self.block)
- elif self.opcode == OP_ERR:
- s = struct.pack('>H', self.errcode) + ('%s\x00' % self.errmsg)
- else:
- s = ''
- return self.pack_hdr() + s + self.data
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/tns.py b/scripts/external_libs/dpkt-1.8.6/dpkt/tns.py
deleted file mode 100644
index 7e092250..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/tns.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# $Id: tns.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Transparent Network Substrate."""
-
-import dpkt
-
-class TNS(dpkt.Packet):
- __hdr__ = (
- ('length', 'H', 0),
- ('pktsum', 'H', 0),
- ('type', 'B', 0),
- ('rsvd', 'B', 0),
- ('hdrsum', 'H', 0),
- ('msg', '0s', ''),
- )
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- n = self.length - self.__hdr_len__
- if n > len(self.data):
- raise dpkt.NeedData('short message (missing %d bytes)' %
- (n - len(self.data)))
- self.msg = self.data[:n]
- self.data = self.data[n:]
-
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/tpkt.py b/scripts/external_libs/dpkt-1.8.6/dpkt/tpkt.py
deleted file mode 100644
index d81f7855..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/tpkt.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# $Id: tpkt.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""ISO Transport Service on top of the TCP (TPKT)."""
-
-import dpkt
-
-# TPKT - RFC 1006 Section 6
-# http://www.faqs.org/rfcs/rfc1006.html
-
-class TPKT(dpkt.Packet):
- __hdr__ = (
- ('v', 'B', 3),
- ('rsvd', 'B', 0),
- ('len', 'H', 0)
- )
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/udp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/udp.py
deleted file mode 100644
index 0fd6334b..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/udp.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# $Id: udp.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""User Datagram Protocol."""
-
-import dpkt
-
-UDP_PORT_MAX = 65535
-
-class UDP(dpkt.Packet):
- __hdr__ = (
- ('sport', 'H', 0xdead),
- ('dport', 'H', 0),
- ('ulen', 'H', 8),
- ('sum', 'H', 0)
- )
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/vrrp.py b/scripts/external_libs/dpkt-1.8.6/dpkt/vrrp.py
deleted file mode 100644
index fbb97937..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/vrrp.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# $Id: vrrp.py 88 2013-03-05 19:43:17Z andrewflnr@gmail.com $
-
-"""Virtual Router Redundancy Protocol."""
-
-import dpkt
-
-class VRRP(dpkt.Packet):
- __hdr__ = (
- ('vtype', 'B', 0x21),
- ('vrid', 'B', 0),
- ('priority', 'B', 0),
- ('count', 'B', 0),
- ('atype', 'B', 0),
- ('advtime', 'B', 0),
- ('sum', 'H', 0),
- )
- addrs = ()
- auth = ''
- def _get_v(self):
- return self.vtype >> 4
- def _set_v(self, v):
- self.vtype = (self.vtype & ~0xf) | (v << 4)
- v = property(_get_v, _set_v)
-
- def _get_type(self):
- return self.vtype & 0xf
- def _set_type(self, v):
- self.vtype = (self.vtype & ~0xf0) | (v & 0xf)
- type = property(_get_type, _set_type)
-
- def unpack(self, buf):
- dpkt.Packet.unpack(self, buf)
- l = []
- off = 0
- for off in range(0, 4 * self.count, 4):
- l.append(self.data[off:off+4])
- self.addrs = l
- self.auth = self.data[off+4:]
- self.data = ''
-
- def __len__(self):
- return self.__hdr_len__ + (4 * self.count) + len(self.auth)
-
- def __str__(self):
- data = ''.join(self.addrs) + self.auth
- if not self.sum:
- self.sum = dpkt.in_cksum(self.pack_hdr() + data)
- return self.pack_hdr() + data
diff --git a/scripts/external_libs/dpkt-1.8.6/dpkt/yahoo.py b/scripts/external_libs/dpkt-1.8.6/dpkt/yahoo.py
deleted file mode 100644
index 726aeece..00000000
--- a/scripts/external_libs/dpkt-1.8.6/dpkt/yahoo.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# $Id: yahoo.py 23 2006-11-08 15:45:33Z dugsong $
-
-"""Yahoo Messenger."""
-
-import dpkt
-
-class YHOO(dpkt.Packet):
- __hdr__ = [
- ('version', '8s', ' ' * 8),
- ('length', 'I', 0),
- ('service', 'I', 0),
- ('connid', 'I', 0),
- ('magic', 'I', 0),
- ('unknown', 'I', 0),
- ('type', 'I', 0),
- ('nick1', '36s', ' ' * 36),
- ('nick2', '36s', ' ' * 36)
- ]
- __byte_order__ = '<'
-
-class YMSG(dpkt.Packet):
- __hdr__ = [
- ('version', '8s', ' ' * 8),
- ('length', 'H', 0),
- ('type', 'H', 0),
- ('unknown1', 'I', 0),
- ('unknown2', 'I', 0)
- ]
-
diff --git a/scripts/external_libs/dpkt-1.8.6/setup.cfg b/scripts/external_libs/dpkt-1.8.6/setup.cfg
deleted file mode 100644
index 56ffc205..00000000
--- a/scripts/external_libs/dpkt-1.8.6/setup.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-[bdist_wheel]
-universal = 1
-
-[aliases]
-release = register clean --all sdist bdist_wheel upload
-
-[flake8]
-max-line-length = 140
-
-[pytest]
-addopts = -v --cov-report term-missing
-python_files = *.py
-python_functions = test
-norecursedirs = .tox .git *.egg-info __pycache__ dist build
-
-[egg_info]
-tag_build =
-tag_date = 0
-tag_svn_revision = 0
-
diff --git a/scripts/external_libs/dpkt-1.8.6/setup.py b/scripts/external_libs/dpkt-1.8.6/setup.py
deleted file mode 100644
index fe4f84fa..00000000
--- a/scripts/external_libs/dpkt-1.8.6/setup.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import os
-import sys
-
-try:
- from setuptools import setup, Command
-except ImportError:
- from distutils.core import setup, Command
-
-package_name = 'dpkt'
-description = 'fast, simple packet creation / parsing, with definitions for the basic TCP/IP protocols'
-readme = open('README.rst').read()
-requirements = [ ]
-
-# PyPI Readme
-long_description = open('README.rst').read()
-
-# Pull in the package
-package = __import__(package_name)
-
-setup(name=package_name,
- version=package.__version__,
- author=package.__author__,
- url=package.__url__,
- description=description,
- long_description=long_description,
- packages=['dpkt'],
- install_requires=requirements,
- license='BSD',
- zip_safe=False,
- classifiers=[
- 'Development Status :: 4 - Beta',
- 'Intended Audience :: Developers',
- 'License :: OSI Approved :: BSD License',
- 'Natural Language :: English',
- 'Programming Language :: Python :: 2.6',
- 'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: Implementation :: CPython',
- 'Programming Language :: Python :: Implementation :: PyPy',
- ]
-)
diff --git a/scripts/external_libs/nose-1.3.4/AUTHORS b/scripts/external_libs/nose-1.3.4/AUTHORS
deleted file mode 100755
index 5414bcda..00000000
--- a/scripts/external_libs/nose-1.3.4/AUTHORS
+++ /dev/null
@@ -1,27 +0,0 @@
-Jason Pellerin
-Kumar McMillan
-Mika Eloranta
-Jay Parlar
-Scot Doyle
-James Casbon
-Antoine Pitrou
-John J Lee
-Allen Bierbaum
-Pam Zerbinos
-Augie Fackler
-Peter Fein
-Kevin Mitchell
-Alex Stewart
-Timothee Peignier
-Thomas Kluyver
-Heng Liu
-Rosen Diankov
-Buck Golemon
-Bobby Impollonia
-Takafumi Arakaki
-Peter Bengtsson
-Gary Donovan
-Brendan McCollam
-Erik Rose
-Sascha Peilicke
-Andre Caron
diff --git a/scripts/external_libs/nose-1.3.4/PKG-INFO b/scripts/external_libs/nose-1.3.4/PKG-INFO
deleted file mode 100755
index dea3d585..00000000
--- a/scripts/external_libs/nose-1.3.4/PKG-INFO
+++ /dev/null
@@ -1,38 +0,0 @@
-Metadata-Version: 1.1
-Name: nose
-Version: 1.3.4
-Summary: nose extends unittest to make testing easier
-Home-page: http://readthedocs.org/docs/nose/
-Author: Jason Pellerin
-Author-email: jpellerin+nose@gmail.com
-License: GNU LGPL
-Description: nose extends the test loading and running features of unittest, making
- it easier to write, find and run tests.
-
- By default, nose will run tests in files or directories under the current
- working directory whose names include "test" or "Test" at a word boundary
- (like "test_this" or "functional_test" or "TestClass" but not
- "libtest"). Test output is similar to that of unittest, but also includes
- captured stdout output from failing tests, for easy print-style debugging.
-
- These features, and many more, are customizable through the use of
- plugins. Plugins included with nose provide support for doctest, code
- coverage and profiling, flexible attribute-based test selection,
- output capture and more. More information about writing plugins may be
- found on in the nose API documentation, here:
- http://readthedocs.org/docs/nose/
-
- If you have recently reported a bug marked as fixed, or have a craving for
- the very latest, you may want the development version instead:
- https://github.com/nose-devs/nose/tarball/master#egg=nose-dev
-
-Keywords: test unittest doctest automatic discovery
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
-Classifier: Natural Language :: English
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-Classifier: Topic :: Software Development :: Testing
diff --git a/scripts/external_libs/nose-1.3.4/lgpl.txt b/scripts/external_libs/nose-1.3.4/lgpl.txt
deleted file mode 100755
index 8add30ad..00000000
--- a/scripts/external_libs/nose-1.3.4/lgpl.txt
+++ /dev/null
@@ -1,504 +0,0 @@
- GNU LESSER GENERAL PUBLIC LICENSE
- Version 2.1, February 1999
-
- Copyright (C) 1991, 1999 Free Software Foundation, Inc.
- 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-[This is the first released version of the Lesser GPL. It also counts
- as the successor of the GNU Library Public License, version 2, hence
- the version number 2.1.]
-
- Preamble
-
- The licenses for most software are designed to take away your
-freedom to share and change it. By contrast, the GNU General Public
-Licenses are intended to guarantee your freedom to share and change
-free software--to make sure the software is free for all its users.
-
- This license, the Lesser General Public License, applies to some
-specially designated software packages--typically libraries--of the
-Free Software Foundation and other authors who decide to use it. You
-can use it too, but we suggest you first think carefully about whether
-this license or the ordinary General Public License is the better
-strategy to use in any particular case, based on the explanations below.
-
- When we speak of free software, we are referring to freedom of use,
-not price. Our General Public Licenses are designed to make sure that
-you have the freedom to distribute copies of free software (and charge
-for this service if you wish); that you receive source code or can get
-it if you want it; that you can change the software and use pieces of
-it in new free programs; and that you are informed that you can do
-these things.
-
- To protect your rights, we need to make restrictions that forbid
-distributors to deny you these rights or to ask you to surrender these
-rights. These restrictions translate to certain responsibilities for
-you if you distribute copies of the library or if you modify it.
-
- For example, if you distribute copies of the library, whether gratis
-or for a fee, you must give the recipients all the rights that we gave
-you. You must make sure that they, too, receive or can get the source
-code. If you link other code with the library, you must provide
-complete object files to the recipients, so that they can relink them
-with the library after making changes to the library and recompiling
-it. And you must show them these terms so they know their rights.
-
- We protect your rights with a two-step method: (1) we copyright the
-library, and (2) we offer you this license, which gives you legal
-permission to copy, distribute and/or modify the library.
-
- To protect each distributor, we want to make it very clear that
-there is no warranty for the free library. Also, if the library is
-modified by someone else and passed on, the recipients should know
-that what they have is not the original version, so that the original
-author's reputation will not be affected by problems that might be
-introduced by others.
-
- Finally, software patents pose a constant threat to the existence of
-any free program. We wish to make sure that a company cannot
-effectively restrict the users of a free program by obtaining a
-restrictive license from a patent holder. Therefore, we insist that
-any patent license obtained for a version of the library must be
-consistent with the full freedom of use specified in this license.
-
- Most GNU software, including some libraries, is covered by the
-ordinary GNU General Public License. This license, the GNU Lesser
-General Public License, applies to certain designated libraries, and
-is quite different from the ordinary General Public License. We use
-this license for certain libraries in order to permit linking those
-libraries into non-free programs.
-
- When a program is linked with a library, whether statically or using
-a shared library, the combination of the two is legally speaking a
-combined work, a derivative of the original library. The ordinary
-General Public License therefore permits such linking only if the
-entire combination fits its criteria of freedom. The Lesser General
-Public License permits more lax criteria for linking other code with
-the library.
-
- We call this license the "Lesser" General Public License because it
-does Less to protect the user's freedom than the ordinary General
-Public License. It also provides other free software developers Less
-of an advantage over competing non-free programs. These disadvantages
-are the reason we use the ordinary General Public License for many
-libraries. However, the Lesser license provides advantages in certain
-special circumstances.
-
- For example, on rare occasions, there may be a special need to
-encourage the widest possible use of a certain library, so that it becomes
-a de-facto standard. To achieve this, non-free programs must be
-allowed to use the library. A more frequent case is that a free
-library does the same job as widely used non-free libraries. In this
-case, there is little to gain by limiting the free library to free
-software only, so we use the Lesser General Public License.
-
- In other cases, permission to use a particular library in non-free
-programs enables a greater number of people to use a large body of
-free software. For example, permission to use the GNU C Library in
-non-free programs enables many more people to use the whole GNU
-operating system, as well as its variant, the GNU/Linux operating
-system.
-
- Although the Lesser General Public License is Less protective of the
-users' freedom, it does ensure that the user of a program that is
-linked with the Library has the freedom and the wherewithal to run
-that program using a modified version of the Library.
-
- The precise terms and conditions for copying, distribution and
-modification follow. Pay close attention to the difference between a
-"work based on the library" and a "work that uses the library". The
-former contains code derived from the library, whereas the latter must
-be combined with the library in order to run.
-
- GNU LESSER GENERAL PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. This License Agreement applies to any software library or other
-program which contains a notice placed by the copyright holder or
-other authorized party saying it may be distributed under the terms of
-this Lesser General Public License (also called "this License").
-Each licensee is addressed as "you".
-
- A "library" means a collection of software functions and/or data
-prepared so as to be conveniently linked with application programs
-(which use some of those functions and data) to form executables.
-
- The "Library", below, refers to any such software library or work
-which has been distributed under these terms. A "work based on the
-Library" means either the Library or any derivative work under
-copyright law: that is to say, a work containing the Library or a
-portion of it, either verbatim or with modifications and/or translated
-straightforwardly into another language. (Hereinafter, translation is
-included without limitation in the term "modification".)
-
- "Source code" for a work means the preferred form of the work for
-making modifications to it. For a library, complete source code means
-all the source code for all modules it contains, plus any associated
-interface definition files, plus the scripts used to control compilation
-and installation of the library.
-
- Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope. The act of
-running a program using the Library is not restricted, and output from
-such a program is covered only if its contents constitute a work based
-on the Library (independent of the use of the Library in a tool for
-writing it). Whether that is true depends on what the Library does
-and what the program that uses the Library does.
-
- 1. You may copy and distribute verbatim copies of the Library's
-complete source code as you receive it, in any medium, provided that
-you conspicuously and appropriately publish on each copy an
-appropriate copyright notice and disclaimer of warranty; keep intact
-all the notices that refer to this License and to the absence of any
-warranty; and distribute a copy of this License along with the
-Library.
-
- You may charge a fee for the physical act of transferring a copy,
-and you may at your option offer warranty protection in exchange for a
-fee.
-
- 2. You may modify your copy or copies of the Library or any portion
-of it, thus forming a work based on the Library, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
-
- a) The modified work must itself be a software library.
-
- b) You must cause the files modified to carry prominent notices
- stating that you changed the files and the date of any change.
-
- c) You must cause the whole of the work to be licensed at no
- charge to all third parties under the terms of this License.
-
- d) If a facility in the modified Library refers to a function or a
- table of data to be supplied by an application program that uses
- the facility, other than as an argument passed when the facility
- is invoked, then you must make a good faith effort to ensure that,
- in the event an application does not supply such function or
- table, the facility still operates, and performs whatever part of
- its purpose remains meaningful.
-
- (For example, a function in a library to compute square roots has
- a purpose that is entirely well-defined independent of the
- application. Therefore, Subsection 2d requires that any
- application-supplied function or table used by this function must
- be optional: if the application does not supply it, the square
- root function must still compute square roots.)
-
-These requirements apply to the modified work as a whole. If
-identifiable sections of that work are not derived from the Library,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works. But when you
-distribute the same sections as part of a whole which is a work based
-on the Library, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote
-it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Library.
-
-In addition, mere aggregation of another work not based on the Library
-with the Library (or with a work based on the Library) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
-
- 3. You may opt to apply the terms of the ordinary GNU General Public
-License instead of this License to a given copy of the Library. To do
-this, you must alter all the notices that refer to this License, so
-that they refer to the ordinary GNU General Public License, version 2,
-instead of to this License. (If a newer version than version 2 of the
-ordinary GNU General Public License has appeared, then you can specify
-that version instead if you wish.) Do not make any other change in
-these notices.
-
- Once this change is made in a given copy, it is irreversible for
-that copy, so the ordinary GNU General Public License applies to all
-subsequent copies and derivative works made from that copy.
-
- This option is useful when you wish to copy part of the code of
-the Library into a program that is not a library.
-
- 4. You may copy and distribute the Library (or a portion or
-derivative of it, under Section 2) in object code or executable form
-under the terms of Sections 1 and 2 above provided that you accompany
-it with the complete corresponding machine-readable source code, which
-must be distributed under the terms of Sections 1 and 2 above on a
-medium customarily used for software interchange.
-
- If distribution of object code is made by offering access to copy
-from a designated place, then offering equivalent access to copy the
-source code from the same place satisfies the requirement to
-distribute the source code, even though third parties are not
-compelled to copy the source along with the object code.
-
- 5. A program that contains no derivative of any portion of the
-Library, but is designed to work with the Library by being compiled or
-linked with it, is called a "work that uses the Library". Such a
-work, in isolation, is not a derivative work of the Library, and
-therefore falls outside the scope of this License.
-
- However, linking a "work that uses the Library" with the Library
-creates an executable that is a derivative of the Library (because it
-contains portions of the Library), rather than a "work that uses the
-library". The executable is therefore covered by this License.
-Section 6 states terms for distribution of such executables.
-
- When a "work that uses the Library" uses material from a header file
-that is part of the Library, the object code for the work may be a
-derivative work of the Library even though the source code is not.
-Whether this is true is especially significant if the work can be
-linked without the Library, or if the work is itself a library. The
-threshold for this to be true is not precisely defined by law.
-
- If such an object file uses only numerical parameters, data
-structure layouts and accessors, and small macros and small inline
-functions (ten lines or less in length), then the use of the object
-file is unrestricted, regardless of whether it is legally a derivative
-work. (Executables containing this object code plus portions of the
-Library will still fall under Section 6.)
-
- Otherwise, if the work is a derivative of the Library, you may
-distribute the object code for the work under the terms of Section 6.
-Any executables containing that work also fall under Section 6,
-whether or not they are linked directly with the Library itself.
-
- 6. As an exception to the Sections above, you may also combine or
-link a "work that uses the Library" with the Library to produce a
-work containing portions of the Library, and distribute that work
-under terms of your choice, provided that the terms permit
-modification of the work for the customer's own use and reverse
-engineering for debugging such modifications.
-
- You must give prominent notice with each copy of the work that the
-Library is used in it and that the Library and its use are covered by
-this License. You must supply a copy of this License. If the work
-during execution displays copyright notices, you must include the
-copyright notice for the Library among them, as well as a reference
-directing the user to the copy of this License. Also, you must do one
-of these things:
-
- a) Accompany the work with the complete corresponding
- machine-readable source code for the Library including whatever
- changes were used in the work (which must be distributed under
- Sections 1 and 2 above); and, if the work is an executable linked
- with the Library, with the complete machine-readable "work that
- uses the Library", as object code and/or source code, so that the
- user can modify the Library and then relink to produce a modified
- executable containing the modified Library. (It is understood
- that the user who changes the contents of definitions files in the
- Library will not necessarily be able to recompile the application
- to use the modified definitions.)
-
- b) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (1) uses at run time a
- copy of the library already present on the user's computer system,
- rather than copying library functions into the executable, and (2)
- will operate properly with a modified version of the library, if
- the user installs one, as long as the modified version is
- interface-compatible with the version that the work was made with.
-
- c) Accompany the work with a written offer, valid for at
- least three years, to give the same user the materials
- specified in Subsection 6a, above, for a charge no more
- than the cost of performing this distribution.
-
- d) If distribution of the work is made by offering access to copy
- from a designated place, offer equivalent access to copy the above
- specified materials from the same place.
-
- e) Verify that the user has already received a copy of these
- materials or that you have already sent this user a copy.
-
- For an executable, the required form of the "work that uses the
-Library" must include any data and utility programs needed for
-reproducing the executable from it. However, as a special exception,
-the materials to be distributed need not include anything that is
-normally distributed (in either source or binary form) with the major
-components (compiler, kernel, and so on) of the operating system on
-which the executable runs, unless that component itself accompanies
-the executable.
-
- It may happen that this requirement contradicts the license
-restrictions of other proprietary libraries that do not normally
-accompany the operating system. Such a contradiction means you cannot
-use both them and the Library together in an executable that you
-distribute.
-
- 7. You may place library facilities that are a work based on the
-Library side-by-side in a single library together with other library
-facilities not covered by this License, and distribute such a combined
-library, provided that the separate distribution of the work based on
-the Library and of the other library facilities is otherwise
-permitted, and provided that you do these two things:
-
- a) Accompany the combined library with a copy of the same work
- based on the Library, uncombined with any other library
- facilities. This must be distributed under the terms of the
- Sections above.
-
- b) Give prominent notice with the combined library of the fact
- that part of it is a work based on the Library, and explaining
- where to find the accompanying uncombined form of the same work.
-
- 8. You may not copy, modify, sublicense, link with, or distribute
-the Library except as expressly provided under this License. Any
-attempt otherwise to copy, modify, sublicense, link with, or
-distribute the Library is void, and will automatically terminate your
-rights under this License. However, parties who have received copies,
-or rights, from you under this License will not have their licenses
-terminated so long as such parties remain in full compliance.
-
- 9. You are not required to accept this License, since you have not
-signed it. However, nothing else grants you permission to modify or
-distribute the Library or its derivative works. These actions are
-prohibited by law if you do not accept this License. Therefore, by
-modifying or distributing the Library (or any work based on the
-Library), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Library or works based on it.
-
- 10. Each time you redistribute the Library (or any work based on the
-Library), the recipient automatically receives a license from the
-original licensor to copy, distribute, link with or modify the Library
-subject to these terms and conditions. You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties with
-this License.
-
- 11. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Library at all. For example, if a patent
-license would not permit royalty-free redistribution of the Library by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Library.
-
-If any portion of this section is held invalid or unenforceable under any
-particular circumstance, the balance of the section is intended to apply,
-and the section as a whole is intended to apply in other circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system which is
-implemented by public license practices. Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
-
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-
- 12. If the distribution and/or use of the Library is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Library under this License may add
-an explicit geographical distribution limitation excluding those countries,
-so that distribution is permitted only in or among countries not thus
-excluded. In such case, this License incorporates the limitation as if
-written in the body of this License.
-
- 13. The Free Software Foundation may publish revised and/or new
-versions of the Lesser General Public License from time to time.
-Such new versions will be similar in spirit to the present version,
-but may differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Library
-specifies a version number of this License which applies to it and
-"any later version", you have the option of following the terms and
-conditions either of that version or of any later version published by
-the Free Software Foundation. If the Library does not specify a
-license version number, you may choose any version ever published by
-the Free Software Foundation.
-
- 14. If you wish to incorporate parts of the Library into other free
-programs whose distribution conditions are incompatible with these,
-write to the author to ask for permission. For software which is
-copyrighted by the Free Software Foundation, write to the Free
-Software Foundation; we sometimes make exceptions for this. Our
-decision will be guided by the two goals of preserving the free status
-of all derivatives of our free software and of promoting the sharing
-and reuse of software generally.
-
- NO WARRANTY
-
- 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
-WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
-EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
-OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
-KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
-LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
-THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
-WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
-AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
-FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
-CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
-LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
-RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
-FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
-SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGES.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Libraries
-
- If you develop a new library, and you want it to be of the greatest
-possible use to the public, we recommend making it free software that
-everyone can redistribute and change. You can do so by permitting
-redistribution under these terms (or, alternatively, under the terms of the
-ordinary General Public License).
-
- To apply these terms, attach the following notices to the library. It is
-safest to attach them to the start of each source file to most effectively
-convey the exclusion of warranty; and each file should have at least the
-"copyright" line and a pointer to where the full notice is found.
-
- <one line to give the library's name and a brief idea of what it does.>
- Copyright (C) <year> <name of author>
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this library; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the library, if
-necessary. Here is a sample; alter the names:
-
- Yoyodyne, Inc., hereby disclaims all copyright interest in the
- library `Frob' (a library for tweaking knobs) written by James Random Hacker.
-
- <signature of Ty Coon>, 1 April 1990
- Ty Coon, President of Vice
-
-That's all there is to it!
-
-
diff --git a/scripts/external_libs/nose-1.3.4/nose/__init__.py b/scripts/external_libs/nose-1.3.4/python2/nose/__init__.py
index 8ab010bf..8ab010bf 100755
--- a/scripts/external_libs/nose-1.3.4/nose/__init__.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/__init__.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/__main__.py b/scripts/external_libs/nose-1.3.4/python2/nose/__main__.py
index b402d9df..b402d9df 100755
--- a/scripts/external_libs/nose-1.3.4/nose/__main__.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/__main__.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/case.py b/scripts/external_libs/nose-1.3.4/python2/nose/case.py
index cffa4ab4..cffa4ab4 100755
--- a/scripts/external_libs/nose-1.3.4/nose/case.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/case.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/commands.py b/scripts/external_libs/nose-1.3.4/python2/nose/commands.py
index ef0e9cae..ef0e9cae 100755
--- a/scripts/external_libs/nose-1.3.4/nose/commands.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/commands.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/config.py b/scripts/external_libs/nose-1.3.4/python2/nose/config.py
index 4214c2d6..4214c2d6 100755
--- a/scripts/external_libs/nose-1.3.4/nose/config.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/config.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/core.py b/scripts/external_libs/nose-1.3.4/python2/nose/core.py
index 49e7939b..49e7939b 100755
--- a/scripts/external_libs/nose-1.3.4/nose/core.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/core.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/exc.py b/scripts/external_libs/nose-1.3.4/python2/nose/exc.py
index 8b780db0..8b780db0 100755
--- a/scripts/external_libs/nose-1.3.4/nose/exc.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/exc.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/ext/__init__.py b/scripts/external_libs/nose-1.3.4/python2/nose/ext/__init__.py
index 5fd1516a..5fd1516a 100755
--- a/scripts/external_libs/nose-1.3.4/nose/ext/__init__.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/ext/__init__.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/ext/dtcompat.py b/scripts/external_libs/nose-1.3.4/python2/nose/ext/dtcompat.py
index 332cf08c..332cf08c 100755
--- a/scripts/external_libs/nose-1.3.4/nose/ext/dtcompat.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/ext/dtcompat.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/failure.py b/scripts/external_libs/nose-1.3.4/python2/nose/failure.py
index c5fabfda..c5fabfda 100755
--- a/scripts/external_libs/nose-1.3.4/nose/failure.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/failure.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/importer.py b/scripts/external_libs/nose-1.3.4/python2/nose/importer.py
index e677658c..e677658c 100755
--- a/scripts/external_libs/nose-1.3.4/nose/importer.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/importer.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/inspector.py b/scripts/external_libs/nose-1.3.4/python2/nose/inspector.py
index a6c4a3e3..a6c4a3e3 100755
--- a/scripts/external_libs/nose-1.3.4/nose/inspector.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/inspector.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/loader.py b/scripts/external_libs/nose-1.3.4/python2/nose/loader.py
index 966b6dc7..966b6dc7 100755
--- a/scripts/external_libs/nose-1.3.4/nose/loader.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/loader.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/__init__.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/__init__.py
index 08ee8f32..08ee8f32 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/__init__.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/__init__.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/allmodules.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/allmodules.py
index 1ccd7773..1ccd7773 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/allmodules.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/allmodules.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/attrib.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/attrib.py
index 3d4422a2..3d4422a2 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/attrib.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/attrib.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/base.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/base.py
index f09beb69..f09beb69 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/base.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/base.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/builtin.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/builtin.py
index 4fcc0018..4fcc0018 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/builtin.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/builtin.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/capture.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/capture.py
index fa4e5dca..fa4e5dca 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/capture.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/capture.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/collect.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/collect.py
index 6f9f0faa..6f9f0faa 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/collect.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/collect.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/cover.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/cover.py
index 551f3320..551f3320 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/cover.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/cover.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/debug.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/debug.py
index 78243e60..78243e60 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/debug.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/debug.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/deprecated.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/deprecated.py
index 461a26be..461a26be 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/deprecated.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/deprecated.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/doctests.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/doctests.py
index 5ef65799..5ef65799 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/doctests.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/doctests.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/errorclass.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/errorclass.py
index d1540e00..d1540e00 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/errorclass.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/errorclass.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/failuredetail.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/failuredetail.py
index 6462865d..6462865d 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/failuredetail.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/failuredetail.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/isolate.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/isolate.py
index 13235dfb..13235dfb 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/isolate.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/isolate.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/logcapture.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/logcapture.py
index 4c9a79f6..4c9a79f6 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/logcapture.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/logcapture.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/manager.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/manager.py
index 4d2ed22b..4d2ed22b 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/manager.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/manager.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/multiprocess.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/multiprocess.py
index 2cae744a..2cae744a 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/multiprocess.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/multiprocess.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/plugintest.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/plugintest.py
index 76d0d2c4..76d0d2c4 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/plugintest.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/plugintest.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/prof.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/prof.py
index 4d304a93..4d304a93 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/prof.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/prof.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/skip.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/skip.py
index 9d1ac8f6..9d1ac8f6 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/skip.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/skip.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/testid.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/testid.py
index 49fff9b1..49fff9b1 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/testid.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/testid.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/xunit.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/xunit.py
index e1ec0e1d..e1ec0e1d 100755
--- a/scripts/external_libs/nose-1.3.4/nose/plugins/xunit.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/xunit.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/proxy.py b/scripts/external_libs/nose-1.3.4/python2/nose/proxy.py
index c2676cb1..c2676cb1 100755
--- a/scripts/external_libs/nose-1.3.4/nose/proxy.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/proxy.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/pyversion.py b/scripts/external_libs/nose-1.3.4/python2/nose/pyversion.py
index 8b566141..8b566141 100755
--- a/scripts/external_libs/nose-1.3.4/nose/pyversion.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/pyversion.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/result.py b/scripts/external_libs/nose-1.3.4/python2/nose/result.py
index f974a14a..f974a14a 100755
--- a/scripts/external_libs/nose-1.3.4/nose/result.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/result.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/selector.py b/scripts/external_libs/nose-1.3.4/python2/nose/selector.py
index c4a006a8..c4a006a8 100755
--- a/scripts/external_libs/nose-1.3.4/nose/selector.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/selector.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/sphinx/__init__.py b/scripts/external_libs/nose-1.3.4/python2/nose/sphinx/__init__.py
index 2ae28399..2ae28399 100755
--- a/scripts/external_libs/nose-1.3.4/nose/sphinx/__init__.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/sphinx/__init__.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/sphinx/pluginopts.py b/scripts/external_libs/nose-1.3.4/python2/nose/sphinx/pluginopts.py
index d2b284ab..d2b284ab 100755
--- a/scripts/external_libs/nose-1.3.4/nose/sphinx/pluginopts.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/sphinx/pluginopts.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/suite.py b/scripts/external_libs/nose-1.3.4/python2/nose/suite.py
index a831105e..a831105e 100755
--- a/scripts/external_libs/nose-1.3.4/nose/suite.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/suite.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/tools/__init__.py b/scripts/external_libs/nose-1.3.4/python2/nose/tools/__init__.py
index 74dab16a..74dab16a 100755
--- a/scripts/external_libs/nose-1.3.4/nose/tools/__init__.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/tools/__init__.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/tools/nontrivial.py b/scripts/external_libs/nose-1.3.4/python2/nose/tools/nontrivial.py
index 28397324..28397324 100755
--- a/scripts/external_libs/nose-1.3.4/nose/tools/nontrivial.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/tools/nontrivial.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/tools/trivial.py b/scripts/external_libs/nose-1.3.4/python2/nose/tools/trivial.py
index cf83efed..cf83efed 100755
--- a/scripts/external_libs/nose-1.3.4/nose/tools/trivial.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/tools/trivial.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/twistedtools.py b/scripts/external_libs/nose-1.3.4/python2/nose/twistedtools.py
index 8d9c6ffe..8d9c6ffe 100755
--- a/scripts/external_libs/nose-1.3.4/nose/twistedtools.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/twistedtools.py
diff --git a/scripts/external_libs/nose-1.3.4/nose/usage.txt b/scripts/external_libs/nose-1.3.4/python2/nose/usage.txt
index bc96894a..bc96894a 100755
--- a/scripts/external_libs/nose-1.3.4/nose/usage.txt
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/usage.txt
diff --git a/scripts/external_libs/nose-1.3.4/nose/util.py b/scripts/external_libs/nose-1.3.4/python2/nose/util.py
index e6f735e0..e6f735e0 100755
--- a/scripts/external_libs/nose-1.3.4/nose/util.py
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/util.py
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/__init__.py b/scripts/external_libs/nose-1.3.4/python3/nose/__init__.py
new file mode 100644
index 00000000..8ab010bf
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/__init__.py
@@ -0,0 +1,15 @@
+from nose.core import collector, main, run, run_exit, runmodule
+# backwards compatibility
+from nose.exc import SkipTest, DeprecatedTest
+from nose.tools import with_setup
+
+__author__ = 'Jason Pellerin'
+__versioninfo__ = (1, 3, 4)
+__version__ = '.'.join(map(str, __versioninfo__))
+
+__all__ = [
+ 'main', 'run', 'run_exit', 'runmodule', 'with_setup',
+ 'SkipTest', 'DeprecatedTest', 'collector'
+ ]
+
+
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/__main__.py b/scripts/external_libs/nose-1.3.4/python3/nose/__main__.py
new file mode 100644
index 00000000..b402d9df
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/__main__.py
@@ -0,0 +1,8 @@
+import sys
+
+from nose.core import run_exit
+
+if sys.argv[0].endswith('__main__.py'):
+ sys.argv[0] = '%s -m nose' % sys.executable
+
+run_exit()
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/case.py b/scripts/external_libs/nose-1.3.4/python3/nose/case.py
new file mode 100644
index 00000000..a60a16c4
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/case.py
@@ -0,0 +1,398 @@
+"""nose unittest.TestCase subclasses. It is not necessary to subclass these
+classes when writing tests; they are used internally by nose.loader.TestLoader
+to create test cases from test functions and methods in test classes.
+"""
+import logging
+import sys
+import unittest
+from inspect import isfunction
+from nose.config import Config
+from nose.failure import Failure # for backwards compatibility
+from nose.util import resolve_name, test_address, try_run
+import collections
+
+log = logging.getLogger(__name__)
+
+
+__all__ = ['Test']
+
+
+class Test(unittest.TestCase):
+ """The universal test case wrapper.
+
+ When a plugin sees a test, it will always see an instance of this
+ class. To access the actual test case that will be run, access the
+ test property of the nose.case.Test instance.
+ """
+ __test__ = False # do not collect
+ def __init__(self, test, config=None, resultProxy=None):
+ # sanity check
+ if not isinstance(test, collections.Callable):
+ raise TypeError("nose.case.Test called with argument %r that "
+ "is not callable. A callable is required."
+ % test)
+ self.test = test
+ if config is None:
+ config = Config()
+ self.config = config
+ self.tbinfo = None
+ self.capturedOutput = None
+ self.resultProxy = resultProxy
+ self.plugins = config.plugins
+ self.passed = None
+ unittest.TestCase.__init__(self)
+
+ def __call__(self, *arg, **kwarg):
+ return self.run(*arg, **kwarg)
+
+ def __str__(self):
+ name = self.plugins.testName(self)
+ if name is not None:
+ return name
+ return str(self.test)
+
+ def __repr__(self):
+ return "Test(%r)" % self.test
+
+ def afterTest(self, result):
+ """Called after test is complete (after result.stopTest)
+ """
+ try:
+ afterTest = result.afterTest
+ except AttributeError:
+ pass
+ else:
+ afterTest(self.test)
+
+ def beforeTest(self, result):
+ """Called before test is run (before result.startTest)
+ """
+ try:
+ beforeTest = result.beforeTest
+ except AttributeError:
+ pass
+ else:
+ beforeTest(self.test)
+
+ def exc_info(self):
+ """Extract exception info.
+ """
+ exc, exv, tb = sys.exc_info()
+ return (exc, exv, tb)
+
+ def id(self):
+ """Get a short(er) description of the test
+ """
+ return self.test.id()
+
+ def address(self):
+ """Return a round-trip name for this test, a name that can be
+ fed back as input to loadTestByName and (assuming the same
+ plugin configuration) result in the loading of this test.
+ """
+ if hasattr(self.test, 'address'):
+ return self.test.address()
+ else:
+ # not a nose case
+ return test_address(self.test)
+
+ def _context(self):
+ try:
+ return self.test.context
+ except AttributeError:
+ pass
+ try:
+ return self.test.__class__
+ except AttributeError:
+ pass
+ try:
+ return resolve_name(self.test.__module__)
+ except AttributeError:
+ pass
+ return None
+ context = property(_context, None, None,
+ """Get the context object of this test (if any).""")
+
+ def run(self, result):
+ """Modified run for the test wrapper.
+
+ From here we don't call result.startTest or stopTest or
+ addSuccess. The wrapper calls addError/addFailure only if its
+ own setup or teardown fails, or running the wrapped test fails
+ (eg, if the wrapped "test" is not callable).
+
+ Two additional methods are called, beforeTest and
+ afterTest. These give plugins a chance to modify the wrapped
+ test before it is called and do cleanup after it is
+ called. They are called unconditionally.
+ """
+ if self.resultProxy:
+ result = self.resultProxy(result, self)
+ try:
+ try:
+ self.beforeTest(result)
+ self.runTest(result)
+ except KeyboardInterrupt:
+ raise
+ except:
+ err = sys.exc_info()
+ result.addError(self, err)
+ finally:
+ self.afterTest(result)
+
+ def runTest(self, result):
+ """Run the test. Plugins may alter the test by returning a
+ value from prepareTestCase. The value must be callable and
+ must accept one argument, the result instance.
+ """
+ test = self.test
+ plug_test = self.config.plugins.prepareTestCase(self)
+ if plug_test is not None:
+ test = plug_test
+ test(result)
+
+ def shortDescription(self):
+ desc = self.plugins.describeTest(self)
+ if desc is not None:
+ return desc
+ # work around bug in unittest.TestCase.shortDescription
+ # with multiline docstrings.
+ test = self.test
+ try:
+ test._testMethodDoc = test._testMethodDoc.strip()# 2.5
+ except AttributeError:
+ try:
+ # 2.4 and earlier
+ test._TestCase__testMethodDoc = \
+ test._TestCase__testMethodDoc.strip()
+ except AttributeError:
+ pass
+ # 2.7 compat: shortDescription() always returns something
+ # which is a change from 2.6 and below, and breaks the
+ # testName plugin call.
+ try:
+ desc = self.test.shortDescription()
+ except Exception:
+ # this is probably caused by a problem in test.__str__() and is
+ # only triggered by python 3.1's unittest!
+ pass
+ try:
+ if desc == str(self.test):
+ return
+ except Exception:
+ # If str() triggers an exception then ignore it.
+ # see issue 422
+ pass
+ return desc
+
+
+class TestBase(unittest.TestCase):
+ """Common functionality for FunctionTestCase and MethodTestCase.
+ """
+ __test__ = False # do not collect
+
+ def id(self):
+ return str(self)
+
+ def runTest(self):
+ self.test(*self.arg)
+
+ def shortDescription(self):
+ if hasattr(self.test, 'description'):
+ return self.test.description
+ func, arg = self._descriptors()
+ doc = getattr(func, '__doc__', None)
+ if not doc:
+ doc = str(self)
+ return doc.strip().split("\n")[0].strip()
+
+
+class FunctionTestCase(TestBase):
+ """TestCase wrapper for test functions.
+
+ Don't use this class directly; it is used internally in nose to
+ create test cases for test functions.
+ """
+ __test__ = False # do not collect
+
+ def __init__(self, test, setUp=None, tearDown=None, arg=tuple(),
+ descriptor=None):
+ """Initialize the MethodTestCase.
+
+ Required argument:
+
+ * test -- the test function to call.
+
+ Optional arguments:
+
+ * setUp -- function to run at setup.
+
+ * tearDown -- function to run at teardown.
+
+ * arg -- arguments to pass to the test function. This is to support
+ generator functions that yield arguments.
+
+ * descriptor -- the function, other than the test, that should be used
+ to construct the test name. This is to support generator functions.
+ """
+
+ self.test = test
+ self.setUpFunc = setUp
+ self.tearDownFunc = tearDown
+ self.arg = arg
+ self.descriptor = descriptor
+ TestBase.__init__(self)
+
+ def address(self):
+ """Return a round-trip name for this test, a name that can be
+ fed back as input to loadTestByName and (assuming the same
+ plugin configuration) result in the loading of this test.
+ """
+ if self.descriptor is not None:
+ return test_address(self.descriptor)
+ else:
+ return test_address(self.test)
+
+ def _context(self):
+ return resolve_name(self.test.__module__)
+ context = property(_context, None, None,
+ """Get context (module) of this test""")
+
+ def setUp(self):
+ """Run any setup function attached to the test function
+ """
+ if self.setUpFunc:
+ self.setUpFunc()
+ else:
+ names = ('setup', 'setUp', 'setUpFunc')
+ try_run(self.test, names)
+
+ def tearDown(self):
+ """Run any teardown function attached to the test function
+ """
+ if self.tearDownFunc:
+ self.tearDownFunc()
+ else:
+ names = ('teardown', 'tearDown', 'tearDownFunc')
+ try_run(self.test, names)
+
+ def __str__(self):
+ func, arg = self._descriptors()
+ if hasattr(func, 'compat_func_name'):
+ name = func.compat_func_name
+ else:
+ name = func.__name__
+ name = "%s.%s" % (func.__module__, name)
+ if arg:
+ name = "%s%s" % (name, arg)
+ # FIXME need to include the full dir path to disambiguate
+ # in cases where test module of the same name was seen in
+ # another directory (old fromDirectory)
+ return name
+ __repr__ = __str__
+
+ def _descriptors(self):
+ """Get the descriptors of the test function: the function and
+ arguments that will be used to construct the test name. In
+ most cases, this is the function itself and no arguments. For
+ tests generated by generator functions, the original
+ (generator) function and args passed to the generated function
+ are returned.
+ """
+ if self.descriptor:
+ return self.descriptor, self.arg
+ else:
+ return self.test, self.arg
+
+
+class MethodTestCase(TestBase):
+ """Test case wrapper for test methods.
+
+ Don't use this class directly; it is used internally in nose to
+ create test cases for test methods.
+ """
+ __test__ = False # do not collect
+
+ def __init__(self, method, test=None, arg=tuple(), descriptor=None):
+ """Initialize the MethodTestCase.
+
+ Required argument:
+
+ * method -- the method to call, may be bound or unbound. In either
+ case, a new instance of the method's class will be instantiated to
+ make the call. Note: In Python 3.x, if using an unbound method, you
+ must wrap it using pyversion.unbound_method.
+
+ Optional arguments:
+
+ * test -- the test function to call. If this is passed, it will be
+ called instead of getting a new bound method of the same name as the
+ desired method from the test instance. This is to support generator
+ methods that yield inline functions.
+
+ * arg -- arguments to pass to the test function. This is to support
+ generator methods that yield arguments.
+
+ * descriptor -- the function, other than the test, that should be used
+ to construct the test name. This is to support generator methods.
+ """
+ self.method = method
+ self.test = test
+ self.arg = arg
+ self.descriptor = descriptor
+ if isfunction(method):
+ raise ValueError("Unbound methods must be wrapped using pyversion.unbound_method before passing to MethodTestCase")
+ self.cls = method.__self__.__class__
+ self.inst = self.cls()
+ if self.test is None:
+ method_name = self.method.__name__
+ self.test = getattr(self.inst, method_name)
+ TestBase.__init__(self)
+
+ def __str__(self):
+ func, arg = self._descriptors()
+ if hasattr(func, 'compat_func_name'):
+ name = func.compat_func_name
+ else:
+ name = func.__name__
+ name = "%s.%s.%s" % (self.cls.__module__,
+ self.cls.__name__,
+ name)
+ if arg:
+ name = "%s%s" % (name, arg)
+ return name
+ __repr__ = __str__
+
+ def address(self):
+ """Return a round-trip name for this test, a name that can be
+ fed back as input to loadTestByName and (assuming the same
+ plugin configuration) result in the loading of this test.
+ """
+ if self.descriptor is not None:
+ return test_address(self.descriptor)
+ else:
+ return test_address(self.method)
+
+ def _context(self):
+ return self.cls
+ context = property(_context, None, None,
+ """Get context (class) of this test""")
+
+ def setUp(self):
+ try_run(self.inst, ('setup', 'setUp'))
+
+ def tearDown(self):
+ try_run(self.inst, ('teardown', 'tearDown'))
+
+ def _descriptors(self):
+ """Get the descriptors of the test method: the method and
+ arguments that will be used to construct the test name. In
+ most cases, this is the method itself and no arguments. For
+ tests generated by generator methods, the original
+ (generator) method and args passed to the generated method
+ or function are returned.
+ """
+ if self.descriptor:
+ return self.descriptor, self.arg
+ else:
+ return self.method, self.arg
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/commands.py b/scripts/external_libs/nose-1.3.4/python3/nose/commands.py
new file mode 100644
index 00000000..db9fe318
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/commands.py
@@ -0,0 +1,172 @@
+"""
+nosetests setuptools command
+----------------------------
+
+The easiest way to run tests with nose is to use the `nosetests` setuptools
+command::
+
+ python setup.py nosetests
+
+This command has one *major* benefit over the standard `test` command: *all
+nose plugins are supported*.
+
+To configure the `nosetests` command, add a [nosetests] section to your
+setup.cfg. The [nosetests] section can contain any command line arguments that
+nosetests supports. The differences between issuing an option on the command
+line and adding it to setup.cfg are:
+
+* In setup.cfg, the -- prefix must be excluded
+* In setup.cfg, command line flags that take no arguments must be given an
+ argument flag (1, T or TRUE for active, 0, F or FALSE for inactive)
+
+Here's an example [nosetests] setup.cfg section::
+
+ [nosetests]
+ verbosity=1
+ detailed-errors=1
+ with-coverage=1
+ cover-package=nose
+ debug=nose.loader
+ pdb=1
+ pdb-failures=1
+
+If you commonly run nosetests with a large number of options, using
+the nosetests setuptools command and configuring with setup.cfg can
+make running your tests much less tedious. (Note that the same options
+and format supported in setup.cfg are supported in all other config
+files, and the nosetests script will also load config files.)
+
+Another reason to run tests with the command is that the command will
+install packages listed in your `tests_require`, as well as doing a
+complete build of your package before running tests. For packages with
+dependencies or that build C extensions, using the setuptools command
+can be more convenient than building by hand and running the nosetests
+script.
+
+Bootstrapping
+-------------
+
+If you are distributing your project and want users to be able to run tests
+without having to install nose themselves, add nose to the setup_requires
+section of your setup()::
+
+ setup(
+ # ...
+ setup_requires=['nose>=1.0']
+ )
+
+This will direct setuptools to download and activate nose during the setup
+process, making the ``nosetests`` command available.
+
+"""
+try:
+ from setuptools import Command
+except ImportError:
+ Command = nosetests = None
+else:
+ from nose.config import Config, option_blacklist, user_config_files, \
+ flag, _bool
+ from nose.core import TestProgram
+ from nose.plugins import DefaultPluginManager
+
+
+ def get_user_options(parser):
+ """convert an optparse option list into a distutils option tuple list"""
+ opt_list = []
+ for opt in parser.option_list:
+ if opt._long_opts[0][2:] in option_blacklist:
+ continue
+ long_name = opt._long_opts[0][2:]
+ if opt.action not in ('store_true', 'store_false'):
+ long_name = long_name + "="
+ short_name = None
+ if opt._short_opts:
+ short_name = opt._short_opts[0][1:]
+ opt_list.append((long_name, short_name, opt.help or ""))
+ return opt_list
+
+
+ class nosetests(Command):
+ description = "Run unit tests using nosetests"
+ __config = Config(files=user_config_files(),
+ plugins=DefaultPluginManager())
+ __parser = __config.getParser()
+ user_options = get_user_options(__parser)
+
+ def initialize_options(self):
+ """create the member variables, but change hyphens to
+ underscores
+ """
+
+ self.option_to_cmds = {}
+ for opt in self.__parser.option_list:
+ cmd_name = opt._long_opts[0][2:]
+ option_name = cmd_name.replace('-', '_')
+ self.option_to_cmds[option_name] = cmd_name
+ setattr(self, option_name, None)
+ self.attr = None
+
+ def finalize_options(self):
+ """nothing to do here"""
+ pass
+
+ def run(self):
+ """ensure tests are capable of being run, then
+ run nose.main with a reconstructed argument list"""
+ if getattr(self.distribution, 'use_2to3', False):
+ # If we run 2to3 we can not do this inplace:
+
+ # Ensure metadata is up-to-date
+ build_py = self.get_finalized_command('build_py')
+ build_py.inplace = 0
+ build_py.run()
+ bpy_cmd = self.get_finalized_command("build_py")
+ build_path = bpy_cmd.build_lib
+
+ # Build extensions
+ egg_info = self.get_finalized_command('egg_info')
+ egg_info.egg_base = build_path
+ egg_info.run()
+
+ build_ext = self.get_finalized_command('build_ext')
+ build_ext.inplace = 0
+ build_ext.run()
+ else:
+ self.run_command('egg_info')
+
+ # Build extensions in-place
+ build_ext = self.get_finalized_command('build_ext')
+ build_ext.inplace = 1
+ build_ext.run()
+
+ if self.distribution.install_requires:
+ self.distribution.fetch_build_eggs(
+ self.distribution.install_requires)
+ if self.distribution.tests_require:
+ self.distribution.fetch_build_eggs(
+ self.distribution.tests_require)
+
+ ei_cmd = self.get_finalized_command("egg_info")
+ argv = ['nosetests', '--where', ei_cmd.egg_base]
+ for (option_name, cmd_name) in list(self.option_to_cmds.items()):
+ if option_name in option_blacklist:
+ continue
+ value = getattr(self, option_name)
+ if value is not None:
+ argv.extend(
+ self.cfgToArg(option_name.replace('_', '-'), value))
+ TestProgram(argv=argv, config=self.__config)
+
+ def cfgToArg(self, optname, value):
+ argv = []
+ long_optname = '--' + optname
+ opt = self.__parser.get_option(long_optname)
+ if opt.action in ('store_true', 'store_false'):
+ if not flag(value):
+ raise ValueError("Invalid value '%s' for '%s'" % (
+ value, optname))
+ if _bool(value):
+ argv.append(long_optname)
+ else:
+ argv.extend([long_optname, value])
+ return argv
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/config.py b/scripts/external_libs/nose-1.3.4/python3/nose/config.py
new file mode 100644
index 00000000..aaff17e8
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/config.py
@@ -0,0 +1,661 @@
+import logging
+import optparse
+import os
+import re
+import sys
+import configparser
+from optparse import OptionParser
+from nose.util import absdir, tolist
+from nose.plugins.manager import NoPlugins
+from warnings import warn, filterwarnings
+
+log = logging.getLogger(__name__)
+
+# not allowed in config files
+option_blacklist = ['help', 'verbose']
+
+config_files = [
+ # Linux users will prefer this
+ "~/.noserc",
+ # Windows users will prefer this
+ "~/nose.cfg"
+ ]
+
+# platforms on which the exe check defaults to off
+# Windows and IronPython
+exe_allowed_platforms = ('win32', 'cli')
+
+filterwarnings("always", category=DeprecationWarning,
+ module=r'(.*\.)?nose\.config')
+
+class NoSuchOptionError(Exception):
+ def __init__(self, name):
+ Exception.__init__(self, name)
+ self.name = name
+
+
+class ConfigError(Exception):
+ pass
+
+
+class ConfiguredDefaultsOptionParser(object):
+ """
+ Handler for options from commandline and config files.
+ """
+ def __init__(self, parser, config_section, error=None, file_error=None):
+ self._parser = parser
+ self._config_section = config_section
+ if error is None:
+ error = self._parser.error
+ self._error = error
+ if file_error is None:
+ file_error = lambda msg, **kw: error(msg)
+ self._file_error = file_error
+
+ def _configTuples(self, cfg, filename):
+ config = []
+ if self._config_section in cfg.sections():
+ for name, value in cfg.items(self._config_section):
+ config.append((name, value, filename))
+ return config
+
+ def _readFromFilenames(self, filenames):
+ config = []
+ for filename in filenames:
+ cfg = configparser.RawConfigParser()
+ try:
+ cfg.read(filename)
+ except configparser.Error as exc:
+ raise ConfigError("Error reading config file %r: %s" %
+ (filename, str(exc)))
+ config.extend(self._configTuples(cfg, filename))
+ return config
+
+ def _readFromFileObject(self, fh):
+ cfg = configparser.RawConfigParser()
+ try:
+ filename = fh.name
+ except AttributeError:
+ filename = '<???>'
+ try:
+ cfg.readfp(fh)
+ except configparser.Error as exc:
+ raise ConfigError("Error reading config file %r: %s" %
+ (filename, str(exc)))
+ return self._configTuples(cfg, filename)
+
+ def _readConfiguration(self, config_files):
+ try:
+ config_files.readline
+ except AttributeError:
+ filename_or_filenames = config_files
+ if isinstance(filename_or_filenames, str):
+ filenames = [filename_or_filenames]
+ else:
+ filenames = filename_or_filenames
+ config = self._readFromFilenames(filenames)
+ else:
+ fh = config_files
+ config = self._readFromFileObject(fh)
+ return config
+
+ def _processConfigValue(self, name, value, values, parser):
+ opt_str = '--' + name
+ option = parser.get_option(opt_str)
+ if option is None:
+ raise NoSuchOptionError(name)
+ else:
+ option.process(opt_str, value, values, parser)
+
+ def _applyConfigurationToValues(self, parser, config, values):
+ for name, value, filename in config:
+ if name in option_blacklist:
+ continue
+ try:
+ self._processConfigValue(name, value, values, parser)
+ except NoSuchOptionError as exc:
+ self._file_error(
+ "Error reading config file %r: "
+ "no such option %r" % (filename, exc.name),
+ name=name, filename=filename)
+ except optparse.OptionValueError as exc:
+ msg = str(exc).replace('--' + name, repr(name), 1)
+ self._file_error("Error reading config file %r: "
+ "%s" % (filename, msg),
+ name=name, filename=filename)
+
+ def parseArgsAndConfigFiles(self, args, config_files):
+ values = self._parser.get_default_values()
+ try:
+ config = self._readConfiguration(config_files)
+ except ConfigError as exc:
+ self._error(str(exc))
+ else:
+ try:
+ self._applyConfigurationToValues(self._parser, config, values)
+ except ConfigError as exc:
+ self._error(str(exc))
+ return self._parser.parse_args(args, values)
+
+
+class Config(object):
+ """nose configuration.
+
+ Instances of Config are used throughout nose to configure
+ behavior, including plugin lists. Here are the default values for
+ all config keys::
+
+ self.env = env = kw.pop('env', {})
+ self.args = ()
+ self.testMatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
+ self.addPaths = not env.get('NOSE_NOPATH', False)
+ self.configSection = 'nosetests'
+ self.debug = env.get('NOSE_DEBUG')
+ self.debugLog = env.get('NOSE_DEBUG_LOG')
+ self.exclude = None
+ self.getTestCaseNamesCompat = False
+ self.includeExe = env.get('NOSE_INCLUDE_EXE',
+ sys.platform in exe_allowed_platforms)
+ self.ignoreFiles = (re.compile(r'^\.'),
+ re.compile(r'^_'),
+ re.compile(r'^setup\.py$')
+ )
+ self.include = None
+ self.loggingConfig = None
+ self.logStream = sys.stderr
+ self.options = NoOptions()
+ self.parser = None
+ self.plugins = NoPlugins()
+ self.srcDirs = ('lib', 'src')
+ self.runOnInit = True
+ self.stopOnError = env.get('NOSE_STOP', False)
+ self.stream = sys.stderr
+ self.testNames = ()
+ self.verbosity = int(env.get('NOSE_VERBOSE', 1))
+ self.where = ()
+ self.py3where = ()
+ self.workingDir = None
+ """
+
+ def __init__(self, **kw):
+ self.env = env = kw.pop('env', {})
+ self.args = ()
+ self.testMatchPat = env.get('NOSE_TESTMATCH',
+ r'(?:^|[\b_\.%s-])[Tt]est' % os.sep)
+ self.testMatch = re.compile(self.testMatchPat)
+ self.addPaths = not env.get('NOSE_NOPATH', False)
+ self.configSection = 'nosetests'
+ self.debug = env.get('NOSE_DEBUG')
+ self.debugLog = env.get('NOSE_DEBUG_LOG')
+ self.exclude = None
+ self.getTestCaseNamesCompat = False
+ self.includeExe = env.get('NOSE_INCLUDE_EXE',
+ sys.platform in exe_allowed_platforms)
+ self.ignoreFilesDefaultStrings = [r'^\.',
+ r'^_',
+ r'^setup\.py$',
+ ]
+ self.ignoreFiles = list(map(re.compile, self.ignoreFilesDefaultStrings))
+ self.include = None
+ self.loggingConfig = None
+ self.logStream = sys.stderr
+ self.options = NoOptions()
+ self.parser = None
+ self.plugins = NoPlugins()
+ self.srcDirs = ('lib', 'src')
+ self.runOnInit = True
+ self.stopOnError = env.get('NOSE_STOP', False)
+ self.stream = sys.stderr
+ self.testNames = []
+ self.verbosity = int(env.get('NOSE_VERBOSE', 1))
+ self.where = ()
+ self.py3where = ()
+ self.workingDir = os.getcwd()
+ self.traverseNamespace = False
+ self.firstPackageWins = False
+ self.parserClass = OptionParser
+ self.worker = False
+
+ self._default = self.__dict__.copy()
+ self.update(kw)
+ self._orig = self.__dict__.copy()
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ del state['stream']
+ del state['_orig']
+ del state['_default']
+ del state['env']
+ del state['logStream']
+ # FIXME remove plugins, have only plugin manager class
+ state['plugins'] = self.plugins.__class__
+ return state
+
+ def __setstate__(self, state):
+ plugincls = state.pop('plugins')
+ self.update(state)
+ self.worker = True
+ # FIXME won't work for static plugin lists
+ self.plugins = plugincls()
+ self.plugins.loadPlugins()
+ # needed so .can_configure gets set appropriately
+ dummy_parser = self.parserClass()
+ self.plugins.addOptions(dummy_parser, {})
+ self.plugins.configure(self.options, self)
+
+ def __repr__(self):
+ d = self.__dict__.copy()
+ # don't expose env, could include sensitive info
+ d['env'] = {}
+ keys = [ k for k in list(d.keys())
+ if not k.startswith('_') ]
+ keys.sort()
+ return "Config(%s)" % ', '.join([ '%s=%r' % (k, d[k])
+ for k in keys ])
+ __str__ = __repr__
+
+ def _parseArgs(self, argv, cfg_files):
+ def warn_sometimes(msg, name=None, filename=None):
+ if (hasattr(self.plugins, 'excludedOption') and
+ self.plugins.excludedOption(name)):
+ msg = ("Option %r in config file %r ignored: "
+ "excluded by runtime environment" %
+ (name, filename))
+ warn(msg, RuntimeWarning)
+ else:
+ raise ConfigError(msg)
+ parser = ConfiguredDefaultsOptionParser(
+ self.getParser(), self.configSection, file_error=warn_sometimes)
+ return parser.parseArgsAndConfigFiles(argv[1:], cfg_files)
+
+ def configure(self, argv=None, doc=None):
+ """Configure the nose running environment. Execute configure before
+ collecting tests with nose.TestCollector to enable output capture and
+ other features.
+ """
+ env = self.env
+ if argv is None:
+ argv = sys.argv
+
+ cfg_files = getattr(self, 'files', [])
+ options, args = self._parseArgs(argv, cfg_files)
+ # If -c --config has been specified on command line,
+ # load those config files and reparse
+ if getattr(options, 'files', []):
+ options, args = self._parseArgs(argv, options.files)
+
+ self.options = options
+ if args:
+ self.testNames = args
+ if options.testNames is not None:
+ self.testNames.extend(tolist(options.testNames))
+
+ if options.py3where is not None:
+ if sys.version_info >= (3,):
+ options.where = options.py3where
+
+ # `where` is an append action, so it can't have a default value
+ # in the parser, or that default will always be in the list
+ if not options.where:
+ options.where = env.get('NOSE_WHERE', None)
+
+ # include and exclude also
+ if not options.ignoreFiles:
+ options.ignoreFiles = env.get('NOSE_IGNORE_FILES', [])
+ if not options.include:
+ options.include = env.get('NOSE_INCLUDE', [])
+ if not options.exclude:
+ options.exclude = env.get('NOSE_EXCLUDE', [])
+
+ self.addPaths = options.addPaths
+ self.stopOnError = options.stopOnError
+ self.verbosity = options.verbosity
+ self.includeExe = options.includeExe
+ self.traverseNamespace = options.traverseNamespace
+ self.debug = options.debug
+ self.debugLog = options.debugLog
+ self.loggingConfig = options.loggingConfig
+ self.firstPackageWins = options.firstPackageWins
+ self.configureLogging()
+
+ if not options.byteCompile:
+ sys.dont_write_bytecode = True
+
+ if options.where is not None:
+ self.configureWhere(options.where)
+
+ if options.testMatch:
+ self.testMatch = re.compile(options.testMatch)
+
+ if options.ignoreFiles:
+ self.ignoreFiles = list(map(re.compile, tolist(options.ignoreFiles)))
+ log.info("Ignoring files matching %s", options.ignoreFiles)
+ else:
+ log.info("Ignoring files matching %s", self.ignoreFilesDefaultStrings)
+
+ if options.include:
+ self.include = list(map(re.compile, tolist(options.include)))
+ log.info("Including tests matching %s", options.include)
+
+ if options.exclude:
+ self.exclude = list(map(re.compile, tolist(options.exclude)))
+ log.info("Excluding tests matching %s", options.exclude)
+
+ # When listing plugins we don't want to run them
+ if not options.showPlugins:
+ self.plugins.configure(options, self)
+ self.plugins.begin()
+
+ def configureLogging(self):
+ """Configure logging for nose, or optionally other packages. Any logger
+ name may be set with the debug option, and that logger will be set to
+ debug level and be assigned the same handler as the nose loggers, unless
+ it already has a handler.
+ """
+ if self.loggingConfig:
+ from logging.config import fileConfig
+ fileConfig(self.loggingConfig)
+ return
+
+ format = logging.Formatter('%(name)s: %(levelname)s: %(message)s')
+ if self.debugLog:
+ handler = logging.FileHandler(self.debugLog)
+ else:
+ handler = logging.StreamHandler(self.logStream)
+ handler.setFormatter(format)
+
+ logger = logging.getLogger('nose')
+ logger.propagate = 0
+
+ # only add our default handler if there isn't already one there
+ # this avoids annoying duplicate log messages.
+ found = False
+ if self.debugLog:
+ debugLogAbsPath = os.path.abspath(self.debugLog)
+ for h in logger.handlers:
+ if type(h) == logging.FileHandler and \
+ h.baseFilename == debugLogAbsPath:
+ found = True
+ else:
+ for h in logger.handlers:
+ if type(h) == logging.StreamHandler and \
+ h.stream == self.logStream:
+ found = True
+ if not found:
+ logger.addHandler(handler)
+
+ # default level
+ lvl = logging.WARNING
+ if self.verbosity >= 5:
+ lvl = 0
+ elif self.verbosity >= 4:
+ lvl = logging.DEBUG
+ elif self.verbosity >= 3:
+ lvl = logging.INFO
+ logger.setLevel(lvl)
+
+ # individual overrides
+ if self.debug:
+ # no blanks
+ debug_loggers = [ name for name in self.debug.split(',')
+ if name ]
+ for logger_name in debug_loggers:
+ l = logging.getLogger(logger_name)
+ l.setLevel(logging.DEBUG)
+ if not l.handlers and not logger_name.startswith('nose'):
+ l.addHandler(handler)
+
+ def configureWhere(self, where):
+ """Configure the working directory or directories for the test run.
+ """
+ from nose.importer import add_path
+ self.workingDir = None
+ where = tolist(where)
+ warned = False
+ for path in where:
+ if not self.workingDir:
+ abs_path = absdir(path)
+ if abs_path is None:
+ raise ValueError("Working directory %s not found, or "
+ "not a directory" % path)
+ log.info("Set working dir to %s", abs_path)
+ self.workingDir = abs_path
+ if self.addPaths and \
+ os.path.exists(os.path.join(abs_path, '__init__.py')):
+ log.info("Working directory %s is a package; "
+ "adding to sys.path" % abs_path)
+ add_path(abs_path)
+ continue
+ if not warned:
+ warn("Use of multiple -w arguments is deprecated and "
+ "support may be removed in a future release. You can "
+ "get the same behavior by passing directories without "
+ "the -w argument on the command line, or by using the "
+ "--tests argument in a configuration file.",
+ DeprecationWarning)
+ warned = True
+ self.testNames.append(path)
+
+ def default(self):
+ """Reset all config values to defaults.
+ """
+ self.__dict__.update(self._default)
+
+ def getParser(self, doc=None):
+ """Get the command line option parser.
+ """
+ if self.parser:
+ return self.parser
+ env = self.env
+ parser = self.parserClass(doc)
+ parser.add_option(
+ "-V","--version", action="store_true",
+ dest="version", default=False,
+ help="Output nose version and exit")
+ parser.add_option(
+ "-p", "--plugins", action="store_true",
+ dest="showPlugins", default=False,
+ help="Output list of available plugins and exit. Combine with "
+ "higher verbosity for greater detail")
+ parser.add_option(
+ "-v", "--verbose",
+ action="count", dest="verbosity",
+ default=self.verbosity,
+ help="Be more verbose. [NOSE_VERBOSE]")
+ parser.add_option(
+ "--verbosity", action="store", dest="verbosity",
+ metavar='VERBOSITY',
+ type="int", help="Set verbosity; --verbosity=2 is "
+ "the same as -v")
+ parser.add_option(
+ "-q", "--quiet", action="store_const", const=0, dest="verbosity",
+ help="Be less verbose")
+ parser.add_option(
+ "-c", "--config", action="append", dest="files",
+ metavar="FILES",
+ help="Load configuration from config file(s). May be specified "
+ "multiple times; in that case, all config files will be "
+ "loaded and combined")
+ parser.add_option(
+ "-w", "--where", action="append", dest="where",
+ metavar="WHERE",
+ help="Look for tests in this directory. "
+ "May be specified multiple times. The first directory passed "
+ "will be used as the working directory, in place of the current "
+ "working directory, which is the default. Others will be added "
+ "to the list of tests to execute. [NOSE_WHERE]"
+ )
+ parser.add_option(
+ "--py3where", action="append", dest="py3where",
+ metavar="PY3WHERE",
+ help="Look for tests in this directory under Python 3.x. "
+ "Functions the same as 'where', but only applies if running under "
+ "Python 3.x or above. Note that, if present under 3.x, this "
+ "option completely replaces any directories specified with "
+ "'where', so the 'where' option becomes ineffective. "
+ "[NOSE_PY3WHERE]"
+ )
+ parser.add_option(
+ "-m", "--match", "--testmatch", action="store",
+ dest="testMatch", metavar="REGEX",
+ help="Files, directories, function names, and class names "
+ "that match this regular expression are considered tests. "
+ "Default: %s [NOSE_TESTMATCH]" % self.testMatchPat,
+ default=self.testMatchPat)
+ parser.add_option(
+ "--tests", action="store", dest="testNames", default=None,
+ metavar='NAMES',
+ help="Run these tests (comma-separated list). This argument is "
+ "useful mainly from configuration files; on the command line, "
+ "just pass the tests to run as additional arguments with no "
+ "switch.")
+ parser.add_option(
+ "-l", "--debug", action="store",
+ dest="debug", default=self.debug,
+ help="Activate debug logging for one or more systems. "
+ "Available debug loggers: nose, nose.importer, "
+ "nose.inspector, nose.plugins, nose.result and "
+ "nose.selector. Separate multiple names with a comma.")
+ parser.add_option(
+ "--debug-log", dest="debugLog", action="store",
+ default=self.debugLog, metavar="FILE",
+ help="Log debug messages to this file "
+ "(default: sys.stderr)")
+ parser.add_option(
+ "--logging-config", "--log-config",
+ dest="loggingConfig", action="store",
+ default=self.loggingConfig, metavar="FILE",
+ help="Load logging config from this file -- bypasses all other"
+ " logging config settings.")
+ parser.add_option(
+ "-I", "--ignore-files", action="append", dest="ignoreFiles",
+ metavar="REGEX",
+ help="Completely ignore any file that matches this regular "
+ "expression. Takes precedence over any other settings or "
+ "plugins. "
+ "Specifying this option will replace the default setting. "
+ "Specify this option multiple times "
+ "to add more regular expressions [NOSE_IGNORE_FILES]")
+ parser.add_option(
+ "-e", "--exclude", action="append", dest="exclude",
+ metavar="REGEX",
+ help="Don't run tests that match regular "
+ "expression [NOSE_EXCLUDE]")
+ parser.add_option(
+ "-i", "--include", action="append", dest="include",
+ metavar="REGEX",
+ help="This regular expression will be applied to files, "
+ "directories, function names, and class names for a chance "
+ "to include additional tests that do not match TESTMATCH. "
+ "Specify this option multiple times "
+ "to add more regular expressions [NOSE_INCLUDE]")
+ parser.add_option(
+ "-x", "--stop", action="store_true", dest="stopOnError",
+ default=self.stopOnError,
+ help="Stop running tests after the first error or failure")
+ parser.add_option(
+ "-P", "--no-path-adjustment", action="store_false",
+ dest="addPaths",
+ default=self.addPaths,
+ help="Don't make any changes to sys.path when "
+ "loading tests [NOSE_NOPATH]")
+ parser.add_option(
+ "--exe", action="store_true", dest="includeExe",
+ default=self.includeExe,
+ help="Look for tests in python modules that are "
+ "executable. Normal behavior is to exclude executable "
+ "modules, since they may not be import-safe "
+ "[NOSE_INCLUDE_EXE]")
+ parser.add_option(
+ "--noexe", action="store_false", dest="includeExe",
+ help="DO NOT look for tests in python modules that are "
+ "executable. (The default on the windows platform is to "
+ "do so.)")
+ parser.add_option(
+ "--traverse-namespace", action="store_true",
+ default=self.traverseNamespace, dest="traverseNamespace",
+ help="Traverse through all path entries of a namespace package")
+ parser.add_option(
+ "--first-package-wins", "--first-pkg-wins", "--1st-pkg-wins",
+ action="store_true", default=False, dest="firstPackageWins",
+ help="nose's importer will normally evict a package from sys."
+ "modules if it sees a package with the same name in a different "
+ "location. Set this option to disable that behavior.")
+ parser.add_option(
+ "--no-byte-compile",
+ action="store_false", default=True, dest="byteCompile",
+ help="Prevent nose from byte-compiling the source into .pyc files "
+ "while nose is scanning for and running tests.")
+
+ self.plugins.loadPlugins()
+ self.pluginOpts(parser)
+
+ self.parser = parser
+ return parser
+
+ def help(self, doc=None):
+ """Return the generated help message
+ """
+ return self.getParser(doc).format_help()
+
+ def pluginOpts(self, parser):
+ self.plugins.addOptions(parser, self.env)
+
+ def reset(self):
+ self.__dict__.update(self._orig)
+
+ def todict(self):
+ return self.__dict__.copy()
+
+ def update(self, d):
+ self.__dict__.update(d)
+
+
+class NoOptions(object):
+ """Options container that returns None for all options.
+ """
+ def __getstate__(self):
+ return {}
+
+ def __setstate__(self, state):
+ pass
+
+ def __getnewargs__(self):
+ return ()
+
+ def __bool__(self):
+ return False
+
+
+def user_config_files():
+ """Return path to any existing user config files
+ """
+ return list(filter(os.path.exists,
+ list(map(os.path.expanduser, config_files))))
+
+
+def all_config_files():
+ """Return path to any existing user config files, plus any setup.cfg
+ in the current working directory.
+ """
+ user = user_config_files()
+ if os.path.exists('setup.cfg'):
+ return user + ['setup.cfg']
+ return user
+
+
+# used when parsing config files
+def flag(val):
+ """Does the value look like an on/off flag?"""
+ if val == 1:
+ return True
+ elif val == 0:
+ return False
+ val = str(val)
+ if len(val) > 5:
+ return False
+ return val.upper() in ('1', '0', 'F', 'T', 'TRUE', 'FALSE', 'ON', 'OFF')
+
+
+def _bool(val):
+ return str(val).upper() in ('1', 'T', 'TRUE', 'ON')
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/core.py b/scripts/external_libs/nose-1.3.4/python3/nose/core.py
new file mode 100644
index 00000000..ceacc9aa
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/core.py
@@ -0,0 +1,341 @@
+"""Implements nose test program and collector.
+"""
+
+
+import logging
+import os
+import sys
+import time
+import unittest
+
+from nose.config import Config, all_config_files
+from nose.loader import defaultTestLoader
+from nose.plugins.manager import PluginManager, DefaultPluginManager, \
+ RestrictedPluginManager
+from nose.result import TextTestResult
+from nose.suite import FinalizingSuiteWrapper
+from nose.util import isclass, tolist
+
+
+log = logging.getLogger('nose.core')
+compat_24 = sys.version_info >= (2, 4)
+
+__all__ = ['TestProgram', 'main', 'run', 'run_exit', 'runmodule', 'collector',
+ 'TextTestRunner']
+
+
+class TextTestRunner(unittest.TextTestRunner):
+ """Test runner that uses nose's TextTestResult to enable errorClasses,
+ as well as providing hooks for plugins to override or replace the test
+ output stream, results, and the test case itself.
+ """
+ def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1,
+ config=None):
+ if config is None:
+ config = Config()
+ self.config = config
+ unittest.TextTestRunner.__init__(self, stream, descriptions, verbosity)
+
+
+ def _makeResult(self):
+ return TextTestResult(self.stream,
+ self.descriptions,
+ self.verbosity,
+ self.config)
+
+ def run(self, test):
+ """Overrides to provide plugin hooks and defer all output to
+ the test result class.
+ """
+ wrapper = self.config.plugins.prepareTest(test)
+ if wrapper is not None:
+ test = wrapper
+
+ # plugins can decorate or capture the output stream
+ wrapped = self.config.plugins.setOutputStream(self.stream)
+ if wrapped is not None:
+ self.stream = wrapped
+
+ result = self._makeResult()
+ start = time.time()
+ try:
+ test(result)
+ except KeyboardInterrupt:
+ pass
+ stop = time.time()
+ result.printErrors()
+ result.printSummary(start, stop)
+ self.config.plugins.finalize(result)
+ return result
+
+
+class TestProgram(unittest.TestProgram):
+ """Collect and run tests, returning success or failure.
+
+ The arguments to TestProgram() are the same as to
+ :func:`main()` and :func:`run()`:
+
+ * module: All tests are in this module (default: None)
+ * defaultTest: Tests to load (default: '.')
+ * argv: Command line arguments (default: None; sys.argv is read)
+ * testRunner: Test runner instance (default: None)
+ * testLoader: Test loader instance (default: None)
+ * env: Environment; ignored if config is provided (default: None;
+ os.environ is read)
+ * config: :class:`nose.config.Config` instance (default: None)
+ * suite: Suite or list of tests to run (default: None). Passing a
+ suite or lists of tests will bypass all test discovery and
+ loading. *ALSO NOTE* that if you pass a unittest.TestSuite
+ instance as the suite, context fixtures at the class, module and
+ package level will not be used, and many plugin hooks will not
+ be called. If you want normal nose behavior, either pass a list
+ of tests, or a fully-configured :class:`nose.suite.ContextSuite`.
+ * exit: Exit after running tests and printing report (default: True)
+ * plugins: List of plugins to use; ignored if config is provided
+ (default: load plugins with DefaultPluginManager)
+ * addplugins: List of **extra** plugins to use. Pass a list of plugin
+ instances in this argument to make custom plugins available while
+ still using the DefaultPluginManager.
+ """
+ verbosity = 1
+
+ def __init__(self, module=None, defaultTest='.', argv=None,
+ testRunner=None, testLoader=None, env=None, config=None,
+ suite=None, exit=True, plugins=None, addplugins=None):
+ if env is None:
+ env = os.environ
+ if config is None:
+ config = self.makeConfig(env, plugins)
+ if addplugins:
+ config.plugins.addPlugins(extraplugins=addplugins)
+ self.config = config
+ self.suite = suite
+ self.exit = exit
+ extra_args = {}
+ version = sys.version_info[0:2]
+ if version >= (2,7) and version != (3,0):
+ extra_args['exit'] = exit
+ unittest.TestProgram.__init__(
+ self, module=module, defaultTest=defaultTest,
+ argv=argv, testRunner=testRunner, testLoader=testLoader,
+ **extra_args)
+
+ def getAllConfigFiles(self, env=None):
+ env = env or {}
+ if env.get('NOSE_IGNORE_CONFIG_FILES', False):
+ return []
+ else:
+ return all_config_files()
+
+ def makeConfig(self, env, plugins=None):
+ """Load a Config, pre-filled with user config files if any are
+ found.
+ """
+ cfg_files = self.getAllConfigFiles(env)
+ if plugins:
+ manager = PluginManager(plugins=plugins)
+ else:
+ manager = DefaultPluginManager()
+ return Config(
+ env=env, files=cfg_files, plugins=manager)
+
+ def parseArgs(self, argv):
+ """Parse argv and env and configure running environment.
+ """
+ self.config.configure(argv, doc=self.usage())
+ log.debug("configured %s", self.config)
+
+ # quick outs: version, plugins (optparse would have already
+ # caught and exited on help)
+ if self.config.options.version:
+ from nose import __version__
+ sys.stdout = sys.__stdout__
+ print("%s version %s" % (os.path.basename(sys.argv[0]), __version__))
+ sys.exit(0)
+
+ if self.config.options.showPlugins:
+ self.showPlugins()
+ sys.exit(0)
+
+ if self.testLoader is None:
+ self.testLoader = defaultTestLoader(config=self.config)
+ elif isclass(self.testLoader):
+ self.testLoader = self.testLoader(config=self.config)
+ plug_loader = self.config.plugins.prepareTestLoader(self.testLoader)
+ if plug_loader is not None:
+ self.testLoader = plug_loader
+ log.debug("test loader is %s", self.testLoader)
+
+ # FIXME if self.module is a string, add it to self.testNames? not sure
+
+ if self.config.testNames:
+ self.testNames = self.config.testNames
+ else:
+ self.testNames = tolist(self.defaultTest)
+ log.debug('defaultTest %s', self.defaultTest)
+ log.debug('Test names are %s', self.testNames)
+ if self.config.workingDir is not None:
+ os.chdir(self.config.workingDir)
+ self.createTests()
+
+ def createTests(self):
+ """Create the tests to run. If a self.suite
+ is set, then that suite will be used. Otherwise, tests will be
+ loaded from the given test names (self.testNames) using the
+ test loader.
+ """
+ log.debug("createTests called with %s", self.suite)
+ if self.suite is not None:
+ # We were given an explicit suite to run. Make sure it's
+ # loaded and wrapped correctly.
+ self.test = self.testLoader.suiteClass(self.suite)
+ else:
+ self.test = self.testLoader.loadTestsFromNames(self.testNames)
+
+ def runTests(self):
+ """Run Tests. Returns true on success, false on failure, and sets
+ self.success to the same value.
+ """
+ log.debug("runTests called")
+ if self.testRunner is None:
+ self.testRunner = TextTestRunner(stream=self.config.stream,
+ verbosity=self.config.verbosity,
+ config=self.config)
+ plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
+ if plug_runner is not None:
+ self.testRunner = plug_runner
+ result = self.testRunner.run(self.test)
+ self.success = result.wasSuccessful()
+ if self.exit:
+ sys.exit(not self.success)
+ return self.success
+
+ def showPlugins(self):
+ """Print list of available plugins.
+ """
+ import textwrap
+
+ class DummyParser:
+ def __init__(self):
+ self.options = []
+ def add_option(self, *arg, **kw):
+ self.options.append((arg, kw.pop('help', '')))
+
+ v = self.config.verbosity
+ self.config.plugins.sort()
+ for p in self.config.plugins:
+ print("Plugin %s" % p.name)
+ if v >= 2:
+ print(" score: %s" % p.score)
+ print('\n'.join(textwrap.wrap(p.help().strip(),
+ initial_indent=' ',
+ subsequent_indent=' ')))
+ if v >= 3:
+ parser = DummyParser()
+ p.addOptions(parser)
+ if len(parser.options):
+ print()
+ print(" Options:")
+ for opts, help in parser.options:
+ print(' %s' % (', '.join(opts)))
+ if help:
+ print('\n'.join(
+ textwrap.wrap(help.strip(),
+ initial_indent=' ',
+ subsequent_indent=' ')))
+ print()
+
+ def usage(cls):
+ import nose
+ try:
+ ld = nose.__loader__
+ text = ld.get_data(os.path.join(
+ os.path.dirname(__file__), 'usage.txt'))
+ except AttributeError:
+ f = open(os.path.join(
+ os.path.dirname(__file__), 'usage.txt'), 'r')
+ try:
+ text = f.read()
+ finally:
+ f.close()
+ # Ensure that we return str, not bytes.
+ if not isinstance(text, str):
+ text = text.decode('utf-8')
+ return text
+ usage = classmethod(usage)
+
+# backwards compatibility
+run_exit = main = TestProgram
+
+
+def run(*arg, **kw):
+ """Collect and run tests, returning success or failure.
+
+ The arguments to `run()` are the same as to `main()`:
+
+ * module: All tests are in this module (default: None)
+ * defaultTest: Tests to load (default: '.')
+ * argv: Command line arguments (default: None; sys.argv is read)
+ * testRunner: Test runner instance (default: None)
+ * testLoader: Test loader instance (default: None)
+ * env: Environment; ignored if config is provided (default: None;
+ os.environ is read)
+ * config: :class:`nose.config.Config` instance (default: None)
+ * suite: Suite or list of tests to run (default: None). Passing a
+ suite or lists of tests will bypass all test discovery and
+ loading. *ALSO NOTE* that if you pass a unittest.TestSuite
+ instance as the suite, context fixtures at the class, module and
+ package level will not be used, and many plugin hooks will not
+ be called. If you want normal nose behavior, either pass a list
+ of tests, or a fully-configured :class:`nose.suite.ContextSuite`.
+ * plugins: List of plugins to use; ignored if config is provided
+ (default: load plugins with DefaultPluginManager)
+ * addplugins: List of **extra** plugins to use. Pass a list of plugin
+ instances in this argument to make custom plugins available while
+ still using the DefaultPluginManager.
+
+ With the exception that the ``exit`` argument is always set
+ to False.
+ """
+ kw['exit'] = False
+ return TestProgram(*arg, **kw).success
+
+
+def runmodule(name='__main__', **kw):
+ """Collect and run tests in a single module only. Defaults to running
+ tests in __main__. Additional arguments to TestProgram may be passed
+ as keyword arguments.
+ """
+ main(defaultTest=name, **kw)
+
+
+def collector():
+ """TestSuite replacement entry point. Use anywhere you might use a
+ unittest.TestSuite. The collector will, by default, load options from
+ all config files and execute loader.loadTestsFromNames() on the
+ configured testNames, or '.' if no testNames are configured.
+ """
+ # plugins that implement any of these methods are disabled, since
+ # we don't control the test runner and won't be able to run them
+ # finalize() is also not called, but plugins that use it aren't disabled,
+ # because capture needs it.
+ setuptools_incompat = ('report', 'prepareTest',
+ 'prepareTestLoader', 'prepareTestRunner',
+ 'setOutputStream')
+
+ plugins = RestrictedPluginManager(exclude=setuptools_incompat)
+ conf = Config(files=all_config_files(),
+ plugins=plugins)
+ conf.configure(argv=['collector'])
+ loader = defaultTestLoader(conf)
+
+ if conf.testNames:
+ suite = loader.loadTestsFromNames(conf.testNames)
+ else:
+ suite = loader.loadTestsFromNames(('.',))
+ return FinalizingSuiteWrapper(suite, plugins.finalize)
+
+
+
+if __name__ == '__main__':
+ main()
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/exc.py b/scripts/external_libs/nose-1.3.4/python3/nose/exc.py
new file mode 100644
index 00000000..8b780db0
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/exc.py
@@ -0,0 +1,9 @@
+"""Exceptions for marking tests as skipped or deprecated.
+
+This module exists to provide backwards compatibility with previous
+versions of nose where skipped and deprecated tests were core
+functionality, rather than being provided by plugins. It may be
+removed in a future release.
+"""
+from nose.plugins.skip import SkipTest
+from nose.plugins.deprecated import DeprecatedTest
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/ext/__init__.py b/scripts/external_libs/nose-1.3.4/python3/nose/ext/__init__.py
new file mode 100644
index 00000000..5fd1516a
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/ext/__init__.py
@@ -0,0 +1,3 @@
+"""
+External or vendor files
+"""
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/ext/dtcompat.py b/scripts/external_libs/nose-1.3.4/python3/nose/ext/dtcompat.py
new file mode 100644
index 00000000..e707c657
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/ext/dtcompat.py
@@ -0,0 +1,2272 @@
+# Module doctest.
+# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
+# Major enhancements and refactoring by:
+# Jim Fulton
+# Edward Loper
+
+# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
+#
+# Modified for inclusion in nose to provide support for DocFileTest in
+# python 2.3:
+#
+# - all doctests removed from module (they fail under 2.3 and 2.5)
+# - now handles the $py.class extension when ran under Jython
+
+r"""Module doctest -- a framework for running examples in docstrings.
+
+In simplest use, end each module M to be tested with:
+
+def _test():
+ import doctest
+ doctest.testmod()
+
+if __name__ == "__main__":
+ _test()
+
+Then running the module as a script will cause the examples in the
+docstrings to get executed and verified:
+
+python M.py
+
+This won't display anything unless an example fails, in which case the
+failing example(s) and the cause(s) of the failure(s) are printed to stdout
+(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
+line of output is "Test failed.".
+
+Run it with the -v switch instead:
+
+python M.py -v
+
+and a detailed report of all examples tried is printed to stdout, along
+with assorted summaries at the end.
+
+You can force verbose mode by passing "verbose=True" to testmod, or prohibit
+it by passing "verbose=False". In either of those cases, sys.argv is not
+examined by testmod.
+
+There are a variety of other ways to run doctests, including integration
+with the unittest framework, and support for running non-Python text
+files containing doctests. There are also many ways to override parts
+of doctest's default behaviors. See the Library Reference Manual for
+details.
+"""
+
+__docformat__ = 'reStructuredText en'
+
+__all__ = [
+ # 0, Option Flags
+ 'register_optionflag',
+ 'DONT_ACCEPT_TRUE_FOR_1',
+ 'DONT_ACCEPT_BLANKLINE',
+ 'NORMALIZE_WHITESPACE',
+ 'ELLIPSIS',
+ 'IGNORE_EXCEPTION_DETAIL',
+ 'COMPARISON_FLAGS',
+ 'REPORT_UDIFF',
+ 'REPORT_CDIFF',
+ 'REPORT_NDIFF',
+ 'REPORT_ONLY_FIRST_FAILURE',
+ 'REPORTING_FLAGS',
+ # 1. Utility Functions
+ 'is_private',
+ # 2. Example & DocTest
+ 'Example',
+ 'DocTest',
+ # 3. Doctest Parser
+ 'DocTestParser',
+ # 4. Doctest Finder
+ 'DocTestFinder',
+ # 5. Doctest Runner
+ 'DocTestRunner',
+ 'OutputChecker',
+ 'DocTestFailure',
+ 'UnexpectedException',
+ 'DebugRunner',
+ # 6. Test Functions
+ 'testmod',
+ 'testfile',
+ 'run_docstring_examples',
+ # 7. Tester
+ 'Tester',
+ # 8. Unittest Support
+ 'DocTestSuite',
+ 'DocFileSuite',
+ 'set_unittest_reportflags',
+ # 9. Debugging Support
+ 'script_from_examples',
+ 'testsource',
+ 'debug_src',
+ 'debug',
+]
+
+import __future__
+
+import sys, traceback, inspect, linecache, os, re
+import unittest, difflib, pdb, tempfile
+import warnings
+from io import StringIO
+
+# Don't whine about the deprecated is_private function in this
+# module's tests.
+warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
+ __name__, 0)
+
+# There are 4 basic classes:
+# - Example: a <source, want> pair, plus an intra-docstring line number.
+# - DocTest: a collection of examples, parsed from a docstring, plus
+# info about where the docstring came from (name, filename, lineno).
+# - DocTestFinder: extracts DocTests from a given object's docstring and
+# its contained objects' docstrings.
+# - DocTestRunner: runs DocTest cases, and accumulates statistics.
+#
+# So the basic picture is:
+#
+# list of:
+# +------+ +---------+ +-------+
+# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
+# +------+ +---------+ +-------+
+# | Example |
+# | ... |
+# | Example |
+# +---------+
+
+# Option constants.
+
+OPTIONFLAGS_BY_NAME = {}
+def register_optionflag(name):
+ # Create a new flag unless `name` is already known.
+ return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
+
+DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
+DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
+NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
+ELLIPSIS = register_optionflag('ELLIPSIS')
+IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
+
+COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
+ DONT_ACCEPT_BLANKLINE |
+ NORMALIZE_WHITESPACE |
+ ELLIPSIS |
+ IGNORE_EXCEPTION_DETAIL)
+
+REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
+REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
+REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
+REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
+
+REPORTING_FLAGS = (REPORT_UDIFF |
+ REPORT_CDIFF |
+ REPORT_NDIFF |
+ REPORT_ONLY_FIRST_FAILURE)
+
+# Special string markers for use in `want` strings:
+BLANKLINE_MARKER = '<BLANKLINE>'
+ELLIPSIS_MARKER = '...'
+
+######################################################################
+## Table of Contents
+######################################################################
+# 1. Utility Functions
+# 2. Example & DocTest -- store test cases
+# 3. DocTest Parser -- extracts examples from strings
+# 4. DocTest Finder -- extracts test cases from objects
+# 5. DocTest Runner -- runs test cases
+# 6. Test Functions -- convenient wrappers for testing
+# 7. Tester Class -- for backwards compatibility
+# 8. Unittest Support
+# 9. Debugging Support
+# 10. Example Usage
+
+######################################################################
+## 1. Utility Functions
+######################################################################
+
+def is_private(prefix, base):
+ """prefix, base -> true iff name prefix + "." + base is "private".
+
+ Prefix may be an empty string, and base does not contain a period.
+ Prefix is ignored (although functions you write conforming to this
+ protocol may make use of it).
+ Return true iff base begins with an (at least one) underscore, but
+ does not both begin and end with (at least) two underscores.
+ """
+ warnings.warn("is_private is deprecated; it wasn't useful; "
+ "examine DocTestFinder.find() lists instead",
+ DeprecationWarning, stacklevel=2)
+ return base[:1] == "_" and not base[:2] == "__" == base[-2:]
+
+def _extract_future_flags(globs):
+ """
+ Return the compiler-flags associated with the future features that
+ have been imported into the given namespace (globs).
+ """
+ flags = 0
+ for fname in __future__.all_feature_names:
+ feature = globs.get(fname, None)
+ if feature is getattr(__future__, fname):
+ flags |= feature.compiler_flag
+ return flags
+
+def _normalize_module(module, depth=2):
+ """
+ Return the module specified by `module`. In particular:
+ - If `module` is a module, then return module.
+ - If `module` is a string, then import and return the
+ module with that name.
+ - If `module` is None, then return the calling module.
+ The calling module is assumed to be the module of
+ the stack frame at the given depth in the call stack.
+ """
+ if inspect.ismodule(module):
+ return module
+ elif isinstance(module, str):
+ return __import__(module, globals(), locals(), ["*"])
+ elif module is None:
+ return sys.modules[sys._getframe(depth).f_globals['__name__']]
+ else:
+ raise TypeError("Expected a module, string, or None")
+
+def _indent(s, indent=4):
+ """
+ Add the given number of space characters to the beginning every
+ non-blank line in `s`, and return the result.
+ """
+ # This regexp matches the start of non-blank lines:
+ return re.sub('(?m)^(?!$)', indent*' ', s)
+
+def _exception_traceback(exc_info):
+ """
+ Return a string containing a traceback message for the given
+ exc_info tuple (as returned by sys.exc_info()).
+ """
+ # Get a traceback message.
+ excout = StringIO()
+ exc_type, exc_val, exc_tb = exc_info
+ traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
+ return excout.getvalue()
+
+# Override some StringIO methods.
+class _SpoofOut(StringIO):
+ def getvalue(self):
+ result = StringIO.getvalue(self)
+ # If anything at all was written, make sure there's a trailing
+ # newline. There's no way for the expected output to indicate
+ # that a trailing newline is missing.
+ if result and not result.endswith("\n"):
+ result += "\n"
+ # Prevent softspace from screwing up the next test case, in
+ # case they used print with a trailing comma in an example.
+ if hasattr(self, "softspace"):
+ del self.softspace
+ return result
+
+ def truncate(self, size=None):
+ StringIO.truncate(self, size)
+ if hasattr(self, "softspace"):
+ del self.softspace
+
+# Worst-case linear-time ellipsis matching.
+def _ellipsis_match(want, got):
+ if ELLIPSIS_MARKER not in want:
+ return want == got
+
+ # Find "the real" strings.
+ ws = want.split(ELLIPSIS_MARKER)
+ assert len(ws) >= 2
+
+ # Deal with exact matches possibly needed at one or both ends.
+ startpos, endpos = 0, len(got)
+ w = ws[0]
+ if w: # starts with exact match
+ if got.startswith(w):
+ startpos = len(w)
+ del ws[0]
+ else:
+ return False
+ w = ws[-1]
+ if w: # ends with exact match
+ if got.endswith(w):
+ endpos -= len(w)
+ del ws[-1]
+ else:
+ return False
+
+ if startpos > endpos:
+ # Exact end matches required more characters than we have, as in
+ # _ellipsis_match('aa...aa', 'aaa')
+ return False
+
+ # For the rest, we only need to find the leftmost non-overlapping
+ # match for each piece. If there's no overall match that way alone,
+ # there's no overall match period.
+ for w in ws:
+ # w may be '' at times, if there are consecutive ellipses, or
+ # due to an ellipsis at the start or end of `want`. That's OK.
+ # Search for an empty string succeeds, and doesn't change startpos.
+ startpos = got.find(w, startpos, endpos)
+ if startpos < 0:
+ return False
+ startpos += len(w)
+
+ return True
+
+def _comment_line(line):
+ "Return a commented form of the given line"
+ line = line.rstrip()
+ if line:
+ return '# '+line
+ else:
+ return '#'
+
+class _OutputRedirectingPdb(pdb.Pdb):
+ """
+ A specialized version of the python debugger that redirects stdout
+ to a given stream when interacting with the user. Stdout is *not*
+ redirected when traced code is executed.
+ """
+ def __init__(self, out):
+ self.__out = out
+ pdb.Pdb.__init__(self)
+
+ def trace_dispatch(self, *args):
+ # Redirect stdout to the given stream.
+ save_stdout = sys.stdout
+ sys.stdout = self.__out
+ # Call Pdb's trace dispatch method.
+ try:
+ return pdb.Pdb.trace_dispatch(self, *args)
+ finally:
+ sys.stdout = save_stdout
+
+# [XX] Normalize with respect to os.path.pardir?
+def _module_relative_path(module, path):
+ if not inspect.ismodule(module):
+ raise TypeError('Expected a module: %r' % module)
+ if path.startswith('/'):
+ raise ValueError('Module-relative files may not have absolute paths')
+
+ # Find the base directory for the path.
+ if hasattr(module, '__file__'):
+ # A normal module/package
+ basedir = os.path.split(module.__file__)[0]
+ elif module.__name__ == '__main__':
+ # An interactive session.
+ if len(sys.argv)>0 and sys.argv[0] != '':
+ basedir = os.path.split(sys.argv[0])[0]
+ else:
+ basedir = os.curdir
+ else:
+ # A module w/o __file__ (this includes builtins)
+ raise ValueError("Can't resolve paths relative to the module " +
+ module + " (it has no __file__)")
+
+ # Combine the base directory and the path.
+ return os.path.join(basedir, *(path.split('/')))
+
+######################################################################
+## 2. Example & DocTest
+######################################################################
+## - An "example" is a <source, want> pair, where "source" is a
+## fragment of source code, and "want" is the expected output for
+## "source." The Example class also includes information about
+## where the example was extracted from.
+##
+## - A "doctest" is a collection of examples, typically extracted from
+## a string (such as an object's docstring). The DocTest class also
+## includes information about where the string was extracted from.
+
+class Example:
+ """
+ A single doctest example, consisting of source code and expected
+ output. `Example` defines the following attributes:
+
+ - source: A single Python statement, always ending with a newline.
+ The constructor adds a newline if needed.
+
+ - want: The expected output from running the source code (either
+ from stdout, or a traceback in case of exception). `want` ends
+ with a newline unless it's empty, in which case it's an empty
+ string. The constructor adds a newline if needed.
+
+ - exc_msg: The exception message generated by the example, if
+ the example is expected to generate an exception; or `None` if
+ it is not expected to generate an exception. This exception
+ message is compared against the return value of
+ `traceback.format_exception_only()`. `exc_msg` ends with a
+ newline unless it's `None`. The constructor adds a newline
+ if needed.
+
+ - lineno: The line number within the DocTest string containing
+ this Example where the Example begins. This line number is
+ zero-based, with respect to the beginning of the DocTest.
+
+ - indent: The example's indentation in the DocTest string.
+ I.e., the number of space characters that preceed the
+ example's first prompt.
+
+ - options: A dictionary mapping from option flags to True or
+ False, which is used to override default options for this
+ example. Any option flags not contained in this dictionary
+ are left at their default value (as specified by the
+ DocTestRunner's optionflags). By default, no options are set.
+ """
+ def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
+ options=None):
+ # Normalize inputs.
+ if not source.endswith('\n'):
+ source += '\n'
+ if want and not want.endswith('\n'):
+ want += '\n'
+ if exc_msg is not None and not exc_msg.endswith('\n'):
+ exc_msg += '\n'
+ # Store properties.
+ self.source = source
+ self.want = want
+ self.lineno = lineno
+ self.indent = indent
+ if options is None: options = {}
+ self.options = options
+ self.exc_msg = exc_msg
+
+class DocTest:
+ """
+ A collection of doctest examples that should be run in a single
+ namespace. Each `DocTest` defines the following attributes:
+
+ - examples: the list of examples.
+
+ - globs: The namespace (aka globals) that the examples should
+ be run in.
+
+ - name: A name identifying the DocTest (typically, the name of
+ the object whose docstring this DocTest was extracted from).
+
+ - filename: The name of the file that this DocTest was extracted
+ from, or `None` if the filename is unknown.
+
+ - lineno: The line number within filename where this DocTest
+ begins, or `None` if the line number is unavailable. This
+ line number is zero-based, with respect to the beginning of
+ the file.
+
+ - docstring: The string that the examples were extracted from,
+ or `None` if the string is unavailable.
+ """
+ def __init__(self, examples, globs, name, filename, lineno, docstring):
+ """
+ Create a new DocTest containing the given examples. The
+ DocTest's globals are initialized with a copy of `globs`.
+ """
+ assert not isinstance(examples, str), \
+ "DocTest no longer accepts str; use DocTestParser instead"
+ self.examples = examples
+ self.docstring = docstring
+ self.globs = globs.copy()
+ self.name = name
+ self.filename = filename
+ self.lineno = lineno
+
+ def __repr__(self):
+ if len(self.examples) == 0:
+ examples = 'no examples'
+ elif len(self.examples) == 1:
+ examples = '1 example'
+ else:
+ examples = '%d examples' % len(self.examples)
+ return ('<DocTest %s from %s:%s (%s)>' %
+ (self.name, self.filename, self.lineno, examples))
+
+
+ # This lets us sort tests by name:
+ def __cmp__(self, other):
+ if not isinstance(other, DocTest):
+ return -1
+ return cmp((self.name, self.filename, self.lineno, id(self)),
+ (other.name, other.filename, other.lineno, id(other)))
+
+######################################################################
+## 3. DocTestParser
+######################################################################
+
+class DocTestParser:
+ """
+ A class used to parse strings containing doctest examples.
+ """
+ # This regular expression is used to find doctest examples in a
+ # string. It defines three groups: `source` is the source code
+ # (including leading indentation and prompts); `indent` is the
+ # indentation of the first (PS1) line of the source code; and
+ # `want` is the expected output (including leading indentation).
+ _EXAMPLE_RE = re.compile(r'''
+ # Source consists of a PS1 line followed by zero or more PS2 lines.
+ (?P<source>
+ (?:^(?P<indent> [ ]*) >>> .*) # PS1 line
+ (?:\n [ ]* \.\.\. .*)*) # PS2 lines
+ \n?
+ # Want consists of any non-blank lines that do not start with PS1.
+ (?P<want> (?:(?![ ]*$) # Not a blank line
+ (?![ ]*>>>) # Not a line starting with PS1
+ .*$\n? # But any other line
+ )*)
+ ''', re.MULTILINE | re.VERBOSE)
+
+ # A regular expression for handling `want` strings that contain
+ # expected exceptions. It divides `want` into three pieces:
+ # - the traceback header line (`hdr`)
+ # - the traceback stack (`stack`)
+ # - the exception message (`msg`), as generated by
+ # traceback.format_exception_only()
+ # `msg` may have multiple lines. We assume/require that the
+ # exception message is the first non-indented line starting with a word
+ # character following the traceback header line.
+ _EXCEPTION_RE = re.compile(r"""
+ # Grab the traceback header. Different versions of Python have
+ # said different things on the first traceback line.
+ ^(?P<hdr> Traceback\ \(
+ (?: most\ recent\ call\ last
+ | innermost\ last
+ ) \) :
+ )
+ \s* $ # toss trailing whitespace on the header.
+ (?P<stack> .*?) # don't blink: absorb stuff until...
+ ^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
+ """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+
+ # A callable returning a true value iff its argument is a blank line
+ # or contains a single comment.
+ _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
+
+ def parse(self, string, name='<string>'):
+ """
+ Divide the given string into examples and intervening text,
+ and return them as a list of alternating Examples and strings.
+ Line numbers for the Examples are 0-based. The optional
+ argument `name` is a name identifying this string, and is only
+ used for error messages.
+ """
+ string = string.expandtabs()
+ # If all lines begin with the same indentation, then strip it.
+ min_indent = self._min_indent(string)
+ if min_indent > 0:
+ string = '\n'.join([l[min_indent:] for l in string.split('\n')])
+
+ output = []
+ charno, lineno = 0, 0
+ # Find all doctest examples in the string:
+ for m in self._EXAMPLE_RE.finditer(string):
+ # Add the pre-example text to `output`.
+ output.append(string[charno:m.start()])
+ # Update lineno (lines before this example)
+ lineno += string.count('\n', charno, m.start())
+ # Extract info from the regexp match.
+ (source, options, want, exc_msg) = \
+ self._parse_example(m, name, lineno)
+ # Create an Example, and add it to the list.
+ if not self._IS_BLANK_OR_COMMENT(source):
+ output.append( Example(source, want, exc_msg,
+ lineno=lineno,
+ indent=min_indent+len(m.group('indent')),
+ options=options) )
+ # Update lineno (lines inside this example)
+ lineno += string.count('\n', m.start(), m.end())
+ # Update charno.
+ charno = m.end()
+ # Add any remaining post-example text to `output`.
+ output.append(string[charno:])
+ return output
+
+ def get_doctest(self, string, globs, name, filename, lineno):
+ """
+ Extract all doctest examples from the given string, and
+ collect them into a `DocTest` object.
+
+ `globs`, `name`, `filename`, and `lineno` are attributes for
+ the new `DocTest` object. See the documentation for `DocTest`
+ for more information.
+ """
+ return DocTest(self.get_examples(string, name), globs,
+ name, filename, lineno, string)
+
+ def get_examples(self, string, name='<string>'):
+ """
+ Extract all doctest examples from the given string, and return
+ them as a list of `Example` objects. Line numbers are
+ 0-based, because it's most common in doctests that nothing
+ interesting appears on the same line as opening triple-quote,
+ and so the first interesting line is called \"line 1\" then.
+
+ The optional argument `name` is a name identifying this
+ string, and is only used for error messages.
+ """
+ return [x for x in self.parse(string, name)
+ if isinstance(x, Example)]
+
    def _parse_example(self, m, name, lineno):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a pair `(source, want)`, where `source` is the matched
        example's source code (with prompts and indentation stripped);
        and `want` is the example's expected output (with indentation
        stripped).

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))

        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        # Continuation lines must begin with "...". (indent+4 below
        # strips the indentation plus the 3-char prompt and its blank.)
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        source = '\n'.join([sl[indent+4:] for sl in source_lines])

        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation.  Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1]  # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join([wl[indent:] for wl in want_lines])

        # If `want` contains a traceback message, then extract it.
        # (Note: `m` is rebound here; the _EXAMPLE_RE match is no
        # longer needed at this point.)
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None

        # Extract options from the source.
        options = self._find_options(source, name, lineno)

        return source, options, want, exc_msg
+
+ # This regular expression looks for option directives in the
+ # source code of an example. Option directives are comments
+ # starting with "doctest:". Warning: this may give false
+ # positives for string-literals that contain the string
+ # "#doctest:". Eliminating these false positives would require
+ # actually parsing the string; but we limit them by ignoring any
+ # line containing "#doctest:" that is *followed* by a quote mark.
+ _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
+ re.MULTILINE)
+
+ def _find_options(self, source, name, lineno):
+ """
+ Return a dictionary containing option overrides extracted from
+ option directives in the given source string.
+
+ `name` is the string's name, and `lineno` is the line number
+ where the example starts; both are used for error messages.
+ """
+ options = {}
+ # (note: with the current regexp, this will match at most once:)
+ for m in self._OPTION_DIRECTIVE_RE.finditer(source):
+ option_strings = m.group(1).replace(',', ' ').split()
+ for option in option_strings:
+ if (option[0] not in '+-' or
+ option[1:] not in OPTIONFLAGS_BY_NAME):
+ raise ValueError('line %r of the doctest for %s '
+ 'has an invalid option: %r' %
+ (lineno+1, name, option))
+ flag = OPTIONFLAGS_BY_NAME[option[1:]]
+ options[flag] = (option[0] == '+')
+ if options and self._IS_BLANK_OR_COMMENT(source):
+ raise ValueError('line %r of the doctest for %s has an option '
+ 'directive on a line with no example: %r' %
+ (lineno, name, source))
+ return options
+
+ # This regular expression finds the indentation of every non-blank
+ # line in a string.
+ _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
+
+ def _min_indent(self, s):
+ "Return the minimum indentation of any non-blank line in `s`"
+ indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
+ if len(indents) > 0:
+ return min(indents)
+ else:
+ return 0
+
+ def _check_prompt_blank(self, lines, indent, name, lineno):
+ """
+ Given the lines of a source string (including prompts and
+ leading indentation), check to make sure that every prompt is
+ followed by a space character. If any line is not followed by
+ a space character, then raise ValueError.
+ """
+ for i, line in enumerate(lines):
+ if len(line) >= indent+4 and line[indent+3] != ' ':
+ raise ValueError('line %r of the docstring for %s '
+ 'lacks blank after %s: %r' %
+ (lineno+i+1, name,
+ line[indent:indent+3], line))
+
+ def _check_prefix(self, lines, prefix, name, lineno):
+ """
+ Check that every line in the given list starts with the given
+ prefix; if any line does not, then raise a ValueError.
+ """
+ for i, line in enumerate(lines):
+ if line and not line.startswith(prefix):
+ raise ValueError('line %r of the docstring for %s has '
+ 'inconsistent leading whitespace: %r' %
+ (lineno+i+1, name, line))
+
+
+######################################################################
+## 4. DocTest Finder
+######################################################################
+
class DocTestFinder:
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects.  Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.
    """

    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, _namefilter=None, exclude_empty=True):
        """
        Create a new doctest finder.

        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest).  The
        signature for this factory function should match the signature
        of the DocTest constructor.

        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.

        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.
        """
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty
        # _namefilter is undocumented, and exists only for temporary backward-
        # compatibility support of testmod's deprecated isprivate mess.
        self._namefilter = _namefilter

    def find(self, obj, name=None, module=None, globs=None,
             extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.

        The optional parameter `module` is the module that contains
        the given object.  If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module.  The object's module is used:

            - As a default namespace, if `globs` is not specified.
            - To prevent the DocTestFinder from extracting DocTests
              from objects that are imported from other modules.
            - To find the name of the file containing the object.
            - To help find the line number of the object within its
              file.

        Contained objects whose module does not match `module` are ignored.

        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests:  if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.

        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`).  A new copy of the globals dictionary is created
        for each DocTest.  If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise.  If `extraglobs` is not specified, then it defaults
        to {}.

        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                        "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))

        # Find the module that contains the given object (if obj is
        # a module, then module=obj.).  Note: this may fail, in which
        # case module will be None.
        if module is False:
            module = None
        elif module is None:
            module = inspect.getmodule(obj)

        # Read the module's source code.  This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
            source_lines = linecache.getlines(file)
            if not source_lines:
                source_lines = None
        except TypeError:
            # Built-in objects have no source file.
            source_lines = None

        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)

        # Recursively expore `obj`, extracting DocTests.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        # Sort the tests by alpha order of names, for consistency in
        # verbose-mode output.  This was a feature of doctest in Pythons
        # <= 2.3 that got lost by accident in 2.4.  It was repaired in
        # 2.4.4 and 2.5.
        # NOTE(review): this relies on DocTest objects being orderable
        # (defined elsewhere in this file) -- confirm DocTest defines
        # comparison methods under Python 3.
        tests.sort()
        return tests

    def _filter(self, obj, prefix, base):
        """
        Return true if the given object should not be examined.
        """
        return (self._namefilter is not None and
                self._namefilter(prefix, base))

    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.
        """
        if module is None:
            return True
        elif inspect.isfunction(object):
            return module.__dict__ is object.__globals__
        elif inspect.isclass(object):
            # Some jython classes don't set __module__
            return module.__name__ == getattr(object, '__module__', None)
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True # [XX] no way not be sure.
        else:
            raise ValueError("object must be a class or function")

    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.
        """
        if self._verbose:
            print('Finding tests in %s' % name)

        # If we've already processed this object, then ignore it.
        # `seen` maps id(obj) -> 1 and guards against cycles.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1

        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)

        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in list(obj.__dict__.items()):
                # Check if this contained object should be ignored.
                if self._filter(val, name, valname):
                    continue
                valname = '%s.%s' % (name, valname)
                # Recurse to functions & classes.
                if ((inspect.isfunction(val) or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)

        # Look for tests in a module's __test__ dictionary.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in list(getattr(obj, '__test__', {}).items()):
                if not isinstance(valname, str):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isfunction(val) or inspect.isclass(val) or
                        inspect.ismethod(val) or inspect.ismodule(val) or
                        isinstance(val, str)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)

        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in list(obj.__dict__.items()):
                # Check if this contained object should be ignored.
                if self._filter(val, name, valname):
                    continue
                # Special handling for staticmethod/classmethod.
                # (Unwrap them so inspect.isfunction below succeeds.)
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = getattr(obj, valname).__func__

                # Recurse to methods, properties, and nested classes.
                if ((inspect.isfunction(val) or inspect.isclass(val) or
                      isinstance(val, property)) and
                      self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)

    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.
        """
        # Extract the object's docstring.  If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, str):
            docstring = obj
        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, str):
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''

        # Find the docstring's location in the file.
        lineno = self._find_lineno(obj, source_lines)

        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None

        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            filename = getattr(module, '__file__', module.__name__)
            # Map compiled-file names back to the source file.
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
            elif sys.platform.startswith('java') and \
                    filename.endswith('$py.class'):
                filename = '%s.py' % filename[:-9]
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)

    def _find_lineno(self, obj, source_lines):
        """
        Return a line number of the given object's docstring.  Note:
        this method assumes that the object has a docstring.
        """
        lineno = None

        # Find the line number for modules.
        if inspect.ismodule(obj):
            lineno = 0

        # Find the line number for classes.
        # Note: this could be fooled if a class is defined multiple
        # times in a single file.
        if inspect.isclass(obj):
            if source_lines is None:
                return None
            pat = re.compile(r'^\s*class\s*%s\b' %
                             getattr(obj, '__name__', '-'))
            for i, line in enumerate(source_lines):
                if pat.match(line):
                    lineno = i
                    break

        # Find the line number for functions & methods.
        # (Unwrap down to the underlying code object first.)
        if inspect.ismethod(obj): obj = obj.__func__
        if inspect.isfunction(obj): obj = obj.__code__
        if inspect.istraceback(obj): obj = obj.tb_frame
        if inspect.isframe(obj): obj = obj.f_code
        if inspect.iscode(obj):
            lineno = getattr(obj, 'co_firstlineno', None)-1

        # Find the line number where the docstring starts.  Assume
        # that it's the first line that begins with a quote mark.
        # Note: this could be fooled by a multiline function
        # signature, where a continuation line begins with a quote
        # mark.
        if lineno is not None:
            if source_lines is None:
                return lineno+1
            pat = re.compile('(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno

        # We couldn't find the line number.
        return None
+
+######################################################################
+## 5. DocTest Runner
+######################################################################
+
class DocTestRunner:
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The `run` method is used to process a single DocTest case.  It
    returns a tuple `(f, t)`, where `t` is the number of test cases
    tried, and `f` is the number of test cases that failed.

    The aggregated number of tried examples and failed examples is
    available via the `tries` and `failures` attributes, and a
    per-test breakdown via `summarize`.
    """
    # This divider string is used to separate failure messages, and to
    # separate sections of the summary.
    DIVIDER = "*" * 70

    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.

        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of doctest examples.

        Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.

        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures.  See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
        self.optionflags = optionflags
        self.original_optionflags = optionflags

        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        # Maps test name -> (failures, tries).
        self._name2ft = {}

        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()

    #/////////////////////////////////////////////////////////////////
    # Reporting methods
    #/////////////////////////////////////////////////////////////////

    def report_start(self, out, test, example):
        """
        Report that the test runner is about to process the given
        example.  (Only displays a message if verbose=True)
        """
        if self._verbose:
            if example.want:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting:\n' + _indent(example.want))
            else:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting nothing\n')

    def report_success(self, out, test, example, got):
        """
        Report that the given example ran successfully.  (Only
        displays a message if verbose=True)
        """
        if self._verbose:
            out("ok\n")

    def report_failure(self, out, test, example, got):
        """
        Report that the given example failed.
        """
        out(self._failure_header(test, example) +
            self._checker.output_difference(example, got, self.optionflags))

    def report_unexpected_exception(self, out, test, example, exc_info):
        """
        Report that the given example raised an unexpected exception.
        """
        out(self._failure_header(test, example) +
            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))

    def _failure_header(self, test, example):
        """
        Return a header string locating the failing example
        (file, line number and test name when available).
        """
        out = [self.DIVIDER]
        if test.filename:
            if test.lineno is not None and example.lineno is not None:
                lineno = test.lineno + example.lineno + 1
            else:
                lineno = '?'
            out.append('File "%s", line %s, in %s' %
                       (test.filename, lineno, test.name))
        else:
            out.append('Line %s, in %s' % (example.lineno+1, test.name))
        out.append('Failed example:')
        source = example.source
        out.append(_indent(source))
        return '\n'.join(out)

    #/////////////////////////////////////////////////////////////////
    # DocTest Running
    #/////////////////////////////////////////////////////////////////

    def __run(self, test, compileflags, out):
        """
        Run the examples in `test`.  Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`.  `compileflags` is the set of compiler
        flags that should be used to execute examples.  Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed.  The examples are run
        in the namespace `test.globs`.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0

        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags

        SUCCESS, FAILURE, BOOM = list(range(3)) # `outcome` state

        check = self._checker.check_output

        # Process each example.
        for examplenum, example in enumerate(test.examples):

            # If REPORT_ONLY_FIRST_FAILURE is set, then supress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)

            # Merge in the example's options.
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in list(example.options.items()):
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag

            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)

            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)

            # Run the example in the given context (globs), and record
            # any exception that gets raised.  (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink!  This is where the user's code gets run.
                exec(compile(example.source, filename, "single",
                             compileflags, 1), test.globs)
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====

            got = self._fakeout.getvalue()  # the actual output
            self._fakeout.truncate(0)
            outcome = FAILURE   # guilty until proved innocent or insane

            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS

            # The example raised an exception:  check if it was expected.
            else:
                # BUGFIX: use the exc_info tuple captured inside the
                # `except` clause above.  In Python 3 the exception
                # state is cleared as soon as the handler exits, so
                # calling sys.exc_info() here would return
                # (None, None, None) and crash the formatting below.
                exc_info = exception
                exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
                if not quiet:
                    got += _exception_traceback(exc_info)

                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM

                # We expected an exception:  see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS

                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    m1 = re.match(r'[^:]*:', example.exc_msg)
                    m2 = re.match(r'[^:]*:', exc_msg)
                    if m1 and m2 and check(m1.group(0), m2.group(0),
                                           self.optionflags):
                        outcome = SUCCESS

            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exc_info)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)

        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags

        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return failures, tries

    def __record_outcome(self, test, f, t):
        """
        Record the fact that the given DocTest (`test`) generated `f`
        failures out of `t` tried examples.
        """
        f2, t2 = self._name2ft.get(test.name, (0,0))
        self._name2ft[test.name] = (f+f2, t+t2)
        self.failures += f
        self.tries += t

    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                         r'(?P<name>[\w\.]+)'
                                         r'\[(?P<examplenum>\d+)\]>$')
    def __patched_linecache_getlines(self, filename):
        """
        Replacement for linecache.getlines that serves the source of
        the currently-running example for the special '<doctest ...>'
        filenames, and defers to the saved original otherwise.
        """
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            return example.source.splitlines(True)
        else:
            return self.save_linecache_getlines(filename)

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.

        The examples are run in the namespace `test.globs`.  If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection.  If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.

        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples.  If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.

        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        self.test = test

        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)

        save_stdout = sys.stdout
        if out is None:
            out = save_stdout.write
        sys.stdout = self._fakeout

        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace

        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines

        try:
            return self.__run(test, compileflags, out)
        finally:
            # Always undo the monkey-patches, even on error.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            if clear_globs:
                test.globs.clear()

    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.

        The optional `verbose` argument controls how detailed the
        summary is.  If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in list(self._name2ft.items()):
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        if verbose:
            if notests:
                print(len(notests), "items had no tests:")
                notests.sort()
                for thing in notests:
                    print("   ", thing)
            if passed:
                print(len(passed), "items passed all tests:")
                passed.sort()
                for thing, count in passed:
                    print(" %3d tests in %s" % (count, thing))
        if failed:
            print(self.DIVIDER)
            print(len(failed), "items had failures:")
            failed.sort()
            for thing, (f, t) in failed:
                print(" %3d of %3d in %s" % (f, t, thing))
        if verbose:
            print(totalt, "tests in", len(self._name2ft), "items.")
            print(totalt - totalf, "passed and", totalf, "failed.")
        if totalf:
            print("***Test Failed***", totalf, "failures.")
        elif verbose:
            print("Test passed.")
        return totalf, totalt

    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
        """
        Fold another runner's per-test statistics into this one,
        summing outcomes for tests present in both.
        """
        d = self._name2ft
        for name, (f, t) in list(other._name2ft.items()):
            if name in d:
                print("*** DocTestRunner.merge: '" + name + "' in both" \
                    " testers; summing outcomes.")
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
+
class OutputChecker:
    """
    A class used to check the whether the actual output from a doctest
    example matches the expected output.  `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`).  These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible.  See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True

        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True

        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        # (Patterns below are raw strings so the regex escapes are not
        # mis-parsed as string escapes.)
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub(r'(?m)^\s*?$', '', got)
            if got == want:
                return True

        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings.  Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True

        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True

        # We didn't find any match; return false.
        return False

    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        # Not unless they asked for a fancy diff.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False

        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not.  In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ##    return False

        # ndiff does intraline difference marking, so can be useful even
        # for 1-line differences.
        if optionflags & REPORT_NDIFF:
            return True

        # The other diff types need at least a few lines to be helpful.
        return want.count('\n') > 2 and got.count('\n') > 2

    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`).  `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want
        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub(r'(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)

        # Check if we should use diff.
        if self._do_a_fancy_diff(want, got, optionflags):
            # Split want & got into lines.
            want_lines = want.splitlines(True)  # True == keep line ends
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if optionflags & REPORT_UDIFF:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif optionflags & REPORT_CDIFF:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif optionflags & REPORT_NDIFF:
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                assert 0, 'Bad diff option'
            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))

        # If we're not using diff, then simply list the expected
        # output followed by the actual output.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        elif want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        elif got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        else:
            return 'Expected nothing\nGot nothing\n'
+
+class DocTestFailure(Exception):
+ """A DocTest example has failed in debugging mode.
+
+ The exception instance has variables:
+
+ - test: the DocTest object being run
+
+ - example: the Example object that failed
+
+ - got: the actual output
+ """
+ def __init__(self, test, example, got):
+ self.test = test
+ self.example = example
+ self.got = got
+
+ def __str__(self):
+ return str(self.test)
+
+class UnexpectedException(Exception):
+ """A DocTest example has encountered an unexpected exception
+
+ The exception instance has variables:
+
+ - test: the DocTest object being run
+
+ - example: the Example object that failed
+
+ - exc_info: the exception info
+ """
+ def __init__(self, test, example, exc_info):
+ self.test = test
+ self.example = example
+ self.exc_info = exc_info
+
+ def __str__(self):
+ return str(self.test)
+
+class DebugRunner(DocTestRunner):
+
+ def run(self, test, compileflags=None, out=None, clear_globs=True):
+ r = DocTestRunner.run(self, test, compileflags, out, False)
+ if clear_globs:
+ test.globs.clear()
+ return r
+
+ def report_unexpected_exception(self, out, test, example, exc_info):
+ raise UnexpectedException(test, example, exc_info)
+
+ def report_failure(self, out, test, example, got):
+ raise DocTestFailure(test, example, got)
+
+######################################################################
+## 6. Test Functions
+######################################################################
+# These should be backwards compatible.
+
+# For backward compatibility, a global instance of a DocTestRunner
+# class, updated by testmod.
+master = None
+
+def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
+ report=True, optionflags=0, extraglobs=None,
+ raise_on_error=False, exclude_empty=False):
+ """m=None, name=None, globs=None, verbose=None, isprivate=None,
+ report=True, optionflags=0, extraglobs=None, raise_on_error=False,
+ exclude_empty=False
+
+ Test examples in docstrings in functions and classes reachable
+ from module m (or the current module if m is not supplied), starting
+ with m.__doc__. Unless isprivate is specified, private names
+ are not skipped.
+
+ Also test examples reachable from dict m.__test__ if it exists and is
+ not None. m.__test__ maps names to functions, classes and strings;
+ function and class docstrings are tested even if the name is private;
+ strings are tested directly, as if they were docstrings.
+
+ Return (#failures, #tests).
+
+ See doctest.__doc__ for an overview.
+
+ Optional keyword arg "name" gives the name of the module; by default
+ use m.__name__.
+
+ Optional keyword arg "globs" gives a dict to be used as the globals
+ when executing examples; by default, use m.__dict__. A copy of this
+ dict is actually used for each docstring, so that each docstring's
+ examples start with a clean slate.
+
+ Optional keyword arg "extraglobs" gives a dictionary that should be
+ merged into the globals that are used to execute examples. By
+ default, no extra globals are used. This is new in 2.4.
+
+ Optional keyword arg "verbose" prints lots of stuff if true, prints
+ only failures if false; by default, it's true iff "-v" is in sys.argv.
+
+ Optional keyword arg "report" prints a summary at the end when true,
+ else prints nothing at the end. In verbose mode, the summary is
+ detailed, else very brief (in fact, empty if all tests passed).
+
+ Optional keyword arg "optionflags" or's together module constants,
+ and defaults to 0. This is new in 2.3. Possible values (see the
+ docs for details):
+
+ DONT_ACCEPT_TRUE_FOR_1
+ DONT_ACCEPT_BLANKLINE
+ NORMALIZE_WHITESPACE
+ ELLIPSIS
+ IGNORE_EXCEPTION_DETAIL
+ REPORT_UDIFF
+ REPORT_CDIFF
+ REPORT_NDIFF
+ REPORT_ONLY_FIRST_FAILURE
+
+ Optional keyword arg "raise_on_error" raises an exception on the
+ first unexpected exception or failure. This allows failures to be
+ post-mortem debugged.
+
+ Deprecated in Python 2.4:
+ Optional keyword arg "isprivate" specifies a function used to
+ determine whether a name is private. The default function is
+ treat all functions as public. Optionally, "isprivate" can be
+ set to doctest.is_private to skip over functions marked as private
+ using the underscore naming convention; see its docs for details.
+
+ Advanced tomfoolery: testmod runs methods of a local instance of
+ class doctest.Tester, then merges the results into (or creates)
+ global Tester instance doctest.master. Methods of doctest.master
+ can be called directly too, if you want to do something unusual.
+ Passing report=0 to testmod is especially useful then, to delay
+ displaying a summary. Invoke doctest.master.summarize(verbose)
+ when you're done fiddling.
+ """
+ global master
+
+ if isprivate is not None:
+ warnings.warn("the isprivate argument is deprecated; "
+ "examine DocTestFinder.find() lists instead",
+ DeprecationWarning)
+
+ # If no module was given, then use __main__.
+ if m is None:
+ # DWA - m will still be None if this wasn't invoked from the command
+ # line, in which case the following TypeError is about as good an error
+ # as we should expect
+ m = sys.modules.get('__main__')
+
+ # Check that we were actually given a module.
+ if not inspect.ismodule(m):
+ raise TypeError("testmod: module required; %r" % (m,))
+
+ # If no name was given, then use the module's name.
+ if name is None:
+ name = m.__name__
+
+ # Find, parse, and run all tests in the given module.
+ finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
+
+ if raise_on_error:
+ runner = DebugRunner(verbose=verbose, optionflags=optionflags)
+ else:
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+
+ for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
+ runner.run(test)
+
+ if report:
+ runner.summarize()
+
+ if master is None:
+ master = runner
+ else:
+ master.merge(runner)
+
+ return runner.failures, runner.tries
+
+def testfile(filename, module_relative=True, name=None, package=None,
+ globs=None, verbose=None, report=True, optionflags=0,
+ extraglobs=None, raise_on_error=False, parser=DocTestParser()):
+ """
+ Test examples in the given file. Return (#failures, #tests).
+
+ Optional keyword arg "module_relative" specifies how filenames
+ should be interpreted:
+
+ - If "module_relative" is True (the default), then "filename"
+ specifies a module-relative path. By default, this path is
+ relative to the calling module's directory; but if the
+ "package" argument is specified, then it is relative to that
+ package. To ensure os-independence, "filename" should use
+ "/" characters to separate path segments, and should not
+ be an absolute path (i.e., it may not begin with "/").
+
+ - If "module_relative" is False, then "filename" specifies an
+ os-specific path. The path may be absolute or relative (to
+ the current working directory).
+
+ Optional keyword arg "name" gives the name of the test; by default
+ use the file's basename.
+
+ Optional keyword argument "package" is a Python package or the
+ name of a Python package whose directory should be used as the
+ base directory for a module relative filename. If no package is
+ specified, then the calling module's directory is used as the base
+ directory for module relative filenames. It is an error to
+ specify "package" if "module_relative" is False.
+
+ Optional keyword arg "globs" gives a dict to be used as the globals
+ when executing examples; by default, use {}. A copy of this dict
+ is actually used for each docstring, so that each docstring's
+ examples start with a clean slate.
+
+ Optional keyword arg "extraglobs" gives a dictionary that should be
+ merged into the globals that are used to execute examples. By
+ default, no extra globals are used.
+
+ Optional keyword arg "verbose" prints lots of stuff if true, prints
+ only failures if false; by default, it's true iff "-v" is in sys.argv.
+
+ Optional keyword arg "report" prints a summary at the end when true,
+ else prints nothing at the end. In verbose mode, the summary is
+ detailed, else very brief (in fact, empty if all tests passed).
+
+ Optional keyword arg "optionflags" or's together module constants,
+ and defaults to 0. Possible values (see the docs for details):
+
+ DONT_ACCEPT_TRUE_FOR_1
+ DONT_ACCEPT_BLANKLINE
+ NORMALIZE_WHITESPACE
+ ELLIPSIS
+ IGNORE_EXCEPTION_DETAIL
+ REPORT_UDIFF
+ REPORT_CDIFF
+ REPORT_NDIFF
+ REPORT_ONLY_FIRST_FAILURE
+
+ Optional keyword arg "raise_on_error" raises an exception on the
+ first unexpected exception or failure. This allows failures to be
+ post-mortem debugged.
+
+ Optional keyword arg "parser" specifies a DocTestParser (or
+ subclass) that should be used to extract tests from the files.
+
+ Advanced tomfoolery: testmod runs methods of a local instance of
+ class doctest.Tester, then merges the results into (or creates)
+ global Tester instance doctest.master. Methods of doctest.master
+ can be called directly too, if you want to do something unusual.
+ Passing report=0 to testmod is especially useful then, to delay
+ displaying a summary. Invoke doctest.master.summarize(verbose)
+ when you're done fiddling.
+ """
+ global master
+
+ if package and not module_relative:
+ raise ValueError("Package may only be specified for module-"
+ "relative paths.")
+
+ # Relativize the path
+ if module_relative:
+ package = _normalize_module(package)
+ filename = _module_relative_path(package, filename)
+
+ # If no name was given, then use the file's name.
+ if name is None:
+ name = os.path.basename(filename)
+
+ # Assemble the globals.
+ if globs is None:
+ globs = {}
+ else:
+ globs = globs.copy()
+ if extraglobs is not None:
+ globs.update(extraglobs)
+
+ if raise_on_error:
+ runner = DebugRunner(verbose=verbose, optionflags=optionflags)
+ else:
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+
+ # Read the file, convert it to a test, and run it.
+ s = open(filename).read()
+ test = parser.get_doctest(s, globs, name, filename, 0)
+ runner.run(test)
+
+ if report:
+ runner.summarize()
+
+ if master is None:
+ master = runner
+ else:
+ master.merge(runner)
+
+ return runner.failures, runner.tries
+
+def run_docstring_examples(f, globs, verbose=False, name="NoName",
+ compileflags=None, optionflags=0):
+ """
+ Test examples in the given object's docstring (`f`), using `globs`
+ as globals. Optional argument `name` is used in failure messages.
+ If the optional argument `verbose` is true, then generate output
+ even if there are no failures.
+
+ `compileflags` gives the set of flags that should be used by the
+ Python compiler when running the examples. If not specified, then
+ it will default to the set of future-import flags that apply to
+ `globs`.
+
+ Optional keyword arg `optionflags` specifies options for the
+ testing and output. See the documentation for `testmod` for more
+ information.
+ """
+ # Find, parse, and run all tests in the given module.
+ finder = DocTestFinder(verbose=verbose, recurse=False)
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+ for test in finder.find(f, name, globs=globs):
+ runner.run(test, compileflags=compileflags)
+
+######################################################################
+## 7. Tester
+######################################################################
+# This is provided only for backwards compatibility. It's not
+# actually used in any way.
+
+class Tester:
+ def __init__(self, mod=None, globs=None, verbose=None,
+ isprivate=None, optionflags=0):
+
+ warnings.warn("class Tester is deprecated; "
+ "use class doctest.DocTestRunner instead",
+ DeprecationWarning, stacklevel=2)
+ if mod is None and globs is None:
+ raise TypeError("Tester.__init__: must specify mod or globs")
+ if mod is not None and not inspect.ismodule(mod):
+ raise TypeError("Tester.__init__: mod must be a module; %r" %
+ (mod,))
+ if globs is None:
+ globs = mod.__dict__
+ self.globs = globs
+
+ self.verbose = verbose
+ self.isprivate = isprivate
+ self.optionflags = optionflags
+ self.testfinder = DocTestFinder(_namefilter=isprivate)
+ self.testrunner = DocTestRunner(verbose=verbose,
+ optionflags=optionflags)
+
+ def runstring(self, s, name):
+ test = DocTestParser().get_doctest(s, self.globs, name, None, None)
+ if self.verbose:
+ print("Running string", name)
+ (f,t) = self.testrunner.run(test)
+ if self.verbose:
+ print(f, "of", t, "examples failed in string", name)
+ return (f,t)
+
+ def rundoc(self, object, name=None, module=None):
+ f = t = 0
+ tests = self.testfinder.find(object, name, module=module,
+ globs=self.globs)
+ for test in tests:
+ (f2, t2) = self.testrunner.run(test)
+ (f,t) = (f+f2, t+t2)
+ return (f,t)
+
+ def rundict(self, d, name, module=None):
+ import new
+ m = new.module(name)
+ m.__dict__.update(d)
+ if module is None:
+ module = False
+ return self.rundoc(m, name, module)
+
+ def run__test__(self, d, name):
+ import new
+ m = new.module(name)
+ m.__test__ = d
+ return self.rundoc(m, name)
+
+ def summarize(self, verbose=None):
+ return self.testrunner.summarize(verbose)
+
+ def merge(self, other):
+ self.testrunner.merge(other.testrunner)
+
+######################################################################
+## 8. Unittest Support
+######################################################################
+
+_unittest_reportflags = 0
+
+def set_unittest_reportflags(flags):
+ global _unittest_reportflags
+
+ if (flags & REPORTING_FLAGS) != flags:
+ raise ValueError("Only reporting flags allowed", flags)
+ old = _unittest_reportflags
+ _unittest_reportflags = flags
+ return old
+
+
+class DocTestCase(unittest.TestCase):
+
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None):
+
+ unittest.TestCase.__init__(self)
+ self._dt_optionflags = optionflags
+ self._dt_checker = checker
+ self._dt_test = test
+ self._dt_setUp = setUp
+ self._dt_tearDown = tearDown
+
+ def setUp(self):
+ test = self._dt_test
+
+ if self._dt_setUp is not None:
+ self._dt_setUp(test)
+
+ def tearDown(self):
+ test = self._dt_test
+
+ if self._dt_tearDown is not None:
+ self._dt_tearDown(test)
+
+ test.globs.clear()
+
+ def runTest(self):
+ test = self._dt_test
+ old = sys.stdout
+ new = StringIO()
+ optionflags = self._dt_optionflags
+
+ if not (optionflags & REPORTING_FLAGS):
+ # The option flags don't include any reporting flags,
+ # so add the default reporting flags
+ optionflags |= _unittest_reportflags
+
+ runner = DocTestRunner(optionflags=optionflags,
+ checker=self._dt_checker, verbose=False)
+
+ try:
+ runner.DIVIDER = "-"*70
+ failures, tries = runner.run(
+ test, out=new.write, clear_globs=False)
+ finally:
+ sys.stdout = old
+
+ if failures:
+ raise self.failureException(self.format_failure(new.getvalue()))
+
+ def format_failure(self, err):
+ test = self._dt_test
+ if test.lineno is None:
+ lineno = 'unknown line number'
+ else:
+ lineno = '%s' % test.lineno
+ lname = '.'.join(test.name.split('.')[-1:])
+ return ('Failed doctest test for %s\n'
+ ' File "%s", line %s, in %s\n\n%s'
+ % (test.name, test.filename, lineno, lname, err)
+ )
+
+ def debug(self):
+ self.setUp()
+ runner = DebugRunner(optionflags=self._dt_optionflags,
+ checker=self._dt_checker, verbose=False)
+ runner.run(self._dt_test)
+ self.tearDown()
+
+ def id(self):
+ return self._dt_test.name
+
+ def __repr__(self):
+ name = self._dt_test.name.split('.')
+ return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
+
+ __str__ = __repr__
+
+ def shortDescription(self):
+ return "Doctest: " + self._dt_test.name
+
+def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
+ **options):
+ """
+ Convert doctest tests for a module to a unittest test suite.
+
+ This converts each documentation string in a module that
+ contains doctest tests to a unittest test case. If any of the
+ tests in a doc string fail, then the test case fails. An exception
+ is raised showing the name of the file containing the test and a
+ (sometimes approximate) line number.
+
+ The `module` argument provides the module to be tested. The argument
+ can be either a module or a module name.
+
+ If no argument is given, the calling module is used.
+
+ A number of options may be provided as keyword arguments:
+
+ setUp
+ A set-up function. This is called before running the
+ tests in each file. The setUp function will be passed a DocTest
+ object. The setUp function can access the test globals as the
+ globs attribute of the test passed.
+
+ tearDown
+ A tear-down function. This is called after running the
+ tests in each file. The tearDown function will be passed a DocTest
+ object. The tearDown function can access the test globals as the
+ globs attribute of the test passed.
+
+ globs
+ A dictionary containing initial global variables for the tests.
+
+ optionflags
+ A set of doctest option flags expressed as an integer.
+ """
+
+ if test_finder is None:
+ test_finder = DocTestFinder()
+
+ module = _normalize_module(module)
+ tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
+ if globs is None:
+ globs = module.__dict__
+ if not tests:
+ # Why do we want to do this? Because it reveals a bug that might
+ # otherwise be hidden.
+ raise ValueError(module, "has no tests")
+
+ tests.sort()
+ suite = unittest.TestSuite()
+ for test in tests:
+ if len(test.examples) == 0:
+ continue
+ if not test.filename:
+ filename = module.__file__
+ if filename[-4:] in (".pyc", ".pyo"):
+ filename = filename[:-1]
+ elif sys.platform.startswith('java') and \
+ filename.endswith('$py.class'):
+ filename = '%s.py' % filename[:-9]
+ test.filename = filename
+ suite.addTest(DocTestCase(test, **options))
+
+ return suite
+
+class DocFileCase(DocTestCase):
+
+ def id(self):
+ return '_'.join(self._dt_test.name.split('.'))
+
+ def __repr__(self):
+ return self._dt_test.filename
+ __str__ = __repr__
+
+ def format_failure(self, err):
+ return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
+ % (self._dt_test.name, self._dt_test.filename, err)
+ )
+
+def DocFileTest(path, module_relative=True, package=None,
+ globs=None, parser=DocTestParser(), **options):
+ if globs is None:
+ globs = {}
+
+ if package and not module_relative:
+ raise ValueError("Package may only be specified for module-"
+ "relative paths.")
+
+ # Relativize the path.
+ if module_relative:
+ package = _normalize_module(package)
+ path = _module_relative_path(package, path)
+
+ # Find the file and read it.
+ name = os.path.basename(path)
+ doc = open(path).read()
+
+ # Convert it to a test, and wrap it in a DocFileCase.
+ test = parser.get_doctest(doc, globs, name, path, 0)
+ return DocFileCase(test, **options)
+
+def DocFileSuite(*paths, **kw):
+ """A unittest suite for one or more doctest files.
+
+ The path to each doctest file is given as a string; the
+ interpretation of that string depends on the keyword argument
+ "module_relative".
+
+ A number of options may be provided as keyword arguments:
+
+ module_relative
+ If "module_relative" is True, then the given file paths are
+ interpreted as os-independent module-relative paths. By
+ default, these paths are relative to the calling module's
+ directory; but if the "package" argument is specified, then
+ they are relative to that package. To ensure os-independence,
+ "filename" should use "/" characters to separate path
+ segments, and may not be an absolute path (i.e., it may not
+ begin with "/").
+
+ If "module_relative" is False, then the given file paths are
+ interpreted as os-specific paths. These paths may be absolute
+ or relative (to the current working directory).
+
+ package
+ A Python package or the name of a Python package whose directory
+ should be used as the base directory for module relative paths.
+ If "package" is not specified, then the calling module's
+ directory is used as the base directory for module relative
+ filenames. It is an error to specify "package" if
+ "module_relative" is False.
+
+ setUp
+ A set-up function. This is called before running the
+ tests in each file. The setUp function will be passed a DocTest
+ object. The setUp function can access the test globals as the
+ globs attribute of the test passed.
+
+ tearDown
+ A tear-down function. This is called after running the
+ tests in each file. The tearDown function will be passed a DocTest
+ object. The tearDown function can access the test globals as the
+ globs attribute of the test passed.
+
+ globs
+ A dictionary containing initial global variables for the tests.
+
+ optionflags
+ A set of doctest option flags expressed as an integer.
+
+ parser
+ A DocTestParser (or subclass) that should be used to extract
+ tests from the files.
+ """
+ suite = unittest.TestSuite()
+
+ # We do this here so that _normalize_module is called at the right
+ # level. If it were called in DocFileTest, then this function
+ # would be the caller and we might guess the package incorrectly.
+ if kw.get('module_relative', True):
+ kw['package'] = _normalize_module(kw.get('package'))
+
+ for path in paths:
+ suite.addTest(DocFileTest(path, **kw))
+
+ return suite
+
+######################################################################
+## 9. Debugging Support
+######################################################################
+
+def script_from_examples(s):
+ output = []
+ for piece in DocTestParser().parse(s):
+ if isinstance(piece, Example):
+ # Add the example's source code (strip trailing NL)
+ output.append(piece.source[:-1])
+ # Add the expected output:
+ want = piece.want
+ if want:
+ output.append('# Expected:')
+ output += ['## '+l for l in want.split('\n')[:-1]]
+ else:
+ # Add non-example text.
+ output += [_comment_line(l)
+ for l in piece.split('\n')[:-1]]
+
+ # Trim junk on both ends.
+ while output and output[-1] == '#':
+ output.pop()
+ while output and output[0] == '#':
+ output.pop(0)
+ # Combine the output, and return it.
+ # Add a courtesy newline to prevent exec from choking (see bug #1172785)
+ return '\n'.join(output) + '\n'
+
+def testsource(module, name):
+ """Extract the test sources from a doctest docstring as a script.
+
+ Provide the module (or dotted name of the module) containing the
+ test to be debugged and the name (within the module) of the object
+ with the doc string with tests to be debugged.
+ """
+ module = _normalize_module(module)
+ tests = DocTestFinder().find(module)
+ test = [t for t in tests if t.name == name]
+ if not test:
+ raise ValueError(name, "not found in tests")
+ test = test[0]
+ testsrc = script_from_examples(test.docstring)
+ return testsrc
+
+def debug_src(src, pm=False, globs=None):
+ """Debug a single doctest docstring, in argument `src`."""
+ testsrc = script_from_examples(src)
+ debug_script(testsrc, pm, globs)
+
+def debug_script(src, pm=False, globs=None):
+ "Debug a test script. `src` is the script, as a string."
+ import pdb
+
+ # Note that tempfile.NamedTemporaryFile() cannot be used. As the
+ # docs say, a file so created cannot be opened by name a second time
+ # on modern Windows boxes, and execfile() needs to open it.
+ srcfilename = tempfile.mktemp(".py", "doctestdebug")
+ f = open(srcfilename, 'w')
+ f.write(src)
+ f.close()
+
+ try:
+ if globs:
+ globs = globs.copy()
+ else:
+ globs = {}
+
+ if pm:
+ try:
+ exec(compile(open(srcfilename).read(), srcfilename, 'exec'), globs, globs)
+ except:
+ print(sys.exc_info()[1])
+ pdb.post_mortem(sys.exc_info()[2])
+ else:
+ # Note that %r is vital here. '%s' instead can, e.g., cause
+ # backslashes to get treated as metacharacters on Windows.
+ pdb.run("execfile(%r)" % srcfilename, globs, globs)
+
+ finally:
+ os.remove(srcfilename)
+
+def debug(module, name, pm=False):
+ """Debug a single doctest docstring.
+
+ Provide the module (or dotted name of the module) containing the
+ test to be debugged and the name (within the module) of the object
+ with the docstring with tests to be debugged.
+ """
+ module = _normalize_module(module)
+ testsrc = testsource(module, name)
+ debug_script(testsrc, pm, module.__dict__)
+
+
+__test__ = {}
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/failure.py b/scripts/external_libs/nose-1.3.4/python3/nose/failure.py
new file mode 100644
index 00000000..dad5253e
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/failure.py
@@ -0,0 +1,42 @@
+import logging
+import unittest
+from traceback import format_tb
+from nose.pyversion import is_base_exception
+
+log = logging.getLogger(__name__)
+
+
+__all__ = ['Failure']
+
+
+class Failure(unittest.TestCase):
+ """Unloadable or unexecutable test.
+
+ A Failure case is placed in a test suite to indicate the presence of a
+ test that could not be loaded or executed. A common example is a test
+ module that fails to import.
+
+ """
+ __test__ = False # do not collect
+ def __init__(self, exc_class, exc_val, tb=None, address=None):
+ log.debug("A failure! %s %s %s", exc_class, exc_val, format_tb(tb))
+ self.exc_class = exc_class
+ self.exc_val = exc_val
+ self.tb = tb
+ self._address = address
+ unittest.TestCase.__init__(self)
+
+ def __str__(self):
+ return "Failure: %s (%s)" % (
+ getattr(self.exc_class, '__name__', self.exc_class), self.exc_val)
+
+ def address(self):
+ return self._address
+
+ def runTest(self):
+ if self.tb is not None:
+ if is_base_exception(self.exc_val):
+ raise self.exc_val.with_traceback(self.tb)
+ raise self.exc_class(self.exc_val).with_traceback(self.tb)
+ else:
+ raise self.exc_class(self.exc_val)
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/importer.py b/scripts/external_libs/nose-1.3.4/python3/nose/importer.py
new file mode 100644
index 00000000..e677658c
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/importer.py
@@ -0,0 +1,167 @@
+"""Implements an importer that looks only in specific path (ignoring
+sys.path), and uses a per-path cache in addition to sys.modules. This is
+necessary because test modules in different directories frequently have the
+same names, which means that the first loaded would mask the rest when using
+the builtin importer.
+"""
+import logging
+import os
+import sys
+from nose.config import Config
+
+from imp import find_module, load_module, acquire_lock, release_lock
+
+log = logging.getLogger(__name__)
+
+try:
+ _samefile = os.path.samefile
+except AttributeError:
+ def _samefile(src, dst):
+ return (os.path.normcase(os.path.realpath(src)) ==
+ os.path.normcase(os.path.realpath(dst)))
+
+
+class Importer(object):
+ """An importer class that does only path-specific imports. That
+ is, the given module is not searched for on sys.path, but only at
+ the path or in the directory specified.
+ """
+ def __init__(self, config=None):
+ if config is None:
+ config = Config()
+ self.config = config
+
+ def importFromPath(self, path, fqname):
+ """Import a dotted-name package whose tail is at path. In other words,
+ given foo.bar and path/to/foo/bar.py, import foo from path/to/foo then
+ bar from path/to/foo/bar, returning bar.
+ """
+ # find the base dir of the package
+ path_parts = os.path.normpath(os.path.abspath(path)).split(os.sep)
+ name_parts = fqname.split('.')
+ if path_parts[-1] == '__init__.py':
+ path_parts.pop()
+ path_parts = path_parts[:-(len(name_parts))]
+ dir_path = os.sep.join(path_parts)
+ # then import fqname starting from that dir
+ return self.importFromDir(dir_path, fqname)
+
+ def importFromDir(self, dir, fqname):
+ """Import a module *only* from path, ignoring sys.path and
+ reloading if the version in sys.modules is not the one we want.
+ """
+ dir = os.path.normpath(os.path.abspath(dir))
+ log.debug("Import %s from %s", fqname, dir)
+
+ # FIXME reimplement local per-dir cache?
+
+ # special case for __main__
+ if fqname == '__main__':
+ return sys.modules[fqname]
+
+ if self.config.addPaths:
+ add_path(dir, self.config)
+
+ path = [dir]
+ parts = fqname.split('.')
+ part_fqname = ''
+ mod = parent = fh = None
+
+ for part in parts:
+ if part_fqname == '':
+ part_fqname = part
+ else:
+ part_fqname = "%s.%s" % (part_fqname, part)
+ try:
+ acquire_lock()
+ log.debug("find module part %s (%s) in %s",
+ part, part_fqname, path)
+ fh, filename, desc = find_module(part, path)
+ old = sys.modules.get(part_fqname)
+ if old is not None:
+ # test modules frequently have name overlap; make sure
+ # we get a fresh copy of anything we are trying to load
+ # from a new path
+ log.debug("sys.modules has %s as %s", part_fqname, old)
+ if (self.sameModule(old, filename)
+ or (self.config.firstPackageWins and
+ getattr(old, '__path__', None))):
+ mod = old
+ else:
+ del sys.modules[part_fqname]
+ mod = load_module(part_fqname, fh, filename, desc)
+ else:
+ mod = load_module(part_fqname, fh, filename, desc)
+ finally:
+ if fh:
+ fh.close()
+ release_lock()
+ if parent:
+ setattr(parent, part, mod)
+ if hasattr(mod, '__path__'):
+ path = mod.__path__
+ parent = mod
+ return mod
+
+ def _dirname_if_file(self, filename):
+ # We only take the dirname if we have a path to a non-dir,
+ # because taking the dirname of a symlink to a directory does not
+ # give the actual directory parent.
+ if os.path.isdir(filename):
+ return filename
+ else:
+ return os.path.dirname(filename)
+
+ def sameModule(self, mod, filename):
+ mod_paths = []
+ if hasattr(mod, '__path__'):
+ for path in mod.__path__:
+ mod_paths.append(self._dirname_if_file(path))
+ elif hasattr(mod, '__file__'):
+ mod_paths.append(self._dirname_if_file(mod.__file__))
+ else:
+ # builtin or other module-like object that
+ # doesn't have __file__; must be new
+ return False
+ new_path = self._dirname_if_file(filename)
+ for mod_path in mod_paths:
+ log.debug(
+ "module already loaded? mod: %s new: %s",
+ mod_path, new_path)
+ if _samefile(mod_path, new_path):
+ return True
+ return False
+
+
+def add_path(path, config=None):
+ """Ensure that the path, or the root of the current package (if
+ path is in a package), is in sys.path.
+ """
+
+ # FIXME add any src-looking dirs seen too... need to get config for that
+
+ log.debug('Add path %s' % path)
+ if not path:
+ return []
+ added = []
+ parent = os.path.dirname(path)
+ if (parent
+ and os.path.exists(os.path.join(path, '__init__.py'))):
+ added.extend(add_path(parent, config))
+ elif not path in sys.path:
+ log.debug("insert %s into sys.path", path)
+ sys.path.insert(0, path)
+ added.append(path)
+ if config and config.srcDirs:
+ for dirname in config.srcDirs:
+ dirpath = os.path.join(path, dirname)
+ if os.path.isdir(dirpath):
+ sys.path.insert(0, dirpath)
+ added.append(dirpath)
+ return added
+
+
+def remove_path(path):
+ log.debug('Remove path %s' % path)
+ if path in sys.path:
+ sys.path.remove(path)
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/inspector.py b/scripts/external_libs/nose-1.3.4/python3/nose/inspector.py
new file mode 100644
index 00000000..2444d9e7
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/inspector.py
@@ -0,0 +1,208 @@
+"""Simple traceback introspection. Used to add additional information to
+AssertionErrors in tests, so that failure messages may be more informative.
+"""
+import inspect
+import logging
+import re
+import sys
+import textwrap
+import tokenize
+import collections
+
+try:
+ from io import StringIO
+except ImportError:
+ from io import StringIO
+
+log = logging.getLogger(__name__)
+
+def inspect_traceback(tb):
+    """Inspect a traceback and its frame, returning source for the expression
+    where the exception was raised, with simple variable replacement performed
+    and the line on which the exception was raised marked with '>>'
+
+    Returns '' when no source could be expanded for the frame.
+    """
+    log.debug('inspect traceback %s', tb)
+
+    # we only want the innermost frame, where the exception was raised
+    while tb.tb_next:
+        tb = tb.tb_next
+
+    frame = tb.tb_frame
+    lines, exc_line = tbsource(tb)
+
+    # figure out the set of lines to grab.
+    inspect_lines, mark_line = find_inspectable_lines(lines, exc_line)
+    src = StringIO(textwrap.dedent(''.join(inspect_lines)))
+    exp = Expander(frame.f_locals, frame.f_globals)
+
+    # Tokenize the region; on a tokenizer error, shrink the region by
+    # dropping its first line and retry until it tokenizes (or is empty).
+    while inspect_lines:
+        try:
+            for tok in tokenize.generate_tokens(src.readline):
+                exp(*tok)
+        except tokenize.TokenError as e:
+            # this can happen if our inspectable region happens to butt up
+            # against the end of a construct like a docstring with the closing
+            # """ on separate line
+            log.debug("Tokenizer error: %s", e)
+            inspect_lines.pop(0)
+            mark_line -= 1
+            src = StringIO(textwrap.dedent(''.join(inspect_lines)))
+            exp = Expander(frame.f_locals, frame.f_globals)
+            continue
+        break
+    padded = []
+    if exp.expanded_source:
+        exp_lines = exp.expanded_source.split('\n')
+        ep = 0
+        for line in exp_lines:
+            if ep == mark_line:
+                # mark the raising line; other lines get matching padding
+                padded.append('>> ' + line)
+            else:
+                padded.append('   ' + line)
+            ep += 1
+    return '\n'.join(padded)
+
+
+def tbsource(tb, context=6):
+    """Get source from a traceback object.
+
+    A tuple of two things is returned: a list of lines of context from
+    the source code, and the index of the current line within that list.
+    The optional second argument specifies the number of lines of context
+    to return, which are centered around the current line.
+
+    .. Note ::
+       This is adapted from inspect.py in the python 2.4 standard library,
+       since a bug in the 2.3 version of inspect prevents it from correctly
+       locating source lines in a traceback frame.
+    """
+
+    lineno = tb.tb_lineno
+    frame = tb.tb_frame
+
+    if context > 0:
+        # center the context window on the raising line
+        start = lineno - 1 - context//2
+        log.debug("lineno: %s start: %s", lineno, start)
+
+        try:
+            lines, dummy = inspect.findsource(frame)
+        except IOError:
+            # source not available (e.g. interactive or compiled-away)
+            lines, index = [''], 0
+        else:
+            all_lines = lines
+            start = max(start, 1)
+            # clamp the window to the bounds of the file
+            start = max(0, min(start, len(lines) - context))
+            lines = lines[start:start+context]
+            index = lineno - 1 - start
+
+            # python 2.5 compat: if previous line ends in a continuation,
+            # decrement start by 1 to match 2.4 behavior
+            if sys.version_info >= (2, 5) and index > 0:
+                while lines[index-1].strip().endswith('\\'):
+                    start -= 1
+                    lines = all_lines[start:start+context]
+    else:
+        lines, index = [''], 0
+    log.debug("tbsource lines '''%s''' around index %s", lines, index)
+    return (lines, index)
+
+
+def find_inspectable_lines(lines, pos):
+    """Find lines in home that are inspectable.
+
+    Walk back from the err line up to 3 lines, but don't walk back over
+    changes in indent level.
+
+    Walk forward up to 3 lines, counting \\ separated lines as 1. Don't walk
+    over changes in indent level (unless part of an extended line)
+    """
+    # cnt: line ends with a continuation backslash
+    cnt = re.compile(r'\\[\s\n]*$')
+    # df: line ends with ':' (opens a new suite) -- stop there
+    df = re.compile(r':[\s\n]*$')
+    # ind: captures a line's leading whitespace
+    ind = re.compile(r'^(\s*)')
+    toinspect = []
+    home = lines[pos]
+    home_indent = ind.match(home).groups()[0]
+
+    before = lines[max(pos-3, 0):pos]
+    before.reverse()
+    after = lines[pos+1:min(pos+4, len(lines))]
+
+    # walk backwards while the indent level matches the raising line
+    for line in before:
+        if ind.match(line).groups()[0] == home_indent:
+            toinspect.append(line)
+        else:
+            break
+    toinspect.reverse()
+    toinspect.append(home)
+    home_pos = len(toinspect)-1
+    continued = cnt.search(home)
+    # walk forwards through same-indent or backslash-continued lines
+    for line in after:
+        if ((continued or ind.match(line).groups()[0] == home_indent)
+                and not df.search(line)):
+            toinspect.append(line)
+            continued = cnt.search(line)
+        else:
+            break
+    log.debug("Inspecting lines '''%s''' around %s", toinspect, home_pos)
+    return toinspect, home_pos
+
+
+class Expander:
+ """Simple expression expander. Uses tokenize to find the names and
+ expands any that can be looked up in the frame.
+ """
+ def __init__(self, locals, globals):
+ self.locals = locals
+ self.globals = globals
+ self.lpos = None
+ self.expanded_source = ''
+
+ def __call__(self, ttype, tok, start, end, line):
+ # TODO
+ # deal with unicode properly
+
+ # TODO
+ # Dealing with instance members
+ # always keep the last thing seen
+ # if the current token is a dot,
+ # get ready to getattr(lastthing, this thing) on the
+ # next call.
+
+ if self.lpos is not None:
+ if start[1] >= self.lpos:
+ self.expanded_source += ' ' * (start[1]-self.lpos)
+ elif start[1] < self.lpos:
+ # newline, indent correctly
+ self.expanded_source += ' ' * start[1]
+ self.lpos = end[1]
+
+ if ttype == tokenize.INDENT:
+ pass
+ elif ttype == tokenize.NAME:
+ # Clean this junk up
+ try:
+ val = self.locals[tok]
+ if isinstance(val, collections.Callable):
+ val = tok
+ else:
+ val = repr(val)
+ except KeyError:
+ try:
+ val = self.globals[tok]
+ if isinstance(val, collections.Callable):
+ val = tok
+ else:
+ val = repr(val)
+
+ except KeyError:
+ val = tok
+ # FIXME... not sure how to handle things like funcs, classes
+ # FIXME this is broken for some unicode strings
+ self.expanded_source += val
+ else:
+ self.expanded_source += tok
+ # if this is the end of the line and the line ends with
+ # \, then tack a \ and newline onto the output
+ # print line[end[1]:]
+ if re.match(r'\s+\\\n', line[end[1]:]):
+ self.expanded_source += ' \\\n'
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/loader.py b/scripts/external_libs/nose-1.3.4/python3/nose/loader.py
new file mode 100644
index 00000000..c364686d
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/loader.py
@@ -0,0 +1,619 @@
+"""
+Test Loader
+-----------
+
+nose's test loader implements the same basic functionality as its
+superclass, unittest.TestLoader, but extends it by more liberal
+interpretations of what may be a test and how a test may be named.
+"""
+
+
+import logging
+import os
+import sys
+import unittest
+import types
+from inspect import isfunction
+from nose.pyversion import unbound_method, ismethod
+from nose.case import FunctionTestCase, MethodTestCase
+from nose.failure import Failure
+from nose.config import Config
+from nose.importer import Importer, add_path, remove_path
+from nose.selector import defaultSelector, TestAddress
+from nose.util import func_lineno, getpackage, isclass, isgenerator, \
+ ispackage, regex_last_key, resolve_name, transplant_func, \
+ transplant_class, test_address
+from nose.suite import ContextSuiteFactory, ContextList, LazySuite
+from nose.pyversion import sort_list, cmp_to_key
+import collections
+
+
+log = logging.getLogger(__name__)
+#log.setLevel(logging.DEBUG)
+
+# for efficiency and easier mocking
+op_normpath = os.path.normpath
+op_abspath = os.path.abspath
+op_join = os.path.join
+op_isdir = os.path.isdir
+op_isfile = os.path.isfile
+
+
+__all__ = ['TestLoader', 'defaultTestLoader']
+
+
+class TestLoader(unittest.TestLoader):
+    """Test loader that extends unittest.TestLoader to:
+
+    * Load tests from test-like functions and classes that are not
+      unittest.TestCase subclasses
+    * Find and load test modules in a directory
+    * Support tests that are generators
+    * Support easy extensions of or changes to that behavior through plugins
+    """
+    # Class-level defaults; every one of these is assigned per-instance
+    # in __init__ below.
+    config = None
+    importer = None
+    workingDir = None
+    selector = None
+    suiteClass = None
+
+    def __init__(self, config=None, importer=None, workingDir=None,
+                 selector=None):
+        """Initialize a test loader.
+
+        Parameters (all optional):
+
+        * config: provide a `nose.config.Config`_ or other config class
+          instance; if not provided a `nose.config.Config`_ with
+          default values is used.
+        * importer: provide an importer instance that implements
+          `importFromPath`. If not provided, a
+          `nose.importer.Importer`_ is used.
+        * workingDir: the directory to which file and module names are
+          relative. If not provided, assumed to be the current working
+          directory.
+        * selector: a selector class or instance. If a class is
+          provided, it will be instantiated with one argument, the
+          current config. If not provided, a `nose.selector.Selector`_
+          is used.
+        """
+        if config is None:
+            config = Config()
+        if importer is None:
+            importer = Importer(config=config)
+        if workingDir is None:
+            workingDir = config.workingDir
+        if selector is None:
+            selector = defaultSelector(config)
+        elif isclass(selector):
+            # a selector class was passed: instantiate it with the config
+            selector = selector(config)
+        self.config = config
+        self.importer = importer
+        self.workingDir = op_normpath(op_abspath(workingDir))
+        self.selector = selector
+        if config.addPaths:
+            add_path(workingDir, config)
+        self.suiteClass = ContextSuiteFactory(config=config)
+
+        # Paths already yielded by loadTestsFromDir, so PEP 420 namespace
+        # package aliasing cannot cause the same entry to be collected twice.
+        self._visitedPaths = set([])
+
+        unittest.TestLoader.__init__(self)
+
+    def getTestCaseNames(self, testCaseClass):
+        """Override to select with selector, unless
+        config.getTestCaseNamesCompat is True
+        """
+        if self.config.getTestCaseNamesCompat:
+            return unittest.TestLoader.getTestCaseNames(self, testCaseClass)
+
+        def wanted(attr, cls=testCaseClass, sel=self.selector):
+            # plain functions are wrapped as unbound methods (Python 3)
+            # before being offered to the selector
+            item = getattr(cls, attr, None)
+            if isfunction(item):
+                item = unbound_method(cls, item)
+            elif not ismethod(item):
+                return False
+            return sel.wantMethod(item)
+
+        cases = list(filter(wanted, dir(testCaseClass)))
+
+        # add runTest if nothing else picked
+        if not cases and hasattr(testCaseClass, 'runTest'):
+            cases = ['runTest']
+        if self.sortTestMethodsUsing:
+            sort_list(cases, cmp_to_key(self.sortTestMethodsUsing))
+        return cases
+
+ def _haveVisited(self, path):
+ # For cases where path is None, we always pretend we haven't visited
+ # them.
+ if path is None:
+ return False
+
+ return path in self._visitedPaths
+
+ def _addVisitedPath(self, path):
+ if path is not None:
+ self._visitedPaths.add(path)
+
+    def loadTestsFromDir(self, path):
+        """Load tests from the directory at path. This is a generator
+        -- each suite of tests from a module or other file is yielded
+        and is expected to be executed before the next file is
+        examined.
+        """
+        log.debug("load from dir %s", path)
+        plugins = self.config.plugins
+        plugins.beforeDirectory(path)
+        # NOTE: paths_added is only bound when addPaths is set; the matching
+        # cleanup at the bottom is guarded by the same condition.
+        if self.config.addPaths:
+            paths_added = add_path(path, self.config)
+
+        entries = os.listdir(path)
+        sort_list(entries, regex_last_key(self.config.testMatch))
+        for entry in entries:
+            # this hard-coded initial-dot test will be removed:
+            # http://code.google.com/p/python-nose/issues/detail?id=82
+            if entry.startswith('.'):
+                continue
+            entry_path = op_abspath(op_join(path, entry))
+            is_file = op_isfile(entry_path)
+            wanted = False
+            if is_file:
+                is_dir = False
+                wanted = self.selector.wantFile(entry_path)
+            else:
+                is_dir = op_isdir(entry_path)
+                if is_dir:
+                    # this hard-coded initial-underscore test will be removed:
+                    # http://code.google.com/p/python-nose/issues/detail?id=82
+                    if entry.startswith('_'):
+                        continue
+                    wanted = self.selector.wantDirectory(entry_path)
+            is_package = ispackage(entry_path)
+
+            # Python 3.3 now implements PEP 420: Implicit Namespace Packages.
+            # As a result, it's now possible that parent paths that have a
+            # segment with the same basename as our package ends up
+            # in module.__path__.  So we have to keep track of what we've
+            # visited, and not-revisit them again.
+            if wanted and not self._haveVisited(entry_path):
+                self._addVisitedPath(entry_path)
+                if is_file:
+                    plugins.beforeContext()
+                    if entry.endswith('.py'):
+                        yield self.loadTestsFromName(
+                            entry_path, discovered=True)
+                    else:
+                        yield self.loadTestsFromFile(entry_path)
+                    plugins.afterContext()
+                elif is_package:
+                    # Load the entry as a package: given the full path,
+                    # loadTestsFromName() will figure it out
+                    yield self.loadTestsFromName(
+                        entry_path, discovered=True)
+                else:
+                    # Another test dir in this one: recurse lazily
+                    yield self.suiteClass(
+                        lambda: self.loadTestsFromDir(entry_path))
+        tests = []
+        for test in plugins.loadTestsFromDir(path):
+            tests.append(test)
+        # TODO: is this try/except needed?
+        try:
+            if tests:
+                yield self.suiteClass(tests)
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except:
+            yield self.suiteClass([Failure(*sys.exc_info())])
+
+        # pop paths
+        if self.config.addPaths:
+            for p in paths_added:
+                remove_path(p)
+        plugins.afterDirectory(path)
+
+    def loadTestsFromFile(self, filename):
+        """Load tests from a non-module file. Default is to raise a
+        ValueError; plugins may implement `loadTestsFromFile` to
+        provide a list of tests loaded from the file.
+
+        Any exception raised (including the deliberate open() below) is
+        converted to a Failure test case addressed at the file.
+        """
+        log.debug("Load from non-module file %s", filename)
+        try:
+            tests = [test for test in
+                     self.config.plugins.loadTestsFromFile(filename)]
+            if tests:
+                # Plugins can yield False to indicate that they were
+                # unable to load tests from a file, but it was not an
+                # error -- the file just had no tests to load.
+                tests = [_f for _f in tests if _f]
+                return self.suiteClass(tests)
+            else:
+                # Nothing was able to even try to load from this file
+                open(filename, 'r').close() # trigger os error
+                raise ValueError("Unable to load tests from file %s"
+                                 % filename)
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except:
+            exc = sys.exc_info()
+            return self.suiteClass(
+                [Failure(exc[0], exc[1], exc[2],
+                         address=(filename, None, None))])
+
+ def loadTestsFromGenerator(self, generator, module):
+ """Lazy-load tests from a generator function. The generator function
+ may yield either:
+
+ * a callable, or
+ * a function name resolvable within the same module
+ """
+ def generate(g=generator, m=module):
+ try:
+ for test in g():
+ test_func, arg = self.parseGeneratedTest(test)
+ if not isinstance(test_func, collections.Callable):
+ test_func = getattr(m, test_func)
+ yield FunctionTestCase(test_func, arg=arg, descriptor=g)
+ except KeyboardInterrupt:
+ raise
+ except:
+ exc = sys.exc_info()
+ yield Failure(exc[0], exc[1], exc[2],
+ address=test_address(generator))
+ return self.suiteClass(generate, context=generator, can_split=False)
+
+ def loadTestsFromGeneratorMethod(self, generator, cls):
+ """Lazy-load tests from a generator method.
+
+ This is more complicated than loading from a generator function,
+ since a generator method may yield:
+
+ * a function
+ * a bound or unbound method, or
+ * a method name
+ """
+ # convert the unbound generator method
+ # into a bound method so it can be called below
+ if hasattr(generator, 'im_class'):
+ cls = generator.__self__.__class__
+ inst = cls()
+ method = generator.__name__
+ generator = getattr(inst, method)
+
+ def generate(g=generator, c=cls):
+ try:
+ for test in g():
+ test_func, arg = self.parseGeneratedTest(test)
+ if not isinstance(test_func, collections.Callable):
+ test_func = unbound_method(c, getattr(c, test_func))
+ if ismethod(test_func):
+ yield MethodTestCase(test_func, arg=arg, descriptor=g)
+ elif isinstance(test_func, collections.Callable):
+ # In this case we're forcing the 'MethodTestCase'
+ # to run the inline function as its test call,
+ # but using the generator method as the 'method of
+ # record' (so no need to pass it as the descriptor)
+ yield MethodTestCase(g, test=test_func, arg=arg)
+ else:
+ yield Failure(
+ TypeError,
+ "%s is not a callable or method" % test_func)
+ except KeyboardInterrupt:
+ raise
+ except:
+ exc = sys.exc_info()
+ yield Failure(exc[0], exc[1], exc[2],
+ address=test_address(generator))
+ return self.suiteClass(generate, context=generator, can_split=False)
+
+    def loadTestsFromModule(self, module, path=None, discovered=False):
+        """Load all tests from module and return a suite containing
+        them. If the module has been discovered and is not test-like,
+        the suite will be empty by default, though plugins may add
+        their own tests.
+        """
+        log.debug("Load from module %s", module)
+        tests = []
+        test_classes = []
+        test_funcs = []
+        # For *discovered* modules, we only load tests when the module looks
+        # testlike. For modules we've been directed to load, we always
+        # look for tests. (discovered is set to True by loadTestsFromDir)
+        if not discovered or self.selector.wantModule(module):
+            for item in dir(module):
+                test = getattr(module, item, None)
+                # print "Check %s (%s) in %s" % (item, test, module.__name__)
+                if isclass(test):
+                    if self.selector.wantClass(test):
+                        test_classes.append(test)
+                elif isfunction(test) and self.selector.wantFunction(test):
+                    test_funcs.append(test)
+            # classes sort by name, functions by source line number
+            sort_list(test_classes, lambda x: x.__name__)
+            sort_list(test_funcs, func_lineno)
+            tests = [self.makeTest(t, parent=module) for t in test_classes + test_funcs]
+
+        # Now, descend into packages
+        # FIXME can or should this be lazy?
+        # is this syntax 2.2 compatible?
+        module_paths = getattr(module, '__path__', [])
+        if path:
+            path = os.path.realpath(path)
+        for module_path in module_paths:
+            log.debug("Load tests from module path %s?", module_path)
+            log.debug("path: %s os.path.realpath(%s): %s",
+                      path, module_path, os.path.realpath(module_path))
+            if (self.config.traverseNamespace or not path) or \
+                    os.path.realpath(module_path).startswith(path):
+                # Egg files can be on sys.path, so make sure the path is a
+                # directory before trying to load from it.
+                if os.path.isdir(module_path):
+                    tests.extend(self.loadTestsFromDir(module_path))
+
+        for test in self.config.plugins.loadTestsFromModule(module, path):
+            tests.append(test)
+
+        return self.suiteClass(ContextList(tests, context=module))
+
+    def loadTestsFromName(self, name, module=None, discovered=False):
+        """Load tests from the entity with the given name.
+
+        The name may indicate a file, directory, module, or any object
+        within a module. See `nose.util.split_test_name` for details on
+        test name parsing.
+
+        Errors during import or resolution are returned as Failure test
+        cases rather than raised, so a bad name fails one "test" instead
+        of aborting collection.
+        """
+        # FIXME refactor this method into little bites?
+        log.debug("load from %s (%s)", name, module)
+
+        suite = self.suiteClass
+
+        # give plugins first crack
+        plug_tests = self.config.plugins.loadTestsFromName(name, module)
+        if plug_tests:
+            return suite(plug_tests)
+
+        addr = TestAddress(name, workingDir=self.workingDir)
+        if module:
+            # Two cases:
+            #  name is class.foo
+            #    The addr will be incorrect, since it thinks class.foo is
+            #    a dotted module name. It's actually a dotted attribute
+            #    name. In this case we want to use the full submitted
+            #    name as the name to load from the module.
+            #  name is module:class.foo
+            #    The addr will be correct. The part we want is the part after
+            #    the :, which is in addr.call.
+            if addr.call:
+                name = addr.call
+            parent, obj = self.resolve(name, module)
+            if (isclass(parent)
+                    and getattr(parent, '__module__', None) != module.__name__
+                    and not isinstance(obj, Failure)):
+                # the object was found via an import in `module`; re-home it
+                # so its context (fixtures) comes from `module`
+                parent = transplant_class(parent, module.__name__)
+                obj = getattr(parent, obj.__name__)
+            log.debug("parent %s obj %s module %s", parent, obj, module)
+            if isinstance(obj, Failure):
+                return suite([obj])
+            else:
+                return suite(ContextList([self.makeTest(obj, parent)],
+                                         context=parent))
+        else:
+            if addr.module:
+                try:
+                    if addr.filename is None:
+                        module = resolve_name(addr.module)
+                    else:
+                        self.config.plugins.beforeImport(
+                            addr.filename, addr.module)
+                        # FIXME: to support module.name names,
+                        # do what resolve-name does and keep trying to
+                        # import, popping tail of module into addr.call,
+                        # until we either get an import or run out of
+                        # module parts
+                        try:
+                            module = self.importer.importFromPath(
+                                addr.filename, addr.module)
+                        finally:
+                            self.config.plugins.afterImport(
+                                addr.filename, addr.module)
+                except (KeyboardInterrupt, SystemExit):
+                    raise
+                except:
+                    exc = sys.exc_info()
+                    return suite([Failure(exc[0], exc[1], exc[2],
+                                          address=addr.totuple())])
+                if addr.call:
+                    # recurse to resolve the attribute within the module
+                    return self.loadTestsFromName(addr.call, module)
+                else:
+                    return self.loadTestsFromModule(
+                        module, addr.filename,
+                        discovered=discovered)
+            elif addr.filename:
+                path = addr.filename
+                if addr.call:
+                    package = getpackage(path)
+                    if package is None:
+                        return suite([
+                            Failure(ValueError,
+                                    "Can't find callable %s in file %s: "
+                                    "file is not a python module" %
+                                    (addr.call, path),
+                                    address=addr.totuple())])
+                    return self.loadTestsFromName(addr.call, module=package)
+                else:
+                    if op_isdir(path):
+                        # In this case we *can* be lazy since we know
+                        # that each module in the dir will be fully
+                        # loaded before its tests are executed; we
+                        # also know that we're not going to be asked
+                        # to load from . and ./some_module.py *as part
+                        # of this named test load*
+                        return LazySuite(
+                            lambda: self.loadTestsFromDir(path))
+                    elif op_isfile(path):
+                        return self.loadTestsFromFile(path)
+                    else:
+                        return suite([
+                            Failure(OSError, "No such file %s" % path,
+                                    address=addr.totuple())])
+            else:
+                # just a function? what to do? I think it can only be
+                # handled when module is not None
+                return suite([
+                    Failure(ValueError, "Unresolvable test name %s" % name,
+                            address=addr.totuple())])
+
+ def loadTestsFromNames(self, names, module=None):
+ """Load tests from all names, returning a suite containing all
+ tests.
+ """
+ plug_res = self.config.plugins.loadTestsFromNames(names, module)
+ if plug_res:
+ suite, names = plug_res
+ if suite:
+ return self.suiteClass([
+ self.suiteClass(suite),
+ unittest.TestLoader.loadTestsFromNames(self, names, module)
+ ])
+ return unittest.TestLoader.loadTestsFromNames(self, names, module)
+
+ def loadTestsFromTestCase(self, testCaseClass):
+ """Load tests from a unittest.TestCase subclass.
+ """
+ cases = []
+ plugins = self.config.plugins
+ for case in plugins.loadTestsFromTestCase(testCaseClass):
+ cases.append(case)
+ # For efficiency in the most common case, just call and return from
+ # super. This avoids having to extract cases and rebuild a context
+ # suite when there are no plugin-contributed cases.
+ if not cases:
+ return super(TestLoader, self).loadTestsFromTestCase(testCaseClass)
+ cases.extend(
+ [case for case in
+ super(TestLoader, self).loadTestsFromTestCase(testCaseClass)])
+ return self.suiteClass(cases)
+
+    def loadTestsFromTestClass(self, cls):
+        """Load tests from a test class that is *not* a unittest.TestCase
+        subclass.
+
+        In this case, we can't depend on the class's `__init__` taking method
+        name arguments, so we have to compose a MethodTestCase for each
+        method in the class that looks testlike.
+        """
+        def wanted(attr, cls=cls, sel=self.selector):
+            # testlike = approved by the selector; plain functions are
+            # wrapped as unbound methods first (Python 3)
+            item = getattr(cls, attr, None)
+            if isfunction(item):
+                item = unbound_method(cls, item)
+            elif not ismethod(item):
+                return False
+            return sel.wantMethod(item)
+        cases = [self.makeTest(getattr(cls, case), cls)
+                 for case in filter(wanted, dir(cls))]
+        for test in self.config.plugins.loadTestsFromTestClass(cls):
+            cases.append(test)
+        return self.suiteClass(ContextList(cases, context=cls))
+
+    def makeTest(self, obj, parent=None):
+        """Wrap _makeTest so that any exception raised while building a
+        test becomes a Failure test case instead of aborting collection.
+        """
+        try:
+            return self._makeTest(obj, parent)
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except:
+            exc = sys.exc_info()
+            try:
+                addr = test_address(obj)
+            except KeyboardInterrupt:
+                raise
+            except:
+                # test_address itself may fail on odd objects; report the
+                # original error without an address in that case
+                addr = None
+            return Failure(exc[0], exc[1], exc[2], address=addr)
+
+    def _makeTest(self, obj, parent=None):
+        """Given a test object and its parent, return a test case
+        or test suite.
+
+        Dispatches on the kind of `obj`: TestCase instance, class, method,
+        function (including generators), with plugin-built tests taking
+        precedence.
+        """
+        plug_tests = []
+        try:
+            addr = test_address(obj)
+        except KeyboardInterrupt:
+            raise
+        except:
+            addr = None
+        for test in self.config.plugins.makeTest(obj, parent):
+            plug_tests.append(test)
+        # TODO: is this try/except needed?
+        try:
+            if plug_tests:
+                return self.suiteClass(plug_tests)
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except:
+            exc = sys.exc_info()
+            return Failure(exc[0], exc[1], exc[2], address=addr)
+
+        if isfunction(obj) and parent and not isinstance(parent, types.ModuleType):
+            # This is a Python 3.x 'unbound method'.  Wrap it with its
+            # associated class..
+            obj = unbound_method(parent, obj)
+
+        if isinstance(obj, unittest.TestCase):
+            return obj
+        elif isclass(obj):
+            if parent and obj.__module__ != parent.__name__:
+                # re-home the class so context comes from `parent`
+                obj = transplant_class(obj, parent.__name__)
+            if issubclass(obj, unittest.TestCase):
+                return self.loadTestsFromTestCase(obj)
+            else:
+                return self.loadTestsFromTestClass(obj)
+        elif ismethod(obj):
+            if parent is None:
+                parent = obj.__class__
+            if issubclass(parent, unittest.TestCase):
+                return parent(obj.__name__)
+            else:
+                if isgenerator(obj):
+                    return self.loadTestsFromGeneratorMethod(obj, parent)
+                else:
+                    return MethodTestCase(obj)
+        elif isfunction(obj):
+            if parent and obj.__module__ != parent.__name__:
+                obj = transplant_func(obj, parent.__name__)
+            if isgenerator(obj):
+                return self.loadTestsFromGenerator(obj, parent)
+            else:
+                return FunctionTestCase(obj)
+        else:
+            return Failure(TypeError,
+                           "Can't make a test from %s" % obj,
+                           address=addr)
+
+    def resolve(self, name, module):
+        """Resolve name within module
+
+        Returns (parent, obj). When any segment of the dotted name is
+        missing, obj is a Failure test describing the unresolvable name.
+        """
+        obj = module
+        parts = name.split('.')
+        for part in parts:
+            parent, obj = obj, getattr(obj, part, None)
+        if obj is None:
+            # no such test
+            obj = Failure(ValueError, "No such test %s" % name)
+        return parent, obj
+
+ def parseGeneratedTest(self, test):
+ """Given the yield value of a test generator, return a func and args.
+
+ This is used in the two loadTestsFromGenerator* methods.
+
+ """
+ if not isinstance(test, tuple): # yield test
+ test_func, arg = (test, tuple())
+ elif len(test) == 1: # yield (test,)
+ test_func, arg = (test[0], tuple())
+ else: # yield test, foo, bar, ...
+ assert len(test) > 1 # sanity check
+ test_func, arg = (test[0], test[1:])
+ return test_func, arg
+
+defaultTestLoader = TestLoader
+
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/__init__.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/__init__.py
new file mode 100644
index 00000000..08ee8f32
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/__init__.py
@@ -0,0 +1,190 @@
+"""
+Writing Plugins
+---------------
+
+nose supports plugins for test collection, selection, observation and
+reporting. There are two basic rules for plugins:
+
+* Plugin classes should subclass :class:`nose.plugins.Plugin`.
+
+* Plugins may implement any of the methods described in the class
+ :doc:`IPluginInterface <interface>` in nose.plugins.base. Please note that
+ this class is for documentary purposes only; plugins may not subclass
+ IPluginInterface.
+
+Hello World
+===========
+
+Here's a basic plugin. It doesn't do much so read on for more ideas or dive
+into the :doc:`IPluginInterface <interface>` to see all available hooks.
+
+.. code-block:: python
+
+ import logging
+ import os
+
+ from nose.plugins import Plugin
+
+ log = logging.getLogger('nose.plugins.helloworld')
+
+ class HelloWorld(Plugin):
+ name = 'helloworld'
+
+ def options(self, parser, env=os.environ):
+ super(HelloWorld, self).options(parser, env=env)
+
+ def configure(self, options, conf):
+ super(HelloWorld, self).configure(options, conf)
+ if not self.enabled:
+ return
+
+ def finalize(self, result):
+ log.info('Hello pluginized world!')
+
+Registering
+===========
+
+.. Note::
+ Important note: the following applies only to the default
+ plugin manager. Other plugin managers may use different means to
+ locate and load plugins.
+
+For nose to find a plugin, it must be part of a package that uses
+setuptools_, and the plugin must be included in the entry points defined
+in the setup.py for the package:
+
+.. code-block:: python
+
+ setup(name='Some plugin',
+ # ...
+ entry_points = {
+ 'nose.plugins.0.10': [
+ 'someplugin = someplugin:SomePlugin'
+ ]
+ },
+ # ...
+ )
+
+Once the package is installed with install or develop, nose will be able
+to load the plugin.
+
+.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools
+
+Registering a plugin without setuptools
+=======================================
+
+It is currently possible to register a plugin programmatically by
+creating a custom nose runner like this :
+
+.. code-block:: python
+
+ import nose
+ from yourplugin import YourPlugin
+
+ if __name__ == '__main__':
+ nose.main(addplugins=[YourPlugin()])
+
+Defining options
+================
+
+All plugins must implement the methods ``options(self, parser, env)``
+and ``configure(self, options, conf)``. Subclasses of nose.plugins.Plugin
+that want the standard options should call the superclass methods.
+
+nose uses optparse.OptionParser from the standard library to parse
+arguments. A plugin's ``options()`` method receives a parser
+instance. It's good form for a plugin to use that instance only to add
+additional arguments that take only long arguments (--like-this). Most
+of nose's built-in arguments get their default value from an environment
+variable.
+
+A plugin's ``configure()`` method receives the parsed ``OptionParser`` options
+object, as well as the current config object. Plugins should configure their
+behavior based on the user-selected settings, and may raise exceptions
+if the configured behavior is nonsensical.
+
+Logging
+=======
+
+nose uses the logging classes from the standard library. To enable users
+to view debug messages easily, plugins should use ``logging.getLogger()`` to
+acquire a logger in the ``nose.plugins`` namespace.
+
+Recipes
+=======
+
+* Writing a plugin that monitors or controls test result output
+
+ Implement any or all of ``addError``, ``addFailure``, etc., to monitor test
+ results. If you also want to monitor output, implement
+ ``setOutputStream`` and keep a reference to the output stream. If you
+ want to prevent the builtin ``TextTestResult`` output, implement
+  ``setOutputStream`` and *return a dummy stream*. The default output will go
+ to the dummy stream, while you send your desired output to the real stream.
+
+ Example: `examples/html_plugin/htmlplug.py`_
+
+* Writing a plugin that handles exceptions
+
+ Subclass :doc:`ErrorClassPlugin <errorclasses>`.
+
+ Examples: :doc:`nose.plugins.deprecated <deprecated>`,
+ :doc:`nose.plugins.skip <skip>`
+
+* Writing a plugin that adds detail to error reports
+
+ Implement ``formatError`` and/or ``formatFailure``. The error tuple
+ you return (error class, error message, traceback) will replace the
+ original error tuple.
+
+ Examples: :doc:`nose.plugins.capture <capture>`,
+ :doc:`nose.plugins.failuredetail <failuredetail>`
+
+* Writing a plugin that loads tests from files other than python modules
+
+ Implement ``wantFile`` and ``loadTestsFromFile``. In ``wantFile``,
+ return True for files that you want to examine for tests. In
+ ``loadTestsFromFile``, for those files, return an iterable
+ containing TestCases (or yield them as you find them;
+ ``loadTestsFromFile`` may also be a generator).
+
+ Example: :doc:`nose.plugins.doctests <doctests>`
+
+* Writing a plugin that prints a report
+
+ Implement ``begin`` if you need to perform setup before testing
+ begins. Implement ``report`` and output your report to the provided stream.
+
+ Examples: :doc:`nose.plugins.cover <cover>`, :doc:`nose.plugins.prof <prof>`
+
+* Writing a plugin that selects or rejects tests
+
+ Implement any or all ``want*`` methods. Return False to reject the test
+ candidate, True to accept it -- which means that the test candidate
+ will pass through the rest of the system, so you must be prepared to
+ load tests from it if tests can't be loaded by the core loader or
+ another plugin -- and None if you don't care.
+
+ Examples: :doc:`nose.plugins.attrib <attrib>`,
+ :doc:`nose.plugins.doctests <doctests>`, :doc:`nose.plugins.testid <testid>`
+
+
+More Examples
+=============
+
+See any builtin plugin or example plugin in the examples_ directory in
+the nose source distribution. There is a list of third-party plugins
+`on jottit`_.
+
+.. _examples/html_plugin/htmlplug.py: http://python-nose.googlecode.com/svn/trunk/examples/html_plugin/htmlplug.py
+.. _examples: http://python-nose.googlecode.com/svn/trunk/examples
+.. _on jottit: http://nose-plugins.jottit.com/
+
+"""
+from nose.plugins.base import Plugin
+from nose.plugins.manager import *
+from nose.plugins.plugintest import PluginTester
+
+if __name__ == '__main__':
+    # Self-test: run the doctest examples embedded in the module docstring.
+    import doctest
+    doctest.testmod()
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/allmodules.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/allmodules.py
new file mode 100644
index 00000000..1ccd7773
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/allmodules.py
@@ -0,0 +1,45 @@
+"""Use the AllModules plugin by passing ``--all-modules`` or setting the
+NOSE_ALL_MODULES environment variable to enable collection and execution of
+tests in all python modules. Normal nose behavior is to look for tests only in
+modules that match testMatch.
+
+More information: :doc:`../doc_tests/test_allmodules/test_allmodules`
+
+.. warning ::
+
+ This plugin can have surprising interactions with plugins that load tests
+ from what nose normally considers non-test modules, such as
+ the :doc:`doctest plugin <doctests>`. This is because any given
+ object in a module can't be loaded both by a plugin and the normal nose
+ :class:`test loader <nose.loader.TestLoader>`. Also, if you have functions
+ or classes in non-test modules that look like tests but aren't, you will
+ likely see errors as nose attempts to run them as tests.
+
+"""
+
+import os
+from nose.plugins.base import Plugin
+
+class AllModules(Plugin):
+ """Collect tests from all python modules.
+ """
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ env_opt = 'NOSE_ALL_MODULES'
+ parser.add_option('--all-modules',
+ action="store_true",
+ dest=self.enableOpt,
+ default=env.get(env_opt),
+ help="Enable plugin %s: %s [%s]" %
+ (self.__class__.__name__, self.help(), env_opt))
+
+ def wantFile(self, file):
+ """Override to return True for all files ending with .py"""
+ # always want .py files
+ if file.endswith('.py'):
+ return True
+
+ def wantModule(self, module):
+ """Override to return True for all modules"""
+ return True
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/attrib.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/attrib.py
new file mode 100644
index 00000000..62e55337
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/attrib.py
@@ -0,0 +1,287 @@
+"""Attribute selector plugin.
+
+Oftentimes when testing you will want to select tests based on
+criteria rather than simply by filename. For example, you might want
+to run all tests except for the slow ones. You can do this with the
+Attribute selector plugin by setting attributes on your test methods.
+Here is an example:
+
+.. code-block:: python
+
+ def test_big_download():
+ import urllib
+ # commence slowness...
+
+ test_big_download.slow = 1
+
+Once you've assigned an attribute ``slow = 1`` you can exclude that
+test and all other tests having the slow attribute by running ::
+
+ $ nosetests -a '!slow'
+
+There is also a decorator available for you that will set attributes.
+Here's how to set ``slow=1`` like above with the decorator:
+
+.. code-block:: python
+
+ from nose.plugins.attrib import attr
+ @attr('slow')
+ def test_big_download():
+ import urllib
+ # commence slowness...
+
+And here's how to set an attribute with a specific value:
+
+.. code-block:: python
+
+ from nose.plugins.attrib import attr
+ @attr(speed='slow')
+ def test_big_download():
+ import urllib
+ # commence slowness...
+
+This test could be run with ::
+
+ $ nosetests -a speed=slow
+
+In Python 2.6 and higher, ``@attr`` can be used on a class to set attributes
+on all its test methods at once. For example:
+
+.. code-block:: python
+
+ from nose.plugins.attrib import attr
+ @attr(speed='slow')
+ class MyTestCase:
+ def test_long_integration(self):
+ pass
+ def test_end_to_end_something(self):
+ pass
+
+Below is a reference to the different syntaxes available.
+
+Simple syntax
+-------------
+
+Examples of using the ``-a`` and ``--attr`` options:
+
+* ``nosetests -a status=stable``
+ Only runs tests with attribute "status" having value "stable"
+
+* ``nosetests -a priority=2,status=stable``
+ Runs tests having both attributes and values
+
+* ``nosetests -a priority=2 -a slow``
+ Runs tests that match either attribute
+
+* ``nosetests -a tags=http``
+ If a test's ``tags`` attribute was a list and it contained the value
+ ``http`` then it would be run
+
+* ``nosetests -a slow``
+ Runs tests with the attribute ``slow`` if its value does not equal False
+ (False, [], "", etc...)
+
+* ``nosetests -a '!slow'``
+ Runs tests that do NOT have the attribute ``slow`` or have a ``slow``
+ attribute that is equal to False
+ **NOTE**:
+ if your shell (like bash) interprets '!' as a special character make sure to
+ put single quotes around it.
+
+Expression Evaluation
+---------------------
+
+Examples using the ``-A`` and ``--eval-attr`` options:
+
+* ``nosetests -A "not slow"``
+ Evaluates the Python expression "not slow" and runs the test if True
+
+* ``nosetests -A "(priority > 5) and not slow"``
+ Evaluates a complex Python expression and runs the test if True
+
+"""
+import inspect
+import logging
+import os
+import sys
+from inspect import isfunction
+from nose.plugins.base import Plugin
+from nose.util import tolist
+import collections
+
+log = logging.getLogger('nose.plugins.attrib')
+compat_24 = sys.version_info >= (2, 4)
+
+def attr(*args, **kwargs):
+ """Decorator that adds attributes to classes or functions
+ for use with the Attribute (-a) plugin.
+ """
+ def wrap_ob(ob):
+ for name in args:
+ setattr(ob, name, True)
+ for name, value in kwargs.items():
+ setattr(ob, name, value)
+ return ob
+ return wrap_ob
+
+def get_method_attr(method, cls, attr_name, default = False):
+ """Look up an attribute on a method/ function.
+ If the attribute isn't found there, look it up in the
+ method's class, if any.
+ """
+ Missing = object()
+ value = getattr(method, attr_name, Missing)
+ if value is Missing and cls is not None:
+ value = getattr(cls, attr_name, Missing)
+ if value is Missing:
+ return default
+ return value
+
+
+class ContextHelper:
+ """Object that can act as context dictionary for eval and looks up
+ names as attributes on a method/ function and its class.
+ """
+ def __init__(self, method, cls):
+ self.method = method
+ self.cls = cls
+
+ def __getitem__(self, name):
+ return get_method_attr(self.method, self.cls, name)
+
+
+class AttributeSelector(Plugin):
+ """Selects test cases to be run based on their attributes.
+ """
+
+ def __init__(self):
+ Plugin.__init__(self)
+ self.attribs = []
+
+ def options(self, parser, env):
+ """Register command line options"""
+ parser.add_option("-a", "--attr",
+ dest="attr", action="append",
+ default=env.get('NOSE_ATTR'),
+ metavar="ATTR",
+ help="Run only tests that have attributes "
+ "specified by ATTR [NOSE_ATTR]")
+ # disable in < 2.4: eval can't take needed args
+ if compat_24:
+ parser.add_option("-A", "--eval-attr",
+ dest="eval_attr", metavar="EXPR", action="append",
+ default=env.get('NOSE_EVAL_ATTR'),
+ help="Run only tests for whose attributes "
+ "the Python expression EXPR evaluates "
+ "to True [NOSE_EVAL_ATTR]")
+
+ def configure(self, options, config):
+ """Configure the plugin and system, based on selected options.
+
+ attr and eval_attr may each be lists.
+
+ self.attribs will be a list of lists of tuples. In that list, each
+ list is a group of attributes, all of which must match for the rule to
+ match.
+ """
+ self.attribs = []
+
+ # handle python eval-expression parameter
+ if compat_24 and options.eval_attr:
+ eval_attr = tolist(options.eval_attr)
+ for attr in eval_attr:
+ # "<python expression>"
+ # -> eval(expr) in attribute context must be True
+ def eval_in_context(expr, obj, cls):
+ return eval(expr, None, ContextHelper(obj, cls))
+ self.attribs.append([(attr, eval_in_context)])
+
+ # attribute requirements are a comma separated list of
+ # 'key=value' pairs
+ if options.attr:
+ std_attr = tolist(options.attr)
+ for attr in std_attr:
+ # all attributes within an attribute group must match
+ attr_group = []
+ for attrib in attr.strip().split(","):
+ # don't die on trailing comma
+ if not attrib:
+ continue
+ items = attrib.split("=", 1)
+ if len(items) > 1:
+ # "name=value"
+ # -> 'str(obj.name) == value' must be True
+ key, value = items
+ else:
+ key = items[0]
+ if key[0] == "!":
+ # "!name"
+ # 'bool(obj.name)' must be False
+ key = key[1:]
+ value = False
+ else:
+ # "name"
+ # -> 'bool(obj.name)' must be True
+ value = True
+ attr_group.append((key, value))
+ self.attribs.append(attr_group)
+ if self.attribs:
+ self.enabled = True
+
+ def validateAttrib(self, method, cls = None):
+ """Verify whether a method has the required attributes.
+ The method is considered a match if it matches all attributes
+ for any attribute group.
+ """
+ # TODO: is there a need for case-sensitive value comparison?
+ any = False
+ for group in self.attribs:
+ match = True
+ for key, value in group:
+ attr = get_method_attr(method, cls, key)
+ if isinstance(value, collections.Callable):
+ if not value(key, method, cls):
+ match = False
+ break
+ elif value is True:
+ # value must exist and be True
+ if not bool(attr):
+ match = False
+ break
+ elif value is False:
+ # value must not exist or be False
+ if bool(attr):
+ match = False
+ break
+ elif type(attr) in (list, tuple):
+ # value must be found in the list attribute
+ if not str(value).lower() in [str(x).lower()
+ for x in attr]:
+ match = False
+ break
+ else:
+ # value must match, convert to string and compare
+ if (value != attr
+ and str(value).lower() != str(attr).lower()):
+ match = False
+ break
+ any = any or match
+ if any:
+ # not True because we don't want to FORCE the selection of the
+ # item, only say that it is acceptable
+ return None
+ return False
+
+ def wantFunction(self, function):
+ """Accept the function if its attributes match.
+ """
+ return self.validateAttrib(function)
+
+ def wantMethod(self, method):
+ """Accept the method if its attributes match.
+ """
+ try:
+ cls = method.__self__.__class__
+ except AttributeError:
+ return False
+ return self.validateAttrib(method, cls)
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/base.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/base.py
new file mode 100644
index 00000000..0e1c68f8
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/base.py
@@ -0,0 +1,725 @@
+import os
+import textwrap
+from optparse import OptionConflictError
+from warnings import warn
+from nose.util import tolist
+
+class Plugin(object):
+ """Base class for nose plugins. It's recommended but not *necessary* to
+ subclass this class to create a plugin, but all plugins *must* implement
+ `options(self, parser, env)` and `configure(self, options, conf)`, and
+ must have the attributes `enabled`, `name` and `score`. The `name`
+ attribute may contain hyphens ('-').
+
+ Plugins should not be enabled by default.
+
+ Subclassing Plugin (and calling the superclass methods in
+ __init__, configure, and options, if you override them) will give
+ your plugin some friendly default behavior:
+
+ * A --with-$name option will be added to the command line interface
+ to enable the plugin, and a corresponding environment variable
+ will be used as the default value. The plugin class's docstring
+ will be used as the help for this option.
+ * The plugin will not be enabled unless this option is selected by
+ the user.
+ """
+ can_configure = False
+ enabled = False
+ enableOpt = None
+ name = None
+ score = 100
+
+ def __init__(self):
+ if self.name is None:
+ self.name = self.__class__.__name__.lower()
+ if self.enableOpt is None:
+ self.enableOpt = "enable_plugin_%s" % self.name.replace('-', '_')
+
+ def addOptions(self, parser, env=None):
+ """Add command-line options for this plugin.
+
+ The base plugin class adds --with-$name by default, used to enable the
+ plugin.
+
+ .. warning :: Don't implement addOptions unless you want to override
+ all default option handling behavior, including
+ warnings for conflicting options. Implement
+ :meth:`options
+ <nose.plugins.base.IPluginInterface.options>`
+ instead.
+ """
+ self.add_options(parser, env)
+
+ def add_options(self, parser, env=None):
+ """Non-camel-case version of func name for backwards compatibility.
+
+ .. warning ::
+
+ DEPRECATED: Do not use this method,
+ use :meth:`options <nose.plugins.base.IPluginInterface.options>`
+ instead.
+
+ """
+ # FIXME raise deprecation warning if wasn't called by wrapper
+ if env is None:
+ env = os.environ
+ try:
+ self.options(parser, env)
+ self.can_configure = True
+ except OptionConflictError as e:
+ warn("Plugin %s has conflicting option string: %s and will "
+ "be disabled" % (self, e), RuntimeWarning)
+ self.enabled = False
+ self.can_configure = False
+
+ def options(self, parser, env):
+ """Register commandline options.
+
+ Implement this method for normal options behavior with protection from
+ OptionConflictErrors. If you override this method and want the default
+ --with-$name option to be registered, be sure to call super().
+ """
+ env_opt = 'NOSE_WITH_%s' % self.name.upper()
+ env_opt = env_opt.replace('-', '_')
+ parser.add_option("--with-%s" % self.name,
+ action="store_true",
+ dest=self.enableOpt,
+ default=env.get(env_opt),
+ help="Enable plugin %s: %s [%s]" %
+ (self.__class__.__name__, self.help(), env_opt))
+
+ def configure(self, options, conf):
+ """Configure the plugin and system, based on selected options.
+
+ The base plugin class sets the plugin to enabled if the enable option
+ for the plugin (self.enableOpt) is true.
+ """
+ if not self.can_configure:
+ return
+ self.conf = conf
+ if hasattr(options, self.enableOpt):
+ self.enabled = getattr(options, self.enableOpt)
+
+ def help(self):
+ """Return help for this plugin. This will be output as the help
+ section of the --with-$name option that enables the plugin.
+ """
+ if self.__class__.__doc__:
+ # doc sections are often indented; compress the spaces
+ return textwrap.dedent(self.__class__.__doc__)
+ return "(no help available)"
+
+ # Compatibility shim
+ def tolist(self, val):
+ warn("Plugin.tolist is deprecated. Use nose.util.tolist instead",
+ DeprecationWarning)
+ return tolist(val)
+
+
+class IPluginInterface(object):
+ """
+ IPluginInterface describes the plugin API. Do not subclass or use this
+ class directly.
+ """
+ def __new__(cls, *arg, **kw):
+ raise TypeError("IPluginInterface class is for documentation only")
+
+ def addOptions(self, parser, env):
+ """Called to allow plugin to register command-line options with the
+ parser. DO NOT return a value from this method unless you want to stop
+ all other plugins from setting their options.
+
+ .. warning ::
+
+ DEPRECATED -- implement
+ :meth:`options <nose.plugins.base.IPluginInterface.options>` instead.
+ """
+ pass
+ add_options = addOptions
+ add_options.deprecated = True
+
+ def addDeprecated(self, test):
+ """Called when a deprecated test is seen. DO NOT return a value
+ unless you want to stop other plugins from seeing the deprecated
+ test.
+
+ .. warning :: DEPRECATED -- check error class in addError instead
+ """
+ pass
+ addDeprecated.deprecated = True
+
+ def addError(self, test, err):
+ """Called when a test raises an uncaught exception. DO NOT return a
+ value unless you want to stop other plugins from seeing that the
+ test has raised an error.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ addError.changed = True
+
+ def addFailure(self, test, err):
+ """Called when a test fails. DO NOT return a value unless you
+ want to stop other plugins from seeing that the test has failed.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: 3-tuple
+ :type err: sys.exc_info() tuple
+ """
+ pass
+ addFailure.changed = True
+
+ def addSkip(self, test):
+ """Called when a test is skipped. DO NOT return a value unless
+ you want to stop other plugins from seeing the skipped test.
+
+ .. warning:: DEPRECATED -- check error class in addError instead
+ """
+ pass
+ addSkip.deprecated = True
+
+ def addSuccess(self, test):
+ """Called when a test passes. DO NOT return a value unless you
+ want to stop other plugins from seeing the passing test.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ addSuccess.changed = True
+
+ def afterContext(self):
+ """Called after a context (generally a module) has been
+ lazy-loaded, imported, setup, had its tests loaded and
+ executed, and torn down.
+ """
+ pass
+ afterContext._new = True
+
+ def afterDirectory(self, path):
+ """Called after all tests have been loaded from directory at path
+ and run.
+
+ :param path: the directory that has finished processing
+ :type path: string
+ """
+ pass
+ afterDirectory._new = True
+
+ def afterImport(self, filename, module):
+ """Called after module is imported from filename. afterImport
+ is called even if the import failed.
+
+ :param filename: The file that was loaded
+ :type filename: string
+ :param module: The name of the module
+ :type module: string
+ """
+ pass
+ afterImport._new = True
+
+ def afterTest(self, test):
+ """Called after the test has been run and the result recorded
+ (after stopTest).
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ afterTest._new = True
+
+ def beforeContext(self):
+ """Called before a context (generally a module) is
+ examined. Because the context is not yet loaded, plugins don't
+ get to know what the context is; so any context operations
+ should use a stack that is pushed in `beforeContext` and popped
+ in `afterContext` to ensure they operate symmetrically.
+
+ `beforeContext` and `afterContext` are mainly useful for tracking
+ and restoring global state around possible changes from within a
+ context, whatever the context may be. If you need to operate on
+ contexts themselves, see `startContext` and `stopContext`, which
+ are passed the context in question, but are called after
+ it has been loaded (imported in the module case).
+ """
+ pass
+ beforeContext._new = True
+
+ def beforeDirectory(self, path):
+ """Called before tests are loaded from directory at path.
+
+ :param path: the directory that is about to be processed
+ """
+ pass
+ beforeDirectory._new = True
+
+ def beforeImport(self, filename, module):
+ """Called before module is imported from filename.
+
+ :param filename: The file that will be loaded
+ :param module: The name of the module found in file
+ :type module: string
+ """
+ beforeImport._new = True
+
+ def beforeTest(self, test):
+ """Called before the test is run (before startTest).
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ beforeTest._new = True
+
+ def begin(self):
+ """Called before any tests are collected or run. Use this to
+ perform any setup needed before testing begins.
+ """
+ pass
+
+ def configure(self, options, conf):
+ """Called after the command line has been parsed, with the
+ parsed options and the config container. Here, implement any
+ config storage or changes to state or operation that are set
+ by command line options.
+
+ DO NOT return a value from this method unless you want to
+ stop all other plugins from being configured.
+ """
+ pass
+
+ def finalize(self, result):
+ """Called after all report output, including output from all
+ plugins, has been sent to the stream. Use this to print final
+ test results or perform final cleanup. Return None to allow
+ other plugins to continue printing, or any other value to stop
+ them.
+
+ :param result: test result object
+
+ .. Note:: When tests are run under a test runner other than
+ :class:`nose.core.TextTestRunner`, such as
+ via ``python setup.py test``, this method may be called
+ **before** the default report output is sent.
+ """
+ pass
+
+ def describeTest(self, test):
+ """Return a test description.
+
+ Called by :meth:`nose.case.Test.shortDescription`.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ describeTest._new = True
+
+ def formatError(self, test, err):
+ """Called in result.addError, before plugin.addError. If you
+ want to replace or modify the error tuple, return a new error
+ tuple, otherwise return err, the original error tuple.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ formatError._new = True
+ formatError.chainable = True
+ # test arg is not chainable
+ formatError.static_args = (True, False)
+
+ def formatFailure(self, test, err):
+ """Called in result.addFailure, before plugin.addFailure. If you
+ want to replace or modify the error tuple, return a new error
+ tuple, otherwise return err, the original error tuple.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ formatFailure._new = True
+ formatFailure.chainable = True
+ # test arg is not chainable
+ formatFailure.static_args = (True, False)
+
+ def handleError(self, test, err):
+ """Called on addError. To handle the error yourself and prevent normal
+ error processing, return a true value.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ handleError._new = True
+
+ def handleFailure(self, test, err):
+ """Called on addFailure. To handle the failure yourself and
+ prevent normal failure processing, return a true value.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ handleFailure._new = True
+
+ def loadTestsFromDir(self, path):
+ """Return iterable of tests from a directory. May be a
+ generator. Each item returned must be a runnable
+ unittest.TestCase (or subclass) instance or suite instance.
+ Return None if your plugin cannot collect any tests from
+ directory.
+
+ :param path: The path to the directory.
+ """
+ pass
+ loadTestsFromDir.generative = True
+ loadTestsFromDir._new = True
+
+ def loadTestsFromModule(self, module, path=None):
+ """Return iterable of tests in a module. May be a
+ generator. Each item returned must be a runnable
+ unittest.TestCase (or subclass) instance.
+ Return None if your plugin cannot
+ collect any tests from module.
+
+ :param module: The module object
+ :type module: python module
+ :param path: the path of the module to search, to distinguish from
+ namespace package modules
+
+ .. note::
+
+ NEW. The ``path`` parameter will only be passed by nose 0.11
+ or above.
+ """
+ pass
+ loadTestsFromModule.generative = True
+
+ def loadTestsFromName(self, name, module=None, importPath=None):
+ """Return tests in this file or module. Return None if you are not able
+ to load any tests, or an iterable if you are. May be a
+ generator.
+
+ :param name: The test name. May be a file or module name plus a test
+ callable. Use split_test_name to split into parts. Or it might
+ be some crazy name of your own devising, in which case, do
+ whatever you want.
+ :param module: Module from which the name is to be loaded
+ :param importPath: Path from which file (must be a python module) was
+ found
+
+ .. warning:: DEPRECATED: this argument will NOT be passed.
+ """
+ pass
+ loadTestsFromName.generative = True
+
+ def loadTestsFromNames(self, names, module=None):
+ """Return a tuple of (tests loaded, remaining names). Return
+ None if you are not able to load any tests. Multiple plugins
+ may implement loadTestsFromNames; the remaining name list from
+ each will be passed to the next as input.
+
+ :param names: List of test names.
+ :type names: iterable
+ :param module: Module from which the names are to be loaded
+ """
+ pass
+ loadTestsFromNames._new = True
+ loadTestsFromNames.chainable = True
+
+ def loadTestsFromFile(self, filename):
+ """Return tests in this file. Return None if you are not
+ interested in loading any tests, or an iterable if you are and
+ can load some. May be a generator. *If you are interested in
+ loading tests from the file and encounter no errors, but find
+ no tests, yield False or return [False].*
+
+ .. Note:: This method replaces loadTestsFromPath from the 0.9
+ API.
+
+ :param filename: The full path to the file or directory.
+ """
+ pass
+ loadTestsFromFile.generative = True
+ loadTestsFromFile._new = True
+
+ def loadTestsFromPath(self, path):
+ """
+ .. warning:: DEPRECATED -- use loadTestsFromFile instead
+ """
+ pass
+ loadTestsFromPath.deprecated = True
+
+ def loadTestsFromTestCase(self, cls):
+ """Return tests in this test case class. Return None if you are
+ not able to load any tests, or an iterable if you are. May be a
+ generator.
+
+ :param cls: The test case class. Must be subclass of
+ :class:`unittest.TestCase`.
+ """
+ pass
+ loadTestsFromTestCase.generative = True
+
+ def loadTestsFromTestClass(self, cls):
+ """Return tests in this test class. Class will *not* be a
+ unittest.TestCase subclass. Return None if you are not able to
+ load any tests, an iterable if you are. May be a generator.
+
+ :param cls: The test case class. Must be **not** be subclass of
+ :class:`unittest.TestCase`.
+ """
+ pass
+ loadTestsFromTestClass._new = True
+ loadTestsFromTestClass.generative = True
+
+ def makeTest(self, obj, parent):
+ """Given an object and its parent, return or yield one or more
+ test cases. Each test must be a unittest.TestCase (or subclass)
+ instance. This is called before default test loading to allow
+ plugins to load an alternate test case or cases for an
+ object. May be a generator.
+
+ :param obj: The object to be made into a test
+ :param parent: The parent of obj (eg, for a method, the class)
+ """
+ pass
+ makeTest._new = True
+ makeTest.generative = True
+
+ def options(self, parser, env):
+ """Called to allow plugin to register command line
+ options with the parser.
+
+ DO NOT return a value from this method unless you want to stop
+ all other plugins from setting their options.
+
+ :param parser: options parser instance
+ :type parser: :class:`ConfigParser.ConfigParser`
+ :param env: environment, default is os.environ
+ """
+ pass
+ options._new = True
+
+ def prepareTest(self, test):
+ """Called before the test is run by the test runner. Please
+ note the article *the* in the previous sentence: prepareTest
+ is called *only once*, and is passed the test case or test
+ suite that the test runner will execute. It is *not* called
+ for each individual test case. If you return a non-None value,
+ that return value will be run as the test. Use this hook to
+ wrap or decorate the test with another function. If you need
+ to modify or wrap individual test cases, use `prepareTestCase`
+ instead.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+
+ def prepareTestCase(self, test):
+ """Prepare or wrap an individual test case. Called before
+ execution of the test. The test passed here is a
+ nose.case.Test instance; the case to be executed is in the
+ test attribute of the passed case. To modify the test to be
+ run, you should return a callable that takes one argument (the
+ test result object) -- it is recommended that you *do not*
+ side-effect the nose.case.Test instance you have been passed.
+
+ Keep in mind that when you replace the test callable you are
+ replacing the run() method of the test case -- including the
+ exception handling and result calls, etc.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ prepareTestCase._new = True
+
+ def prepareTestLoader(self, loader):
+ """Called before tests are loaded. To replace the test loader,
+ return a test loader. To allow other plugins to process the
+ test loader, return None. Only one plugin may replace the test
+ loader. Only valid when using nose.TestProgram.
+
+ :param loader: :class:`nose.loader.TestLoader`
+ (or other loader) instance
+ """
+ pass
+ prepareTestLoader._new = True
+
+ def prepareTestResult(self, result):
+ """Called before the first test is run. To use a different
+ test result handler for all tests than the given result,
+ return a test result handler. NOTE however that this handler
+ will only be seen by tests, that is, inside of the result
+ proxy system. The TestRunner and TestProgram -- whether nose's
+ or other -- will continue to see the original result
+ handler. For this reason, it is usually better to monkeypatch
+ the result (for instance, if you want to handle some
+ exceptions in a unique way). Only one plugin may replace the
+ result, but many may monkeypatch it. If you want to
+ monkeypatch and stop other plugins from doing so, monkeypatch
+ and return the patched result.
+
+ :param result: :class:`nose.result.TextTestResult`
+ (or other result) instance
+ """
+ pass
+ prepareTestResult._new = True
+
+ def prepareTestRunner(self, runner):
+ """Called before tests are run. To replace the test runner,
+ return a test runner. To allow other plugins to process the
+ test runner, return None. Only valid when using nose.TestProgram.
+
+ :param runner: :class:`nose.core.TextTestRunner`
+ (or other runner) instance
+ """
+ pass
+ prepareTestRunner._new = True
+
+ def report(self, stream):
+ """Called after all error output has been printed. Print your
+ plugin's report to the provided stream. Return None to allow
+ other plugins to print reports, any other value to stop them.
+
+ :param stream: stream object; send your output here
+ :type stream: file-like object
+ """
+ pass
+
+ def setOutputStream(self, stream):
+ """Called before test output begins. To direct test output to a
+ new stream, return a stream object, which must implement a
+ `write(msg)` method. If you only want to note the stream, not
+ capture or redirect it, then return None.
+
+ :param stream: stream object; send your output here
+ :type stream: file-like object
+ """
+
+ def startContext(self, context):
+ """Called before context setup and the running of tests in the
+ context. Note that tests have already been *loaded* from the
+ context before this call.
+
+ :param context: the context about to be setup. May be a module or
+ class, or any other object that contains tests.
+ """
+ pass
+ startContext._new = True
+
+ def startTest(self, test):
+ """Called before each test is run. DO NOT return a value unless
+ you want to stop other plugins from seeing the test start.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+
+ def stopContext(self, context):
+ """Called after the tests in a context have run and the
+ context has been torn down.
+
+ :param context: the context that has been torn down. May be a module or
+ class, or any other object that contains tests.
+ """
+ pass
+ stopContext._new = True
+
+ def stopTest(self, test):
+ """Called after each test is run. DO NOT return a value unless
+ you want to stop other plugins from seeing that the test has stopped.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+
+ def testName(self, test):
+ """Return a short test name. Called by `nose.case.Test.__str__`.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ testName._new = True
+
+ def wantClass(self, cls):
+ """Return true if you want the main test selector to collect
+ tests from this class, false if you don't, and None if you don't
+ care.
+
+ :param cls: The class being examined by the selector
+ """
+ pass
+
+ def wantDirectory(self, dirname):
+ """Return true if you want test collection to descend into this
+ directory, false if you do not, and None if you don't care.
+
+ :param dirname: Full path to directory being examined by the selector
+ """
+ pass
+
+ def wantFile(self, file):
+ """Return true if you want to collect tests from this file,
+ false if you do not and None if you don't care.
+
+ Change from 0.9: The optional package parameter is no longer passed.
+
+ :param file: Full path to file being examined by the selector
+ """
+ pass
+
+ def wantFunction(self, function):
+ """Return true to collect this function as a test, false to
+ prevent it from being collected, and None if you don't care.
+
+ :param function: The function object being examined by the selector
+ """
+ pass
+
+ def wantMethod(self, method):
+ """Return true to collect this method as a test, false to
+ prevent it from being collected, and None if you don't care.
+
+ :param method: The method object being examined by the selector
+ :type method: unbound method
+ """
+ pass
+
+ def wantModule(self, module):
+ """Return true if you want the collector to descend into this
+ module, false to prevent the collector from descending into the
+ module, and None if you don't care.
+
+ :param module: The module object being examined by the selector
+ :type module: python module
+ """
+ pass
+
+ def wantModuleTests(self, module):
+ """
+ .. warning:: DEPRECATED -- this method will not be called, it has
+ been folded into wantModule.
+ """
+ pass
+ wantModuleTests.deprecated = True
+
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/builtin.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/builtin.py
new file mode 100644
index 00000000..4fcc0018
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/builtin.py
@@ -0,0 +1,34 @@
+"""
+Lists builtin plugins.
+"""
+plugins = []
+builtins = (
+ ('nose.plugins.attrib', 'AttributeSelector'),
+ ('nose.plugins.capture', 'Capture'),
+ ('nose.plugins.logcapture', 'LogCapture'),
+ ('nose.plugins.cover', 'Coverage'),
+ ('nose.plugins.debug', 'Pdb'),
+ ('nose.plugins.deprecated', 'Deprecated'),
+ ('nose.plugins.doctests', 'Doctest'),
+ ('nose.plugins.isolate', 'IsolationPlugin'),
+ ('nose.plugins.failuredetail', 'FailureDetail'),
+ ('nose.plugins.prof', 'Profile'),
+ ('nose.plugins.skip', 'Skip'),
+ ('nose.plugins.testid', 'TestId'),
+ ('nose.plugins.multiprocess', 'MultiProcess'),
+ ('nose.plugins.xunit', 'Xunit'),
+ ('nose.plugins.allmodules', 'AllModules'),
+ ('nose.plugins.collect', 'CollectOnly'),
+ )
+
+for module, cls in builtins:
+ try:
+ plugmod = __import__(module, globals(), locals(), [cls])
+ except KeyboardInterrupt:
+ raise
+ except:
+ continue
+ plug = getattr(plugmod, cls)
+ plugins.append(plug)
+ globals()[cls] = plug
+
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/capture.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/capture.py
new file mode 100644
index 00000000..bf40bcd5
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/capture.py
@@ -0,0 +1,115 @@
+"""
+This plugin captures stdout during test execution. If the test fails
+or raises an error, the captured output will be appended to the error
+or failure output. It is enabled by default but can be disabled with
+the options ``-s`` or ``--nocapture``.
+
+:Options:
+ ``--nocapture``
+ Don't capture stdout (any stdout output will be printed immediately)
+
+"""
+import logging
+import os
+import sys
+from nose.plugins.base import Plugin
+from nose.pyversion import exc_to_unicode, force_unicode
+from nose.util import ln
+from io import StringIO
+
+
+log = logging.getLogger(__name__)
+
+class Capture(Plugin):
+ """
+ Output capture plugin. Enabled by default. Disable with ``-s`` or
+ ``--nocapture``. This plugin captures stdout during test execution,
+ appending any output captured to the error or failure output,
+ should the test fail or raise an error.
+ """
+ enabled = True
+ env_opt = 'NOSE_NOCAPTURE'
+ name = 'capture'
+ score = 1600
+
+ def __init__(self):
+ self.stdout = []
+ self._buf = None
+
+ def options(self, parser, env):
+ """Register commandline options
+ """
+ parser.add_option(
+ "-s", "--nocapture", action="store_false",
+ default=not env.get(self.env_opt), dest="capture",
+ help="Don't capture stdout (any stdout output "
+ "will be printed immediately) [NOSE_NOCAPTURE]")
+
+ def configure(self, options, conf):
+ """Configure plugin. Plugin is enabled by default.
+ """
+ self.conf = conf
+ if not options.capture:
+ self.enabled = False
+
+ def afterTest(self, test):
+ """Clear capture buffer.
+ """
+ self.end()
+ self._buf = None
+
+ def begin(self):
+ """Replace sys.stdout with capture buffer.
+ """
+ self.start() # get an early handle on sys.stdout
+
+ def beforeTest(self, test):
+ """Flush capture buffer.
+ """
+ self.start()
+
+ def formatError(self, test, err):
+ """Add captured output to error report.
+ """
+ test.capturedOutput = output = self.buffer
+ self._buf = None
+ if not output:
+ # Don't return None as that will prevent other
+ # formatters from formatting and remove earlier formatters
+ # formats, instead return the err we got
+ return err
+ ec, ev, tb = err
+ return (ec, self.addCaptureToErr(ev, output), tb)
+
+ def formatFailure(self, test, err):
+ """Add captured output to failure report.
+ """
+ return self.formatError(test, err)
+
+ def addCaptureToErr(self, ev, output):
+ ev = exc_to_unicode(ev)
+ output = force_unicode(output)
+ return '\n'.join([ev, ln('>> begin captured stdout <<'),
+ output, ln('>> end captured stdout <<')])
+
+ def start(self):
+ self.stdout.append(sys.stdout)
+ self._buf = StringIO()
+ sys.stdout = self._buf
+
+ def end(self):
+ if self.stdout:
+ sys.stdout = self.stdout.pop()
+
+ def finalize(self, result):
+ """Restore stdout.
+ """
+ while self.stdout:
+ self.end()
+
+ def _get_buffer(self):
+ if self._buf is not None:
+ return self._buf.getvalue()
+
+ buffer = property(_get_buffer, None, None,
+ """Captured stdout output.""")
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/collect.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/collect.py
new file mode 100644
index 00000000..2e6833fc
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/collect.py
@@ -0,0 +1,95 @@
+"""
+This plugin bypasses the actual execution of tests, and instead just collects
+test names. Fixtures are also bypassed, so running nosetests with the
+collection plugin enabled should be very quick.
+
+This plugin is useful in combination with the testid plugin (``--with-id``).
+Run both together to get an indexed list of all tests, which will enable you to
+run individual tests by index number.
+
+This plugin is also useful for counting tests in a test suite, and making
+people watching your demo think all of your tests pass.
+"""
+from nose.plugins.base import Plugin
+from nose.case import Test
+import logging
+import unittest
+import collections
+
+log = logging.getLogger(__name__)
+
+
+class CollectOnly(Plugin):
+ """
+ Collect and output test names only, don't run any tests.
+ """
+ name = "collect-only"
+ enableOpt = 'collect_only'
+
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ parser.add_option('--collect-only',
+ action='store_true',
+ dest=self.enableOpt,
+ default=env.get('NOSE_COLLECT_ONLY'),
+ help="Enable collect-only: %s [COLLECT_ONLY]" %
+ (self.help()))
+
+ def prepareTestLoader(self, loader):
+ """Install collect-only suite class in TestLoader.
+ """
+ # Disable context awareness
+ log.debug("Preparing test loader")
+ loader.suiteClass = TestSuiteFactory(self.conf)
+
+ def prepareTestCase(self, test):
+ """Replace actual test with dummy that always passes.
+ """
+ # Return something that always passes
+ log.debug("Preparing test case %s", test)
+ if not isinstance(test, Test):
+ return
+ def run(result):
+ # We need to make these plugin calls because there won't be
+ # a result proxy, due to using a stripped-down test suite
+ self.conf.plugins.startTest(test)
+ result.startTest(test)
+ self.conf.plugins.addSuccess(test)
+ result.addSuccess(test)
+ self.conf.plugins.stopTest(test)
+ result.stopTest(test)
+ return run
+
+
+class TestSuiteFactory:
+ """
+ Factory for producing configured test suites.
+ """
+ def __init__(self, conf):
+ self.conf = conf
+
+ def __call__(self, tests=(), **kw):
+ return TestSuite(tests, conf=self.conf)
+
+
+class TestSuite(unittest.TestSuite):
+ """
+ Basic test suite that bypasses most proxy and plugin calls, but does
+ wrap tests in a nose.case.Test so prepareTestCase will be called.
+ """
+ def __init__(self, tests=(), conf=None):
+ self.conf = conf
+ # Exec lazy suites: makes discovery depth-first
+ if isinstance(tests, collections.Callable):
+ tests = tests()
+ log.debug("TestSuite(%r)", tests)
+ unittest.TestSuite.__init__(self, tests)
+
+ def addTest(self, test):
+ log.debug("Add test %s", test)
+ if isinstance(test, unittest.TestSuite):
+ self._tests.append(test)
+ else:
+ self._tests.append(Test(test, config=self.conf))
+
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/cover.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/cover.py
new file mode 100644
index 00000000..558614cc
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/cover.py
@@ -0,0 +1,253 @@
+"""If you have Ned Batchelder's coverage_ module installed, you may activate a
+coverage report with the ``--with-coverage`` switch or NOSE_WITH_COVERAGE
+environment variable. The coverage report will cover any python source module
+imported after the start of the test run, excluding modules that match
+testMatch. If you want to include those modules too, use the ``--cover-tests``
+switch, or set the NOSE_COVER_TESTS environment variable to a true value. To
+restrict the coverage report to modules from a particular package or packages,
+use the ``--cover-package`` switch or the NOSE_COVER_PACKAGE environment
+variable.
+
+.. _coverage: http://www.nedbatchelder.com/code/modules/coverage.html
+"""
+import logging
+import re
+import sys
+import io
+from nose.plugins.base import Plugin
+from nose.util import src, tolist
+
+log = logging.getLogger(__name__)
+
+
+class Coverage(Plugin):
+ """
+ Activate a coverage report using Ned Batchelder's coverage module.
+ """
+ coverTests = False
+ coverPackages = None
+ coverInstance = None
+ coverErase = False
+ coverMinPercentage = None
+ score = 200
+ status = {}
+
+ def options(self, parser, env):
+ """
+ Add options to command line.
+ """
+ super(Coverage, self).options(parser, env)
+ parser.add_option("--cover-package", action="append",
+ default=env.get('NOSE_COVER_PACKAGE'),
+ metavar="PACKAGE",
+ dest="cover_packages",
+ help="Restrict coverage output to selected packages "
+ "[NOSE_COVER_PACKAGE]")
+ parser.add_option("--cover-erase", action="store_true",
+ default=env.get('NOSE_COVER_ERASE'),
+ dest="cover_erase",
+ help="Erase previously collected coverage "
+ "statistics before run")
+ parser.add_option("--cover-tests", action="store_true",
+ dest="cover_tests",
+ default=env.get('NOSE_COVER_TESTS'),
+ help="Include test modules in coverage report "
+ "[NOSE_COVER_TESTS]")
+ parser.add_option("--cover-min-percentage", action="store",
+ dest="cover_min_percentage",
+ default=env.get('NOSE_COVER_MIN_PERCENTAGE'),
+ help="Minimum percentage of coverage for tests "
+ "to pass [NOSE_COVER_MIN_PERCENTAGE]")
+ parser.add_option("--cover-inclusive", action="store_true",
+ dest="cover_inclusive",
+ default=env.get('NOSE_COVER_INCLUSIVE'),
+ help="Include all python files under working "
+ "directory in coverage report. Useful for "
+ "discovering holes in test coverage if not all "
+ "files are imported by the test suite. "
+ "[NOSE_COVER_INCLUSIVE]")
+ parser.add_option("--cover-html", action="store_true",
+ default=env.get('NOSE_COVER_HTML'),
+ dest='cover_html',
+ help="Produce HTML coverage information")
+ parser.add_option('--cover-html-dir', action='store',
+ default=env.get('NOSE_COVER_HTML_DIR', 'cover'),
+ dest='cover_html_dir',
+ metavar='DIR',
+ help='Produce HTML coverage information in dir')
+ parser.add_option("--cover-branches", action="store_true",
+ default=env.get('NOSE_COVER_BRANCHES'),
+ dest="cover_branches",
+ help="Include branch coverage in coverage report "
+ "[NOSE_COVER_BRANCHES]")
+ parser.add_option("--cover-xml", action="store_true",
+ default=env.get('NOSE_COVER_XML'),
+ dest="cover_xml",
+ help="Produce XML coverage information")
+ parser.add_option("--cover-xml-file", action="store",
+ default=env.get('NOSE_COVER_XML_FILE', 'coverage.xml'),
+ dest="cover_xml_file",
+ metavar="FILE",
+ help="Produce XML coverage information in file")
+
+ def configure(self, options, conf):
+ """
+ Configure plugin.
+ """
+ try:
+ self.status.pop('active')
+ except KeyError:
+ pass
+ super(Coverage, self).configure(options, conf)
+ if conf.worker:
+ return
+ if self.enabled:
+ try:
+ import coverage
+ if not hasattr(coverage, 'coverage'):
+ raise ImportError("Unable to import coverage module")
+ except ImportError:
+ log.error("Coverage not available: "
+ "unable to import coverage module")
+ self.enabled = False
+ return
+ self.conf = conf
+ self.coverErase = options.cover_erase
+ self.coverTests = options.cover_tests
+ self.coverPackages = []
+ if options.cover_packages:
+ if isinstance(options.cover_packages, (list, tuple)):
+ cover_packages = options.cover_packages
+ else:
+ cover_packages = [options.cover_packages]
+ for pkgs in [tolist(x) for x in cover_packages]:
+ self.coverPackages.extend(pkgs)
+ self.coverInclusive = options.cover_inclusive
+ if self.coverPackages:
+ log.info("Coverage report will include only packages: %s",
+ self.coverPackages)
+ self.coverHtmlDir = None
+ if options.cover_html:
+ self.coverHtmlDir = options.cover_html_dir
+ log.debug('Will put HTML coverage report in %s', self.coverHtmlDir)
+ self.coverBranches = options.cover_branches
+ self.coverXmlFile = None
+ if options.cover_min_percentage:
+ self.coverMinPercentage = int(options.cover_min_percentage.rstrip('%'))
+ if options.cover_xml:
+ self.coverXmlFile = options.cover_xml_file
+ log.debug('Will put XML coverage report in %s', self.coverXmlFile)
+ if self.enabled:
+ self.status['active'] = True
+ self.coverInstance = coverage.coverage(auto_data=False,
+ branch=self.coverBranches, data_suffix=None,
+ source=self.coverPackages)
+
+ def begin(self):
+ """
+ Begin recording coverage information.
+ """
+ log.debug("Coverage begin")
+ self.skipModules = list(sys.modules.keys())[:]
+ if self.coverErase:
+ log.debug("Clearing previously collected coverage statistics")
+ self.coverInstance.combine()
+ self.coverInstance.erase()
+ self.coverInstance.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]')
+ self.coverInstance.load()
+ self.coverInstance.start()
+
+ def report(self, stream):
+ """
+ Output code coverage report.
+ """
+ log.debug("Coverage report")
+ self.coverInstance.stop()
+ self.coverInstance.combine()
+ self.coverInstance.save()
+ modules = [module
+ for name, module in list(sys.modules.items())
+ if self.wantModuleCoverage(name, module)]
+ log.debug("Coverage report will cover modules: %s", modules)
+ self.coverInstance.report(modules, file=stream)
+
+ import coverage
+ if self.coverHtmlDir:
+ log.debug("Generating HTML coverage report")
+ try:
+ self.coverInstance.html_report(modules, self.coverHtmlDir)
+ except coverage.misc.CoverageException as e:
+ log.warning("Failed to generate HTML report: %s" % str(e))
+
+ if self.coverXmlFile:
+ log.debug("Generating XML coverage report")
+ try:
+ self.coverInstance.xml_report(modules, self.coverXmlFile)
+ except coverage.misc.CoverageException as e:
+ log.warning("Failed to generate XML report: %s" % str(e))
+
+ # make sure we have minimum required coverage
+ if self.coverMinPercentage:
+ f = io.StringIO()
+ self.coverInstance.report(modules, file=f)
+
+ multiPackageRe = (r'-------\s\w+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?'
+ r'\s+(\d+)%\s+\d*\s{0,1}$')
+ singlePackageRe = (r'-------\s[\w./]+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?'
+ r'\s+(\d+)%(?:\s+[-\d, ]+)\s{0,1}$')
+
+ m = re.search(multiPackageRe, f.getvalue())
+ if m is None:
+ m = re.search(singlePackageRe, f.getvalue())
+
+ if m:
+ percentage = int(m.groups()[0])
+ if percentage < self.coverMinPercentage:
+ log.error('TOTAL Coverage did not reach minimum '
+ 'required: %d%%' % self.coverMinPercentage)
+ sys.exit(1)
+ else:
+ log.error("No total percentage was found in coverage output, "
+ "something went wrong.")
+
+
+ def wantModuleCoverage(self, name, module):
+ if not hasattr(module, '__file__'):
+ log.debug("no coverage of %s: no __file__", name)
+ return False
+ module_file = src(module.__file__)
+ if not module_file or not module_file.endswith('.py'):
+ log.debug("no coverage of %s: not a python file", name)
+ return False
+ if self.coverPackages:
+ for package in self.coverPackages:
+ if (re.findall(r'^%s\b' % re.escape(package), name)
+ and (self.coverTests
+ or not self.conf.testMatch.search(name))):
+ log.debug("coverage for %s", name)
+ return True
+ if name in self.skipModules:
+ log.debug("no coverage for %s: loaded before coverage start",
+ name)
+ return False
+ if self.conf.testMatch.search(name) and not self.coverTests:
+ log.debug("no coverage for %s: is a test", name)
+ return False
+ # accept any package that passed the previous tests, unless
+ # coverPackages is on -- in that case, if we wanted this
+ # module, we would have already returned True
+ return not self.coverPackages
+
+ def wantFile(self, file, package=None):
+ """If inclusive coverage enabled, return true for all source files
+ in wanted packages.
+ """
+ if self.coverInclusive:
+ if file.endswith(".py"):
+ if package and self.coverPackages:
+ for want in self.coverPackages:
+ if package.startswith(want):
+ return True
+ else:
+ return True
+ return None
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/debug.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/debug.py
new file mode 100644
index 00000000..78243e60
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/debug.py
@@ -0,0 +1,67 @@
+"""
+This plugin provides ``--pdb`` and ``--pdb-failures`` options. The ``--pdb``
+option will drop the test runner into pdb when it encounters an error. To
+drop into pdb on failure, use ``--pdb-failures``.
+"""
+
+import pdb
+from nose.plugins.base import Plugin
+
+class Pdb(Plugin):
+ """
+ Provides --pdb and --pdb-failures options that cause the test runner to
+ drop into pdb if it encounters an error or failure, respectively.
+ """
+ enabled_for_errors = False
+ enabled_for_failures = False
+ score = 5 # run last, among builtins
+
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ parser.add_option(
+ "--pdb", action="store_true", dest="debugBoth",
+ default=env.get('NOSE_PDB', False),
+ help="Drop into debugger on failures or errors")
+ parser.add_option(
+ "--pdb-failures", action="store_true",
+ dest="debugFailures",
+ default=env.get('NOSE_PDB_FAILURES', False),
+ help="Drop into debugger on failures")
+ parser.add_option(
+ "--pdb-errors", action="store_true",
+ dest="debugErrors",
+ default=env.get('NOSE_PDB_ERRORS', False),
+ help="Drop into debugger on errors")
+
+ def configure(self, options, conf):
+ """Configure which kinds of exceptions trigger plugin.
+ """
+ self.conf = conf
+ self.enabled_for_errors = options.debugErrors or options.debugBoth
+ self.enabled_for_failures = options.debugFailures or options.debugBoth
+ self.enabled = self.enabled_for_failures or self.enabled_for_errors
+
+ def addError(self, test, err):
+ """Enter pdb if configured to debug errors.
+ """
+ if not self.enabled_for_errors:
+ return
+ self.debug(err)
+
+ def addFailure(self, test, err):
+ """Enter pdb if configured to debug failures.
+ """
+ if not self.enabled_for_failures:
+ return
+ self.debug(err)
+
+ def debug(self, err):
+ import sys # FIXME why is this import here?
+ ec, ev, tb = err
+ stdout = sys.stdout
+ sys.stdout = sys.__stdout__
+ try:
+ pdb.post_mortem(tb)
+ finally:
+ sys.stdout = stdout
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/deprecated.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/deprecated.py
new file mode 100644
index 00000000..461a26be
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/deprecated.py
@@ -0,0 +1,45 @@
+"""
+This plugin installs a DEPRECATED error class for the :class:`DeprecatedTest`
+exception. When :class:`DeprecatedTest` is raised, the exception will be logged
+in the deprecated attribute of the result, ``D`` or ``DEPRECATED`` (verbose)
+will be output, and the exception will not be counted as an error or failure.
+It is enabled by default, but can be turned off by using ``--no-deprecated``.
+"""
+
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+
+
+class DeprecatedTest(Exception):
+ """Raise this exception to mark a test as deprecated.
+ """
+ pass
+
+
+class Deprecated(ErrorClassPlugin):
+ """
+ Installs a DEPRECATED error class for the DeprecatedTest exception. Enabled
+ by default.
+ """
+ enabled = True
+ deprecated = ErrorClass(DeprecatedTest,
+ label='DEPRECATED',
+ isfailure=False)
+
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ env_opt = 'NOSE_WITHOUT_DEPRECATED'
+ parser.add_option('--no-deprecated', action='store_true',
+ dest='noDeprecated', default=env.get(env_opt, False),
+ help="Disable special handling of DeprecatedTest "
+ "exceptions.")
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ if not self.can_configure:
+ return
+ self.conf = conf
+ disable = getattr(options, 'noDeprecated', False)
+ if disable:
+ self.enabled = False
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/doctests.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/doctests.py
new file mode 100644
index 00000000..ca037814
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/doctests.py
@@ -0,0 +1,452 @@
+"""Use the Doctest plugin with ``--with-doctest`` or the NOSE_WITH_DOCTEST
+environment variable to enable collection and execution of :mod:`doctests
+<doctest>`. Because doctests are usually included in the tested package
+(instead of being grouped into packages or modules of their own), nose only
+looks for them in the non-test packages it discovers in the working directory.
+
+Doctests may also be placed into files other than python modules, in which
+case they can be collected and executed by using the ``--doctest-extension``
+switch or NOSE_DOCTEST_EXTENSION environment variable to indicate which file
+extension(s) to load.
+
+When loading doctests from non-module files, use the ``--doctest-fixtures``
+switch to specify how to find modules containing fixtures for the tests. A
+module name will be produced by appending the value of that switch to the base
+name of each doctest file loaded. For example, a doctest file "widgets.rst"
+with the switch ``--doctest-fixtures=_fixt`` will load fixtures from the module
+``widgets_fixt.py``.
+
+A fixtures module may define any or all of the following functions:
+
+* setup([module]) or setup_module([module])
+
+ Called before the test runs. You may raise SkipTest to skip all tests.
+
+* teardown([module]) or teardown_module([module])
+
+ Called after the test runs, if setup/setup_module did not raise an
+ unhandled exception.
+
+* setup_test(test)
+
+ Called before the test. NOTE: the argument passed is a
+ doctest.DocTest instance, *not* a unittest.TestCase.
+
+* teardown_test(test)
+
+ Called after the test, if setup_test did not raise an exception. NOTE: the
+ argument passed is a doctest.DocTest instance, *not* a unittest.TestCase.
+
+Doctests are run like any other test, with the exception that output
+capture does not work; doctest does its own output capture while running a
+test.
+
+.. note ::
+
+ See :doc:`../doc_tests/test_doctest_fixtures/doctest_fixtures` for
+ additional documentation and examples.
+
+"""
+
+
+import logging
+import os
+import sys
+import unittest
+from inspect import getmodule
+from nose.plugins.base import Plugin
+from nose.suite import ContextList
+from nose.util import anyp, getpackage, test_address, resolve_name, \
+ src, tolist, isproperty
+try:
+ from io import StringIO
+except ImportError:
+ from io import StringIO
+import sys
+import builtins as builtin_mod
+
+log = logging.getLogger(__name__)
+
+try:
+ import doctest
+ doctest.DocTestCase
+ # system version of doctest is acceptable, but needs a monkeypatch
+except (ImportError, AttributeError):
+ # system version is too old
+ import nose.ext.dtcompat as doctest
+
+
+#
+# Doctest and coverage don't get along, so we need to create
+# a monkeypatch that will replace the part of doctest that
+# interferes with coverage reports.
+#
+# The monkeypatch is based on this zope patch:
+# http://svn.zope.org/Zope3/trunk/src/zope/testing/doctest.py?rev=28679&r1=28703&r2=28705
+#
+_orp = doctest._OutputRedirectingPdb
+
+class NoseOutputRedirectingPdb(_orp):
+ def __init__(self, out):
+ self.__debugger_used = False
+ _orp.__init__(self, out)
+
+ def set_trace(self):
+ self.__debugger_used = True
+ _orp.set_trace(self, sys._getframe().f_back)
+
+ def set_continue(self):
+ # Calling set_continue unconditionally would break unit test
+ # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
+ if self.__debugger_used:
+ _orp.set_continue(self)
+doctest._OutputRedirectingPdb = NoseOutputRedirectingPdb
+
+
+class DoctestSuite(unittest.TestSuite):
+ """
+ Doctest suites are parallelizable at the module or file level only,
+ since they may be attached to objects that are not individually
+ addressable (like properties). This suite subclass is used when
+ loading doctests from a module to ensure that behavior.
+
+ This class is used only if the plugin is not fully prepared;
+ in normal use, the loader's suiteClass is used.
+
+ """
+ can_split = False
+
+ def __init__(self, tests=(), context=None, can_split=False):
+ self.context = context
+ self.can_split = can_split
+ unittest.TestSuite.__init__(self, tests=tests)
+
+ def address(self):
+ return test_address(self.context)
+
+ def __iter__(self):
+ # 2.3 compat
+ return iter(self._tests)
+
+ def __str__(self):
+ return str(self._tests)
+
+
+class Doctest(Plugin):
+ """
+ Activate doctest plugin to find and run doctests in non-test modules.
+ """
+ extension = None
+ suiteClass = DoctestSuite
+
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ Plugin.options(self, parser, env)
+ parser.add_option('--doctest-tests', action='store_true',
+ dest='doctest_tests',
+ default=env.get('NOSE_DOCTEST_TESTS'),
+ help="Also look for doctests in test modules. "
+ "Note that classes, methods and functions should "
+ "have either doctests or non-doctest tests, "
+ "not both. [NOSE_DOCTEST_TESTS]")
+ parser.add_option('--doctest-extension', action="append",
+ dest="doctestExtension",
+ metavar="EXT",
+ help="Also look for doctests in files with "
+ "this extension [NOSE_DOCTEST_EXTENSION]")
+ parser.add_option('--doctest-result-variable',
+ dest='doctest_result_var',
+ default=env.get('NOSE_DOCTEST_RESULT_VAR'),
+ metavar="VAR",
+ help="Change the variable name set to the result of "
+ "the last interpreter command from the default '_'. "
+ "Can be used to avoid conflicts with the _() "
+ "function used for text translation. "
+ "[NOSE_DOCTEST_RESULT_VAR]")
+ parser.add_option('--doctest-fixtures', action="store",
+ dest="doctestFixtures",
+ metavar="SUFFIX",
+ help="Find fixtures for a doctest file in module "
+ "with this name appended to the base name "
+ "of the doctest file")
+ parser.add_option('--doctest-options', action="append",
+ dest="doctestOptions",
+ metavar="OPTIONS",
+ help="Specify options to pass to doctest. " +
+ "Eg. '+ELLIPSIS,+NORMALIZE_WHITESPACE'")
+ # Set the default as a list, if given in env; otherwise
+ # an additional value set on the command line will cause
+ # an error.
+ env_setting = env.get('NOSE_DOCTEST_EXTENSION')
+ if env_setting is not None:
+ parser.set_defaults(doctestExtension=tolist(env_setting))
+
+ def configure(self, options, config):
+ """Configure plugin.
+ """
+ Plugin.configure(self, options, config)
+ self.doctest_result_var = options.doctest_result_var
+ self.doctest_tests = options.doctest_tests
+ self.extension = tolist(options.doctestExtension)
+ self.fixtures = options.doctestFixtures
+ self.finder = doctest.DocTestFinder()
+ self.optionflags = 0
+ if options.doctestOptions:
+ flags = ",".join(options.doctestOptions).split(',')
+ for flag in flags:
+ if not flag or flag[0] not in '+-':
+ raise ValueError(
+ "Must specify doctest options with starting " +
+ "'+' or '-'. Got %s" % (flag,))
+ mode, option_name = flag[0], flag[1:]
+ option_flag = doctest.OPTIONFLAGS_BY_NAME.get(option_name)
+ if not option_flag:
+ raise ValueError("Unknown doctest option %s" %
+ (option_name,))
+ if mode == '+':
+ self.optionflags |= option_flag
+ elif mode == '-':
+ self.optionflags &= ~option_flag
+
+ def prepareTestLoader(self, loader):
+ """Capture loader's suiteClass.
+
+ This is used to create test suites from doctest files.
+
+ """
+ self.suiteClass = loader.suiteClass
+
+ def loadTestsFromModule(self, module):
+ """Load doctests from the module.
+ """
+ log.debug("loading from %s", module)
+ if not self.matches(module.__name__):
+ log.debug("Doctest doesn't want module %s", module)
+ return
+ try:
+ tests = self.finder.find(module)
+ except AttributeError:
+ log.exception("Attribute error loading from %s", module)
+ # nose allows module.__test__ = False; doctest does not and throws
+ # AttributeError
+ return
+ if not tests:
+ log.debug("No tests found in %s", module)
+ return
+ tests.sort()
+ module_file = src(module.__file__)
+ # FIXME this breaks the id plugin somehow (tests probably don't
+ # get wrapped in result proxy or something)
+ cases = []
+ for test in tests:
+ if not test.examples:
+ continue
+ if not test.filename:
+ test.filename = module_file
+ cases.append(DocTestCase(test,
+ optionflags=self.optionflags,
+ result_var=self.doctest_result_var))
+ if cases:
+ yield self.suiteClass(cases, context=module, can_split=False)
+
+ def loadTestsFromFile(self, filename):
+ """Load doctests from the file.
+
+ Tests are loaded only if filename's extension matches
+ configured doctest extension.
+
+ """
+ if self.extension and anyp(filename.endswith, self.extension):
+ name = os.path.basename(filename)
+ dh = open(filename)
+ try:
+ doc = dh.read()
+ finally:
+ dh.close()
+
+ fixture_context = None
+ globs = {'__file__': filename}
+ if self.fixtures:
+ base, ext = os.path.splitext(name)
+ dirname = os.path.dirname(filename)
+ sys.path.append(dirname)
+ fixt_mod = base + self.fixtures
+ try:
+ fixture_context = __import__(
+ fixt_mod, globals(), locals(), ["nop"])
+ except ImportError as e:
+ log.debug(
+ "Could not import %s: %s (%s)", fixt_mod, e, sys.path)
+ log.debug("Fixture module %s resolved to %s",
+ fixt_mod, fixture_context)
+ if hasattr(fixture_context, 'globs'):
+ globs = fixture_context.globs(globs)
+ parser = doctest.DocTestParser()
+ test = parser.get_doctest(
+ doc, globs=globs, name=name,
+ filename=filename, lineno=0)
+ if test.examples:
+ case = DocFileCase(
+ test,
+ optionflags=self.optionflags,
+ setUp=getattr(fixture_context, 'setup_test', None),
+ tearDown=getattr(fixture_context, 'teardown_test', None),
+ result_var=self.doctest_result_var)
+ if fixture_context:
+ yield ContextList((case,), context=fixture_context)
+ else:
+ yield case
+ else:
+ yield False # no tests to load
+
+ def makeTest(self, obj, parent):
+ """Look for doctests in the given object, which will be a
+ function, method or class.
+ """
+ name = getattr(obj, '__name__', 'Unnammed %s' % type(obj))
+ doctests = self.finder.find(obj, module=getmodule(parent), name=name)
+ if doctests:
+ for test in doctests:
+ if len(test.examples) == 0:
+ continue
+ yield DocTestCase(test, obj=obj, optionflags=self.optionflags,
+ result_var=self.doctest_result_var)
+
+ def matches(self, name):
+ # FIXME this seems wrong -- nothing is ever going to
+ # fail this test, since we're given a module NAME not FILE
+ if name == '__init__.py':
+ return False
+ # FIXME don't think we need include/exclude checks here?
+ return ((self.doctest_tests or not self.conf.testMatch.search(name)
+ or (self.conf.include
+ and [_f for _f in [inc.search(name)
+ for inc in self.conf.include] if _f]))
+ and (not self.conf.exclude
+ or not [_f for _f in [exc.search(name)
+ for exc in self.conf.exclude] if _f]))
+
+ def wantFile(self, file):
+ """Override to select all modules and any file ending with
+ configured doctest extension.
+ """
+ # always want .py files
+ if file.endswith('.py'):
+ return True
+ # also want files that match my extension
+ if (self.extension
+ and anyp(file.endswith, self.extension)
+ and (not self.conf.exclude
+ or not [_f for _f in [exc.search(file)
+ for exc in self.conf.exclude] if _f])):
+ return True
+ return None
+
+
+class DocTestCase(doctest.DocTestCase):
+ """Overrides DocTestCase to
+ provide an address() method that returns the correct address for
+ the doctest case. To provide hints for address(), an obj may also
+ be passed -- this will be used as the test object for purposes of
+ determining the test address, if it is provided.
+ """
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None, obj=None, result_var='_'):
+ self._result_var = result_var
+ self._nose_obj = obj
+ super(DocTestCase, self).__init__(
+ test, optionflags=optionflags, setUp=setUp, tearDown=tearDown,
+ checker=checker)
+
+ def address(self):
+ if self._nose_obj is not None:
+ return test_address(self._nose_obj)
+ obj = resolve_name(self._dt_test.name)
+
+ if isproperty(obj):
+ # properties have no connection to the class they are in
+ # so we can't just look 'em up, we have to first look up
+ # the class, then stick the prop on the end
+ parts = self._dt_test.name.split('.')
+ class_name = '.'.join(parts[:-1])
+ cls = resolve_name(class_name)
+ base_addr = test_address(cls)
+ return (base_addr[0], base_addr[1],
+ '.'.join([base_addr[2], parts[-1]]))
+ else:
+ return test_address(obj)
+
+ # doctests loaded via find(obj) omit the module name
+ # so we need to override id, __repr__ and shortDescription
+ # bonus: this will squash a 2.3 vs 2.4 incompatibility
+ def id(self):
+ name = self._dt_test.name
+ filename = self._dt_test.filename
+ if filename is not None:
+ pk = getpackage(filename)
+ if pk is None:
+ return name
+ if not name.startswith(pk):
+ name = "%s.%s" % (pk, name)
+ return name
+
+ def __repr__(self):
+ name = self.id()
+ name = name.split('.')
+ return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
+ __str__ = __repr__
+
+ def shortDescription(self):
+ return 'Doctest: %s' % self.id()
+
+ def setUp(self):
+ if self._result_var is not None:
+ self._old_displayhook = sys.displayhook
+ sys.displayhook = self._displayhook
+ super(DocTestCase, self).setUp()
+
+ def _displayhook(self, value):
+ if value is None:
+ return
+ setattr(builtin_mod, self._result_var, value)
+ print(repr(value))
+
+ def tearDown(self):
+ super(DocTestCase, self).tearDown()
+ if self._result_var is not None:
+ sys.displayhook = self._old_displayhook
+ delattr(builtin_mod, self._result_var)
+
+
+class DocFileCase(doctest.DocFileCase):
+ """Overrides to provide address() method that returns the correct
+ address for the doc file case.
+ """
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None, result_var='_'):
+ self._result_var = result_var
+ super(DocFileCase, self).__init__(
+ test, optionflags=optionflags, setUp=setUp, tearDown=tearDown,
+ checker=None)
+
+ def address(self):
+ return (self._dt_test.filename, None, None)
+
+ def setUp(self):
+ if self._result_var is not None:
+ self._old_displayhook = sys.displayhook
+ sys.displayhook = self._displayhook
+ super(DocFileCase, self).setUp()
+
+ def _displayhook(self, value):
+ if value is None:
+ return
+ setattr(builtin_mod, self._result_var, value)
+ print(repr(value))
+
+ def tearDown(self):
+ super(DocFileCase, self).tearDown()
+ if self._result_var is not None:
+ sys.displayhook = self._old_displayhook
+ delattr(builtin_mod, self._result_var)
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/errorclass.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/errorclass.py
new file mode 100644
index 00000000..d8823131
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/errorclass.py
@@ -0,0 +1,210 @@
+"""
+ErrorClass Plugins
+------------------
+
+ErrorClass plugins provide an easy way to add support for custom
+handling of particular classes of exceptions.
+
+An ErrorClass plugin defines one or more ErrorClasses and how each is
+handled and reported on. Each error class is stored in a different
+attribute on the result, and reported separately. Each error class must
+indicate the exceptions that fall under that class, the label to use
+for reporting, and whether exceptions of the class should be
+considered as failures for the whole test run.
+
+ErrorClasses use a declarative syntax. Assign an ErrorClass to the
+attribute you wish to add to the result object, defining the
+exceptions, label and isfailure attributes. For example, to declare an
+ErrorClassPlugin that defines TodoErrors (and subclasses of TodoError)
+as an error class with the label 'TODO' that is considered a failure,
+do this:
+
+ >>> class Todo(Exception):
+ ... pass
+ >>> class TodoError(ErrorClassPlugin):
+ ... todo = ErrorClass(Todo, label='TODO', isfailure=True)
+
+The MetaErrorClass metaclass translates the ErrorClass declarations
+into the tuples used by the error handling and reporting functions in
+the result. This is an internal format and subject to change; you
+should always use the declarative syntax for attaching ErrorClasses to
+an ErrorClass plugin.
+
+ >>> TodoError.errorClasses # doctest: +ELLIPSIS
+ ((<class ...Todo...>, ('todo', 'TODO', True)),)
+
+Let's see the plugin in action. First some boilerplate.
+
+ >>> import sys
+ >>> import unittest
+ >>> try:
+ ... # 2.7+
+ ... from unittest.runner import _WritelnDecorator
+ ... except ImportError:
+ ... from unittest import _WritelnDecorator
+ ...
+ >>> buf = _WritelnDecorator(sys.stdout)
+
+Now define a test case that raises a Todo.
+
+ >>> class TestTodo(unittest.TestCase):
+ ... def runTest(self):
+ ... raise Todo("I need to test something")
+ >>> case = TestTodo()
+
+Prepare the result using our plugin. Normally this happens during the
+course of test execution within nose -- you won't be doing this
+yourself. For the purposes of this testing document, I'm stepping
+through the internal process of nose so you can see what happens at
+each step.
+
+ >>> plugin = TodoError()
+ >>> from nose.result import _TextTestResult
+ >>> result = _TextTestResult(stream=buf, descriptions=0, verbosity=2)
+ >>> plugin.prepareTestResult(result)
+
+Now run the test. TODO is printed.
+
+ >>> _ = case(result) # doctest: +ELLIPSIS
+ runTest (....TestTodo) ... TODO: I need to test something
+
+Errors and failures are empty, but todo has our test:
+
+ >>> result.errors
+ []
+ >>> result.failures
+ []
+ >>> result.todo # doctest: +ELLIPSIS
+ [(<....TestTodo testMethod=runTest>, '...Todo: I need to test something\\n')]
+ >>> result.printErrors() # doctest: +ELLIPSIS
+ <BLANKLINE>
+ ======================================================================
+ TODO: runTest (....TestTodo)
+ ----------------------------------------------------------------------
+ Traceback (most recent call last):
+ ...
+ ...Todo: I need to test something
+ <BLANKLINE>
+
+Since we defined a Todo as a failure, the run was not successful.
+
+ >>> result.wasSuccessful()
+ False
+"""
+
+from nose.pyversion import make_instancemethod
+from nose.plugins.base import Plugin
+from nose.result import TextTestResult
+from nose.util import isclass
+
+class MetaErrorClass(type):
+ """Metaclass for ErrorClassPlugins that allows error classes to be
+ set up in a declarative manner.
+ """
+ def __init__(self, name, bases, attr):
+ errorClasses = []
+ for name, detail in list(attr.items()):
+ if isinstance(detail, ErrorClass):
+ attr.pop(name)
+ for cls in detail:
+ errorClasses.append(
+ (cls, (name, detail.label, detail.isfailure)))
+ super(MetaErrorClass, self).__init__(name, bases, attr)
+ self.errorClasses = tuple(errorClasses)
+
+
+class ErrorClass(object):
+ def __init__(self, *errorClasses, **kw):
+ self.errorClasses = errorClasses
+ try:
+ for key in ('label', 'isfailure'):
+ setattr(self, key, kw.pop(key))
+ except KeyError:
+ raise TypeError("%r is a required named argument for ErrorClass"
+ % key)
+
+ def __iter__(self):
+ return iter(self.errorClasses)
+
+
+class ErrorClassPlugin(Plugin, metaclass=MetaErrorClass):
+ """
+ Base class for ErrorClass plugins. Subclass this class and declare the
+ exceptions that you wish to handle as attributes of the subclass.
+ """
+ score = 1000
+ errorClasses = ()
+
+ def addError(self, test, err):
+ err_cls, a, b = err
+ if not isclass(err_cls):
+ return
+ classes = [e[0] for e in self.errorClasses]
+ if [c for c in classes if issubclass(err_cls, c)]:
+ return True
+
+ def prepareTestResult(self, result):
+ if not hasattr(result, 'errorClasses'):
+ self.patchResult(result)
+ for cls, (storage_attr, label, isfail) in self.errorClasses:
+ if cls not in result.errorClasses:
+ storage = getattr(result, storage_attr, [])
+ setattr(result, storage_attr, storage)
+ result.errorClasses[cls] = (storage, label, isfail)
+
+ def patchResult(self, result):
+ result.printLabel = print_label_patch(result)
+ result._orig_addError, result.addError = \
+ result.addError, add_error_patch(result)
+ result._orig_wasSuccessful, result.wasSuccessful = \
+ result.wasSuccessful, wassuccessful_patch(result)
+ if hasattr(result, 'printErrors'):
+ result._orig_printErrors, result.printErrors = \
+ result.printErrors, print_errors_patch(result)
+ if hasattr(result, 'addSkip'):
+ result._orig_addSkip, result.addSkip = \
+ result.addSkip, add_skip_patch(result)
+ result.errorClasses = {}
+
+
+def add_error_patch(result):
+ """Create a new addError method to patch into a result instance
+ that recognizes the errorClasses attribute and deals with
+ errorclasses correctly.
+ """
+ return make_instancemethod(TextTestResult.addError, result)
+
+
+def print_errors_patch(result):
+ """Create a new printErrors method that prints errorClasses items
+ as well.
+ """
+ return make_instancemethod(TextTestResult.printErrors, result)
+
+
+def print_label_patch(result):
+ """Create a new printLabel method that prints errorClasses items
+ as well.
+ """
+ return make_instancemethod(TextTestResult.printLabel, result)
+
+
+def wassuccessful_patch(result):
+ """Create a new wasSuccessful method that checks errorClasses for
+ exceptions that were put into other slots than error or failure
+ but that still count as not success.
+ """
+ return make_instancemethod(TextTestResult.wasSuccessful, result)
+
+
+def add_skip_patch(result):
+ """Create a new addSkip method to patch into a result instance
+ that delegates to addError.
+ """
+ return make_instancemethod(TextTestResult.addSkip, result)
+
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
+
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/failuredetail.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/failuredetail.py
new file mode 100644
index 00000000..6462865d
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/failuredetail.py
@@ -0,0 +1,49 @@
+"""
+This plugin provides assert introspection. When the plugin is enabled
+and a test failure occurs, the traceback is displayed with extra context
+around the line in which the exception was raised. Simple variable
+substitution is also performed in the context output to provide more
+debugging information.
+"""
+
+from nose.plugins import Plugin
+from nose.pyversion import exc_to_unicode, force_unicode
+from nose.inspector import inspect_traceback
+
+class FailureDetail(Plugin):
+ """
+ Plugin that provides extra information in tracebacks of test failures.
+ """
+ score = 1600 # before capture
+
+ def options(self, parser, env):
+ """Register commmandline options.
+ """
+ parser.add_option(
+ "-d", "--detailed-errors", "--failure-detail",
+ action="store_true",
+ default=env.get('NOSE_DETAILED_ERRORS'),
+ dest="detailedErrors", help="Add detail to error"
+ " output by attempting to evaluate failed"
+ " asserts [NOSE_DETAILED_ERRORS]")
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ if not self.can_configure:
+ return
+ self.enabled = options.detailedErrors
+ self.conf = conf
+
+ def formatFailure(self, test, err):
+ """Add detail from traceback inspection to error message of a failure.
+ """
+ ec, ev, tb = err
+ tbinfo, str_ev = None, exc_to_unicode(ev)
+
+ if tb:
+ tbinfo = force_unicode(inspect_traceback(tb))
+ str_ev = '\n'.join([str_ev, tbinfo])
+ test.tbinfo = tbinfo
+ return (ec, str_ev, tb)
+
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/isolate.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/isolate.py
new file mode 100644
index 00000000..77a2de52
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/isolate.py
@@ -0,0 +1,103 @@
+"""The isolation plugin resets the contents of sys.modules after running
+each test module or package. Use it by setting ``--with-isolation`` or the
+NOSE_WITH_ISOLATION environment variable.
+
+The effects are similar to wrapping the following functions around the
+import and execution of each test module::
+
+ def setup(module):
+ module._mods = sys.modules.copy()
+
+ def teardown(module):
+ to_del = [ m for m in sys.modules.keys() if m not in
+ module._mods ]
+ for mod in to_del:
+ del sys.modules[mod]
+ sys.modules.update(module._mods)
+
+Isolation works only during lazy loading. In normal use, this is only
+during discovery of modules within a directory, where the process of
+importing, loading tests and running tests from each module is
+encapsulated in a single loadTestsFromName call. This plugin
+implements loadTestsFromNames to force the same lazy-loading there,
+which allows isolation to work in directed mode as well as discovery,
+at the cost of some efficiency: lazy-loading names forces full context
+setup and teardown to run for each name, defeating the grouping that
+is normally used to ensure that context setup and teardown are run the
+fewest possible times for a given set of names.
+
+.. warning ::
+
+ This plugin should not be used in conjunction with other plugins
+ that assume that modules, once imported, will stay imported; for
+ instance, it may cause very odd results when used with the coverage
+ plugin.
+
+"""
+
+import logging
+import sys
+
+from nose.plugins import Plugin
+
+
+log = logging.getLogger('nose.plugins.isolation')
+
+class IsolationPlugin(Plugin):
+ """
+ Activate the isolation plugin to isolate changes to external
+ modules to a single test module or package. The isolation plugin
+ resets the contents of sys.modules after each test module or
+ package runs to its state before the test. PLEASE NOTE that this
+ plugin should not be used with the coverage plugin, or in any other case
+ where module reloading may produce undesirable side-effects.
+ """
+ score = 10 # I want to be last
+ name = 'isolation'
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ Plugin.configure(self, options, conf)
+ self._mod_stack = []
+
+ def beforeContext(self):
+ """Copy sys.modules onto my mod stack
+ """
+ mods = sys.modules.copy()
+ self._mod_stack.append(mods)
+
+ def afterContext(self):
+ """Pop my mod stack and restore sys.modules to the state
+ it was in when mod stack was pushed.
+ """
+ mods = self._mod_stack.pop()
+ to_del = [ m for m in list(sys.modules.keys()) if m not in mods ]
+ if to_del:
+ log.debug('removing sys modules entries: %s', to_del)
+ for mod in to_del:
+ del sys.modules[mod]
+ sys.modules.update(mods)
+
+ def loadTestsFromNames(self, names, module=None):
+ """Create a lazy suite that calls beforeContext and afterContext
+ around each name. The side-effect of this is that full context
+ fixtures will be set up and torn down around each test named.
+ """
+ # Fast path for when we don't care
+ if not names or len(names) == 1:
+ return
+ loader = self.loader
+ plugins = self.conf.plugins
+ def lazy():
+ for name in names:
+ plugins.beforeContext()
+ yield loader.loadTestsFromName(name, module=module)
+ plugins.afterContext()
+ return (loader.suiteClass(lazy), [])
+
+ def prepareTestLoader(self, loader):
+ """Get handle on test loader so we can use it in loadTestsFromNames.
+ """
+ self.loader = loader
+
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/logcapture.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/logcapture.py
new file mode 100644
index 00000000..2af79ad6
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/logcapture.py
@@ -0,0 +1,245 @@
+"""
+This plugin captures logging statements issued during test execution. When an
+error or failure occurs, the captured log messages are attached to the running
+test in the test.capturedLogging attribute, and displayed with the error failure
+output. It is enabled by default but can be turned off with the option
+``--nologcapture``.
+
+You can filter captured logging statements with the ``--logging-filter`` option.
+If set, it specifies which logger(s) will be captured; loggers that do not match
+will be passed. Example: specifying ``--logging-filter=sqlalchemy,myapp``
+will ensure that only statements logged via sqlalchemy.engine, myapp
+or myapp.foo.bar logger will be logged.
+
+You can remove other installed logging handlers with the
+``--logging-clear-handlers`` option.
+"""
+
+import logging
+from logging import Handler
+import threading
+
+from nose.plugins.base import Plugin
+from nose.util import anyp, ln, safe_str
+
+try:
+ from io import StringIO
+except ImportError:
+ from io import StringIO
+
+log = logging.getLogger(__name__)
+
+class FilterSet(object):
+ def __init__(self, filter_components):
+ self.inclusive, self.exclusive = self._partition(filter_components)
+
+ # @staticmethod
+ def _partition(components):
+ inclusive, exclusive = [], []
+ for component in components:
+ if component.startswith('-'):
+ exclusive.append(component[1:])
+ else:
+ inclusive.append(component)
+ return inclusive, exclusive
+ _partition = staticmethod(_partition)
+
+ def allow(self, record):
+ """returns whether this record should be printed"""
+ if not self:
+ # nothing to filter
+ return True
+ return self._allow(record) and not self._deny(record)
+
+ # @staticmethod
+ def _any_match(matchers, record):
+ """return the bool of whether `record` starts with
+ any item in `matchers`"""
+ def record_matches_key(key):
+ return record == key or record.startswith(key + '.')
+ return anyp(bool, list(map(record_matches_key, matchers)))
+ _any_match = staticmethod(_any_match)
+
+ def _allow(self, record):
+ if not self.inclusive:
+ return True
+ return self._any_match(self.inclusive, record)
+
+ def _deny(self, record):
+ if not self.exclusive:
+ return False
+ return self._any_match(self.exclusive, record)
+
+
+class MyMemoryHandler(Handler):
+ def __init__(self, logformat, logdatefmt, filters):
+ Handler.__init__(self)
+ fmt = logging.Formatter(logformat, logdatefmt)
+ self.setFormatter(fmt)
+ self.filterset = FilterSet(filters)
+ self.buffer = []
+ def emit(self, record):
+ self.buffer.append(self.format(record))
+ def flush(self):
+ pass # do nothing
+ def truncate(self):
+ self.buffer = []
+ def filter(self, record):
+ if self.filterset.allow(record.name):
+ return Handler.filter(self, record)
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ del state['lock']
+ return state
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ self.lock = threading.RLock()
+
+
+class LogCapture(Plugin):
+ """
+ Log capture plugin. Enabled by default. Disable with --nologcapture.
+ This plugin captures logging statements issued during test execution,
+ appending any output captured to the error or failure output,
+ should the test fail or raise an error.
+ """
+ enabled = True
+ env_opt = 'NOSE_NOLOGCAPTURE'
+ name = 'logcapture'
+ score = 500
+ logformat = '%(name)s: %(levelname)s: %(message)s'
+ logdatefmt = None
+ clear = False
+ filters = ['-nose']
+
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ parser.add_option(
+ "--nologcapture", action="store_false",
+ default=not env.get(self.env_opt), dest="logcapture",
+ help="Disable logging capture plugin. "
+ "Logging configuration will be left intact."
+ " [NOSE_NOLOGCAPTURE]")
+ parser.add_option(
+ "--logging-format", action="store", dest="logcapture_format",
+ default=env.get('NOSE_LOGFORMAT') or self.logformat,
+ metavar="FORMAT",
+ help="Specify custom format to print statements. "
+ "Uses the same format as used by standard logging handlers."
+ " [NOSE_LOGFORMAT]")
+ parser.add_option(
+ "--logging-datefmt", action="store", dest="logcapture_datefmt",
+ default=env.get('NOSE_LOGDATEFMT') or self.logdatefmt,
+ metavar="FORMAT",
+ help="Specify custom date/time format to print statements. "
+ "Uses the same format as used by standard logging handlers."
+ " [NOSE_LOGDATEFMT]")
+ parser.add_option(
+ "--logging-filter", action="store", dest="logcapture_filters",
+ default=env.get('NOSE_LOGFILTER'),
+ metavar="FILTER",
+ help="Specify which statements to filter in/out. "
+ "By default, everything is captured. If the output is too"
+ " verbose,\nuse this option to filter out needless output.\n"
+ "Example: filter=foo will capture statements issued ONLY to\n"
+ " foo or foo.what.ever.sub but not foobar or other logger.\n"
+ "Specify multiple loggers with comma: filter=foo,bar,baz.\n"
+ "If any logger name is prefixed with a minus, eg filter=-foo,\n"
+ "it will be excluded rather than included. Default: "
+ "exclude logging messages from nose itself (-nose)."
+ " [NOSE_LOGFILTER]\n")
+ parser.add_option(
+ "--logging-clear-handlers", action="store_true",
+ default=False, dest="logcapture_clear",
+ help="Clear all other logging handlers")
+ parser.add_option(
+ "--logging-level", action="store",
+ default='NOTSET', dest="logcapture_level",
+ help="Set the log level to capture")
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ self.conf = conf
+ # Disable if explicitly disabled, or if logging is
+ # configured via logging config file
+ if not options.logcapture or conf.loggingConfig:
+ self.enabled = False
+ self.logformat = options.logcapture_format
+ self.logdatefmt = options.logcapture_datefmt
+ self.clear = options.logcapture_clear
+ self.loglevel = options.logcapture_level
+ if options.logcapture_filters:
+ self.filters = options.logcapture_filters.split(',')
+
+ def setupLoghandler(self):
+ # setup our handler with root logger
+ root_logger = logging.getLogger()
+ if self.clear:
+ if hasattr(root_logger, "handlers"):
+ for handler in root_logger.handlers:
+ root_logger.removeHandler(handler)
+ for logger in list(logging.Logger.manager.loggerDict.values()):
+ if hasattr(logger, "handlers"):
+ for handler in logger.handlers:
+ logger.removeHandler(handler)
+ # make sure there isn't one already
+ # you can't simply use "if self.handler not in root_logger.handlers"
+ # since at least in unit tests this doesn't work --
+ # LogCapture() is instantiated for each test case while root_logger
+ # is module global
+ # so we always add new MyMemoryHandler instance
+ for handler in root_logger.handlers[:]:
+ if isinstance(handler, MyMemoryHandler):
+ root_logger.handlers.remove(handler)
+ root_logger.addHandler(self.handler)
+ # to make sure everything gets captured
+ loglevel = getattr(self, "loglevel", "NOTSET")
+ root_logger.setLevel(getattr(logging, loglevel))
+
+ def begin(self):
+ """Set up logging handler before test run begins.
+ """
+ self.start()
+
+ def start(self):
+ self.handler = MyMemoryHandler(self.logformat, self.logdatefmt,
+ self.filters)
+ self.setupLoghandler()
+
+ def end(self):
+ pass
+
+ def beforeTest(self, test):
+ """Clear buffers and handlers before test.
+ """
+ self.setupLoghandler()
+
+ def afterTest(self, test):
+ """Clear buffers after test.
+ """
+ self.handler.truncate()
+
+ def formatFailure(self, test, err):
+ """Add captured log messages to failure output.
+ """
+ return self.formatError(test, err)
+
+ def formatError(self, test, err):
+ """Add captured log messages to error output.
+ """
+ # logic flow copied from Capture.formatError
+ test.capturedLogging = records = self.formatLogRecords()
+ if not records:
+ return err
+ ec, ev, tb = err
+ return (ec, self.addCaptureToErr(ev, records), tb)
+
+ def formatLogRecords(self):
+ return list(map(safe_str, self.handler.buffer))
+
+ def addCaptureToErr(self, ev, records):
+ return '\n'.join([safe_str(ev), ln('>> begin captured logging <<')] + \
+ records + \
+ [ln('>> end captured logging <<')])
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/manager.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/manager.py
new file mode 100644
index 00000000..1a400671
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/manager.py
@@ -0,0 +1,460 @@
+"""
+Plugin Manager
+--------------
+
+A plugin manager class is used to load plugins, manage the list of
+loaded plugins, and proxy calls to those plugins.
+
+The plugin managers provided with nose are:
+
+:class:`PluginManager`
+ This manager doesn't implement loadPlugins, so it can only work
+ with a static list of plugins.
+
+:class:`BuiltinPluginManager`
+ This manager loads plugins referenced in ``nose.plugins.builtin``.
+
+:class:`EntryPointPluginManager`
+ This manager uses setuptools entrypoints to load plugins.
+
+:class:`ExtraPluginsPluginManager`
+ This manager loads extra plugins specified with the keyword
+ `addplugins`.
+
+:class:`DefaultPluginMananger`
+ This is the manager class that will be used by default. If
+ setuptools is installed, it is a subclass of
+ :class:`EntryPointPluginManager` and :class:`BuiltinPluginManager`;
+ otherwise, an alias to :class:`BuiltinPluginManager`.
+
+:class:`RestrictedPluginManager`
+ This manager is for use in test runs where some plugin calls are
+ not available, such as runs started with ``python setup.py test``,
+ where the test runner is the default unittest :class:`TextTestRunner`. It
+ is a subclass of :class:`DefaultPluginManager`.
+
+Writing a plugin manager
+========================
+
+If you want to load plugins via some other means, you can write a
+plugin manager and pass an instance of your plugin manager class when
+instantiating the :class:`nose.config.Config` instance that you pass to
+:class:`TestProgram` (or :func:`main` or :func:`run`).
+
+To implement your plugin loading scheme, implement ``loadPlugins()``,
+and in that method, call ``addPlugin()`` with an instance of each plugin
+you wish to make available. Make sure to call
+``super(self).loadPlugins()`` as well if have subclassed a manager
+other than ``PluginManager``.
+
+"""
+import inspect
+import logging
+import os
+import sys
+from itertools import chain as iterchain
+from warnings import warn
+import nose.config
+from nose.failure import Failure
+from nose.plugins.base import IPluginInterface
+from nose.pyversion import sort_list
+
+try:
+ import pickle as pickle
+except:
+ import pickle
+try:
+ from io import StringIO
+except:
+ from io import StringIO
+
+
+__all__ = ['DefaultPluginManager', 'PluginManager', 'EntryPointPluginManager',
+ 'BuiltinPluginManager', 'RestrictedPluginManager']
+
+log = logging.getLogger(__name__)
+
+
+class PluginProxy(object):
+ """Proxy for plugin calls. Essentially a closure bound to the
+ given call and plugin list.
+
+ The plugin proxy also must be bound to a particular plugin
+ interface specification, so that it knows what calls are available
+ and any special handling that is required for each call.
+ """
+ interface = IPluginInterface
+ def __init__(self, call, plugins):
+ try:
+ self.method = getattr(self.interface, call)
+ except AttributeError:
+ raise AttributeError("%s is not a valid %s method"
+ % (call, self.interface.__name__))
+ self.call = self.makeCall(call)
+ self.plugins = []
+ for p in plugins:
+ self.addPlugin(p, call)
+
+ def __call__(self, *arg, **kw):
+ return self.call(*arg, **kw)
+
+ def addPlugin(self, plugin, call):
+ """Add plugin to my list of plugins to call, if it has the attribute
+ I'm bound to.
+ """
+ meth = getattr(plugin, call, None)
+ if meth is not None:
+ if call == 'loadTestsFromModule' and \
+ len(inspect.getargspec(meth)[0]) == 2:
+ orig_meth = meth
+ meth = lambda module, path, **kwargs: orig_meth(module)
+ self.plugins.append((plugin, meth))
+
+ def makeCall(self, call):
+ if call == 'loadTestsFromNames':
+ # special case -- load tests from names behaves somewhat differently
+ # from other chainable calls, because plugins return a tuple, only
+ # part of which can be chained to the next plugin.
+ return self._loadTestsFromNames
+
+ meth = self.method
+ if getattr(meth, 'generative', False):
+ # call all plugins and yield a flattened iterator of their results
+ return lambda *arg, **kw: list(self.generate(*arg, **kw))
+ elif getattr(meth, 'chainable', False):
+ return self.chain
+ else:
+ # return a value from the first plugin that returns non-None
+ return self.simple
+
+ def chain(self, *arg, **kw):
+ """Call plugins in a chain, where the result of each plugin call is
+ sent to the next plugin as input. The final output result is returned.
+ """
+ result = None
+ # extract the static arguments (if any) from arg so they can
+ # be passed to each plugin call in the chain
+ static = [a for (static, a)
+ in zip(getattr(self.method, 'static_args', []), arg)
+ if static]
+ for p, meth in self.plugins:
+ result = meth(*arg, **kw)
+ arg = static[:]
+ arg.append(result)
+ return result
+
+ def generate(self, *arg, **kw):
+ """Call all plugins, yielding each item in each non-None result.
+ """
+ for p, meth in self.plugins:
+ result = None
+ try:
+ result = meth(*arg, **kw)
+ if result is not None:
+ for r in result:
+ yield r
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ exc = sys.exc_info()
+ yield Failure(*exc)
+ continue
+
+ def simple(self, *arg, **kw):
+ """Call all plugins, returning the first non-None result.
+ """
+ for p, meth in self.plugins:
+ result = meth(*arg, **kw)
+ if result is not None:
+ return result
+
+ def _loadTestsFromNames(self, names, module=None):
+ """Chainable but not quite normal. Plugins return a tuple of
+ (tests, names) after processing the names. The tests are added
+ to a suite that is accumulated throughout the full call, while
+ names are input for the next plugin in the chain.
+ """
+ suite = []
+ for p, meth in self.plugins:
+ result = meth(names, module=module)
+ if result is not None:
+ suite_part, names = result
+ if suite_part:
+ suite.extend(suite_part)
+ return suite, names
+
+
+class NoPlugins(object):
+ """Null Plugin manager that has no plugins."""
+ interface = IPluginInterface
+ def __init__(self):
+ self._plugins = self.plugins = ()
+
+ def __iter__(self):
+ return ()
+
+ def _doNothing(self, *args, **kwds):
+ pass
+
+ def _emptyIterator(self, *args, **kwds):
+ return ()
+
+ def __getattr__(self, call):
+ method = getattr(self.interface, call)
+ if getattr(method, "generative", False):
+ return self._emptyIterator
+ else:
+ return self._doNothing
+
+ def addPlugin(self, plug):
+ raise NotImplementedError()
+
+ def addPlugins(self, plugins):
+ raise NotImplementedError()
+
+ def configure(self, options, config):
+ pass
+
+ def loadPlugins(self):
+ pass
+
+ def sort(self):
+ pass
+
+
+class PluginManager(object):
+ """Base class for plugin managers. PluginManager is intended to be
+ used only with a static list of plugins. The loadPlugins() implementation
+ only reloads plugins from _extraplugins to prevent those from being
+ overridden by a subclass.
+
+ The basic functionality of a plugin manager is to proxy all unknown
+ attributes through a ``PluginProxy`` to a list of plugins.
+
+ Note that the list of plugins *may not* be changed after the first plugin
+ call.
+ """
+ proxyClass = PluginProxy
+
+ def __init__(self, plugins=(), proxyClass=None):
+ self._plugins = []
+ self._extraplugins = ()
+ self._proxies = {}
+ if plugins:
+ self.addPlugins(plugins)
+ if proxyClass is not None:
+ self.proxyClass = proxyClass
+
+ def __getattr__(self, call):
+ try:
+ return self._proxies[call]
+ except KeyError:
+ proxy = self.proxyClass(call, self._plugins)
+ self._proxies[call] = proxy
+ return proxy
+
+ def __iter__(self):
+ return iter(self.plugins)
+
+ def addPlugin(self, plug):
+ # allow, for instance, plugins loaded via entry points to
+ # supplant builtin plugins.
+ new_name = getattr(plug, 'name', object())
+ self._plugins[:] = [p for p in self._plugins
+ if getattr(p, 'name', None) != new_name]
+ self._plugins.append(plug)
+
+ def addPlugins(self, plugins=(), extraplugins=()):
+ """extraplugins are maintained in a separate list and
+ re-added by loadPlugins() to prevent their being overwritten
+ by plugins added by a subclass of PluginManager
+ """
+ self._extraplugins = extraplugins
+ for plug in iterchain(plugins, extraplugins):
+ self.addPlugin(plug)
+
+ def configure(self, options, config):
+ """Configure the set of plugins with the given options
+ and config instance. After configuration, disabled plugins
+ are removed from the plugins list.
+ """
+ log.debug("Configuring plugins")
+ self.config = config
+ cfg = PluginProxy('configure', self._plugins)
+ cfg(options, config)
+ enabled = [plug for plug in self._plugins if plug.enabled]
+ self.plugins = enabled
+ self.sort()
+ log.debug("Plugins enabled: %s", enabled)
+
+ def loadPlugins(self):
+ for plug in self._extraplugins:
+ self.addPlugin(plug)
+
+ def sort(self):
+ return sort_list(self._plugins, lambda x: getattr(x, 'score', 1), reverse=True)
+
+ def _get_plugins(self):
+ return self._plugins
+
+ def _set_plugins(self, plugins):
+ self._plugins = []
+ self.addPlugins(plugins)
+
+ plugins = property(_get_plugins, _set_plugins, None,
+ """Access the list of plugins managed by
+ this plugin manager""")
+
+
+class ZeroNinePlugin:
+ """Proxy for 0.9 plugins, adapts 0.10 calls to 0.9 standard.
+ """
+ def __init__(self, plugin):
+ self.plugin = plugin
+
+ def options(self, parser, env=os.environ):
+ self.plugin.add_options(parser, env)
+
+ def addError(self, test, err):
+ if not hasattr(self.plugin, 'addError'):
+ return
+ # switch off to addSkip, addDeprecated if those types
+ from nose.exc import SkipTest, DeprecatedTest
+ ec, ev, tb = err
+ if issubclass(ec, SkipTest):
+ if not hasattr(self.plugin, 'addSkip'):
+ return
+ return self.plugin.addSkip(test.test)
+ elif issubclass(ec, DeprecatedTest):
+ if not hasattr(self.plugin, 'addDeprecated'):
+ return
+ return self.plugin.addDeprecated(test.test)
+ # add capt
+ capt = test.capturedOutput
+ return self.plugin.addError(test.test, err, capt)
+
+ def loadTestsFromFile(self, filename):
+ if hasattr(self.plugin, 'loadTestsFromPath'):
+ return self.plugin.loadTestsFromPath(filename)
+
+ def addFailure(self, test, err):
+ if not hasattr(self.plugin, 'addFailure'):
+ return
+ # add capt and tbinfo
+ capt = test.capturedOutput
+ tbinfo = test.tbinfo
+ return self.plugin.addFailure(test.test, err, capt, tbinfo)
+
+ def addSuccess(self, test):
+ if not hasattr(self.plugin, 'addSuccess'):
+ return
+ capt = test.capturedOutput
+ self.plugin.addSuccess(test.test, capt)
+
+ def startTest(self, test):
+ if not hasattr(self.plugin, 'startTest'):
+ return
+ return self.plugin.startTest(test.test)
+
+ def stopTest(self, test):
+ if not hasattr(self.plugin, 'stopTest'):
+ return
+ return self.plugin.stopTest(test.test)
+
+ def __getattr__(self, val):
+ return getattr(self.plugin, val)
+
+
+class EntryPointPluginManager(PluginManager):
+ """Plugin manager that loads plugins from the `nose.plugins` and
+ `nose.plugins.0.10` entry points.
+ """
+ entry_points = (('nose.plugins.0.10', None),
+ ('nose.plugins', ZeroNinePlugin))
+
+ def loadPlugins(self):
+ """Load plugins by iterating the `nose.plugins` entry point.
+ """
+ from pkg_resources import iter_entry_points
+ loaded = {}
+ for entry_point, adapt in self.entry_points:
+ for ep in iter_entry_points(entry_point):
+ if ep.name in loaded:
+ continue
+ loaded[ep.name] = True
+ log.debug('%s load plugin %s', self.__class__.__name__, ep)
+ try:
+ plugcls = ep.load()
+ except KeyboardInterrupt:
+ raise
+ except Exception as e:
+ # never want a plugin load to kill the test run
+ # but we can't log here because the logger is not yet
+ # configured
+ warn("Unable to load plugin %s: %s" % (ep, e),
+ RuntimeWarning)
+ continue
+ if adapt:
+ plug = adapt(plugcls())
+ else:
+ plug = plugcls()
+ self.addPlugin(plug)
+ super(EntryPointPluginManager, self).loadPlugins()
+
+
+class BuiltinPluginManager(PluginManager):
+ """Plugin manager that loads plugins from the list in
+ `nose.plugins.builtin`.
+ """
+ def loadPlugins(self):
+ """Load plugins in nose.plugins.builtin
+ """
+ from nose.plugins import builtin
+ for plug in builtin.plugins:
+ self.addPlugin(plug())
+ super(BuiltinPluginManager, self).loadPlugins()
+
+try:
+ import pkg_resources
+ class DefaultPluginManager(EntryPointPluginManager, BuiltinPluginManager):
+ pass
+
+except ImportError:
+ class DefaultPluginManager(BuiltinPluginManager):
+ pass
+
+class RestrictedPluginManager(DefaultPluginManager):
+ """Plugin manager that restricts the plugin list to those not
+ excluded by a list of exclude methods. Any plugin that implements
+ an excluded method will be removed from the manager's plugin list
+ after plugins are loaded.
+ """
+ def __init__(self, plugins=(), exclude=(), load=True):
+ DefaultPluginManager.__init__(self, plugins)
+ self.load = load
+ self.exclude = exclude
+ self.excluded = []
+ self._excludedOpts = None
+
+ def excludedOption(self, name):
+ if self._excludedOpts is None:
+ from optparse import OptionParser
+ self._excludedOpts = OptionParser(add_help_option=False)
+ for plugin in self.excluded:
+ plugin.options(self._excludedOpts, env={})
+ return self._excludedOpts.get_option('--' + name)
+
+ def loadPlugins(self):
+ if self.load:
+ DefaultPluginManager.loadPlugins(self)
+ allow = []
+ for plugin in self.plugins:
+ ok = True
+ for method in self.exclude:
+ if hasattr(plugin, method):
+ ok = False
+ self.excluded.append(plugin)
+ break
+ if ok:
+ allow.append(plugin)
+ self.plugins = allow
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/multiprocess.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/multiprocess.py
new file mode 100644
index 00000000..a0c53c60
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/multiprocess.py
@@ -0,0 +1,835 @@
+"""
+Overview
+========
+
+The multiprocess plugin enables you to distribute your test run among a set of
+worker processes that run tests in parallel. This can speed up CPU-bound test
+runs (as long as the number of worker processes is around the number of
+processors or cores available), but is mainly useful for IO-bound tests that
+spend most of their time waiting for data to arrive from someplace else.
+
+.. note ::
+
+ See :doc:`../doc_tests/test_multiprocess/multiprocess` for
+ additional documentation and examples. Use of this plugin on python
+ 2.5 or earlier requires the multiprocessing_ module, also available
+ from PyPI.
+
+.. _multiprocessing : http://code.google.com/p/python-multiprocessing/
+
+How tests are distributed
+=========================
+
+The ideal case would be to dispatch each test to a worker process
+separately. This ideal is not attainable in all cases, however, because many
+test suites depend on context (class, module or package) fixtures.
+
+The plugin can't know (unless you tell it -- see below!) if a context fixture
+can be called many times concurrently (is re-entrant), or if it can be shared
+among tests running in different processes. Therefore, if a context has
+fixtures, the default behavior is to dispatch the entire suite to a worker as
+a unit.
+
+Controlling distribution
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are two context-level variables that you can use to control this default
+behavior.
+
+If a context's fixtures are re-entrant, set ``_multiprocess_can_split_ = True``
+in the context, and the plugin will dispatch tests in suites bound to that
+context as if the context had no fixtures. This means that the fixtures will
+execute concurrently and multiple times, typically once per test.
+
+If a context's fixtures can be shared by tests running in different processes
+-- such as a package-level fixture that starts an external http server or
+initializes a shared database -- then set ``_multiprocess_shared_ = True`` in
+the context. These fixtures will then execute in the primary nose process, and
+tests in those contexts will be individually dispatched to run in parallel.
+
+How results are collected and reported
+======================================
+
+As each test or suite executes in a worker process, results (failures, errors,
+and specially handled exceptions like SkipTest) are collected in that
+process. When the worker process finishes, it returns results to the main
+nose process. There, any progress output is printed (dots!), and the
+results from the test run are combined into a consolidated result
+set. When results have been received for all dispatched tests, or all
+workers have died, the result summary is output as normal.
+
+Beware!
+=======
+
+Not all test suites will benefit from, or even operate correctly using, this
+plugin. For example, CPU-bound tests will run more slowly if you don't have
+multiple processors. There are also some differences in plugin
+interactions and behaviors due to the way in which tests are dispatched and
+loaded. In general, test loading under this plugin operates as if it were
+always in directed mode instead of discovered mode. For instance, doctests
+in test modules will always be found when using this plugin with the doctest
+plugin.
+
+But the biggest issue you will face is probably concurrency. Unless you
+have kept your tests as religiously pure unit tests, with no side-effects, no
+ordering issues, and no external dependencies, chances are you will experience
+odd, intermittent and unexplainable failures and errors when using this
+plugin. This doesn't necessarily mean the plugin is broken; it may mean that
+your test suite is not safe for concurrency.
+
+New Features in 1.1.0
+=====================
+
+* functions generated by test generators are now added to the worker queue
+ making them multi-threaded.
+* fixed timeout functionality, now functions will be terminated with a
+ TimedOutException exception when they exceed their execution time. The
+ worker processes are not terminated.
+* added ``--process-restartworker`` option to restart workers once they are
+ done, this helps control memory usage. Sometimes memory leaks can accumulate
+ making long runs very difficult.
+* added global _instantiate_plugins to configure which plugins are started
+ on the worker processes.
+
+"""
+
+import logging
+import os
+import sys
+import time
+import traceback
+import unittest
+import pickle
+import signal
+import nose.case
+from nose.core import TextTestRunner
+from nose import failure
+from nose import loader
+from nose.plugins.base import Plugin
+from nose.pyversion import bytes_
+from nose.result import TextTestResult
+from nose.suite import ContextSuite
+from nose.util import test_address
+try:
+ # 2.7+
+ from unittest.runner import _WritelnDecorator
+except ImportError:
+ from unittest import _WritelnDecorator
+from queue import Empty
+from warnings import warn
+try:
+ from io import StringIO
+except ImportError:
+ import io
+
+# this is a list of plugin classes that will be checked for and created inside
+# each worker process
+_instantiate_plugins = None
+
+log = logging.getLogger(__name__)
+
+Process = Queue = Pool = Event = Value = Array = None
+
+# have to inherit KeyboardInterrupt so it will interrupt the process properly
+class TimedOutException(KeyboardInterrupt):
+ def __init__(self, value = "Timed Out"):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
+
+def _import_mp():
+ global Process, Queue, Pool, Event, Value, Array
+ try:
+ from multiprocessing import Manager, Process
+ #prevent the server process created in the manager which holds Python
+ #objects and allows other processes to manipulate them using proxies
+ #to interrupt on SIGINT (keyboardinterrupt) so that the communication
+ #channel between subprocesses and main process is still usable after
+ #ctrl+C is received in the main process.
+ old=signal.signal(signal.SIGINT, signal.SIG_IGN)
+ m = Manager()
+ #reset it back so main process will receive a KeyboardInterrupt
+ #exception on ctrl+c
+ signal.signal(signal.SIGINT, old)
+ Queue, Pool, Event, Value, Array = (
+ m.Queue, m.Pool, m.Event, m.Value, m.Array
+ )
+ except ImportError:
+ warn("multiprocessing module is not available, multiprocess plugin "
+ "cannot be used", RuntimeWarning)
+
+
+class TestLet:
+ def __init__(self, case):
+ try:
+ self._id = case.id()
+ except AttributeError:
+ pass
+ self._short_description = case.shortDescription()
+ self._str = str(case)
+
+ def id(self):
+ return self._id
+
+ def shortDescription(self):
+ return self._short_description
+
+ def __str__(self):
+ return self._str
+
+class MultiProcess(Plugin):
+ """
+ Run tests in multiple processes. Requires processing module.
+ """
+ score = 1000
+ status = {}
+
+ def options(self, parser, env):
+ """
+ Register command-line options.
+ """
+ parser.add_option("--processes", action="store",
+ default=env.get('NOSE_PROCESSES', 0),
+ dest="multiprocess_workers",
+ metavar="NUM",
+ help="Spread test run among this many processes. "
+ "Set a number equal to the number of processors "
+ "or cores in your machine for best results. "
+ "Pass a negative number to have the number of "
+ "processes automatically set to the number of "
+ "cores. Passing 0 means to disable parallel "
+ "testing. Default is 0 unless NOSE_PROCESSES is "
+ "set. "
+ "[NOSE_PROCESSES]")
+ parser.add_option("--process-timeout", action="store",
+ default=env.get('NOSE_PROCESS_TIMEOUT', 10),
+ dest="multiprocess_timeout",
+ metavar="SECONDS",
+ help="Set timeout for return of results from each "
+ "test runner process. Default is 10. "
+ "[NOSE_PROCESS_TIMEOUT]")
+ parser.add_option("--process-restartworker", action="store_true",
+ default=env.get('NOSE_PROCESS_RESTARTWORKER', False),
+ dest="multiprocess_restartworker",
+ help="If set, will restart each worker process once"
+ " their tests are done, this helps control memory "
+ "leaks from killing the system. "
+ "[NOSE_PROCESS_RESTARTWORKER]")
+
+ def configure(self, options, config):
+ """
+ Configure plugin.
+ """
+ try:
+ self.status.pop('active')
+ except KeyError:
+ pass
+ if not hasattr(options, 'multiprocess_workers'):
+ self.enabled = False
+ return
+ # don't start inside of a worker process
+ if config.worker:
+ return
+ self.config = config
+ try:
+ workers = int(options.multiprocess_workers)
+ except (TypeError, ValueError):
+ workers = 0
+ if workers:
+ _import_mp()
+ if Process is None:
+ self.enabled = False
+ return
+ # Negative number of workers will cause multiprocessing to hang.
+ # Set the number of workers to the CPU count to avoid this.
+ if workers < 0:
+ try:
+ import multiprocessing
+ workers = multiprocessing.cpu_count()
+ except NotImplementedError:
+ self.enabled = False
+ return
+ self.enabled = True
+ self.config.multiprocess_workers = workers
+ t = float(options.multiprocess_timeout)
+ self.config.multiprocess_timeout = t
+ r = int(options.multiprocess_restartworker)
+ self.config.multiprocess_restartworker = r
+ self.status['active'] = True
+
+ def prepareTestLoader(self, loader):
+ """Remember loader class so MultiProcessTestRunner can instantiate
+ the right loader.
+ """
+ self.loaderClass = loader.__class__
+
+ def prepareTestRunner(self, runner):
+ """Replace test runner with MultiProcessTestRunner.
+ """
+ # replace with our runner class
+ return MultiProcessTestRunner(stream=runner.stream,
+ verbosity=self.config.verbosity,
+ config=self.config,
+ loaderClass=self.loaderClass)
+
+def signalhandler(sig, frame):
+ raise TimedOutException()
+
+class MultiProcessTestRunner(TextTestRunner):
+ waitkilltime = 5.0 # max time to wait to terminate a process that does not
+ # respond to SIGILL
+ def __init__(self, **kw):
+ self.loaderClass = kw.pop('loaderClass', loader.defaultTestLoader)
+ super(MultiProcessTestRunner, self).__init__(**kw)
+
+ def collect(self, test, testQueue, tasks, to_teardown, result):
+ # dispatch and collect results
+ # put indexes only on queue because tests aren't picklable
+ for case in self.nextBatch(test):
+ log.debug("Next batch %s (%s)", case, type(case))
+ if (isinstance(case, nose.case.Test) and
+ isinstance(case.test, failure.Failure)):
+ log.debug("Case is a Failure")
+ case(result) # run here to capture the failure
+ continue
+ # handle shared fixtures
+ if isinstance(case, ContextSuite) and case.context is failure.Failure:
+ log.debug("Case is a Failure")
+ case(result) # run here to capture the failure
+ continue
+ elif isinstance(case, ContextSuite) and self.sharedFixtures(case):
+ log.debug("%s has shared fixtures", case)
+ try:
+ case.setUp()
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ log.debug("%s setup failed", sys.exc_info())
+ result.addError(case, sys.exc_info())
+ else:
+ to_teardown.append(case)
+ if case.factory:
+ ancestors=case.factory.context.get(case, [])
+ for an in ancestors[:2]:
+ #log.debug('reset ancestor %s', an)
+ if getattr(an, '_multiprocess_shared_', False):
+ an._multiprocess_can_split_=True
+ #an._multiprocess_shared_=False
+ self.collect(case, testQueue, tasks, to_teardown, result)
+
+ else:
+ test_addr = self.addtask(testQueue,tasks,case)
+ log.debug("Queued test %s (%s) to %s",
+ len(tasks), test_addr, testQueue)
+
+ def startProcess(self, iworker, testQueue, resultQueue, shouldStop, result):
+ currentaddr = Value('c',bytes_(''))
+ currentstart = Value('d',time.time())
+ keyboardCaught = Event()
+ p = Process(target=runner,
+ args=(iworker, testQueue,
+ resultQueue,
+ currentaddr,
+ currentstart,
+ keyboardCaught,
+ shouldStop,
+ self.loaderClass,
+ result.__class__,
+ pickle.dumps(self.config)))
+ p.currentaddr = currentaddr
+ p.currentstart = currentstart
+ p.keyboardCaught = keyboardCaught
+ old = signal.signal(signal.SIGILL, signalhandler)
+ p.start()
+ signal.signal(signal.SIGILL, old)
+ return p
+
+ def run(self, test):
+ """
+ Execute the test (which may be a test suite). If the test is a suite,
+ distribute it out among as many processes as have been configured, at
+ as fine a level as is possible given the context fixtures defined in
+ the suite or any sub-suites.
+
+ """
+ log.debug("%s.run(%s) (%s)", self, test, os.getpid())
+ wrapper = self.config.plugins.prepareTest(test)
+ if wrapper is not None:
+ test = wrapper
+
+ # plugins can decorate or capture the output stream
+ wrapped = self.config.plugins.setOutputStream(self.stream)
+ if wrapped is not None:
+ self.stream = wrapped
+
+ testQueue = Queue()
+ resultQueue = Queue()
+ tasks = []
+ completed = []
+ workers = []
+ to_teardown = []
+ shouldStop = Event()
+
+ result = self._makeResult()
+ start = time.time()
+
+ self.collect(test, testQueue, tasks, to_teardown, result)
+
+ log.debug("Starting %s workers", self.config.multiprocess_workers)
+ for i in range(self.config.multiprocess_workers):
+ p = self.startProcess(i, testQueue, resultQueue, shouldStop, result)
+ workers.append(p)
+ log.debug("Started worker process %s", i+1)
+
+ total_tasks = len(tasks)
+ # need to keep track of the next time to check for timeouts in case
+ # more than one process times out at the same time.
+ nexttimeout=self.config.multiprocess_timeout
+ thrownError = None
+
+ try:
+ while tasks:
+ log.debug("Waiting for results (%s/%s tasks), next timeout=%.3fs",
+ len(completed), total_tasks,nexttimeout)
+ try:
+ iworker, addr, newtask_addrs, batch_result = resultQueue.get(
+ timeout=nexttimeout)
+ log.debug('Results received for worker %d, %s, new tasks: %d',
+ iworker,addr,len(newtask_addrs))
+ try:
+ try:
+ tasks.remove(addr)
+ except ValueError:
+ log.warn('worker %s failed to remove from tasks: %s',
+ iworker,addr)
+ total_tasks += len(newtask_addrs)
+ tasks.extend(newtask_addrs)
+ except KeyError:
+ log.debug("Got result for unknown task? %s", addr)
+ log.debug("current: %s",str(list(tasks)[0]))
+ else:
+ completed.append([addr,batch_result])
+ self.consolidate(result, batch_result)
+ if (self.config.stopOnError
+ and not result.wasSuccessful()):
+ # set the stop condition
+ shouldStop.set()
+ break
+ if self.config.multiprocess_restartworker:
+ log.debug('joining worker %s',iworker)
+ # wait for working, but not that important if worker
+ # cannot be joined in fact, for workers that add to
+ # testQueue, they will not terminate until all their
+ # items are read
+ workers[iworker].join(timeout=1)
+ if not shouldStop.is_set() and not testQueue.empty():
+ log.debug('starting new process on worker %s',iworker)
+ workers[iworker] = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result)
+ except Empty:
+ log.debug("Timed out with %s tasks pending "
+ "(empty testQueue=%r): %s",
+ len(tasks),testQueue.empty(),str(tasks))
+ any_alive = False
+ for iworker, w in enumerate(workers):
+ if w.is_alive():
+ worker_addr = bytes_(w.currentaddr.value,'ascii')
+ timeprocessing = time.time() - w.currentstart.value
+ if ( len(worker_addr) == 0
+ and timeprocessing > self.config.multiprocess_timeout-0.1):
+ log.debug('worker %d has finished its work item, '
+ 'but is not exiting? do we wait for it?',
+ iworker)
+ else:
+ any_alive = True
+ if (len(worker_addr) > 0
+ and timeprocessing > self.config.multiprocess_timeout-0.1):
+ log.debug('timed out worker %s: %s',
+ iworker,worker_addr)
+ w.currentaddr.value = bytes_('')
+ # If the process is in C++ code, sending a SIGILL
+ # might not send a python KeyboardInterrupt exception
+ # therefore, send multiple signals until an
+ # exception is caught. If this takes too long, then
+ # terminate the process
+ w.keyboardCaught.clear()
+ startkilltime = time.time()
+ while not w.keyboardCaught.is_set() and w.is_alive():
+ if time.time()-startkilltime > self.waitkilltime:
+ # have to terminate...
+ log.error("terminating worker %s",iworker)
+ w.terminate()
+ # there is a small probability that the
+ # terminated process might send a result,
+ # which has to be specially handled or
+ # else processes might get orphaned.
+ workers[iworker] = w = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result)
+ break
+ os.kill(w.pid, signal.SIGILL)
+ time.sleep(0.1)
+ if not any_alive and testQueue.empty():
+ log.debug("All workers dead")
+ break
+ nexttimeout=self.config.multiprocess_timeout
+ for w in workers:
+ if w.is_alive() and len(w.currentaddr.value) > 0:
+ timeprocessing = time.time()-w.currentstart.value
+ if timeprocessing <= self.config.multiprocess_timeout:
+ nexttimeout = min(nexttimeout,
+ self.config.multiprocess_timeout-timeprocessing)
+ log.debug("Completed %s tasks (%s remain)", len(completed), len(tasks))
+
+ except (KeyboardInterrupt, SystemExit) as e:
+ log.info('parent received ctrl-c when waiting for test results')
+ thrownError = e
+ #resultQueue.get(False)
+
+ result.addError(test, sys.exc_info())
+
+ try:
+ for case in to_teardown:
+ log.debug("Tearing down shared fixtures for %s", case)
+ try:
+ case.tearDown()
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ result.addError(case, sys.exc_info())
+
+ stop = time.time()
+
+ # first write since can freeze on shutting down processes
+ result.printErrors()
+ result.printSummary(start, stop)
+ self.config.plugins.finalize(result)
+
+ if thrownError is None:
+ log.debug("Tell all workers to stop")
+ for w in workers:
+ if w.is_alive():
+ testQueue.put('STOP', block=False)
+
+ # wait for the workers to end
+ for iworker,worker in enumerate(workers):
+ if worker.is_alive():
+ log.debug('joining worker %s',iworker)
+ worker.join()
+ if worker.is_alive():
+ log.debug('failed to join worker %s',iworker)
+ except (KeyboardInterrupt, SystemExit):
+ log.info('parent received ctrl-c when shutting down: stop all processes')
+ for worker in workers:
+ if worker.is_alive():
+ worker.terminate()
+
+ if thrownError: raise thrownError
+ else: raise
+
+ return result
+
+ def addtask(testQueue,tasks,case):
+ arg = None
+ if isinstance(case,nose.case.Test) and hasattr(case.test,'arg'):
+ # this removes the top level descriptor and allows real function
+ # name to be returned
+ case.test.descriptor = None
+ arg = case.test.arg
+ test_addr = MultiProcessTestRunner.address(case)
+ testQueue.put((test_addr,arg), block=False)
+ if arg is not None:
+ test_addr += str(arg)
+ if tasks is not None:
+ tasks.append(test_addr)
+ return test_addr
+ addtask = staticmethod(addtask)
+
+ def address(case):
+ if hasattr(case, 'address'):
+ file, mod, call = case.address()
+ elif hasattr(case, 'context'):
+ file, mod, call = test_address(case.context)
+ else:
+ raise Exception("Unable to convert %s to address" % case)
+ parts = []
+ if file is None:
+ if mod is None:
+ raise Exception("Unaddressable case %s" % case)
+ else:
+ parts.append(mod)
+ else:
+ # strip __init__.py(c) from end of file part
+ # if present, having it there confuses loader
+ dirname, basename = os.path.split(file)
+ if basename.startswith('__init__'):
+ file = dirname
+ parts.append(file)
+ if call is not None:
+ parts.append(call)
+ return ':'.join(map(str, parts))
+ address = staticmethod(address)
+
+ def nextBatch(self, test):
+ # allows tests or suites to mark themselves as not safe
+ # for multiprocess execution
+ if hasattr(test, 'context'):
+ if not getattr(test.context, '_multiprocess_', True):
+ return
+
+ if ((isinstance(test, ContextSuite)
+ and test.hasFixtures(self.checkCanSplit))
+ or not getattr(test, 'can_split', True)
+ or not isinstance(test, unittest.TestSuite)):
+ # regular test case, or a suite with context fixtures
+
+ # special case: when run like nosetests path/to/module.py
+ # the top-level suite has only one item, and it shares
+ # the same context as that item. In that case, we want the
+ # item, not the top-level suite
+ if isinstance(test, ContextSuite):
+ contained = list(test)
+ if (len(contained) == 1
+ and getattr(contained[0],
+ 'context', None) == test.context):
+ test = contained[0]
+ yield test
+ else:
+ # Suite is without fixtures at this level; but it may have
+ # fixtures at any deeper level, so we need to examine it all
+ # the way down to the case level
+ for case in test:
+ for batch in self.nextBatch(case):
+ yield batch
+
+ def checkCanSplit(context, fixt):
+ """
+ Callback that we use to check whether the fixtures found in a
+ context or ancestor are ones we care about.
+
+ Contexts can tell us that their fixtures are reentrant by setting
+ _multiprocess_can_split_. So if we see that, we return False to
+ disregard those fixtures.
+ """
+ if not fixt:
+ return False
+ if getattr(context, '_multiprocess_can_split_', False):
+ return False
+ return True
+ checkCanSplit = staticmethod(checkCanSplit)
+
+ def sharedFixtures(self, case):
+ context = getattr(case, 'context', None)
+ if not context:
+ return False
+ return getattr(context, '_multiprocess_shared_', False)
+
+ def consolidate(self, result, batch_result):
+ log.debug("batch result is %s" , batch_result)
+ try:
+ output, testsRun, failures, errors, errorClasses = batch_result
+ except ValueError:
+ log.debug("result in unexpected format %s", batch_result)
+ failure.Failure(*sys.exc_info())(result)
+ return
+ self.stream.write(output)
+ result.testsRun += testsRun
+ result.failures.extend(failures)
+ result.errors.extend(errors)
+ for key, (storage, label, isfail) in list(errorClasses.items()):
+ if key not in result.errorClasses:
+ # Ordinarily storage is result attribute
+ # but it's only processed through the errorClasses
+ # dict, so it's ok to fake it here
+ result.errorClasses[key] = ([], label, isfail)
+ mystorage, _junk, _junk = result.errorClasses[key]
+ mystorage.extend(storage)
+ log.debug("Ran %s tests (total: %s)", testsRun, result.testsRun)
+
+
+def runner(ix, testQueue, resultQueue, currentaddr, currentstart,
+ keyboardCaught, shouldStop, loaderClass, resultClass, config):
+ try:
+ try:
+ return __runner(ix, testQueue, resultQueue, currentaddr, currentstart,
+ keyboardCaught, shouldStop, loaderClass, resultClass, config)
+ except KeyboardInterrupt:
+ log.debug('Worker %s keyboard interrupt, stopping',ix)
+ except Empty:
+ log.debug("Worker %s timed out waiting for tasks", ix)
+
+def __runner(ix, testQueue, resultQueue, currentaddr, currentstart,
+ keyboardCaught, shouldStop, loaderClass, resultClass, config):
+
+ config = pickle.loads(config)
+ dummy_parser = config.parserClass()
+ if _instantiate_plugins is not None:
+ for pluginclass in _instantiate_plugins:
+ plugin = pluginclass()
+ plugin.addOptions(dummy_parser,{})
+ config.plugins.addPlugin(plugin)
+ config.plugins.configure(config.options,config)
+ config.plugins.begin()
+ log.debug("Worker %s executing, pid=%d", ix,os.getpid())
+ loader = loaderClass(config=config)
+ loader.suiteClass.suiteClass = NoSharedFixtureContextSuite
+
+ def get():
+ return testQueue.get(timeout=config.multiprocess_timeout)
+
+ def makeResult():
+ stream = _WritelnDecorator(StringIO())
+ result = resultClass(stream, descriptions=1,
+ verbosity=config.verbosity,
+ config=config)
+ plug_result = config.plugins.prepareTestResult(result)
+ if plug_result:
+ return plug_result
+ return result
+
+ def batch(result):
+ failures = [(TestLet(c), err) for c, err in result.failures]
+ errors = [(TestLet(c), err) for c, err in result.errors]
+ errorClasses = {}
+ for key, (storage, label, isfail) in list(result.errorClasses.items()):
+ errorClasses[key] = ([(TestLet(c), err) for c, err in storage],
+ label, isfail)
+ return (
+ result.stream.getvalue(),
+ result.testsRun,
+ failures,
+ errors,
+ errorClasses)
+ for test_addr, arg in iter(get, 'STOP'):
+ if shouldStop.is_set():
+ log.exception('Worker %d STOPPED',ix)
+ break
+ result = makeResult()
+ test = loader.loadTestsFromNames([test_addr])
+ test.testQueue = testQueue
+ test.tasks = []
+ test.arg = arg
+ log.debug("Worker %s Test is %s (%s)", ix, test_addr, test)
+ try:
+ if arg is not None:
+ test_addr = test_addr + str(arg)
+ currentaddr.value = bytes_(test_addr)
+ currentstart.value = time.time()
+ test(result)
+ currentaddr.value = bytes_('')
+ resultQueue.put((ix, test_addr, test.tasks, batch(result)))
+ except KeyboardInterrupt as e: #TimedOutException:
+ timeout = isinstance(e, TimedOutException)
+ if timeout:
+ keyboardCaught.set()
+ if len(currentaddr.value):
+ if timeout:
+ msg = 'Worker %s timed out, failing current test %s'
+ else:
+ msg = 'Worker %s keyboard interrupt, failing current test %s'
+ log.exception(msg,ix,test_addr)
+ currentaddr.value = bytes_('')
+ failure.Failure(*sys.exc_info())(result)
+ resultQueue.put((ix, test_addr, test.tasks, batch(result)))
+ else:
+ if timeout:
+ msg = 'Worker %s test %s timed out'
+ else:
+ msg = 'Worker %s test %s keyboard interrupt'
+ log.debug(msg,ix,test_addr)
+ resultQueue.put((ix, test_addr, test.tasks, batch(result)))
+ if not timeout:
+ raise
+ except SystemExit:
+ currentaddr.value = bytes_('')
+ log.exception('Worker %s system exit',ix)
+ raise
+ except:
+ currentaddr.value = bytes_('')
+ log.exception("Worker %s error running test or returning "
+ "results",ix)
+ failure.Failure(*sys.exc_info())(result)
+ resultQueue.put((ix, test_addr, test.tasks, batch(result)))
+ if config.multiprocess_restartworker:
+ break
+ log.debug("Worker %s ending", ix)
+
+
+class NoSharedFixtureContextSuite(ContextSuite):
+ """
+ Context suite that never fires shared fixtures.
+
+ When a context sets _multiprocess_shared_, fixtures in that context
+ are executed by the main process. Using this suite class prevents them
+ from executing in the runner process as well.
+
+ """
+ testQueue = None
+ tasks = None
+ arg = None
+ def setupContext(self, context):
+ if getattr(context, '_multiprocess_shared_', False):
+ return
+ super(NoSharedFixtureContextSuite, self).setupContext(context)
+
+ def teardownContext(self, context):
+ if getattr(context, '_multiprocess_shared_', False):
+ return
+ super(NoSharedFixtureContextSuite, self).teardownContext(context)
+ def run(self, result):
+ """Run tests in suite inside of suite fixtures.
+ """
+ # proxy the result for myself
+ log.debug("suite %s (%s) run called, tests: %s",
+ id(self), self, self._tests)
+ if self.resultProxy:
+ result, orig = self.resultProxy(result, self), result
+ else:
+ result, orig = result, result
+ try:
+ #log.debug('setUp for %s', id(self));
+ self.setUp()
+ except KeyboardInterrupt:
+ raise
+ except:
+ self.error_context = 'setup'
+ result.addError(self, self._exc_info())
+ return
+ try:
+ for test in self._tests:
+ if (isinstance(test,nose.case.Test)
+ and self.arg is not None):
+ test.test.arg = self.arg
+ else:
+ test.arg = self.arg
+ test.testQueue = self.testQueue
+ test.tasks = self.tasks
+ if result.shouldStop:
+ log.debug("stopping")
+ break
+ # each nose.case.Test will create its own result proxy
+ # so the cases need the original result, to avoid proxy
+ # chains
+ #log.debug('running test %s in suite %s', test, self);
+ try:
+ test(orig)
+ except KeyboardInterrupt as e:
+ timeout = isinstance(e, TimedOutException)
+ if timeout:
+ msg = 'Timeout when running test %s in suite %s'
+ else:
+ msg = 'KeyboardInterrupt when running test %s in suite %s'
+ log.debug(msg, test, self)
+ err = (TimedOutException,TimedOutException(str(test)),
+ sys.exc_info()[2])
+ test.config.plugins.addError(test,err)
+ orig.addError(test,err)
+ if not timeout:
+ raise
+ finally:
+ self.has_run = True
+ try:
+ #log.debug('tearDown for %s', id(self));
+ self.tearDown()
+ except KeyboardInterrupt:
+ raise
+ except:
+ self.error_context = 'teardown'
+ result.addError(self, self._exc_info())
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/plugintest.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/plugintest.py
new file mode 100644
index 00000000..a8723737
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/plugintest.py
@@ -0,0 +1,417 @@
+"""
+Testing Plugins
+===============
+
+The plugin interface is well-tested enough to safely unit test your
+use of its hooks with some level of confidence. However, there is also
+a mixin for unittest.TestCase called PluginTester that's designed to
+test plugins in their native runtime environment.
+
+Here's a simple example with a do-nothing plugin and a composed suite.
+
+ >>> import unittest
+ >>> from nose.plugins import Plugin, PluginTester
+ >>> class FooPlugin(Plugin):
+ ... pass
+ >>> class TestPluginFoo(PluginTester, unittest.TestCase):
+ ... activate = '--with-foo'
+ ... plugins = [FooPlugin()]
+ ... def test_foo(self):
+ ... for line in self.output:
+ ... # i.e. check for patterns
+ ... pass
+ ...
+ ... # or check for a line containing ...
+ ... assert "ValueError" in self.output
+ ... def makeSuite(self):
+ ... class TC(unittest.TestCase):
+ ... def runTest(self):
+ ... raise ValueError("I hate foo")
+ ... return [TC('runTest')]
+ ...
+ >>> res = unittest.TestResult()
+ >>> case = TestPluginFoo('test_foo')
+ >>> _ = case(res)
+ >>> res.errors
+ []
+ >>> res.failures
+ []
+ >>> res.wasSuccessful()
+ True
+ >>> res.testsRun
+ 1
+
+And here is a more complex example of testing a plugin that has extra
+arguments and reads environment variables.
+
+ >>> import unittest, os
+ >>> from nose.plugins import Plugin, PluginTester
+ >>> class FancyOutputter(Plugin):
+ ... name = "fancy"
+ ... def configure(self, options, conf):
+ ... Plugin.configure(self, options, conf)
+ ... if not self.enabled:
+ ... return
+ ... self.fanciness = 1
+ ... if options.more_fancy:
+ ... self.fanciness = 2
+ ... if 'EVEN_FANCIER' in self.env:
+ ... self.fanciness = 3
+ ...
+ ... def options(self, parser, env=os.environ):
+ ... self.env = env
+ ... parser.add_option('--more-fancy', action='store_true')
+ ... Plugin.options(self, parser, env=env)
+ ...
+ ... def report(self, stream):
+ ... stream.write("FANCY " * self.fanciness)
+ ...
+ >>> class TestFancyOutputter(PluginTester, unittest.TestCase):
+ ... activate = '--with-fancy' # enables the plugin
+ ... plugins = [FancyOutputter()]
+ ... args = ['--more-fancy']
+ ... env = {'EVEN_FANCIER': '1'}
+ ...
+ ... def test_fancy_output(self):
+ ... assert "FANCY FANCY FANCY" in self.output, (
+ ... "got: %s" % self.output)
+ ... def makeSuite(self):
+ ... class TC(unittest.TestCase):
+ ... def runTest(self):
+ ... raise ValueError("I hate fancy stuff")
+ ... return [TC('runTest')]
+ ...
+ >>> res = unittest.TestResult()
+ >>> case = TestFancyOutputter('test_fancy_output')
+ >>> _ = case(res)
+ >>> res.errors
+ []
+ >>> res.failures
+ []
+ >>> res.wasSuccessful()
+ True
+ >>> res.testsRun
+ 1
+
+"""
+
+import re
+import sys
+from warnings import warn
+
+try:
+ from io import StringIO
+except ImportError:
+ from io import StringIO
+
+__all__ = ['PluginTester', 'run']
+
+from os import getpid
+class MultiProcessFile(object):
+ """
+ helper for testing multiprocessing
+
+ multiprocessing poses a problem for doctests, since the strategy
+ of replacing sys.stdout/stderr with file-like objects then
+ inspecting the results won't work: the child processes will
+ write to the objects, but the data will not be reflected
+ in the parent doctest-ing process.
+
+ The solution is to create file-like objects which will interact with
+ multiprocessing in a more desirable way.
+
+ All processes can write to this object, but only the creator can read.
+ This allows the testing system to see a unified picture of I/O.
+ """
+ def __init__(self):
+ # per advice at:
+ # http://docs.python.org/library/multiprocessing.html#all-platforms
+ self.__master = getpid()
+ self.__queue = Manager().Queue()
+ self.__buffer = StringIO()
+ self.softspace = 0
+
+ def buffer(self):
+ if getpid() != self.__master:
+ return
+
+ from queue import Empty
+ from collections import defaultdict
+ cache = defaultdict(str)
+ while True:
+ try:
+ pid, data = self.__queue.get_nowait()
+ except Empty:
+ break
+ if pid == ():
+ #show parent output after children
+ #this is what users see, usually
+ pid = ( 1e100, ) # googol!
+ cache[pid] += data
+ for pid in sorted(cache):
+ #self.__buffer.write( '%s wrote: %r\n' % (pid, cache[pid]) ) #DEBUG
+ self.__buffer.write( cache[pid] )
+ def write(self, data):
+ # note that these pids are in the form of current_process()._identity
+ # rather than OS pids
+ from multiprocessing import current_process
+ pid = current_process()._identity
+ self.__queue.put((pid, data))
+ def __iter__(self):
+ "getattr doesn't work for iter()"
+ self.buffer()
+ return self.__buffer
+ def seek(self, offset, whence=0):
+ self.buffer()
+ return self.__buffer.seek(offset, whence)
+ def getvalue(self):
+ self.buffer()
+ return self.__buffer.getvalue()
+ def __getattr__(self, attr):
+ return getattr(self.__buffer, attr)
+
+try:
+ from multiprocessing import Manager
+ Buffer = MultiProcessFile
+except ImportError:
+ Buffer = StringIO
+
+class PluginTester(object):
+ """A mixin for testing nose plugins in their runtime environment.
+
+ Subclass this and mix in unittest.TestCase to run integration/functional
+ tests on your plugin. When setUp() is called, the stub test suite is
+ executed with your plugin so that during an actual test you can inspect the
+ artifacts of how your plugin interacted with the stub test suite.
+
+ - activate
+
+ - the argument to send nosetests to activate the plugin
+
+ - suitepath
+
+ - if set, this is the path of the suite to test. Otherwise, you
+ will need to use the hook, makeSuite()
+
+ - plugins
+
+ - the list of plugins to make available during the run. Note
+ that this does not mean these plugins will be *enabled* during
+ the run -- only the plugins enabled by the activate argument
+ or other settings in argv or env will be enabled.
+
+ - args
+
+ - a list of arguments to add to the nosetests command, in addition to
+ the activate argument
+
+ - env
+
+ - optional dict of environment variables to send nosetests
+
+ """
+ activate = None
+ suitepath = None
+ args = None
+ env = {}
+ argv = None
+ plugins = []
+ ignoreFiles = None
+
+ def makeSuite(self):
+ """returns a suite object of tests to run (unittest.TestSuite())
+
+ If self.suitepath is None, this must be implemented. The returned suite
+ object will be executed with all plugins activated. It may return
+ None.
+
+ Here is an example of a basic suite object you can return ::
+
+ >>> import unittest
+ >>> class SomeTest(unittest.TestCase):
+ ... def runTest(self):
+ ... raise ValueError("Now do something, plugin!")
+ ...
+ >>> unittest.TestSuite([SomeTest()]) # doctest: +ELLIPSIS
+ <unittest...TestSuite tests=[<...SomeTest testMethod=runTest>]>
+
+ """
+ raise NotImplementedError
+
+ def _execPlugin(self):
+ """execute the plugin on the internal test suite.
+ """
+ from nose.config import Config
+ from nose.core import TestProgram
+ from nose.plugins.manager import PluginManager
+
+ suite = None
+ stream = Buffer()
+ conf = Config(env=self.env,
+ stream=stream,
+ plugins=PluginManager(plugins=self.plugins))
+ if self.ignoreFiles is not None:
+ conf.ignoreFiles = self.ignoreFiles
+ if not self.suitepath:
+ suite = self.makeSuite()
+
+ self.nose = TestProgram(argv=self.argv, config=conf, suite=suite,
+ exit=False)
+ self.output = AccessDecorator(stream)
+
+ def setUp(self):
+ """runs nosetests with the specified test suite, all plugins
+ activated.
+ """
+ self.argv = ['nosetests', self.activate]
+ if self.args:
+ self.argv.extend(self.args)
+ if self.suitepath:
+ self.argv.append(self.suitepath)
+
+ self._execPlugin()
+
+
+class AccessDecorator(object):
+ stream = None
+ _buf = None
+ def __init__(self, stream):
+ self.stream = stream
+ stream.seek(0)
+ self._buf = stream.read()
+ stream.seek(0)
+ def __contains__(self, val):
+ return val in self._buf
+ def __iter__(self):
+ return iter(self.stream)
+ def __str__(self):
+ return self._buf
+
+
+def blankline_separated_blocks(text):
+ "a bunch of === characters is also considered a blank line"
+ block = []
+ for line in text.splitlines(True):
+ block.append(line)
+ line = line.strip()
+ if not line or line.startswith('===') and not line.strip('='):
+ yield "".join(block)
+ block = []
+ if block:
+ yield "".join(block)
+
+
+def remove_stack_traces(out):
+ # this regexp taken from Python 2.5's doctest
+ traceback_re = re.compile(r"""
+ # Grab the traceback header. Different versions of Python have
+ # said different things on the first traceback line.
+ ^(?P<hdr> Traceback\ \(
+ (?: most\ recent\ call\ last
+ | innermost\ last
+ ) \) :
+ )
+ \s* $ # toss trailing whitespace on the header.
+ (?P<stack> .*?) # don't blink: absorb stuff until...
+ ^(?=\w) # a line *starts* with alphanum.
+ .*?(?P<exception> \w+ ) # exception name
+ (?P<msg> [:\n] .*) # the rest
+ """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+ blocks = []
+ for block in blankline_separated_blocks(out):
+ blocks.append(traceback_re.sub(r"\g<hdr>\n...\n\g<exception>\g<msg>", block))
+ return "".join(blocks)
+
+
+def simplify_warnings(out):
+ warn_re = re.compile(r"""
+ # Cut the file and line no, up to the warning name
+ ^.*:\d+:\s
+ (?P<category>\w+): \s+ # warning category
+ (?P<detail>.+) $ \n? # warning message
+ ^ .* $ # stack frame
+ """, re.VERBOSE | re.MULTILINE)
+ return warn_re.sub(r"\g<category>: \g<detail>", out)
+
+
+def remove_timings(out):
+ return re.sub(
+ r"Ran (\d+ tests?) in [0-9.]+s", r"Ran \1 in ...s", out)
+
+
+def munge_nose_output_for_doctest(out):
+ """Modify nose output to make it easy to use in doctests."""
+ out = remove_stack_traces(out)
+ out = simplify_warnings(out)
+ out = remove_timings(out)
+ return out.strip()
+
+
+def run(*arg, **kw):
+ """
+ Specialized version of nose.run for use inside of doctests that
+ test test runs.
+
+ This version of run() prints the result output to stdout. Before
+ printing, the output is processed by replacing the timing
+ information with an ellipsis (...), removing traceback stacks, and
+ removing trailing whitespace.
+
+ Use this version of run wherever you are writing a doctest that
+ tests nose (or unittest) test result output.
+
+ Note: do not use doctest: +ELLIPSIS when testing nose output,
+ since ellipses ("test_foo ... ok") in your expected test runner
+ output may match multiple lines of output, causing spurious test
+ passes!
+ """
+ from nose import run
+ from nose.config import Config
+ from nose.plugins.manager import PluginManager
+
+ buffer = Buffer()
+ if 'config' not in kw:
+ plugins = kw.pop('plugins', [])
+ if isinstance(plugins, list):
+ plugins = PluginManager(plugins=plugins)
+ env = kw.pop('env', {})
+ kw['config'] = Config(env=env, plugins=plugins)
+ if 'argv' not in kw:
+ kw['argv'] = ['nosetests', '-v']
+ kw['config'].stream = buffer
+
+ # Set up buffering so that all output goes to our buffer,
+ # or warn user if deprecated behavior is active. If this is not
+ # done, prints and warnings will either be out of place or
+ # disappear.
+ stderr = sys.stderr
+ stdout = sys.stdout
+ if kw.pop('buffer_all', False):
+ sys.stdout = sys.stderr = buffer
+ restore = True
+ else:
+ restore = False
+ warn("The behavior of nose.plugins.plugintest.run() will change in "
+ "the next release of nose. The current behavior does not "
+ "correctly account for output to stdout and stderr. To enable "
+ "correct behavior, use run_buffered() instead, or pass "
+ "the keyword argument buffer_all=True to run().",
+ DeprecationWarning, stacklevel=2)
+ try:
+ run(*arg, **kw)
+ finally:
+ if restore:
+ sys.stderr = stderr
+ sys.stdout = stdout
+ out = buffer.getvalue()
+ print(munge_nose_output_for_doctest(out))
+
+
+def run_buffered(*arg, **kw):
+ kw['buffer_all'] = True
+ run(*arg, **kw)
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
+
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/prof.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/prof.py
new file mode 100644
index 00000000..4d304a93
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/prof.py
@@ -0,0 +1,154 @@
+"""This plugin will run tests using the hotshot profiler, which is part
+of the standard library. To turn it on, use the ``--with-profile`` option
+or set the NOSE_WITH_PROFILE environment variable. Profiler output can be
+controlled with the ``--profile-sort`` and ``--profile-restrict`` options,
+and the profiler output file may be changed with ``--profile-stats-file``.
+
+See the `hotshot documentation`_ in the standard library documentation for
+more details on the various output options.
+
+.. _hotshot documentation: http://docs.python.org/library/hotshot.html
+"""
+
+try:
+ import hotshot
+ from hotshot import stats
+except ImportError:
+ hotshot, stats = None, None
+import logging
+import os
+import sys
+import tempfile
+from nose.plugins.base import Plugin
+from nose.util import tolist
+
+log = logging.getLogger('nose.plugins')
+
+class Profile(Plugin):
+ """
+ Use this plugin to run tests using the hotshot profiler.
+ """
+ pfile = None
+ clean_stats_file = False
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ if not self.available():
+ return
+ Plugin.options(self, parser, env)
+ parser.add_option('--profile-sort', action='store', dest='profile_sort',
+ default=env.get('NOSE_PROFILE_SORT', 'cumulative'),
+ metavar="SORT",
+ help="Set sort order for profiler output")
+ parser.add_option('--profile-stats-file', action='store',
+ dest='profile_stats_file',
+ metavar="FILE",
+ default=env.get('NOSE_PROFILE_STATS_FILE'),
+ help='Profiler stats file; default is a new '
+ 'temp file on each run')
+ parser.add_option('--profile-restrict', action='append',
+ dest='profile_restrict',
+ metavar="RESTRICT",
+ default=env.get('NOSE_PROFILE_RESTRICT'),
+ help="Restrict profiler output. See help for "
+ "pstats.Stats for details")
+
+ def available(cls):
+ return hotshot is not None
+ available = classmethod(available)
+
+ def begin(self):
+ """Create profile stats file and load profiler.
+ """
+ if not self.available():
+ return
+ self._create_pfile()
+ self.prof = hotshot.Profile(self.pfile)
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ if not self.available():
+ self.enabled = False
+ return
+ Plugin.configure(self, options, conf)
+ self.conf = conf
+ if options.profile_stats_file:
+ self.pfile = options.profile_stats_file
+ self.clean_stats_file = False
+ else:
+ self.pfile = None
+ self.clean_stats_file = True
+ self.fileno = None
+ self.sort = options.profile_sort
+ self.restrict = tolist(options.profile_restrict)
+
+ def prepareTest(self, test):
+ """Wrap entire test run in :func:`prof.runcall`.
+ """
+ if not self.available():
+ return
+ log.debug('preparing test %s' % test)
+ def run_and_profile(result, prof=self.prof, test=test):
+ self._create_pfile()
+ prof.runcall(test, result)
+ return run_and_profile
+
+ def report(self, stream):
+ """Output profiler report.
+ """
+ log.debug('printing profiler report')
+ self.prof.close()
+ prof_stats = stats.load(self.pfile)
+ prof_stats.sort_stats(self.sort)
+
+ # 2.5 has completely different stream handling from 2.4 and earlier.
+ # Before 2.5, stats objects have no stream attribute; in 2.5 and later
+ # a reference sys.stdout is stored before we can tweak it.
+ compat_25 = hasattr(prof_stats, 'stream')
+ if compat_25:
+ tmp = prof_stats.stream
+ prof_stats.stream = stream
+ else:
+ tmp = sys.stdout
+ sys.stdout = stream
+ try:
+ if self.restrict:
+ log.debug('setting profiler restriction to %s', self.restrict)
+ prof_stats.print_stats(*self.restrict)
+ else:
+ prof_stats.print_stats()
+ finally:
+ if compat_25:
+ prof_stats.stream = tmp
+ else:
+ sys.stdout = tmp
+
+ def finalize(self, result):
+ """Clean up stats file, if configured to do so.
+ """
+ if not self.available():
+ return
+ try:
+ self.prof.close()
+ except AttributeError:
+ # TODO: is this trying to catch just the case where not
+ # hasattr(self.prof, "close")? If so, the function call should be
+ # moved out of the try: suite.
+ pass
+ if self.clean_stats_file:
+ if self.fileno:
+ try:
+ os.close(self.fileno)
+ except OSError:
+ pass
+ try:
+ os.unlink(self.pfile)
+ except OSError:
+ pass
+ return None
+
+ def _create_pfile(self):
+ if not self.pfile:
+ self.fileno, self.pfile = tempfile.mkstemp()
+ self.clean_stats_file = True
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/skip.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/skip.py
new file mode 100644
index 00000000..9d1ac8f6
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/skip.py
@@ -0,0 +1,63 @@
+"""
+This plugin installs a SKIP error class for the SkipTest exception.
+When SkipTest is raised, the exception will be logged in the skipped
+attribute of the result, 'S' or 'SKIP' (verbose) will be output, and
+the exception will not be counted as an error or failure. This plugin
+is enabled by default but may be disabled with the ``--no-skip`` option.
+"""
+
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+
+
+# on SkipTest:
+# - unittest SkipTest is first preference, but it's only available
+# for >= 2.7
+# - unittest2 SkipTest is second preference for older pythons. This
+# mirrors logic for choosing SkipTest exception in testtools
+# - if none of the above, provide custom class
+try:
+ from unittest.case import SkipTest
+except ImportError:
+ try:
+ from unittest2.case import SkipTest
+ except ImportError:
+ class SkipTest(Exception):
+ """Raise this exception to mark a test as skipped.
+ """
+ pass
+
+
+class Skip(ErrorClassPlugin):
+ """
+ Plugin that installs a SKIP error class for the SkipTest
+ exception. When SkipTest is raised, the exception will be logged
+ in the skipped attribute of the result, 'S' or 'SKIP' (verbose)
+ will be output, and the exception will not be counted as an error
+ or failure.
+ """
+ enabled = True
+ skipped = ErrorClass(SkipTest,
+ label='SKIP',
+ isfailure=False)
+
+ def options(self, parser, env):
+ """
+ Add my options to command line.
+ """
+ env_opt = 'NOSE_WITHOUT_SKIP'
+ parser.add_option('--no-skip', action='store_true',
+ dest='noSkip', default=env.get(env_opt, False),
+ help="Disable special handling of SkipTest "
+ "exceptions.")
+
+ def configure(self, options, conf):
+ """
+ Configure plugin. Skip plugin is enabled by default.
+ """
+ if not self.can_configure:
+ return
+ self.conf = conf
+ disable = getattr(options, 'noSkip', False)
+ if disable:
+ self.enabled = False
+
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/testid.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/testid.py
new file mode 100644
index 00000000..c3f351e9
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/testid.py
@@ -0,0 +1,306 @@
+"""
+This plugin adds a test id (like #1) to each test name output. After
+you've run once to generate test ids, you can re-run individual
+tests by activating the plugin and passing the ids (with or
+without the # prefix) instead of test names.
+
+For example, if your normal test run looks like::
+
+ % nosetests -v
+ tests.test_a ... ok
+ tests.test_b ... ok
+ tests.test_c ... ok
+
+When adding ``--with-id`` you'll see::
+
+ % nosetests -v --with-id
+ #1 tests.test_a ... ok
+ #2 tests.test_b ... ok
+ #3 tests.test_c ... ok
+
+Then you can re-run individual tests by supplying just an id number::
+
+ % nosetests -v --with-id 2
+ #2 tests.test_b ... ok
+
+You can also pass multiple id numbers::
+
+ % nosetests -v --with-id 2 3
+ #2 tests.test_b ... ok
+ #3 tests.test_c ... ok
+
+Since most shells consider '#' a special character, you can leave it out when
+specifying a test id.
+
+Note that when run without the -v switch, no special output is displayed, but
+the ids file is still written.
+
+Looping over failed tests
+-------------------------
+
+This plugin also adds a mode that will direct the test runner to record
+failed tests. Subsequent test runs will then run only the tests that failed
+last time. Activate this mode with the ``--failed`` switch::
+
+ % nosetests -v --failed
+ #1 test.test_a ... ok
+ #2 test.test_b ... ERROR
+ #3 test.test_c ... FAILED
+ #4 test.test_d ... ok
+
+On the second run, only tests #2 and #3 will run::
+
+ % nosetests -v --failed
+ #2 test.test_b ... ERROR
+ #3 test.test_c ... FAILED
+
+As you correct errors and tests pass, they'll drop out of subsequent runs.
+
+First::
+
+ % nosetests -v --failed
+ #2 test.test_b ... ok
+ #3 test.test_c ... FAILED
+
+Second::
+
+ % nosetests -v --failed
+ #3 test.test_c ... FAILED
+
+When all tests pass, the full set will run on the next invocation.
+
+First::
+
+ % nosetests -v --failed
+ #3 test.test_c ... ok
+
+Second::
+
+ % nosetests -v --failed
+ #1 test.test_a ... ok
+ #2 test.test_b ... ok
+ #3 test.test_c ... ok
+ #4 test.test_d ... ok
+
+.. note ::
+
+ If you expect to use ``--failed`` regularly, it's a good idea to always run
+ using the ``--with-id`` option. This will ensure that an id file is always
+ created, allowing you to add ``--failed`` to the command line as soon as
+ you have failing tests. Otherwise, your first run using ``--failed`` will
+ (perhaps surprisingly) run *all* tests, because there won't be an id file
+ containing the record of failed tests from your previous run.
+
+"""
+__test__ = False
+
+import logging
+import os
+from nose.plugins import Plugin
+from nose.util import src, set
+
+try:
+ from pickle import dump, load
+except ImportError:
+ from pickle import dump, load
+
+log = logging.getLogger(__name__)
+
+
+class TestId(Plugin):
+ """
+ Activate to add a test id (like #1) to each test name output. Activate
+ with --failed to rerun failing tests only.
+ """
+ name = 'id'
+ idfile = None
+ collecting = True
+ loopOnFailed = False
+
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ Plugin.options(self, parser, env)
+ parser.add_option('--id-file', action='store', dest='testIdFile',
+ default='.noseids', metavar="FILE",
+ help="Store test ids found in test runs in this "
+ "file. Default is the file .noseids in the "
+ "working directory.")
+ parser.add_option('--failed', action='store_true',
+ dest='failed', default=False,
+ help="Run the tests that failed in the last "
+ "test run.")
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ Plugin.configure(self, options, conf)
+ if options.failed:
+ self.enabled = True
+ self.loopOnFailed = True
+ log.debug("Looping on failed tests")
+ self.idfile = os.path.expanduser(options.testIdFile)
+ if not os.path.isabs(self.idfile):
+ self.idfile = os.path.join(conf.workingDir, self.idfile)
+ self.id = 1
+ # Ids and tests are mirror images: ids are {id: test address} and
+ # tests are {test address: id}
+ self.ids = {}
+ self.tests = {}
+ self.failed = []
+ self.source_names = []
+ # used to track ids seen when tests is filled from
+ # loaded ids file
+ self._seen = {}
+ self._write_hashes = conf.verbosity >= 2
+
+ def finalize(self, result):
+ """Save new ids file, if needed.
+ """
+ if result.wasSuccessful():
+ self.failed = []
+ if self.collecting:
+ ids = dict(list(zip(list(self.tests.values()), list(self.tests.keys()))))
+ else:
+ ids = self.ids
+ fh = open(self.idfile, 'wb')
+ dump({'ids': ids,
+ 'failed': self.failed,
+ 'source_names': self.source_names}, fh)
+ fh.close()
+ log.debug('Saved test ids: %s, failed %s to %s',
+ ids, self.failed, self.idfile)
+
+ def loadTestsFromNames(self, names, module=None):
+ """Translate ids in the list of requested names into their
+ test addresses, if they are found in my dict of tests.
+ """
+ log.debug('ltfn %s %s', names, module)
+ try:
+ fh = open(self.idfile, 'rb')
+ data = load(fh)
+ if 'ids' in data:
+ self.ids = data['ids']
+ self.failed = data['failed']
+ self.source_names = data['source_names']
+ else:
+ # old ids field
+ self.ids = data
+ self.failed = []
+ self.source_names = names
+ if self.ids:
+ self.id = max(self.ids) + 1
+ self.tests = dict(list(zip(list(self.ids.values()), list(self.ids.keys()))))
+ else:
+ self.id = 1
+ log.debug(
+ 'Loaded test ids %s tests %s failed %s sources %s from %s',
+ self.ids, self.tests, self.failed, self.source_names,
+ self.idfile)
+ fh.close()
+ except IOError:
+ log.debug('IO error reading %s', self.idfile)
+
+ if self.loopOnFailed and self.failed:
+ self.collecting = False
+ names = self.failed
+ self.failed = []
+ # I don't load any tests myself, only translate names like '#2'
+ # into the associated test addresses
+ translated = []
+ new_source = []
+ really_new = []
+ for name in names:
+ trans = self.tr(name)
+ if trans != name:
+ translated.append(trans)
+ else:
+ new_source.append(name)
+ # names that are not ids and that are not in the current
+ # list of source names go into the list for next time
+ if new_source:
+ new_set = set(new_source)
+ old_set = set(self.source_names)
+ log.debug("old: %s new: %s", old_set, new_set)
+ really_new = [s for s in new_source
+ if not s in old_set]
+ if really_new:
+ # remember new sources
+ self.source_names.extend(really_new)
+ if not translated:
+ # new set of source names, no translations
+ # means "run the requested tests"
+ names = new_source
+ else:
+ # no new names to translate and add to id set
+ self.collecting = False
+ log.debug("translated: %s new sources %s names %s",
+ translated, really_new, names)
+ return (None, translated + really_new or names)
+
+ def makeName(self, addr):
+ log.debug("Make name %s", addr)
+ filename, module, call = addr
+ if filename is not None:
+ head = src(filename)
+ else:
+ head = module
+ if call is not None:
+ return "%s:%s" % (head, call)
+ return head
+
+ def setOutputStream(self, stream):
+ """Get handle on output stream so the plugin can print id #s
+ """
+ self.stream = stream
+
+ def startTest(self, test):
+ """Maybe output an id # before the test name.
+
+ Example output::
+
+ #1 test.test ... ok
+ #2 test.test_two ... ok
+
+ """
+ adr = test.address()
+ log.debug('start test %s (%s)', adr, adr in self.tests)
+ if adr in self.tests:
+ if adr in self._seen:
+ self.write(' ')
+ else:
+ self.write('#%s ' % self.tests[adr])
+ self._seen[adr] = 1
+ return
+ self.tests[adr] = self.id
+ self.write('#%s ' % self.id)
+ self.id += 1
+
+ def afterTest(self, test):
+ # None means test never ran, False means failed/err
+ if test.passed is False:
+ try:
+ key = str(self.tests[test.address()])
+ except KeyError:
+ # never saw this test -- startTest didn't run
+ pass
+ else:
+ if key not in self.failed:
+ self.failed.append(key)
+
+ def tr(self, name):
+ log.debug("tr '%s'", name)
+ try:
+ key = int(name.replace('#', ''))
+ except ValueError:
+ return name
+ log.debug("Got key %s", key)
+ # I'm running tests mapped from the ids file,
+ # not collecting new ones
+ if key in self.ids:
+ return self.makeName(self.ids[key])
+ return name
+
+ def write(self, output):
+ if self._write_hashes:
+ self.stream.write(output)
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/plugins/xunit.py b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/xunit.py
new file mode 100644
index 00000000..4efb4e14
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/plugins/xunit.py
@@ -0,0 +1,329 @@
+"""This plugin provides test results in the standard XUnit XML format.
+
+It's designed for the `Jenkins`_ (previously Hudson) continuous build
+system, but will probably work for anything else that understands an
+XUnit-formatted XML representation of test results.
+
+Add this shell command to your builder ::
+
+ nosetests --with-xunit
+
+And by default a file named nosetests.xml will be written to the
+working directory.
+
+In a Jenkins builder, tick the box named "Publish JUnit test result report"
+under the Post-build Actions and enter this value for Test report XMLs::
+
+ **/nosetests.xml
+
+If you need to change the name or location of the file, you can set the
+``--xunit-file`` option.
+
+Here is an abbreviated version of what an XML test report might look like::
+
+ <?xml version="1.0" encoding="UTF-8"?>
+ <testsuite name="nosetests" tests="1" errors="1" failures="0" skip="0">
+ <testcase classname="path_to_test_suite.TestSomething"
+ name="test_it" time="0">
+ <error type="exceptions.TypeError" message="oops, wrong type">
+ Traceback (most recent call last):
+ ...
+ TypeError: oops, wrong type
+ </error>
+ </testcase>
+ </testsuite>
+
+.. _Jenkins: http://jenkins-ci.org/
+
+"""
+import codecs
+import doctest
+import os
+import sys
+import traceback
+import re
+import inspect
+from io import StringIO
+from time import time
+from xml.sax import saxutils
+
+from nose.plugins.base import Plugin
+from nose.exc import SkipTest
+from nose.pyversion import force_unicode, format_exception
+
+# Invalid XML characters, control characters 0-31 sans \t, \n and \r
+CONTROL_CHARACTERS = re.compile(r"[\000-\010\013\014\016-\037]")
+
+TEST_ID = re.compile(r'^(.*?)(\(.*\))$')
+
+def xml_safe(value):
+ """Replaces invalid XML characters with '?'."""
+ return CONTROL_CHARACTERS.sub('?', value)
+
+def escape_cdata(cdata):
+ """Escape a string for an XML CDATA section."""
+ return xml_safe(cdata).replace(']]>', ']]>]]&gt;<![CDATA[')
+
+def id_split(idval):
+ m = TEST_ID.match(idval)
+ if m:
+ name, fargs = m.groups()
+ head, tail = name.rsplit(".", 1)
+ return [head, tail+fargs]
+ else:
+ return idval.rsplit(".", 1)
+
+def nice_classname(obj):
+ """Returns a nice name for class object or class instance.
+
+ >>> nice_classname(Exception()) # doctest: +ELLIPSIS
+ '...Exception'
+ >>> nice_classname(Exception) # doctest: +ELLIPSIS
+ '...Exception'
+
+ """
+ if inspect.isclass(obj):
+ cls_name = obj.__name__
+ else:
+ cls_name = obj.__class__.__name__
+ mod = inspect.getmodule(obj)
+ if mod:
+ name = mod.__name__
+ # jython
+ if name.startswith('org.python.core.'):
+ name = name[len('org.python.core.'):]
+ return "%s.%s" % (name, cls_name)
+ else:
+ return cls_name
+
+def exc_message(exc_info):
+ """Return the exception's message."""
+ exc = exc_info[1]
+ if exc is None:
+ # str exception
+ result = exc_info[0]
+ else:
+ try:
+ result = str(exc)
+ except UnicodeEncodeError:
+ try:
+ result = str(exc)
+ except UnicodeError:
+ # Fallback to args as neither str nor
+ # unicode(Exception(u'\xe6')) work in Python < 2.6
+ result = exc.args[0]
+ result = force_unicode(result, 'UTF-8')
+ return xml_safe(result)
+
+class Tee(object):
+ def __init__(self, encoding, *args):
+ self._encoding = encoding
+ self._streams = args
+
+ def write(self, data):
+ data = force_unicode(data, self._encoding)
+ for s in self._streams:
+ s.write(data)
+
+ def writelines(self, lines):
+ for line in lines:
+ self.write(line)
+
+ def flush(self):
+ for s in self._streams:
+ s.flush()
+
+ def isatty(self):
+ return False
+
+
+class Xunit(Plugin):
+    """This plugin provides test results in the standard XUnit XML format."""
+    name = 'xunit'
+    score = 1500
+    encoding = 'UTF-8'
+    error_report_file = None
+
+    def __init__(self):
+        super(Xunit, self).__init__()
+        # Stack of saved (stdout, stderr) pairs so captures can nest
+        # (context-level capture around per-test capture).
+        self._capture_stack = []
+        self._currentStdout = None
+        self._currentStderr = None
+
+    def _timeTaken(self):
+        # Wall-clock seconds since beforeTest() set the timer.
+        if hasattr(self, '_timer'):
+            taken = time() - self._timer
+        else:
+            # test died before it ran (probably error in setup())
+            # or success/failure added before test started probably
+            # due to custom TestResult munging
+            taken = 0.0
+        return taken
+
+    def _quoteattr(self, attr):
+        """Escape an XML attribute. Value can be unicode."""
+        attr = xml_safe(attr)
+        return saxutils.quoteattr(attr)
+
+    def options(self, parser, env):
+        """Sets additional command line options."""
+        Plugin.options(self, parser, env)
+        parser.add_option(
+            '--xunit-file', action='store',
+            dest='xunit_file', metavar="FILE",
+            default=env.get('NOSE_XUNIT_FILE', 'nosetests.xml'),
+            help=("Path to xml file to store the xunit report in. "
+                  "Default is nosetests.xml in the working directory "
+                  "[NOSE_XUNIT_FILE]"))
+
+    def configure(self, options, config):
+        """Configures the xunit plugin."""
+        Plugin.configure(self, options, config)
+        self.config = config
+        if self.enabled:
+            # Counters feeding the <testsuite> summary attributes.
+            self.stats = {'errors': 0,
+                          'failures': 0,
+                          'passes': 0,
+                          'skipped': 0
+                          }
+            self.errorlist = []
+            self.error_report_file_name = os.path.realpath(options.xunit_file)
+
+    def report(self, stream):
+        """Writes an Xunit-formatted XML file
+
+        The file includes a report of test errors and failures.
+
+        """
+        self.error_report_file = codecs.open(self.error_report_file_name, 'w',
+                                             self.encoding, 'replace')
+        self.stats['encoding'] = self.encoding
+        self.stats['total'] = (self.stats['errors'] + self.stats['failures']
+                               + self.stats['passes'] + self.stats['skipped'])
+        self.error_report_file.write(
+            '<?xml version="1.0" encoding="%(encoding)s"?>'
+            '<testsuite name="nosetests" tests="%(total)d" '
+            'errors="%(errors)d" failures="%(failures)d" '
+            'skip="%(skipped)d">' % self.stats)
+        # errorlist entries are pre-formatted <testcase> fragments.
+        self.error_report_file.write(''.join([force_unicode(e, self.encoding)
+                                              for e in self.errorlist]))
+        self.error_report_file.write('</testsuite>')
+        self.error_report_file.close()
+        if self.config.verbosity > 1:
+            stream.writeln("-" * 70)
+            stream.writeln("XML: %s" % self.error_report_file.name)
+
+    def _startCapture(self):
+        # Save the current streams, then replace them with Tee objects that
+        # both record output and pass it through to the originals.
+        self._capture_stack.append((sys.stdout, sys.stderr))
+        self._currentStdout = StringIO()
+        self._currentStderr = StringIO()
+        sys.stdout = Tee(self.encoding, self._currentStdout, sys.stdout)
+        sys.stderr = Tee(self.encoding, self._currentStderr, sys.stderr)
+
+    def startContext(self, context):
+        self._startCapture()
+
+    def stopContext(self, context):
+        self._endCapture()
+
+    def beforeTest(self, test):
+        """Initializes a timer before starting a test."""
+        self._timer = time()
+        self._startCapture()
+
+    def _endCapture(self):
+        # Restore the most recently saved streams, if any.
+        if self._capture_stack:
+            sys.stdout, sys.stderr = self._capture_stack.pop()
+
+    def afterTest(self, test):
+        self._endCapture()
+        self._currentStdout = None
+        self._currentStderr = None
+
+    def finalize(self, test):
+        # Unwind any remaining captures (e.g. contexts never stopped).
+        while self._capture_stack:
+            self._endCapture()
+
+    def _getCapturedStdout(self):
+        # Returns a <system-out> fragment, or '' when nothing was captured.
+        if self._currentStdout:
+            value = self._currentStdout.getvalue()
+            if value:
+                return '<system-out><![CDATA[%s]]></system-out>' % escape_cdata(
+                        value)
+        return ''
+
+    def _getCapturedStderr(self):
+        # Returns a <system-err> fragment, or '' when nothing was captured.
+        if self._currentStderr:
+            value = self._currentStderr.getvalue()
+            if value:
+                return '<system-err><![CDATA[%s]]></system-err>' % escape_cdata(
+                        value)
+        return ''
+
+    def addError(self, test, err, capt=None):
+        """Add error output to Xunit report.
+        """
+        taken = self._timeTaken()
+
+        # SkipTest is reported as 'skipped' rather than 'error'.
+        if issubclass(err[0], SkipTest):
+            type = 'skipped'
+            self.stats['skipped'] += 1
+        else:
+            type = 'error'
+            self.stats['errors'] += 1
+
+        tb = format_exception(err, self.encoding)
+        id = test.id()
+
+        self.errorlist.append(
+            '<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
+            '<%(type)s type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
+            '</%(type)s>%(systemout)s%(systemerr)s</testcase>' %
+            {'cls': self._quoteattr(id_split(id)[0]),
+             'name': self._quoteattr(id_split(id)[-1]),
+             'taken': taken,
+             'type': type,
+             'errtype': self._quoteattr(nice_classname(err[0])),
+             'message': self._quoteattr(exc_message(err)),
+             'tb': escape_cdata(tb),
+             'systemout': self._getCapturedStdout(),
+             'systemerr': self._getCapturedStderr(),
+             })
+
+    def addFailure(self, test, err, capt=None, tb_info=None):
+        """Add failure output to Xunit report.
+        """
+        taken = self._timeTaken()
+        tb = format_exception(err, self.encoding)
+        self.stats['failures'] += 1
+        id = test.id()
+
+        self.errorlist.append(
+            '<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
+            '<failure type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
+            '</failure>%(systemout)s%(systemerr)s</testcase>' %
+            {'cls': self._quoteattr(id_split(id)[0]),
+             'name': self._quoteattr(id_split(id)[-1]),
+             'taken': taken,
+             'errtype': self._quoteattr(nice_classname(err[0])),
+             'message': self._quoteattr(exc_message(err)),
+             'tb': escape_cdata(tb),
+             'systemout': self._getCapturedStdout(),
+             'systemerr': self._getCapturedStderr(),
+             })
+
+    def addSuccess(self, test, capt=None):
+        """Add success output to Xunit report.
+        """
+        taken = self._timeTaken()
+        self.stats['passes'] += 1
+        id = test.id()
+        self.errorlist.append(
+            '<testcase classname=%(cls)s name=%(name)s '
+            'time="%(taken).3f">%(systemout)s%(systemerr)s</testcase>' %
+            {'cls': self._quoteattr(id_split(id)[0]),
+             'name': self._quoteattr(id_split(id)[-1]),
+             'taken': taken,
+             'systemout': self._getCapturedStdout(),
+             'systemerr': self._getCapturedStderr(),
+             })
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/proxy.py b/scripts/external_libs/nose-1.3.4/python3/nose/proxy.py
new file mode 100644
index 00000000..c2676cb1
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/proxy.py
@@ -0,0 +1,188 @@
+"""
+Result Proxy
+------------
+
+The result proxy wraps the result instance given to each test. It
+performs two functions: enabling extended error/failure reporting
+and calling plugins.
+
+As each result event is fired, plugins are called with the same event;
+however, plugins are called with the nose.case.Test instance that
+wraps the actual test. So when a test fails and calls
+result.addFailure(self, err), the result proxy calls
+addFailure(self.test, err) for each plugin. This allows plugins to
+have a single stable interface for all test types, and also to
+manipulate the test object itself by setting the `test` attribute of
+the nose.case.Test that they receive.
+"""
+import logging
+from nose.config import Config
+
+
+log = logging.getLogger(__name__)
+
+
+def proxied_attribute(local_attr, proxied_attr, doc):
+    """Create a property that proxies attribute ``proxied_attr`` through
+    the local attribute ``local_attr``.
+    """
+    # Reads, writes and deletes are all forwarded to
+    # getattr(self, local_attr).<proxied_attr>.
+    def fget(self):
+        return getattr(getattr(self, local_attr), proxied_attr)
+    def fset(self, value):
+        setattr(getattr(self, local_attr), proxied_attr, value)
+    def fdel(self):
+        delattr(getattr(self, local_attr), proxied_attr)
+    return property(fget, fset, fdel, doc)
+
+
+class ResultProxyFactory(object):
+    """Factory for result proxies. Generates a ResultProxy bound to each test
+    and the result passed to the test.
+    """
+    def __init__(self, config=None):
+        if config is None:
+            config = Config()
+        self.config = config
+        # Plugins get exactly one chance (on first __call__) to replace the
+        # result object; __result caches any plugin-supplied replacement.
+        self.__prepared = False
+        self.__result = None
+
+    def __call__(self, result, test):
+        """Return a ResultProxy for the current test.
+
+        On first call, plugins are given a chance to replace the
+        result used for the remaining tests. If a plugin returns a
+        value from prepareTestResult, that object will be used as the
+        result for all tests.
+        """
+        if not self.__prepared:
+            self.__prepared = True
+            plug_result = self.config.plugins.prepareTestResult(result)
+            if plug_result is not None:
+                self.__result = result = plug_result
+        if self.__result is not None:
+            result = self.__result
+        return ResultProxy(result, test, config=self.config)
+
+
+class ResultProxy(object):
+    """Proxy to TestResults (or other results handler).
+
+    One ResultProxy is created for each nose.case.Test. The result
+    proxy calls plugins with the nose.case.Test instance (instead of
+    the wrapped test case) as each result call is made. Finally, the
+    real result method is called, also with the nose.case.Test
+    instance as the test parameter.
+
+    """
+    def __init__(self, result, test, config=None):
+        if config is None:
+            config = Config()
+        self.config = config
+        self.plugins = config.plugins
+        self.result = result
+        self.test = test
+
+    def __repr__(self):
+        return repr(self.result)
+
+    def _prepareErr(self, err):
+        # Ensure err[1] is a real Exception before handing it to the
+        # underlying result.
+        if not isinstance(err[1], Exception) and isinstance(err[0], type):
+            # Turn value back into an Exception (required in Python 3.x).
+            # Plugins do all sorts of crazy things with exception values.
+            # Convert it to a custom subclass of Exception with the same
+            # name as the actual exception to make it print correctly.
+            value = type(err[0].__name__, (Exception,), {})(err[1])
+            err = (err[0], value, err[2])
+        return err
+
+    def assertMyTest(self, test):
+        # The test I was called with must be my .test or my
+        # .test's .test. or my .test.test's .case
+
+        case = getattr(self.test, 'test', None)
+        assert (test is self.test
+                or test is case
+                or test is getattr(case, '_nose_case', None)), (
+                "ResultProxy for %r (%s) was called with test %r (%s)"
+                % (self.test, id(self.test), test, id(test)))
+
+    def afterTest(self, test):
+        self.assertMyTest(test)
+        self.plugins.afterTest(self.test)
+        if hasattr(self.result, "afterTest"):
+            self.result.afterTest(self.test)
+
+    def beforeTest(self, test):
+        self.assertMyTest(test)
+        self.plugins.beforeTest(self.test)
+        if hasattr(self.result, "beforeTest"):
+            self.result.beforeTest(self.test)
+
+    def addError(self, test, err):
+        self.assertMyTest(test)
+        plugins = self.plugins
+        # A plugin may claim the error entirely, suppressing normal handling.
+        plugin_handled = plugins.handleError(self.test, err)
+        if plugin_handled:
+            return
+        # test.passed is set in result, to account for error classes
+        formatted = plugins.formatError(self.test, err)
+        if formatted is not None:
+            err = formatted
+        plugins.addError(self.test, err)
+        self.result.addError(self.test, self._prepareErr(err))
+        if not self.result.wasSuccessful() and self.config.stopOnError:
+            self.shouldStop = True
+
+    def addFailure(self, test, err):
+        self.assertMyTest(test)
+        plugins = self.plugins
+        plugin_handled = plugins.handleFailure(self.test, err)
+        if plugin_handled:
+            return
+        self.test.passed = False
+        formatted = plugins.formatFailure(self.test, err)
+        if formatted is not None:
+            err = formatted
+        plugins.addFailure(self.test, err)
+        self.result.addFailure(self.test, self._prepareErr(err))
+        # NOTE(review): unlike addError, this does not consult
+        # wasSuccessful() before stopping -- presumably intentional since a
+        # failure always makes the run unsuccessful; verify against upstream.
+        if self.config.stopOnError:
+            self.shouldStop = True
+
+    def addSkip(self, test, reason):
+        # 2.7 compat shim
+        from nose.plugins.skip import SkipTest
+        self.assertMyTest(test)
+        plugins = self.plugins
+        if not isinstance(reason, Exception):
+            # for Python 3.2+
+            reason = Exception(reason)
+        plugins.addError(self.test, (SkipTest, reason, None))
+        self.result.addSkip(self.test, reason)
+
+    def addSuccess(self, test):
+        self.assertMyTest(test)
+        self.plugins.addSuccess(self.test)
+        self.result.addSuccess(self.test)
+
+    def startTest(self, test):
+        self.assertMyTest(test)
+        self.plugins.startTest(self.test)
+        self.result.startTest(self.test)
+
+    def stop(self):
+        self.result.stop()
+
+    def stopTest(self, test):
+        self.assertMyTest(test)
+        self.plugins.stopTest(self.test)
+        self.result.stopTest(self.test)
+
+    # proxied attributes
+    shouldStop = proxied_attribute('result', 'shouldStop',
+                                   """Should the test run stop?""")
+    errors = proxied_attribute('result', 'errors',
+                               """Tests that raised an exception""")
+    failures = proxied_attribute('result', 'failures',
+                                 """Tests that failed""")
+    testsRun = proxied_attribute('result', 'testsRun',
+                                 """Number of tests run""")
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/pyversion.py b/scripts/external_libs/nose-1.3.4/python3/nose/pyversion.py
new file mode 100644
index 00000000..922b0490
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/pyversion.py
@@ -0,0 +1,214 @@
+"""
+This module contains fixups for using nose under different versions of Python.
+"""
+import sys
+import os
+import traceback
+import types
+import inspect
+import nose.util
+
+__all__ = ['make_instancemethod', 'cmp_to_key', 'sort_list', 'ClassType',
+ 'TypeType', 'UNICODE_STRINGS', 'unbound_method', 'ismethod',
+ 'bytes_', 'is_base_exception', 'force_unicode', 'exc_to_unicode',
+ 'format_exception']
+
+# In Python 3.x, all strings are unicode (the call to 'unicode()' in the 2.x
+# source will be replaced with 'str()' when running 2to3, so this test will
+# then become true)
+UNICODE_STRINGS = (type(str()) == type(str()))
+
+if sys.version_info[:2] < (3, 0):
+    def force_unicode(s, encoding='UTF-8'):
+        # NOTE(review): this branch is effectively dead in the python3 tree;
+        # 2to3 rewrote the original unicode(s) call to str(s), so the except
+        # clause can no longer fire as it did on Python 2. Left as generated.
+        try:
+            s = str(s)
+        except UnicodeDecodeError:
+            s = str(s).decode(encoding, 'replace')
+
+        return s
+else:
+    def force_unicode(s, encoding='UTF-8'):
+        # Python 3: every str is already unicode; the encoding arg is unused.
+        return str(s)
+
+# new.instancemethod() is obsolete for new-style classes (Python 3.x)
+# We need to use descriptor methods instead.
+try:
+    import new
+    def make_instancemethod(function, instance):
+        # Python 2: rebind via the (removed-in-3.x) 'new' module.
+        return new.instancemethod(function.__func__, instance,
+                                  instance.__class__)
+except ImportError:
+    def make_instancemethod(function, instance):
+        # Python 3: functions are descriptors; __get__ yields a bound method.
+        return function.__get__(instance, instance.__class__)
+
+# To be forward-compatible, we do all list sorts using keys instead of cmp
+# functions. However, part of the unittest.TestLoader API involves a
+# user-providable cmp function, so we need some way to convert that.
+def cmp_to_key(mycmp):
+    'Convert a cmp= function into a key= function'
+    # NOTE(review): only __lt__, __gt__ and __eq__ are defined -- enough
+    # for list.sort(), but <=, >= and != comparisons fall back to defaults.
+    class Key(object):
+        def __init__(self, obj):
+            self.obj = obj
+        def __lt__(self, other):
+            return mycmp(self.obj, other.obj) < 0
+        def __gt__(self, other):
+            return mycmp(self.obj, other.obj) > 0
+        def __eq__(self, other):
+            return mycmp(self.obj, other.obj) == 0
+    return Key
+
+# Python 2.3 also does not support list-sorting by key, so we need to convert
+# keys to cmp functions if we're running on old Python..
+if sys.version_info < (2, 4):
+    def sort_list(l, key, reverse=False):
+        # Emulate key-based sorting via a cmp function; sorts in place and
+        # returns None, like list.sort().
+        if reverse:
+            return l.sort(lambda a, b: cmp(key(b), key(a)))
+        else:
+            return l.sort(lambda a, b: cmp(key(a), key(b)))
+else:
+    def sort_list(l, key, reverse=False):
+        # In-place sort; returns None, like list.sort().
+        return l.sort(key=key, reverse=reverse)
+
+# In Python 3.x, all objects are "new style" objects descended from 'type', and
+# thus types.ClassType and types.TypeType don't exist anymore. For
+# compatibility, we make sure they still work.
+if hasattr(types, 'ClassType'):
+    # NOTE(review): both branches are identical in this 2to3-converted tree;
+    # presumably the original 2.x source assigned types.ClassType and
+    # types.TypeType here -- verify against upstream if this matters.
+    ClassType = type
+    TypeType = type
+else:
+    ClassType = type
+    TypeType = type
+
+# The following emulates the behavior (we need) of an 'unbound method' under
+# Python 3.x (namely, the ability to have a class associated with a function
+# definition so that things can do stuff based on its associated class)
+class UnboundMethod:
+    def __init__(self, cls, func):
+        # Make sure we have all the same attributes as the original function,
+        # so that the AttributeSelector plugin will work correctly...
+        self.__dict__ = func.__dict__.copy()
+        self._func = func
+        self.__self__ = UnboundSelf(cls)
+        if sys.version_info < (3, 0):
+            self.__self__.__class__ = cls
+
+    def address(self):
+        # Return (source file, module name, "Class.method") identifying
+        # where this method lives; used for test addressing.
+        cls = self.__self__.cls
+        modname = cls.__module__
+        module = sys.modules[modname]
+        filename = getattr(module, '__file__', None)
+        if filename is not None:
+            filename = os.path.abspath(filename)
+        return (nose.util.src(filename), modname, "%s.%s" % (cls.__name__,
+                                                             self._func.__name__))
+
+    def __call__(self, *args, **kwargs):
+        return self._func(*args, **kwargs)
+
+    def __getattr__(self, attr):
+        # Delegate any unknown attribute lookups to the wrapped function.
+        return getattr(self._func, attr)
+
+    def __repr__(self):
+        return '<unbound method %s.%s>' % (self.__self__.cls.__name__,
+                                           self._func.__name__)
+
+class UnboundSelf:
+    """Stand-in 'self' whose __class__ reports the wrapped class."""
+    def __init__(self, cls):
+        self.cls = cls
+
+    # We have to do this hackery because Python won't let us override the
+    # __class__ attribute...
+    def __getattribute__(self, attr):
+        if attr == '__class__':
+            return self.cls
+        else:
+            return object.__getattribute__(self, attr)
+
+def unbound_method(cls, func):
+    """Associate ``func`` with ``cls`` via an UnboundMethod wrapper.
+
+    Already-bound methods pass through unchanged; non-functions raise
+    TypeError.
+    """
+    if inspect.ismethod(func):
+        return func
+    if not inspect.isfunction(func):
+        raise TypeError('%s is not a function' % (repr(func),))
+    return UnboundMethod(cls, func)
+
+def ismethod(obj):
+    # True for real methods and for our UnboundMethod shim.
+    return inspect.ismethod(obj) or isinstance(obj, UnboundMethod)
+
+
+# Make a pseudo-bytes function that can be called without the encoding arg:
+if sys.version_info >= (3, 0):
+    def bytes_(s, encoding='utf8'):
+        # Already-encoded input passes through untouched.
+        if isinstance(s, bytes):
+            return s
+        return bytes(s, encoding)
+else:
+    def bytes_(s, encoding=None):
+        # Python 2: str is already a byte string; encoding is ignored.
+        return str(s)
+
+
+if sys.version_info[:2] >= (2, 6):
+    def isgenerator(o):
+        # Unwrap our UnboundMethod shim to inspect the real function.
+        if isinstance(o, UnboundMethod):
+            o = o._func
+        return inspect.isgeneratorfunction(o) or inspect.isgenerator(o)
+else:
+    try:
+        from compiler.consts import CO_GENERATOR
+    except ImportError:
+        # IronPython doesn't have a compiler module
+        CO_GENERATOR=0x20
+
+    def isgenerator(func):
+        # Pre-2.6 fallback: inspect the code object's flags directly.
+        try:
+            return func.__code__.co_flags & CO_GENERATOR != 0
+        except AttributeError:
+            return False
+
+# Make a function to help check if an exception is derived from BaseException.
+# In Python 2.4, we just use Exception instead.
+if sys.version_info[:2] < (2, 5):
+    def is_base_exception(exc):
+        # BaseException does not exist before 2.5.
+        return isinstance(exc, Exception)
+else:
+    def is_base_exception(exc):
+        return isinstance(exc, BaseException)
+
+if sys.version_info[:2] < (3, 0):
+    def exc_to_unicode(ev, encoding='utf-8'):
+        # Render an exception (or arbitrary object) as a unicode string of
+        # the form "ClassName: message" on old Pythons lacking __unicode__.
+        if is_base_exception(ev):
+            if not hasattr(ev, '__unicode__'):
+                # 2.5-
+                if not hasattr(ev, 'message'):
+                    # 2.4
+                    msg = len(ev.args) and ev.args[0] or ''
+                else:
+                    msg = ev.message
+                msg = force_unicode(msg, encoding=encoding)
+                clsname = force_unicode(ev.__class__.__name__,
+                                        encoding=encoding)
+                ev = '%s: %s' % (clsname, msg)
+        elif not isinstance(ev, str):
+            ev = repr(ev)
+
+        return force_unicode(ev, encoding=encoding)
+else:
+    def exc_to_unicode(ev, encoding='utf-8'):
+        # Python 3: str() of an exception already yields readable unicode.
+        return str(ev)
+
+def format_exception(exc_info, encoding='UTF-8'):
+    """Format an (ec, ev, tb) triple as a unicode traceback string."""
+    ec, ev, tb = exc_info
+
+    # Our exception object may have been turned into a string, and Python 3's
+    # traceback.format_exception() doesn't take kindly to that (it expects an
+    # actual exception object). So we work around it, by doing the work
+    # ourselves if ev is not an exception object.
+    if not is_base_exception(ev):
+        tb_data = force_unicode(
+            ''.join(traceback.format_tb(tb)),
+            encoding)
+        ev = exc_to_unicode(ev)
+        return tb_data + ev
+    else:
+        return force_unicode(
+            ''.join(traceback.format_exception(*exc_info)),
+            encoding)
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/result.py b/scripts/external_libs/nose-1.3.4/python3/nose/result.py
new file mode 100644
index 00000000..37aa5101
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/result.py
@@ -0,0 +1,200 @@
+"""
+Test Result
+-----------
+
+Provides a TextTestResult that extends unittest's _TextTestResult to
+provide support for error classes (such as the builtin skip and
+deprecated classes), and hooks for plugins to take over or extend
+reporting.
+"""
+
+import logging
+try:
+ # 2.7+
+ from unittest.runner import _TextTestResult
+except ImportError:
+ from unittest import _TextTestResult
+from nose.config import Config
+from nose.util import isclass, ln as _ln # backwards compat
+
+log = logging.getLogger('nose.result')
+
+
+def _exception_detail(exc):
+    # this is what stdlib module traceback does
+    try:
+        return str(exc)
+    except:
+        # str() itself may raise (e.g. a broken __str__); never propagate.
+        return '<unprintable %s object>' % type(exc).__name__
+
+
+class TextTestResult(_TextTestResult):
+    """Text test result that extends unittest's default test result
+    support for a configurable set of errorClasses (eg, Skip,
+    Deprecated, TODO) that extend the errors/failures/success triad.
+    """
+    def __init__(self, stream, descriptions, verbosity, config=None,
+                 errorClasses=None):
+        # errorClasses maps exception class -> (storage list, label, isfail).
+        if errorClasses is None:
+            errorClasses = {}
+        self.errorClasses = errorClasses
+        if config is None:
+            config = Config()
+        self.config = config
+        _TextTestResult.__init__(self, stream, descriptions, verbosity)
+
+    def addSkip(self, test, reason):
+        # 2.7 skip compat
+        from nose.plugins.skip import SkipTest
+        if SkipTest in self.errorClasses:
+            storage, label, isfail = self.errorClasses[SkipTest]
+            storage.append((test, reason))
+            self.printLabel(label, (SkipTest, reason, None))
+
+    def addError(self, test, err):
+        """Overrides normal addError to add support for
+        errorClasses. If the exception is a registered class, the
+        error will be added to the list for that class, not errors.
+        """
+        ec, ev, tb = err
+        try:
+            exc_info = self._exc_info_to_string(err, test)
+        except TypeError:
+            # 2.3 compat
+            exc_info = self._exc_info_to_string(err)
+        for cls, (storage, label, isfail) in list(self.errorClasses.items()):
+            #if 'Skip' in cls.__name__ or 'Skip' in ec.__name__:
+            #    from nose.tools import set_trace
+            #    set_trace()
+            if isclass(ec) and issubclass(ec, cls):
+                if isfail:
+                    test.passed = False
+                storage.append((test, exc_info))
+                self.printLabel(label, err)
+                return
+        # Not a registered error class: record as a plain error.
+        self.errors.append((test, exc_info))
+        test.passed = False
+        self.printLabel('ERROR')
+
+    # override to bypass changes in 2.7
+    def getDescription(self, test):
+        if self.descriptions:
+            return test.shortDescription() or str(test)
+        else:
+            return str(test)
+
+    def printLabel(self, label, err=None):
+        # Might get patched into a streamless result
+        stream = getattr(self, 'stream', None)
+        if stream is not None:
+            if self.showAll:
+                message = [label]
+                if err:
+                    detail = _exception_detail(err[1])
+                    if detail:
+                        message.append(detail)
+                stream.writeln(": ".join(message))
+            elif self.dots:
+                # Dot mode: one character per result.
+                stream.write(label[:1])
+
+    def printErrors(self):
+        """Overrides to print all errorClasses errors as well.
+        """
+        _TextTestResult.printErrors(self)
+        for cls in list(self.errorClasses.keys()):
+            storage, label, isfail = self.errorClasses[cls]
+            if isfail:
+                self.printErrorList(label, storage)
+        # Might get patched into a result with no config
+        if hasattr(self, 'config'):
+            self.config.plugins.report(self.stream)
+
+    def printSummary(self, start, stop):
+        """Called by the test runner to print the final summary of test
+        run results.
+        """
+        write = self.stream.write
+        writeln = self.stream.writeln
+        taken = float(stop - start)
+        run = self.testsRun
+        plural = run != 1 and "s" or ""
+
+        writeln(self.separator2)
+        writeln("Ran %s test%s in %.3fs" % (run, plural, taken))
+        writeln()
+
+        # Build a label -> count mapping for the parenthesized summary.
+        summary = {}
+        eckeys = list(self.errorClasses.keys())
+        for cls in eckeys:
+            storage, label, isfail = self.errorClasses[cls]
+            count = len(storage)
+            if not count:
+                continue
+            summary[label] = count
+        if len(self.failures):
+            summary['failures'] = len(self.failures)
+        if len(self.errors):
+            summary['errors'] = len(self.errors)
+
+        if not self.wasSuccessful():
+            write("FAILED")
+        else:
+            write("OK")
+        items = list(summary.items())
+        if items:
+            items.sort()
+            write(" (")
+            write(", ".join(["%s=%s" % (label, count) for
+                             label, count in items]))
+            writeln(")")
+        else:
+            writeln()
+
+    def wasSuccessful(self):
+        """Overrides to check that there are no errors in errorClasses
+        lists that are marked as errors and should cause a run to
+        fail.
+        """
+        if self.errors or self.failures:
+            return False
+        for cls in list(self.errorClasses.keys()):
+            storage, label, isfail = self.errorClasses[cls]
+            if not isfail:
+                continue
+            if storage:
+                return False
+        return True
+
+    def _addError(self, test, err):
+        # Minimal error recording used when bypassing errorClass handling.
+        try:
+            exc_info = self._exc_info_to_string(err, test)
+        except TypeError:
+            # 2.3: does not take test arg
+            exc_info = self._exc_info_to_string(err)
+        self.errors.append((test, exc_info))
+        if self.showAll:
+            self.stream.write('ERROR')
+        elif self.dots:
+            self.stream.write('E')
+
+    def _exc_info_to_string(self, err, test=None):
+        # 2.7 skip compat
+        from nose.plugins.skip import SkipTest
+        if isclass(err[0]) and issubclass(err[0], SkipTest):
+            return str(err[1])
+        # 2.3/2.4 -- 2.4 passes test, 2.3 does not
+        try:
+            return _TextTestResult._exc_info_to_string(self, err, test)
+        except TypeError:
+            # 2.3: does not take test arg
+            return _TextTestResult._exc_info_to_string(self, err)
+
+
+def ln(*arg, **kw):
+    # Deprecated alias kept for backwards compatibility; delegates to
+    # nose.util.ln after emitting a DeprecationWarning.
+    from warnings import warn
+    warn("ln() has moved to nose.util from nose.result and will be removed "
+         "from nose.result in a future release. Please update your imports ",
+         DeprecationWarning)
+    return _ln(*arg, **kw)
+
+
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/selector.py b/scripts/external_libs/nose-1.3.4/python3/nose/selector.py
new file mode 100644
index 00000000..6b8190de
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/selector.py
@@ -0,0 +1,247 @@
+"""
+Test Selection
+--------------
+
+Test selection is handled by a Selector. The test loader calls the
+appropriate selector method for each object it encounters that it
+thinks may be a test.
+"""
+import logging
+import os
+import unittest
+from nose.config import Config
+from nose.util import split_test_name, src, getfilename, getpackage, ispackage
+
+log = logging.getLogger(__name__)
+
+__all__ = ['Selector', 'defaultSelector', 'TestAddress']
+
+
+# for efficiency and easier mocking
+op_join = os.path.join
+op_basename = os.path.basename
+op_exists = os.path.exists
+op_splitext = os.path.splitext
+op_isabs = os.path.isabs
+op_abspath = os.path.abspath
+
+
+class Selector(object):
+    """Core test selector. Examines test candidates and determines whether,
+    given the specified configuration, the test candidate should be selected
+    as a test.
+    """
+    def __init__(self, config):
+        if config is None:
+            config = Config()
+        self.configure(config)
+
+    def configure(self, config):
+        # Cache frequently consulted config values as attributes.
+        self.config = config
+        self.exclude = config.exclude
+        self.ignoreFiles = config.ignoreFiles
+        self.include = config.include
+        self.plugins = config.plugins
+        self.match = config.testMatch
+
+    def matches(self, name):
+        """Does the name match my requirements?
+
+        To match, a name must match config.testMatch OR config.include
+        and it must not match config.exclude
+        """
+        return ((self.match.search(name)
+                 or (self.include and
+                     [_f for _f in [inc.search(name) for inc in self.include] if _f]))
+                and ((not self.exclude)
+                     or not [_f for _f in [exc.search(name) for exc in self.exclude] if _f]
+                     ))
+
+    def wantClass(self, cls):
+        """Is the class a wanted test class?
+
+        A class must be a unittest.TestCase subclass, or match test name
+        requirements. Classes that start with _ are always excluded.
+        """
+        declared = getattr(cls, '__test__', None)
+        if declared is not None:
+            # An explicit __test__ attribute always wins.
+            wanted = declared
+        else:
+            wanted = (not cls.__name__.startswith('_')
+                      and (issubclass(cls, unittest.TestCase)
+                           or self.matches(cls.__name__)))
+
+        # Plugins may override the default decision either way.
+        plug_wants = self.plugins.wantClass(cls)
+        if plug_wants is not None:
+            log.debug("Plugin setting selection of %s to %s", cls, plug_wants)
+            wanted = plug_wants
+        log.debug("wantClass %s? %s", cls, wanted)
+        return wanted
+
+    def wantDirectory(self, dirname):
+        """Is the directory a wanted test directory?
+
+        All package directories match, so long as they do not match exclude.
+        All other directories must match test requirements.
+        """
+        tail = op_basename(dirname)
+        if ispackage(dirname):
+            wanted = (not self.exclude
+                      or not [_f for _f in [exc.search(tail) for exc in self.exclude] if _f])
+        else:
+            wanted = (self.matches(tail)
+                      or (self.config.srcDirs
+                          and tail in self.config.srcDirs))
+        plug_wants = self.plugins.wantDirectory(dirname)
+        if plug_wants is not None:
+            log.debug("Plugin setting selection of %s to %s",
+                      dirname, plug_wants)
+            wanted = plug_wants
+        log.debug("wantDirectory %s? %s", dirname, wanted)
+        return wanted
+
+    def wantFile(self, file):
+        """Is the file a wanted test file?
+
+        The file must be a python source file and match testMatch or
+        include, and not match exclude. Files that match ignore are *never*
+        wanted, regardless of plugin, testMatch, include or exclude settings.
+        """
+        # never, ever load files that match anything in ignore
+        # (.* _* and *setup*.py by default)
+        base = op_basename(file)
+        ignore_matches = [ ignore_this for ignore_this in self.ignoreFiles
+                           if ignore_this.search(base) ]
+        if ignore_matches:
+            log.debug('%s matches ignoreFiles pattern; skipped',
+                      base)
+            return False
+        if not self.config.includeExe and os.access(file, os.X_OK):
+            log.info('%s is executable; skipped', file)
+            return False
+        dummy, ext = op_splitext(base)
+        pysrc = ext == '.py'
+
+        wanted = pysrc and self.matches(base)
+        plug_wants = self.plugins.wantFile(file)
+        if plug_wants is not None:
+            log.debug("plugin setting want %s to %s", file, plug_wants)
+            wanted = plug_wants
+        log.debug("wantFile %s? %s", file, wanted)
+        return wanted
+
+    def wantFunction(self, function):
+        """Is the function a test function?
+        """
+        try:
+            # compat_func_name is set by nose for generated/wrapped functions.
+            if hasattr(function, 'compat_func_name'):
+                funcname = function.compat_func_name
+            else:
+                funcname = function.__name__
+        except AttributeError:
+            # not a function
+            return False
+        declared = getattr(function, '__test__', None)
+        if declared is not None:
+            wanted = declared
+        else:
+            wanted = not funcname.startswith('_') and self.matches(funcname)
+        plug_wants = self.plugins.wantFunction(function)
+        if plug_wants is not None:
+            wanted = plug_wants
+        log.debug("wantFunction %s? %s", function, wanted)
+        return wanted
+
+    def wantMethod(self, method):
+        """Is the method a test method?
+        """
+        try:
+            method_name = method.__name__
+        except AttributeError:
+            # not a method
+            return False
+        if method_name.startswith('_'):
+            # never collect 'private' methods
+            return False
+        declared = getattr(method, '__test__', None)
+        if declared is not None:
+            wanted = declared
+        else:
+            wanted = self.matches(method_name)
+        plug_wants = self.plugins.wantMethod(method)
+        if plug_wants is not None:
+            wanted = plug_wants
+        log.debug("wantMethod %s? %s", method, wanted)
+        return wanted
+
+    def wantModule(self, module):
+        """Is the module a test module?
+
+        The tail of the module name must match test requirements. One exception:
+        we always want __main__.
+        """
+        declared = getattr(module, '__test__', None)
+        if declared is not None:
+            wanted = declared
+        else:
+            wanted = self.matches(module.__name__.split('.')[-1]) \
+                     or module.__name__ == '__main__'
+        plug_wants = self.plugins.wantModule(module)
+        if plug_wants is not None:
+            wanted = plug_wants
+        log.debug("wantModule %s? %s", module, wanted)
+        return wanted
+
+# Default selector implementation used by the test loader.
+defaultSelector = Selector
+
+
+class TestAddress(object):
+    """A test address represents a user's request to run a particular
+    test. The user may specify a filename or module (or neither),
+    and/or a callable (a class, function, or method). The naming
+    format for test addresses is:
+
+    filename_or_module:callable
+
+    Filenames that are not absolute will be made absolute relative to
+    the working dir.
+
+    The filename or module part will be considered a module name if it
+    doesn't look like a file, that is, if it doesn't exist on the file
+    system and it doesn't contain any directory separators and it
+    doesn't end in .py.
+
+    Callables may be a class name, function name, method name, or
+    class.method specification.
+    """
+    def __init__(self, name, workingDir=None):
+        if workingDir is None:
+            workingDir = os.getcwd()
+        self.name = name
+        self.workingDir = workingDir
+        # split_test_name yields (filename, module, call); any may be None.
+        self.filename, self.module, self.call = split_test_name(name)
+        log.debug('Test name %s resolved to file %s, module %s, call %s',
+                  name, self.filename, self.module, self.call)
+        if self.filename is None:
+            if self.module is not None:
+                self.filename = getfilename(self.module, self.workingDir)
+        if self.filename:
+            # Map compiled files back to source and absolutize the path.
+            self.filename = src(self.filename)
+            if not op_isabs(self.filename):
+                self.filename = op_abspath(op_join(workingDir,
+                                                   self.filename))
+            if self.module is None:
+                self.module = getpackage(self.filename)
+        log.debug(
+            'Final resolution of test name %s: file %s module %s call %s',
+            name, self.filename, self.module, self.call)
+
+    def totuple(self):
+        # Convenience tuple form used by the test loader.
+        return (self.filename, self.module, self.call)
+
+    def __str__(self):
+        return self.name
+
+    def __repr__(self):
+        return "%s: (%s, %s, %s)" % (self.name, self.filename,
+                                     self.module, self.call)
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/sphinx/__init__.py b/scripts/external_libs/nose-1.3.4/python3/nose/sphinx/__init__.py
new file mode 100644
index 00000000..2ae28399
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/sphinx/__init__.py
@@ -0,0 +1 @@
+pass
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/sphinx/pluginopts.py b/scripts/external_libs/nose-1.3.4/python3/nose/sphinx/pluginopts.py
new file mode 100644
index 00000000..d2b284ab
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/sphinx/pluginopts.py
@@ -0,0 +1,189 @@
+"""
+Adds a sphinx directive that can be used to automatically document a plugin.
+
+this::
+
+ .. autoplugin :: nose.plugins.foo
+ :plugin: Pluggy
+
+produces::
+
+ .. automodule :: nose.plugins.foo
+
+ Options
+ -------
+
+ .. cmdoption :: --foo=BAR, --fooble=BAR
+
+ Do the foo thing to the new thing.
+
+ Plugin
+ ------
+
+ .. autoclass :: nose.plugins.foo.Pluggy
+ :members:
+
+ Source
+ ------
+
+ .. include :: path/to/nose/plugins/foo.py
+ :literal:
+
+"""
+import os
+try:
+ from docutils import nodes, utils
+ from docutils.statemachine import ViewList
+ from docutils.parsers.rst import directives
+except ImportError:
+ pass # won't run anyway
+
+from nose.util import resolve_name
+from nose.plugins.base import Plugin
+from nose.plugins.manager import BuiltinPluginManager
+from nose.config import Config
+from nose.core import TestProgram
+from inspect import isclass
+
+
def autoplugin_directive(dirname, arguments, options, content, lineno,
                         content_offset, block_text, state, state_machine):
    """Implement the ``autoplugin`` Sphinx directive.

    Builds an rst fragment documenting the plugin module named by the
    directive's first argument -- module docstring, command-line options
    (collected via the plugin's ``options()`` hook into an OptBucket),
    the plugin class, and the module source -- and parses the fragment
    into the current document, returning the resulting section nodes.

    :raises ValueError: if no ``Plugin`` subclass is found in the module
        and none was named with the ``:plugin:`` option.
    """
    mod_name = arguments[0]
    mod = resolve_name(mod_name)
    plug_name = options.get('plugin', None)
    if plug_name:
        obj = getattr(mod, plug_name)
    else:
        # Auto-detect: first Plugin subclass visible in the module.
        obj = None
        for entry in dir(mod):
            attr = getattr(mod, entry)
            if isclass(attr) and issubclass(attr, Plugin) and attr is not Plugin:
                obj = attr
                plug_name = '%s.%s' % (mod_name, entry)
                break
        if obj is None:
            # BUG FIX: the original fell through with ``obj`` unbound and
            # died later with a NameError; fail fast with a clear message.
            raise ValueError(
                "No nose.plugins.base.Plugin subclass found in %s; use the "
                ":plugin: option to name the plugin class" % mod_name)

    # mod docstring
    rst = ViewList()
    rst.append('.. automodule :: %s\n' % mod_name, '<autodoc>')
    rst.append('', '<autodoc>')

    # options
    rst.append('Options', '<autodoc>')
    rst.append('-------', '<autodoc>')
    rst.append('', '<autodoc>')

    # Instantiate the plugin and let it register its options into a
    # recording OptBucket, then render each as a ``cmdoption`` entry.
    plug = obj()
    opts = OptBucket()
    plug.options(opts, {})
    for opt in opts:
        rst.append(opt.options(), '<autodoc>')
        rst.append(' \n', '<autodoc>')
        rst.append(' ' + opt.help + '\n', '<autodoc>')
        rst.append('\n', '<autodoc>')

    # plugin class
    rst.append('Plugin', '<autodoc>')
    rst.append('------', '<autodoc>')
    rst.append('', '<autodoc>')

    rst.append('.. autoclass :: %s\n' % plug_name, '<autodoc>')
    rst.append(' :members:\n', '<autodoc>')
    rst.append(' :show-inheritance:\n', '<autodoc>')
    rst.append('', '<autodoc>')

    # source
    rst.append('Source', '<autodoc>')
    rst.append('------', '<autodoc>')
    rst.append(
        '.. include :: %s\n' % utils.relative_path(
            state_machine.document['source'],
            os.path.abspath(mod.__file__.replace('.pyc', '.py'))),
        '<autodoc>')
    rst.append(' :literal:\n', '<autodoc>')
    rst.append('', '<autodoc>')

    # Parse the fragment as a nested section, temporarily resetting the
    # rst parser's title/section-level state, then restoring it.
    node = nodes.section()
    node.document = state.document
    surrounding_title_styles = state.memo.title_styles
    surrounding_section_level = state.memo.section_level
    state.memo.title_styles = []
    state.memo.section_level = 0
    state.nested_parse(rst, 0, node, match_titles=1)
    state.memo.title_styles = surrounding_title_styles
    state.memo.section_level = surrounding_section_level

    return node.children
+
+
def autohelp_directive(dirname, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    """produces rst from nose help"""
    # Build a Config whose option parser is the recording OptBucket,
    # with all builtin plugins loaded so their options are included.
    config = Config(parserClass=OptBucket,
                    plugins=BuiltinPluginManager())
    parser = config.getParser(TestProgram.usage())
    rst = ViewList()
    for line in parser.format_help().split('\n'):
        rst.append(line, '<autodoc>')

    rst.append('Options', '<autodoc>')
    rst.append('-------', '<autodoc>')
    rst.append('', '<autodoc>')
    # OptBucket iterates over the Opt records gathered via add_option.
    for opt in parser:
        rst.append(opt.options(), '<autodoc>')
        rst.append(' \n', '<autodoc>')
        rst.append(' ' + opt.help + '\n', '<autodoc>')
        rst.append('\n', '<autodoc>')
    # Parse the fragment into a fresh section, saving and restoring the
    # surrounding rst parser title/section state around the nested parse.
    node = nodes.section()
    node.document = state.document
    surrounding_title_styles = state.memo.title_styles
    surrounding_section_level = state.memo.section_level
    state.memo.title_styles = []
    state.memo.section_level = 0
    state.nested_parse(rst, 0, node, match_titles=1)
    state.memo.title_styles = surrounding_title_styles
    state.memo.section_level = surrounding_section_level

    return node.children
+
+
class OptBucket(object):
    """Minimal stand-in for an optparse parser: records added options as
    Opt objects instead of parsing anything.  Iterating the bucket
    yields the recorded options in registration order.
    """
    def __init__(self, doc=None, prog='nosetests'):
        self.opts = []
        self.doc = doc
        self.prog = prog

    def __iter__(self):
        return iter(self.opts)

    def format_help(self):
        # Substitute the program name and promote plain-colon line ends
        # to rst literal-block markers.
        text = self.doc.replace('%prog', self.prog)
        return text.replace(':\n', '::\n')

    def add_option(self, *arg, **kw):
        # Same signature as optparse's add_option; just record it.
        self.opts.append(Opt(*arg, **kw))
+
+
class Opt(object):
    """A recorded command-line option: the option strings plus the
    subset of optparse keywords the rst renderer cares about.
    """
    def __init__(self, *arg, **kw):
        self.opts = arg
        self.action = kw.pop('action', None)
        self.default = kw.pop('default', None)
        self.metavar = kw.pop('metavar', None)
        self.help = kw.pop('help', None)

    def options(self):
        """Render the option strings as an rst ``cmdoption`` line."""
        rendered = []
        for optstring in self.opts:
            if self.action in ('store_true', 'store_false'):
                # Flags take no value.
                rendered.append(optstring)
            else:
                rendered.append('%s=%s' % (optstring, self.meta(optstring)))
        return '.. cmdoption :: ' + ', '.join(rendered)

    def meta(self, optstring):
        # FIXME optparser default metavar?
        return self.metavar or 'DEFAULT'
+
+
def setup(app):
    """Sphinx extension entry point: register both directives.

    Uses the legacy add_directive signature
    (name, func, content-flag, (required, optional, final-ws)) plus
    option-spec keywords -- NOTE(review): this predates modern Sphinx
    Directive classes; confirm against the Sphinx version in use.
    """
    app.add_directive('autoplugin',
                      autoplugin_directive, 1, (1, 0, 1),
                      plugin=directives.unchanged)
    app.add_directive('autohelp', autohelp_directive, 0, (0, 0, 1))
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/suite.py b/scripts/external_libs/nose-1.3.4/python3/nose/suite.py
new file mode 100644
index 00000000..7a79217c
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/suite.py
@@ -0,0 +1,610 @@
+"""
+Test Suites
+-----------
+
+Provides a LazySuite, which is a suite whose test list is a generator
+function, and ContextSuite,which can run fixtures (setup/teardown
+functions or methods) for the context that contains its tests.
+
+"""
+
+
+import logging
+import sys
+import unittest
+from nose.case import Test
+from nose.config import Config
+from nose.proxy import ResultProxyFactory
+from nose.util import isclass, resolve_name, try_run
+import collections
+
# IronPython compatibility: provide StringException, which _exc_info
# below special-cases.  On other platforms the name is never defined
# (and never referenced, thanks to the sys.platform guard there).
if sys.platform == 'cli':
    if sys.version_info[:2] < (2, 6):
        import clr
        clr.AddReference("IronPython")
        from IronPython.Runtime.Exceptions import StringException
    else:
        # Newer IronPython has no StringException; use a placeholder.
        class StringException(Exception):
            pass

log = logging.getLogger(__name__)
#log.setLevel(logging.DEBUG)

# Singleton for default value -- see ContextSuite.__init__ below
_def = object()
+
+
+def _strclass(cls):
+ return "%s.%s" % (cls.__module__, cls.__name__)
+
class MixedContextError(Exception):
    """Error raised when a context suite sees tests from more than
    one context.
    """
    # Raised by ContextSuiteFactory.findContext; the factory catches it
    # and falls back to building per-context suites via mixedSuites.
    pass
+
class LazySuite(unittest.TestSuite):
    """A suite that may use a generator as its list of tests.

    Tests flow through the ``_tests`` property, a generator that chains
    an eager precache list with the (optional) user-supplied generator;
    iteration may therefore not be repeatable.
    """
    def __init__(self, tests=()):
        """Initialize the suite. tests may be an iterable or a generator
        """
        super(LazySuite, self).__init__()
        self._set_tests(tests)

    def __iter__(self):
        return iter(self._tests)

    def __repr__(self):
        return "<%s tests=generator (%s)>" % (
            _strclass(self.__class__), id(self))

    def __hash__(self):
        return object.__hash__(self)

    __str__ = __repr__

    def addTest(self, test):
        # Late additions go to the precache, ahead of anything still
        # pending in the generator.
        self._precache.append(test)

    # added to bypass run changes in 2.7's unittest
    def run(self, result):
        for test in self._tests:
            if result.shouldStop:
                break
            test(result)
        return result

    def __bool__(self):
        """True if the suite has at least one test.

        May pull one item from the generator; the item is parked in the
        precache so later iteration still sees it.
        """
        log.debug("tests in %s?", id(self))
        if self._precache:
            return True
        if self.test_generator is None:
            return False
        try:
            test = next(self.test_generator)
            if test is not None:
                self._precache.append(test)
                return True
        except StopIteration:
            pass
        return False

    def _get_tests(self):
        log.debug("precache is %s", self._precache)
        for test in self._precache:
            yield test
        if self.test_generator is None:
            return
        for test in self.test_generator:
            yield test

    def _set_tests(self, tests):
        self._precache = []
        is_suite = isinstance(tests, unittest.TestSuite)
        # BUG FIX: was ``isinstance(tests, collections.Callable)``, which
        # raises AttributeError on Python 3.10+ (the ABC aliases were
        # removed from ``collections``); the builtin ``callable`` is the
        # equivalent, version-proof check.
        if callable(tests) and not is_suite:
            self.test_generator = tests()
        elif is_suite:
            # Suites need special treatment: they must be called like
            # tests for their setup/teardown to run (if any)
            self.addTests([tests])
            self.test_generator = None
        else:
            self.addTests(tests)
            self.test_generator = None

    _tests = property(_get_tests, _set_tests, None,
                      "Access the tests in this suite. Access is through a "
                      "generator, so iteration may not be repeatable.")
+
+
class ContextSuite(LazySuite):
    """A suite with context.

    A ContextSuite executes fixtures (setup and teardown functions or
    methods) for the context containing its tests.

    The context may be explicitly passed. If it is not, a context (or
    nested set of contexts) will be constructed by examining the tests
    in the suite.
    """
    failureException = unittest.TestCase.failureException
    was_setup = False
    was_torndown = False
    # Accepted fixture-attribute names, per context kind.  Packages
    # (modules with __path__) additionally accept the package* names.
    classSetup = ('setup_class', 'setup_all', 'setupClass', 'setupAll',
                  'setUpClass', 'setUpAll')
    classTeardown = ('teardown_class', 'teardown_all', 'teardownClass',
                     'teardownAll', 'tearDownClass', 'tearDownAll')
    moduleSetup = ('setup_module', 'setupModule', 'setUpModule', 'setup',
                   'setUp')
    moduleTeardown = ('teardown_module', 'teardownModule', 'tearDownModule',
                      'teardown', 'tearDown')
    packageSetup = ('setup_package', 'setupPackage', 'setUpPackage')
    packageTeardown = ('teardown_package', 'teardownPackage',
                       'tearDownPackage')

    def __init__(self, tests=(), context=None, factory=None,
                 config=None, resultProxy=None, can_split=True):
        # context: the package/module/class owning the fixtures.
        # factory: ContextSuiteFactory tracking shared fixture state.
        # resultProxy: result-wrapping factory, or None to disable.
        log.debug("Context suite for %s (%s) (%s)", tests, context, id(self))
        self.context = context
        self.factory = factory
        if config is None:
            config = Config()
        self.config = config
        self.resultProxy = resultProxy
        self.has_run = False
        self.can_split = can_split
        # Set to 'setup'/'teardown' when a fixture fails; see id().
        self.error_context = None
        super(ContextSuite, self).__init__(tests)

    def __repr__(self):
        return "<%s context=%s>" % (
            _strclass(self.__class__),
            getattr(self.context, '__name__', self.context))
    __str__ = __repr__

    def id(self):
        # Distinguish setup vs teardown failures in reported ids.
        if self.error_context:
            return '%s:%s' % (repr(self), self.error_context)
        else:
            return repr(self)

    def __hash__(self):
        return object.__hash__(self)

    # 2.3 compat -- force 2.4 call sequence
    def __call__(self, *arg, **kw):
        return self.run(*arg, **kw)

    def exc_info(self):
        """Hook for replacing error tuple output
        """
        return sys.exc_info()

    def _exc_info(self):
        """Bottleneck to fix up IronPython string exceptions
        """
        e = self.exc_info()
        if sys.platform == 'cli':
            if isinstance(e[0], StringException):
                # IronPython throws these StringExceptions, but
                # traceback checks type(etype) == str. Make a real
                # string here.
                e = (str(e[0]), e[1], e[2])

        return e

    def run(self, result):
        """Run tests in suite inside of suite fixtures.
        """
        # proxy the result for myself
        log.debug("suite %s (%s) run called, tests: %s", id(self), self, self._tests)
        #import pdb
        #pdb.set_trace()
        if self.resultProxy:
            result, orig = self.resultProxy(result, self), result
        else:
            result, orig = result, result
        try:
            self.setUp()
        except KeyboardInterrupt:
            raise
        except:
            # A failed setup aborts the suite; no tests run.
            self.error_context = 'setup'
            result.addError(self, self._exc_info())
            return
        try:
            for test in self._tests:
                if result.shouldStop:
                    log.debug("stopping")
                    break
                # each nose.case.Test will create its own result proxy
                # so the cases need the original result, to avoid proxy
                # chains
                test(orig)
        finally:
            # Teardown always runs once setup succeeded.
            self.has_run = True
            try:
                self.tearDown()
            except KeyboardInterrupt:
                raise
            except:
                self.error_context = 'teardown'
                result.addError(self, self._exc_info())

    def hasFixtures(self, ctx_callback=None):
        # True if my context -- or any ancestor context the factory
        # tracks for me -- defines any fixture attribute.
        context = self.context
        if context is None:
            return False
        if self.implementsAnyFixture(context, ctx_callback=ctx_callback):
            return True
        # My context doesn't have any, but its ancestors might
        factory = self.factory
        if factory:
            ancestors = factory.context.get(self, [])
            for ancestor in ancestors:
                if self.implementsAnyFixture(
                    ancestor, ctx_callback=ctx_callback):
                    return True
        return False

    def implementsAnyFixture(self, context, ctx_callback):
        # Candidate names depend on the kind of context; packages
        # (modules with __path__) also accept the package fixtures.
        if isclass(context):
            names = self.classSetup + self.classTeardown
        else:
            names = self.moduleSetup + self.moduleTeardown
            if hasattr(context, '__path__'):
                names += self.packageSetup + self.packageTeardown
        # If my context has any fixture attribute, I have fixtures
        fixt = False
        for m in names:
            if hasattr(context, m):
                fixt = True
                break
        if ctx_callback is None:
            return fixt
        return ctx_callback(context, fixt)

    def setUp(self):
        """Run setup fixtures for my context and any not-yet-set-up
        ancestor contexts (outermost first), exactly once.
        """
        log.debug("suite %s setUp called, tests: %s", id(self), self._tests)
        if not self:
            # I have no tests
            log.debug("suite %s has no tests", id(self))
            return
        if self.was_setup:
            log.debug("suite %s already set up", id(self))
            return
        context = self.context
        if context is None:
            return
        # before running my own context's setup, I need to
        # ask the factory if my context's contexts' setups have been run
        factory = self.factory
        if factory:
            # get a copy, since we'll be destroying it as we go
            ancestors = factory.context.get(self, [])[:]
            while ancestors:
                ancestor = ancestors.pop()
                log.debug("ancestor %s may need setup", ancestor)
                if ancestor in factory.was_setup:
                    continue
                log.debug("ancestor %s does need setup", ancestor)
                self.setupContext(ancestor)
            if not context in factory.was_setup:
                self.setupContext(context)
        else:
            self.setupContext(context)
        self.was_setup = True
        log.debug("completed suite setup")

    def setupContext(self, context):
        """Run the setup fixture(s) defined on one context object."""
        self.config.plugins.startContext(context)
        log.debug("%s setup context %s", self, context)
        if self.factory:
            if context in self.factory.was_setup:
                return
            # note that I ran the setup for this context, so that I'll run
            # the teardown in my teardown
            self.factory.was_setup[context] = self
        if isclass(context):
            names = self.classSetup
        else:
            names = self.moduleSetup
            if hasattr(context, '__path__'):
                names = self.packageSetup + names
        try_run(context, names)

    def shortDescription(self):
        if self.context is None:
            return "test suite"
        return "test suite for %s" % self.context

    def tearDown(self):
        """Run teardown fixtures for my context and any ancestor
        contexts whose setup *I* ran (innermost first).
        """
        log.debug('context teardown')
        if not self.was_setup or self.was_torndown:
            log.debug(
                "No reason to teardown (was_setup? %s was_torndown? %s)"
                % (self.was_setup, self.was_torndown))
            return
        self.was_torndown = True
        context = self.context
        if context is None:
            log.debug("No context to tear down")
            return

        # for each ancestor... if the ancestor was setup
        # and I did the setup, I can do teardown
        factory = self.factory
        if factory:
            ancestors = factory.context.get(self, []) + [context]
            for ancestor in ancestors:
                log.debug('ancestor %s may need teardown', ancestor)
                if not ancestor in factory.was_setup:
                    log.debug('ancestor %s was not setup', ancestor)
                    continue
                if ancestor in factory.was_torndown:
                    log.debug('ancestor %s already torn down', ancestor)
                    continue
                setup = factory.was_setup[ancestor]
                log.debug("%s setup ancestor %s", setup, ancestor)
                if setup is self:
                    self.teardownContext(ancestor)
        else:
            self.teardownContext(context)

    def teardownContext(self, context):
        """Run the teardown fixture(s) defined on one context object."""
        log.debug("%s teardown context %s", self, context)
        if self.factory:
            if context in self.factory.was_torndown:
                return
            self.factory.was_torndown[context] = self
        if isclass(context):
            names = self.classTeardown
        else:
            names = self.moduleTeardown
            if hasattr(context, '__path__'):
                names = self.packageTeardown + names
        try_run(context, names)
        self.config.plugins.stopContext(context)

    # FIXME the wrapping has to move to the factory?
    def _get_wrapped_tests(self):
        # Lazily wrap raw test cases in nose.case.Test; already-wrapped
        # tests and nested suites pass through untouched.
        for test in self._get_tests():
            if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
                yield test
            else:
                yield Test(test,
                           config=self.config,
                           resultProxy=self.resultProxy)

    _tests = property(_get_wrapped_tests, LazySuite._set_tests, None,
                      "Access the tests in this suite. Tests are returned "
                      "inside of a context wrapper.")
+
+
class ContextSuiteFactory(object):
    """Factory for ContextSuites. Called with a collection of tests,
    the factory decides on a hierarchy of contexts by introspecting
    the collection or the tests themselves to find the objects
    containing the test objects. It always returns one suite, but that
    suite may consist of a hierarchy of nested suites.
    """
    suiteClass = ContextSuite
    def __init__(self, config=None, suiteClass=None, resultProxy=_def):
        if config is None:
            config = Config()
        self.config = config
        if suiteClass is not None:
            self.suiteClass = suiteClass
        # Using a singleton to represent default instead of None allows
        # passing resultProxy=None to turn proxying off.
        if resultProxy is _def:
            resultProxy = ResultProxyFactory(config=config)
        self.resultProxy = resultProxy
        # Shared bookkeeping across all suites made by this factory:
        self.suites = {}        # context -> suites in that context
        self.context = {}       # suite -> [context + its ancestors]
        self.was_setup = {}     # context -> suite that ran its setup
        self.was_torndown = {}  # context -> suite that ran its teardown

    def __call__(self, tests, **kw):
        """Return ``ContextSuite`` for tests. ``tests`` may either
        be a callable (in which case the resulting ContextSuite will
        have no parent context and be evaluated lazily) or an
        iterable. In that case the tests will wrapped in
        nose.case.Test, be examined and the context of each found and a
        suite of suites returned, organized into a stack with the
        outermost suites belonging to the outermost contexts.
        """
        log.debug("Create suite for %s", tests)
        context = kw.pop('context', getattr(tests, 'context', None))
        log.debug("tests %s context %s", tests, context)
        if context is None:
            tests = self.wrapTests(tests)
            try:
                context = self.findContext(tests)
            except MixedContextError:
                return self.makeSuite(self.mixedSuites(tests), None, **kw)
        return self.makeSuite(tests, context, **kw)

    def ancestry(self, context):
        """Return the ancestry of the context (that is, all of the
        packages and modules containing the context), in order of
        descent with the outermost ancestor last.
        This method is a generator.
        """
        log.debug("get ancestry %s", context)
        if context is None:
            return
        # Methods include reference to module they are defined in, we
        # don't want that, instead want the module the class is in now
        # (classes are re-ancestored elsewhere).
        if hasattr(context, 'im_class'):
            context = context.__self__.__class__
        elif hasattr(context, '__self__'):
            context = context.__self__.__class__
        if hasattr(context, '__module__'):
            ancestors = context.__module__.split('.')
        elif hasattr(context, '__name__'):
            ancestors = context.__name__.split('.')[:-1]
        else:
            raise TypeError("%s has no ancestors?" % context)
        while ancestors:
            log.debug(" %s ancestors %s", context, ancestors)
            yield resolve_name('.'.join(ancestors))
            ancestors.pop()

    def findContext(self, tests):
        """Return the single context shared by all of ``tests``, or
        None; raise MixedContextError if they disagree.
        """
        # BUG FIX: was ``isinstance(tests, collections.Callable)``, which
        # raises AttributeError on Python 3.10+ (ABC aliases removed from
        # ``collections``); the builtin ``callable`` is equivalent.
        if callable(tests) or isinstance(tests, unittest.TestSuite):
            return None
        context = None
        for test in tests:
            # Don't look at suites for contexts, only tests
            ctx = getattr(test, 'context', None)
            if ctx is None:
                continue
            if context is None:
                context = ctx
            elif context != ctx:
                raise MixedContextError(
                    "Tests with different contexts in same suite! %s != %s"
                    % (context, ctx))
        return context

    def makeSuite(self, tests, context, **kw):
        """Build one suite for ``tests`` under ``context`` and register
        it (and the context's ancestry) in the factory's bookkeeping.
        """
        suite = self.suiteClass(
            tests, context=context, config=self.config, factory=self,
            resultProxy=self.resultProxy, **kw)
        if context is not None:
            self.suites.setdefault(context, []).append(suite)
            self.context.setdefault(suite, []).append(context)
            log.debug("suite %s has context %s", suite,
                      getattr(context, '__name__', None))
            for ancestor in self.ancestry(context):
                self.suites.setdefault(ancestor, []).append(suite)
                self.context[suite].append(ancestor)
                log.debug("suite %s has ancestor %s", suite, ancestor.__name__)
        return suite

    def mixedSuites(self, tests):
        """The complex case where there are tests that don't all share
        the same context. Groups tests into suites with common ancestors,
        according to the following (essentially tail-recursive) procedure:

        Starting with the context of the first test, if it is not
        None, look for tests in the remaining tests that share that
        ancestor. If any are found, group into a suite with that
        ancestor as the context, and replace the current suite with
        that suite. Continue this process for each ancestor of the
        first test, until all ancestors have been processed. At this
        point if any tests remain, recurse with those tests as the
        input, returning a list of the common suite (which may be the
        suite or test we started with, if no common tests were found)
        plus the results of recursion.
        """
        if not tests:
            return []
        head = tests.pop(0)
        if not tests:
            return [head] # short circuit when none are left to combine
        suite = head # the common ancestry suite, so far
        tail = tests[:]
        context = getattr(head, 'context', None)
        if context is not None:
            ancestors = [context] + [a for a in self.ancestry(context)]
            for ancestor in ancestors:
                common = [suite] # tests with ancestor in common, so far
                remain = [] # tests that remain to be processed
                for test in tail:
                    found_common = False
                    test_ctx = getattr(test, 'context', None)
                    if test_ctx is None:
                        remain.append(test)
                        continue
                    if test_ctx is ancestor:
                        common.append(test)
                        continue
                    for test_ancestor in self.ancestry(test_ctx):
                        if test_ancestor is ancestor:
                            common.append(test)
                            found_common = True
                            break
                    if not found_common:
                        remain.append(test)
                if common:
                    suite = self.makeSuite(common, ancestor)
                tail = self.mixedSuites(remain)
        return [suite] + tail

    def wrapTests(self, tests):
        """Wrap each raw test in nose.case.Test; suites, Tests and
        ContextLists get their own handling.
        """
        log.debug("wrap %s", tests)
        # BUG FIX: ``callable`` replaces isinstance(tests,
        # collections.Callable), which fails on Python 3.10+.
        if callable(tests) or isinstance(tests, unittest.TestSuite):
            log.debug("I won't wrap")
            return tests
        wrapped = []
        for test in tests:
            log.debug("wrapping %s", test)
            if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
                wrapped.append(test)
            elif isinstance(test, ContextList):
                wrapped.append(self.makeSuite(test, context=test.context))
            else:
                wrapped.append(
                    Test(test, config=self.config, resultProxy=self.resultProxy)
                )
        return wrapped
+
+
class ContextList(object):
    """Not quite a suite -- a group of tests in a context. This is used
    to hint the ContextSuiteFactory about what context the tests
    belong to, in cases where it may be ambiguous or missing.
    """
    def __init__(self, tests, context=None):
        self.context = context
        self.tests = tests

    def __iter__(self):
        # Delegate iteration straight to the wrapped test collection.
        for test in self.tests:
            yield test
+
+
class FinalizingSuiteWrapper(unittest.TestSuite):
    """Wraps suite and calls final function after suite has
    executed. Used to call final functions in cases (like running in
    the standard test runner) where test running is not under nose's
    control.
    """
    def __init__(self, suite, finalize):
        super(FinalizingSuiteWrapper, self).__init__()
        self.suite = suite
        self.finalize = finalize

    def __call__(self, *args, **kwargs):
        return self.run(*args, **kwargs)

    # 2.7 compat
    def __iter__(self):
        return iter(self.suite)

    def run(self, *args, **kwargs):
        """Run the wrapped suite, then finalize -- even on error."""
        try:
            return self.suite(*args, **kwargs)
        finally:
            self.finalize(*args, **kwargs)
+
+
+# backwards compat -- sort of
class TestDir:
    # Backwards-compat placeholder only: any attempt to use it fails.
    def __init__(*arg, **kw):
        message = (
            "TestDir is not usable with nose 0.10. The class is present "
            "in nose.suite for backwards compatibility purposes but it "
            "may not be used.")
        raise NotImplementedError(message)
+
+
class TestModule:
    # Backwards-compat placeholder only: any attempt to use it fails.
    def __init__(*arg, **kw):
        message = (
            "TestModule is not usable with nose 0.10. The class is present "
            "in nose.suite for backwards compatibility purposes but it "
            "may not be used.")
        raise NotImplementedError(message)
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/tools/__init__.py b/scripts/external_libs/nose-1.3.4/python3/nose/tools/__init__.py
new file mode 100644
index 00000000..74dab16a
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/tools/__init__.py
@@ -0,0 +1,15 @@
+"""
+Tools for testing
+-----------------
+
+nose.tools provides a few convenience functions to make writing tests
+easier. You don't have to use them; nothing in the rest of nose depends
+on any of these methods.
+
+"""
+from nose.tools.nontrivial import *
+from nose.tools.nontrivial import __all__ as nontrivial_all
+from nose.tools.trivial import *
+from nose.tools.trivial import __all__ as trivial_all
+
+__all__ = trivial_all + nontrivial_all
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/tools/nontrivial.py b/scripts/external_libs/nose-1.3.4/python3/nose/tools/nontrivial.py
new file mode 100644
index 00000000..62e221ee
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/tools/nontrivial.py
@@ -0,0 +1,151 @@
+"""Tools not exempt from being descended into in tracebacks"""
+
+import time
+
+
+__all__ = ['make_decorator', 'raises', 'set_trace', 'timed', 'with_setup',
+ 'TimeExpired', 'istest', 'nottest']
+
+
class TimeExpired(AssertionError):
    # Raised by @timed when a test exceeds its limit; subclasses
    # AssertionError so the timeout reads as a failure, not an error.
    pass
+
+
def make_decorator(func):
    """
    Wraps a test decorator so as to properly replicate metadata
    of the decorated function, including nose's additional stuff
    (namely, setup and teardown).
    """
    def decorate(newfunc):
        # 2.3-era wrappers could not set __name__ and carry the name in
        # compat_func_name instead; prefer that when present.
        name = getattr(func, 'compat_func_name', func.__name__)
        newfunc.__dict__ = func.__dict__  # share attrs (setup/teardown)
        newfunc.__doc__ = func.__doc__
        newfunc.__module__ = func.__module__
        if not hasattr(newfunc, 'compat_co_firstlineno'):
            newfunc.compat_co_firstlineno = func.__code__.co_firstlineno
        try:
            newfunc.__name__ = name
        except TypeError:
            # can't set func name in 2.3
            newfunc.compat_func_name = name
        return newfunc
    return decorate
+
+
def raises(*exceptions):
    """Test must raise one of expected exceptions to pass.

    Example use::

      @raises(TypeError, ValueError)
      def test_raises_type_error():
          raise TypeError("This test passes")

      @raises(Exception)
      def test_that_fails_by_passing():
          pass

    If you want to test many assertions about exceptions in a single test,
    you may want to use `assert_raises` instead.
    """
    valid = ' or '.join(e.__name__ for e in exceptions)
    def decorate(func):
        name = func.__name__
        def newfunc(*arg, **kw):
            try:
                func(*arg, **kw)
            except exceptions:
                # An expected exception: the test passes.
                return
            # No exception at all (anything unexpected propagates above):
            # that is a failure.
            message = "%s() did not raise %s" % (name, valid)
            raise AssertionError(message)
        newfunc = make_decorator(func)(newfunc)
        return newfunc
    return decorate
+
+
def set_trace():
    """Call pdb.set_trace in the calling frame, first restoring
    sys.stdout to the real output stream. Note that sys.stdout is NOT
    reset to whatever it was before the call once pdb is done!
    """
    import pdb
    import sys
    # The original saved sys.stdout to a local that was never used or
    # restored; the dead assignment has been removed.
    sys.stdout = sys.__stdout__
    pdb.Pdb().set_trace(sys._getframe().f_back)
+
+
def timed(limit):
    """Test must finish within specified time limit to pass.

    Example use::

      @timed(.1)
      def test_that_fails():
          time.sleep(.2)
    """
    def decorate(func):
        def newfunc(*arg, **kw):
            # Use the monotonic clock for durations: wall-clock
            # adjustments (NTP, DST) cannot skew the measurement.
            start = time.monotonic()
            result = func(*arg, **kw)
            end = time.monotonic()
            if end - start > limit:
                raise TimeExpired("Time limit (%s) exceeded" % limit)
            return result
        newfunc = make_decorator(func)(newfunc)
        return newfunc
    return decorate
+
+
def with_setup(setup=None, teardown=None):
    """Decorator to add setup and/or teardown methods to a test function::

      @with_setup(setup, teardown)
      def test_something():
          " ... "

    Note that `with_setup` is useful *only* for test functions, not for test
    methods or inside of TestCase subclasses.
    """
    def decorate(func, setup=setup, teardown=teardown):
        if setup:
            if hasattr(func, 'setup'):
                previous_setup = func.setup
                def chained_setup():
                    # New setup runs before any pre-existing setup.
                    setup()
                    previous_setup()
                func.setup = chained_setup
            else:
                func.setup = setup
        if teardown:
            if hasattr(func, 'teardown'):
                previous_teardown = func.teardown
                def chained_teardown():
                    # Pre-existing teardown runs before the new one.
                    previous_teardown()
                    teardown()
                func.teardown = chained_teardown
            else:
                func.teardown = teardown
        return func
    return decorate
+
+
def istest(func):
    """Decorator to mark a function or method as a test
    """
    setattr(func, '__test__', True)
    return func
+
+
def nottest(func):
    """Decorator to mark a function or method as *not* a test
    """
    setattr(func, '__test__', False)
    return func
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/tools/trivial.py b/scripts/external_libs/nose-1.3.4/python3/nose/tools/trivial.py
new file mode 100644
index 00000000..cf83efed
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/tools/trivial.py
@@ -0,0 +1,54 @@
+"""Tools so trivial that tracebacks should not descend into them
+
+We define the ``__unittest`` symbol in their module namespace so unittest will
+skip them when printing tracebacks, just as it does for their corresponding
+methods in ``unittest`` proper.
+
+"""
+import re
+import unittest
+
+
+__all__ = ['ok_', 'eq_']
+
+# Use the same flag as unittest itself to prevent descent into these functions:
+__unittest = 1
+
+
def ok_(expr, msg=None):
    """Shorthand for assert. Saves 3 whole characters!
    """
    if expr:
        return
    raise AssertionError(msg)
+
+
def eq_(a, b, msg=None):
    """Shorthand for 'assert a == b, "%r != %r" % (a, b)
    """
    if a == b:
        return
    raise AssertionError(msg or "%r != %r" % (a, b))
+
+
#
# Expose assert* from unittest.TestCase
# - give them pep8 style names
#
caps = re.compile('([A-Z])')

def pep8(name):
    # assertEqual -> assert_equal: insert '_' before each capital and
    # lowercase it.
    return caps.sub(lambda m: '_' + m.groups()[0].lower(), name)

class Dummy(unittest.TestCase):
    # Minimal TestCase: exists only so we can grab bound assert*
    # methods from an instance below.  nop is never actually invoked.
    def nop():
        pass
_t = Dummy('nop')

# Re-export every camelCase assert* helper (bound to the dummy
# instance) under a pep8 alias, e.g. assert_equal, assert_raises, and
# advertise each in __all__.  Names containing '_' (deprecated aliases)
# are skipped.
for at in [ at for at in dir(_t)
            if at.startswith('assert') and not '_' in at ]:
    pepd = pep8(at)
    vars()[pepd] = getattr(_t, at)
    __all__.append(pepd)

# Clean the helpers out of the module namespace; only the generated
# assert_* names remain public.
del Dummy
del _t
del pep8
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/twistedtools.py b/scripts/external_libs/nose-1.3.4/python3/nose/twistedtools.py
new file mode 100644
index 00000000..9239307e
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/twistedtools.py
@@ -0,0 +1,173 @@
+"""
+Twisted integration
+-------------------
+
+This module provides a very simple way to integrate your tests with the
+Twisted_ event loop.
+
+You must import this module *before* importing anything from Twisted itself!
+
+Example::
+
+ from nose.twistedtools import reactor, deferred
+
+ @deferred()
+ def test_resolve():
+ return reactor.resolve("www.python.org")
+
+Or, more realistically::
+
+ @deferred(timeout=5.0)
+ def test_resolve():
+ d = reactor.resolve("www.python.org")
+ def check_ip(ip):
+ assert ip == "67.15.36.43"
+ d.addCallback(check_ip)
+ return d
+
+.. _Twisted: http://twistedmatrix.com/trac/
+"""
+
+import sys
+from queue import Queue, Empty
+from nose.tools import make_decorator, TimeExpired
+
+__all__ = [
+ 'threaded_reactor', 'reactor', 'deferred', 'TimeExpired',
+ 'stop_reactor'
+]
+
+_twisted_thread = None
+
def threaded_reactor():
    """
    Start the Twisted reactor in a separate thread, if not already done.
    Returns (reactor, thread).
    The thread will automatically be destroyed when all the tests are done.

    Returns (None, None) when Twisted is not importable.
    """
    global _twisted_thread
    try:
        from twisted.internet import reactor
    except ImportError:
        return None, None
    if not _twisted_thread:
        from twisted.python import threadable
        from threading import Thread
        _twisted_thread = Thread(target=lambda: reactor.run( \
                installSignalHandlers=False))
        # Daemonize so the thread dies with the interpreter.
        # Thread.setDaemon() is deprecated since Python 3.10; assign the
        # attribute instead.
        _twisted_thread.daemon = True
        _twisted_thread.start()
    return reactor, _twisted_thread
+
# Export a global reactor variable at import time, as Twisted does.
# Both names are None when Twisted is not installed.
reactor, reactor_thread = threaded_reactor()
+
+
def stop_reactor():
    """Stop the reactor and join the reactor thread until it stops.
    Call this function in teardown at the module or package level to
    reset the twisted system after your tests. You *must* do this if
    you mix tests using these tools and tests using twisted.trial.
    """
    global _twisted_thread

    # NOTE: intentionally shadows the outer name; used only as the
    # callFromThread target so stop() runs inside the reactor thread.
    def stop_reactor():
        '''Helper for calling stop from within the thread.'''
        reactor.stop()

    reactor.callFromThread(stop_reactor)
    reactor_thread.join()
    # Cancel any still-pending delayed calls so they cannot leak into a
    # subsequent reactor run.
    for p in reactor.getDelayedCalls():
        if p.active():
            p.cancel()
    # Allow threaded_reactor() to start a fresh thread next time.
    _twisted_thread = None
+
+
def deferred(timeout=None):
    """
    By wrapping a test function with this decorator, you can return a
    twisted Deferred and the test will wait for the deferred to be triggered.
    The whole test function will run inside the Twisted event loop.

    The optional timeout parameter specifies the maximum duration of the test.
    The difference with timed() is that timed() will still wait for the test
    to end, while deferred() will stop the test when its timeout has expired.
    The latter is more desirable when dealing with network tests, because
    the result may actually never arrive.

    If the callback is triggered, the test has passed.
    If the errback is triggered or the timeout expires, the test has failed.

    Example::

        @deferred(timeout=5.0)
        def test_resolve():
            return reactor.resolve("www.python.org")

    Attention! If you combine this decorator with other decorators (like
    "raises"), deferred() must be called *first*!

    In other words, this is good::

        @raises(DNSLookupError)
        @deferred()
        def test_error():
            return reactor.resolve("xxxjhjhj.biz")

    and this is bad::

        @deferred()
        @raises(DNSLookupError)
        def test_error():
            return reactor.resolve("xxxjhjhj.biz")
    """
    reactor, reactor_thread = threaded_reactor()
    if reactor is None:
        raise ImportError("twisted is not available or could not be imported")
    # Check for common syntax mistake
    # (otherwise, tests can be silently ignored
    # if one writes "@deferred" instead of "@deferred()")
    try:
        timeout is None or timeout + 0
    except TypeError:
        raise TypeError("'timeout' argument must be a number or None")

    def decorate(func):
        def wrapper(*args, **kargs):
            # Results cross from the reactor thread back to the test
            # thread through this queue: None on success, sys.exc_info()
            # tuple on failure.
            q = Queue()
            def callback(value):
                q.put(None)
            def errback(failure):
                # Retrieve and save full exception info
                try:
                    failure.raiseException()
                except:
                    q.put(sys.exc_info())
            def g():
                try:
                    d = func(*args, **kargs)
                    try:
                        d.addCallbacks(callback, errback)
                    # Check for a common mistake and display a nice error
                    # message
                    except AttributeError:
                        raise TypeError("you must return a twisted Deferred "
                                        "from your test case!")
                # Catch exceptions raised in the test body (from the
                # Twisted thread)
                except:
                    q.put(sys.exc_info())
            reactor.callFromThread(g)
            try:
                error = q.get(timeout=timeout)
            except Empty:
                raise TimeExpired("timeout expired before end of test (%f s.)"
                                  % timeout)
            # Re-raise all exceptions
            if error is not None:
                exc_type, exc_value, tb = error
                raise exc_type(exc_value).with_traceback(tb)
        wrapper = make_decorator(func)(wrapper)
        return wrapper
    return decorate
+
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/usage.txt b/scripts/external_libs/nose-1.3.4/python3/nose/usage.txt
new file mode 100644
index 00000000..bc96894a
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/usage.txt
@@ -0,0 +1,115 @@
+nose collects tests automatically from python source files,
+directories and packages found in its working directory (which
+defaults to the current working directory). Any python source file,
+directory or package that matches the testMatch regular expression
+(by default: `(?:^|[\b_\.-])[Tt]est`) will be collected as a test (or
+source for collection of tests). In addition, all other packages
+found in the working directory will be examined for python source files
+or directories that match testMatch. Package discovery descends all
+the way down the tree, so package.tests and package.sub.tests and
+package.sub.sub2.tests will all be collected.
+
+Within a test directory or package, any python source file matching
+testMatch will be examined for test cases. Within a test module,
+functions and classes whose names match testMatch and TestCase
+subclasses with any name will be loaded and executed as tests. Tests
+may use the assert keyword or raise AssertionErrors to indicate test
+failure. TestCase subclasses may do the same or use the various
+TestCase methods available.
+
+**It is important to note that the default behavior of nose is to
+not include tests from files which are executable.** To include
+tests from such files, remove their executable bit or use
+the --exe flag (see 'Options' section below).
+
+Selecting Tests
+---------------
+
+To specify which tests to run, pass test names on the command line:
+
+ %prog only_test_this.py
+
+Test names specified may be file or module names, and may optionally
+indicate the test case to run by separating the module or file name
+from the test case name with a colon. Filenames may be relative or
+absolute. Examples:
+
+ %prog test.module
+ %prog another.test:TestCase.test_method
+ %prog a.test:TestCase
+ %prog /path/to/test/file.py:test_function
+
+You may also change the working directory where nose looks for tests
+by using the -w switch:
+
+ %prog -w /path/to/tests
+
+Note, however, that support for multiple -w arguments is now deprecated
+and will be removed in a future release. As of nose 0.10, you can get
+the same behavior by specifying the target directories *without*
+the -w switch:
+
+ %prog /path/to/tests /another/path/to/tests
+
+Further customization of test selection and loading is possible
+through the use of plugins.
+
+Test result output is identical to that of unittest, except for
+the additional features (error classes, and plugin-supplied
+features such as output capture and assert introspection) detailed
+in the options below.
+
+Configuration
+-------------
+
+In addition to passing command-line options, you may also put
+configuration options in your project's *setup.cfg* file, or a .noserc
+or nose.cfg file in your home directory. In any of these standard
+ini-style config files, you put your nosetests configuration in a
+``[nosetests]`` section. Options are the same as on the command line,
+with the -- prefix removed. For options that are simple switches, you
+must supply a value:
+
+ [nosetests]
+ verbosity=3
+ with-doctest=1
+
+All configuration files that are found will be loaded and their
+options combined. You can override the standard config file loading
+with the ``-c`` option.
+
+Using Plugins
+-------------
+
+There are numerous nose plugins available via easy_install and
+elsewhere. To use a plugin, just install it. The plugin will add
+command line options to nosetests. To verify that the plugin is installed,
+run:
+
+ nosetests --plugins
+
+You can add -v or -vv to that command to show more information
+about each plugin.
+
+If you are running nose.main() or nose.run() from a script, you
+can specify a list of plugins to use by passing a list of plugins
+with the plugins keyword argument.
+
+0.9 plugins
+-----------
+
+nose 1.0 can use SOME plugins that were written for nose 0.9. The
+default plugin manager inserts a compatibility wrapper around 0.9
+plugins that adapts the changed plugin api calls. However, plugins
+that access nose internals are likely to fail, especially if they
+attempt to access test case or test suite classes. For example,
+plugins that try to determine if a test passed to startTest is an
+individual test or a suite will fail, partly because suites are no
+longer passed to startTest and partly because it's likely that the
+plugin is trying to find out if the test is an instance of a class
+that no longer exists.
+
+0.10 and 0.11 plugins
+---------------------
+
+All plugins written for nose 0.10 and 0.11 should work with nose 1.0.
diff --git a/scripts/external_libs/nose-1.3.4/python3/nose/util.py b/scripts/external_libs/nose-1.3.4/python3/nose/util.py
new file mode 100644
index 00000000..62320eb5
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python3/nose/util.py
@@ -0,0 +1,660 @@
+"""Utility functions and classes used by nose internally.
+"""
+import inspect
+import itertools
+import logging
+import os
+import re
+import sys
+import types
+import unittest
+from nose.pyversion import ClassType, TypeType, isgenerator, ismethod
+
+
log = logging.getLogger('nose')

# Matches a legal (possibly dotted) Python identifier.
ident_re = re.compile(r'^[A-Za-z_][A-Za-z0-9_.]*$')
class_types = (ClassType, TypeType)
# Directory entries never worth descending into / collecting.
skip_pattern = r"(?:\.svn)|(?:[^.]+\.py[co])|(?:.*~)|(?:.*\$py\.class)|(?:__pycache__)"

# Compatibility shim so "from nose.util import set" works on ancient
# Pythons; a no-op on any modern interpreter.
try:
    set()
    set = set # make from nose.util import set happy
except NameError:
    try:
        from sets import Set as set
    except ImportError:
        pass
+
+
def ls_tree(dir_path="",
            skip_pattern=skip_pattern,
            indent="|-- ", branch_indent="| ",
            last_indent="`-- ", last_branch_indent=" "):
    """Render the directory tree rooted at dir_path as a single string."""
    # TODO: empty directories look like non-directory files
    rendered = _ls_tree_lines(dir_path, skip_pattern,
                              indent, branch_indent,
                              last_indent, last_branch_indent)
    return "\n".join(rendered)
+
+
def _ls_tree_lines(dir_path, skip_pattern,
                   indent, branch_indent, last_indent, last_branch_indent):
    # Recursive worker for ls_tree(): yields one rendered line per entry.
    if dir_path == "":
        dir_path = os.getcwd()

    lines = []

    names = os.listdir(dir_path)
    names.sort()
    dirs, nondirs = [], []
    for name in names:
        if re.match(skip_pattern, name):
            continue
        if os.path.isdir(os.path.join(dir_path, name)):
            dirs.append(name)
        else:
            nondirs.append(name)

    # list non-directories first
    entries = list(itertools.chain([(name, False) for name in nondirs],
                                   [(name, True) for name in dirs]))
    def ls_entry(name, is_dir, ind, branch_ind):
        if not is_dir:
            yield ind + name
        else:
            path = os.path.join(dir_path, name)
            # Symlinked directories are skipped entirely (avoids cycles).
            if not os.path.islink(path):
                yield ind + name
                subtree = _ls_tree_lines(path, skip_pattern,
                                         indent, branch_indent,
                                         last_indent, last_branch_indent)
                for x in subtree:
                    yield branch_ind + x
    # All but the last entry take the regular connectors; the final entry
    # gets the "last" connectors so the tree closes visually.
    for name, is_dir in entries[:-1]:
        for line in ls_entry(name, is_dir, indent, branch_indent):
            yield line
    if entries:
        name, is_dir = entries[-1]
        for line in ls_entry(name, is_dir, last_indent, last_branch_indent):
            yield line
+
+
def absdir(path):
    """Return absolute, normalized path to directory, if it exists; None
    otherwise.
    """
    if not os.path.isabs(path):
        joined = os.path.join(os.getcwd(), path)
        path = os.path.normpath(os.path.abspath(joined))
    if path is not None and os.path.isdir(path):
        return path
    return None
+
+
def absfile(path, where=None):
    """Return absolute, normalized path to file (optionally in directory
    where), or None if the file can't be found either in where or the current
    working directory.
    """
    orig = path
    if where is None:
        where = os.getcwd()
    # 'where' may also be a list/tuple of candidate directories; the
    # first directory that yields a hit wins.
    if isinstance(where, list) or isinstance(where, tuple):
        for maybe_path in where:
            maybe_abs = absfile(path, maybe_path)
            if maybe_abs is not None:
                return maybe_abs
        return None
    if not os.path.isabs(path):
        path = os.path.normpath(os.path.abspath(os.path.join(where, path)))
    if path is None or not os.path.exists(path):
        if where != os.getcwd():
            # try the cwd instead
            path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),
                                                                 orig)))
    if path is None or not os.path.exists(path):
        return None
    if os.path.isdir(path):
        # might want an __init__.py from package
        init = os.path.join(path,'__init__.py')
        if os.path.isfile(init):
            return init
    elif os.path.isfile(path):
        return path
    return None
+
+
def anyp(predicate, iterable):
    """Return True if predicate(item) is true for any item in iterable,
    False otherwise.

    Equivalent to the built-in any() over a generator; kept as a named
    helper for API compatibility.
    """
    return any(predicate(item) for item in iterable)
+
+
def file_like(name):
    """A name is file-like if it is a path that exists, or it has a
    directory part, or it ends in .py, or it isn't a legal python
    identifier.

    Note: may return a truthy non-bool (the directory part), matching
    the original or-chain semantics.
    """
    if os.path.exists(name):
        return True
    dir_part = os.path.dirname(name)
    if dir_part:
        return dir_part
    if name.endswith('.py'):
        return True
    return not ident_re.match(os.path.splitext(name)[0])
+
+
def func_lineno(func):
    """Get the line number of a function. First looks for
    compat_co_firstlineno, then func_code.co_first_lineno.
    Returns -1 when neither is available.
    """
    try:
        return func.compat_co_firstlineno
    except AttributeError:
        pass
    try:
        return func.__code__.co_firstlineno
    except AttributeError:
        return -1
+
+
def isclass(obj):
    """Is obj a class? Inspect's isclass is too liberal and returns True
    for objects that can't be subclasses of anything.
    """
    kind = type(obj)
    if kind in class_types:
        return True
    return issubclass(kind, type)


# backwards compat (issue #64)
is_generator = isgenerator
+
+
def ispackage(path):
    """
    Is this path a package directory?

    >>> ispackage('nose')
    True
    >>> ispackage('unit_tests')
    False
    >>> ispackage('nose/plugins')
    True
    >>> ispackage('nose/loader.py')
    False
    """
    if not os.path.isdir(path):
        return False
    # The final path component must be a legal python identifier...
    end = os.path.basename(path)
    if not ident_re.match(end):
        return False
    # ...and an __init__ module must be present.
    candidates = ['__init__.py', '__init__.pyc', '__init__.pyo']
    if sys.platform.startswith('java'):
        candidates.append('__init__$py.class')
    for init in candidates:
        if os.path.isfile(os.path.join(path, init)):
            return True
    return False
+
+
def isproperty(obj):
    """
    Is this a property?

    >>> class Foo:
    ...     def got(self):
    ...         return 2
    ...     def get(self):
    ...         return 1
    ...     get = property(get)

    >>> isproperty(Foo.got)
    False
    >>> isproperty(Foo.get)
    True
    """
    # Exact type match (not isinstance): property subclasses don't count.
    return type(obj) is property
+
+
def getfilename(package, relativeTo=None):
    """Find the python source file for a package, relative to a
    particular directory (defaults to current working directory if not
    given).
    """
    if relativeTo is None:
        relativeTo = os.getcwd()
    base = os.path.join(relativeTo, os.sep.join(package.split('.')))
    # A package directory's __init__.py wins over a same-named module.
    for candidate in (base + '/__init__.py', base + '.py'):
        if os.path.exists(candidate):
            return candidate
    return None
+
+
def getpackage(filename):
    """
    Find the full dotted package name for a given python source file
    name. Returns None if the file is not a python source file.

    >>> getpackage('foo.py')
    'foo'
    >>> getpackage('biff/baf.py')
    'baf'
    >>> getpackage('nose/util.py')
    'nose.util'

    Works for directories too.

    >>> getpackage('nose')
    'nose'
    >>> getpackage('nose/plugins')
    'nose.plugins'

    And __init__ files stuck onto directories

    >>> getpackage('nose/plugins/__init__.py')
    'nose.plugins'

    Absolute paths also work.

    >>> path = os.path.abspath(os.path.join('nose', 'plugins'))
    >>> getpackage(path)
    'nose.plugins'
    """
    src_file = src(filename)
    if (os.path.isdir(src_file) or not src_file.endswith('.py')) and not ispackage(src_file):
        return None
    base, ext = os.path.splitext(os.path.basename(src_file))
    if base == '__init__':
        # Directory-style package: the directory name itself is the
        # trailing module part, collected by the walk below.
        mod_parts = []
    else:
        mod_parts = [base]
    # Walk upward, collecting enclosing package names until we reach a
    # directory that is not itself a package.
    path, part = os.path.split(os.path.split(src_file)[0])
    while part:
        if ispackage(os.path.join(path, part)):
            mod_parts.append(part)
        else:
            break
        path, part = os.path.split(path)
    mod_parts.reverse()
    return '.'.join(mod_parts)
+
+
def ln(label):
    """Draw a 70-char-wide divider, with label in the middle.

    >>> ln('hello there')
    '---------------------------- hello there -----------------------------'
    """
    decorated = ' %s ' % label
    side = (70 - len(decorated)) // 2
    line = '-' * side + decorated + '-' * side
    # Odd-length labels leave the line one char short; pad on the right.
    return line + '-' * (70 - len(line))
+
+
def resolve_name(name, module=None):
    """Resolve a dotted name to a module and its parts. This is stolen
    wholesale from unittest.TestLoader.loadTestByName.

    >>> resolve_name('nose.util') #doctest: +ELLIPSIS
    <module 'nose.util' from...>
    >>> resolve_name('nose.util.resolve_name') #doctest: +ELLIPSIS
    <function resolve_name at...>
    """
    parts = name.split('.')
    if module is None:
        # Import the longest importable prefix of the dotted name,
        # trimming trailing components until the import succeeds.
        remaining = parts[:]
        while remaining:
            try:
                log.debug("__import__ %s", name)
                module = __import__('.'.join(remaining))
                break
            except ImportError:
                if len(remaining) == 1:
                    raise
                remaining.pop()
        parts = parts[1:]
    obj = module
    log.debug("resolve: %s, %s, %s, %s", parts, name, obj, module)
    for part in parts:
        obj = getattr(obj, part)
    return obj
+
+
def split_test_name(test):
    """Split a test name into a 3-tuple containing file, module, and callable
    names, any of which (but not all) may be blank.

    Test names are in the form:

    file_or_module:callable

    Either side of the : may be dotted. To change the splitting behavior, you
    can alter nose.util.split_test_re.
    """
    norm = os.path.normpath
    file_or_mod = test
    fn = None
    if not ':' in test:
        # only a file or mod part
        if file_like(test):
            return (norm(test), None, None)
        else:
            return (None, test, None)

    # could be path|mod:callable, or a : in the file path someplace
    head, tail = os.path.split(test)
    if not head:
        # this is a case like 'foo:bar' -- generally a module
        # name followed by a callable, but also may be a windows
        # drive letter followed by a path
        try:
            file_or_mod, fn = test.split(':')
            if file_like(fn):
                # must be a funny path
                file_or_mod, fn = test, None
        except ValueError:
            # more than one : in the test
            # this is a case like c:\some\path.py:a_test
            parts = test.split(':')
            if len(parts[0]) == 1:
                # single-character first part: treat it as a drive letter
                file_or_mod, fn = ':'.join(parts[:-1]), parts[-1]
            else:
                # nonsense like foo:bar:baz
                raise ValueError("Test name '%s' could not be parsed. Please "
                                 "format test names as path:callable or "
                                 "module:callable." % (test,))
    elif not tail:
        # this is a case like 'foo:bar/'
        # : must be part of the file path, so ignore it
        file_or_mod = test
    else:
        if ':' in tail:
            file_part, fn = tail.split(':')
        else:
            file_part = tail
        file_or_mod = os.sep.join([head, file_part])
    if file_or_mod:
        if file_like(file_or_mod):
            return (norm(file_or_mod), None, fn)
        else:
            return (None, file_or_mod, fn)
    else:
        return (None, None, fn)
split_test_name.__test__ = False # do not collect
+
+
def test_address(test):
    """Find the test address for a test, which may be a module, filename,
    class, method or function.

    Returns a 3-tuple (file, module, call); elements may be None.
    """
    if hasattr(test, "address"):
        return test.address()
    # type-based polymorphism sucks in general, but I believe is
    # appropriate here
    t = type(test)
    file = module = call = None
    if t == types.ModuleType:
        file = getattr(test, '__file__', None)
        module = getattr(test, '__name__', None)
        return (src(file), module, call)
    if t == types.FunctionType or issubclass(t, type) or t == type:
        # Functions and classes: locate the defining module's file.
        module = getattr(test, '__module__', None)
        if module is not None:
            m = sys.modules[module]
            file = getattr(m, '__file__', None)
            if file is not None:
                file = os.path.abspath(file)
        call = getattr(test, '__name__', None)
        return (src(file), module, call)
    if t == types.MethodType:
        # Bound method: address of its class, plus "Class.method".
        cls_adr = test_address(test.__self__.__class__)
        return (src(cls_adr[0]), cls_adr[1],
                "%s.%s" % (cls_adr[2], test.__name__))
    # handle unittest.TestCase instances
    if isinstance(test, unittest.TestCase):
        if (hasattr(test, '_FunctionTestCase__testFunc') # pre 2.7
            or hasattr(test, '_testFunc')): # 2.7
            # unittest FunctionTestCase
            try:
                return test_address(test._FunctionTestCase__testFunc)
            except AttributeError:
                return test_address(test._testFunc)
        # regular unittest.TestCase
        cls_adr = test_address(test.__class__)
        # 2.5 compat: __testMethodName changed to _testMethodName
        try:
            method_name = test._TestCase__testMethodName
        except AttributeError:
            method_name = test._testMethodName
        return (src(cls_adr[0]), cls_adr[1],
                "%s.%s" % (cls_adr[2], method_name))
    if (hasattr(test, '__class__') and
        test.__class__.__module__ not in ('__builtin__', 'builtins')):
        # Arbitrary user object: fall back to the address of its class.
        return test_address(test.__class__)
    raise TypeError("I don't know what %s is (%s)" % (test, t))
test_address.__test__ = False # do not collect
+
+
def try_run(obj, names):
    """Given a list of possible method names, try to run them with the
    provided object. Keep going until something works. Used to run
    setup/teardown methods for module, package, and function tests.

    Returns the fixture's return value, or None when no name matched.
    """
    for name in names:
        func = getattr(obj, name, None)
        if func is not None:
            if type(obj) == types.ModuleType:
                # py.test compatibility
                if isinstance(func, types.FunctionType):
                    # BUG FIX: inspect.getargspec() was removed in
                    # Python 3.11; getfullargspec() is the replacement.
                    args = inspect.getfullargspec(func).args
                else:
                    # Not a function. If it's callable, call it anyway
                    if hasattr(func, '__call__') and not inspect.ismethod(func):
                        func = func.__call__
                    try:
                        args = inspect.getfullargspec(func).args
                        args.pop(0) # pop the self off
                    except TypeError:
                        raise TypeError("Attribute %s of %r is not a python "
                                        "function. Only functions or callables"
                                        " may be used as fixtures." %
                                        (name, obj))
                if len(args):
                    # Fixture accepts an argument: pass the module itself.
                    log.debug("call fixture %s.%s(%s)", obj, name, obj)
                    return func(obj)
            log.debug("call fixture %s.%s", obj, name)
            return func()
+
+
def src(filename):
    """Find the python source file for a .pyc, .pyo or $py.class file on
    jython. Returns the filename provided if it is not a python source
    file.
    """
    if filename is None:
        return None
    if sys.platform.startswith('java') and filename.endswith('$py.class'):
        return filename[:-9] + '.py'
    base, ext = os.path.splitext(filename)
    if ext not in ('.pyc', '.pyo', '.py'):
        return filename
    return base + '.py'
+
+
def regex_last_key(regex):
    """Sort key function factory that puts items that match a
    regular expression last.

    >>> from nose.config import Config
    >>> from nose.pyversion import sort_list
    >>> c = Config()
    >>> regex = c.testMatch
    >>> entries = ['.', '..', 'a_test', 'src', 'lib', 'test', 'foo.py']
    >>> sort_list(entries, regex_last_key(regex))
    >>> entries
    ['.', '..', 'foo.py', 'lib', 'src', 'a_test', 'test']
    """
    def key(obj):
        # Matching entries sort after non-matching ones, then lexically.
        matched = 1 if regex.search(obj) else 0
        return (matched, obj)
    return key
+
+
def tolist(val):
    """Convert a value that may be a list or a (possibly comma-separated)
    string into a list. The exception: None is returned as None, not [None].

    >>> tolist(["one", "two"])
    ['one', 'two']
    >>> tolist("hello")
    ['hello']
    >>> tolist("separate,values, with, commas, spaces , are ,ok")
    ['separate', 'values', 'with', 'commas', 'spaces', 'are', 'ok']
    """
    if val is None:
        return None
    # Already list-like? (anything supporting extend, e.g. a real list)
    if hasattr(val, 'extend'):
        val.extend([])
        return val
    # Probably a string: split on commas with surrounding whitespace.
    try:
        return re.split(r'\s*,\s*', val)
    except TypeError:
        # who knows...
        return list(val)
+
+
class odict(dict):
    """Simple ordered dict implementation, based on:

    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747

    Insertion order is tracked in the auxiliary list ``self._keys``.
    """
    def __init__(self, *arg, **kw):
        self._keys = []
        super(odict, self).__init__(*arg, **kw)
        # BUG FIX: keys supplied at construction time were never recorded
        # in _keys (dict.__init__ does not call __setitem__), which made
        # keys()/items()/values() come back empty. Record them here.
        for key in super(odict, self).keys():
            if key not in self._keys:
                self._keys.append(key)

    def __delitem__(self, key):
        super(odict, self).__delitem__(key)
        self._keys.remove(key)

    def __setitem__(self, key, item):
        super(odict, self).__setitem__(key, item)
        if key not in self._keys:
            self._keys.append(key)

    def __str__(self):
        return "{%s}" % ', '.join(["%r: %r" % (k, v) for k, v in list(self.items())])

    def clear(self):
        super(odict, self).clear()
        self._keys = []

    def copy(self):
        # BUG FIX: dict.copy() returns a plain dict, so the previous
        # "d._keys = ..." raised AttributeError. Build a real odict and
        # let update() replicate both items and key order.
        d = odict()
        d.update(self)
        return d

    def items(self):
        return list(zip(self._keys, list(self.values())))

    def keys(self):
        # Return a copy so callers cannot mutate the ordering state.
        return self._keys[:]

    def setdefault(self, key, failobj=None):
        item = super(odict, self).setdefault(key, failobj)
        if key not in self._keys:
            self._keys.append(key)
        return item

    def update(self, dict):
        super(odict, self).update(dict)
        for key in list(dict.keys()):
            if key not in self._keys:
                self._keys.append(key)

    def values(self):
        return list(map(self.get, self._keys))
+
+
def transplant_func(func, module):
    """
    Make a function imported from module A appear as if it is located
    in module B.

    >>> from pprint import pprint
    >>> pprint.__module__
    'pprint'
    >>> pp = transplant_func(pprint, __name__)
    >>> pp.__module__
    'nose.util'

    The original function is not modified.

    >>> pprint.__module__
    'pprint'

    Calling the transplanted function calls the original.

    >>> pp([1, 2])
    [1, 2]
    >>> pprint([1,2])
    [1, 2]

    """
    from nose.tools import make_decorator
    if isgenerator(func):
        # Preserve generator-ness: the wrapper must itself be a
        # generator function so the transplant is treated the same way.
        def newfunc(*arg, **kw):
            for v in func(*arg, **kw):
                yield v
    else:
        def newfunc(*arg, **kw):
            return func(*arg, **kw)

    newfunc = make_decorator(func)(newfunc)
    newfunc.__module__ = module
    return newfunc
+
+
def transplant_class(cls, module):
    """
    Make a class appear to reside in `module`, rather than the module in which
    it is actually defined.

    >>> from nose.failure import Failure
    >>> Failure.__module__
    'nose.failure'
    >>> Nf = transplant_class(Failure, __name__)
    >>> Nf.__module__
    'nose.util'
    >>> Nf.__name__
    'Failure'

    """
    # Subclass, then masquerade: adopt the target module and keep the
    # original class name. The original class is left untouched.
    class C(cls):
        pass
    C.__module__, C.__name__ = module, cls.__name__
    return C
+
+
def safe_str(val, encoding='utf-8'):
    """Return str(val), degrading gracefully when str() raises
    UnicodeEncodeError.

    For exceptions whose str() fails, fall back to joining the safe_str
    of each of the exception's args.
    """
    try:
        return str(val)
    except UnicodeEncodeError:
        if isinstance(val, Exception):
            # BUG FIX: exceptions are not iterable in Python 3 (the old
            # "for arg in val" was Python 2 behavior); iterate .args.
            return ' '.join([safe_str(arg, encoding)
                             for arg in val.args])
        return str(val).encode(encoding)
+
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
diff --git a/scripts/external_libs/pyyaml-3.11/python2/yaml/__init__.py b/scripts/external_libs/pyyaml-3.11/python2/yaml/__init__.py
new file mode 100644
index 00000000..76e19e13
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python2/yaml/__init__.py
@@ -0,0 +1,315 @@
+
+from error import *
+
+from tokens import *
+from events import *
+from nodes import *
+
+from loader import *
+from dumper import *
+
__version__ = '3.11'

# Prefer the libyaml-backed C loaders/dumpers when the extension module
# is available; fall back silently to the pure-Python implementation.
try:
    from cyaml import *
    __with_libyaml__ = True
except ImportError:
    __with_libyaml__ = False
+
def scan(stream, Loader=Loader):
    """
    Scan a YAML stream and produce scanning tokens.
    """
    loader = Loader(stream)
    # Generator: the finally clause disposes the loader even when the
    # consumer abandons iteration early (generator close).
    try:
        while loader.check_token():
            yield loader.get_token()
    finally:
        loader.dispose()

def parse(stream, Loader=Loader):
    """
    Parse a YAML stream and produce parsing events.
    """
    loader = Loader(stream)
    try:
        while loader.check_event():
            yield loader.get_event()
    finally:
        loader.dispose()
+
def compose(stream, Loader=Loader):
    """
    Parse the first YAML document in a stream
    and produce the corresponding representation tree.

    Single-document counterpart of compose_all().
    """
    loader = Loader(stream)
    try:
        return loader.get_single_node()
    finally:
        loader.dispose()

def compose_all(stream, Loader=Loader):
    """
    Parse all YAML documents in a stream
    and produce corresponding representation trees.
    """
    # Generator: one representation tree per YAML document.
    loader = Loader(stream)
    try:
        while loader.check_node():
            yield loader.get_node()
    finally:
        loader.dispose()
+
def load(stream, Loader=Loader):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.
    """
    # NOTE(review): the default Loader resolves non-basic YAML tags and
    # can construct arbitrary Python objects; prefer safe_load() for
    # untrusted input.
    loader = Loader(stream)
    try:
        return loader.get_single_data()
    finally:
        loader.dispose()

def load_all(stream, Loader=Loader):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.
    """
    # Generator: one Python object per YAML document.
    loader = Loader(stream)
    try:
        while loader.check_data():
            yield loader.get_data()
    finally:
        loader.dispose()

def safe_load(stream):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.
    Resolve only basic YAML tags.
    """
    return load(stream, SafeLoader)

def safe_load_all(stream):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.
    Resolve only basic YAML tags.
    """
    return load_all(stream, SafeLoader)
+
def emit(events, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None):
    """
    Emit YAML parsing events into a stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        # No target stream: buffer the output in memory and return it.
        from StringIO import StringIO
        stream = StringIO()
        getvalue = stream.getvalue
    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break)
    try:
        for event in events:
            dumper.emit(event)
    finally:
        dumper.dispose()
    if getvalue:
        return getvalue()
+
def serialize_all(nodes, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding='utf-8', explicit_start=None, explicit_end=None,
        version=None, tags=None):
    """
    Serialize a sequence of representation trees into a YAML stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        # Unicode output requires StringIO; encoded (byte) output can use
        # the faster cStringIO (Python 2 branch of PyYAML).
        if encoding is None:
            from StringIO import StringIO
        else:
            from cStringIO import StringIO
        stream = StringIO()
        getvalue = stream.getvalue
    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end)
    try:
        dumper.open()
        for node in nodes:
            dumper.serialize(node)
        dumper.close()
    finally:
        dumper.dispose()
    if getvalue:
        return getvalue()

def serialize(node, stream=None, Dumper=Dumper, **kwds):
    """
    Serialize a representation tree into a YAML stream.
    If stream is None, return the produced string instead.
    """
    return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
def dump_all(documents, stream=None, Dumper=Dumper,
        default_style=None, default_flow_style=None,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding='utf-8', explicit_start=None, explicit_end=None,
        version=None, tags=None):
    """
    Serialize a sequence of Python objects into a YAML stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        # See serialize_all(): StringIO flavor is chosen by encoding.
        if encoding is None:
            from StringIO import StringIO
        else:
            from cStringIO import StringIO
        stream = StringIO()
        getvalue = stream.getvalue
    dumper = Dumper(stream, default_style=default_style,
            default_flow_style=default_flow_style,
            canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end)
    try:
        dumper.open()
        for data in documents:
            dumper.represent(data)
        dumper.close()
    finally:
        dumper.dispose()
    if getvalue:
        return getvalue()

def dump(data, stream=None, Dumper=Dumper, **kwds):
    """
    Serialize a Python object into a YAML stream.
    If stream is None, return the produced string instead.
    """
    return dump_all([data], stream, Dumper=Dumper, **kwds)

def safe_dump_all(documents, stream=None, **kwds):
    """
    Serialize a sequence of Python objects into a YAML stream.
    Produce only basic YAML tags.
    If stream is None, return the produced string instead.
    """
    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)

def safe_dump(data, stream=None, **kwds):
    """
    Serialize a Python object into a YAML stream.
    Produce only basic YAML tags.
    If stream is None, return the produced string instead.
    """
    return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=Loader, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=Loader):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_constructor(tag, constructor)
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(object):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __metaclass__ = YAMLObjectMetaclass
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = Loader
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+ from_yaml = classmethod(from_yaml)
+
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+ to_yaml = classmethod(to_yaml)
+
diff --git a/scripts/external_libs/pyyaml-3.11/python2/yaml/composer.py b/scripts/external_libs/pyyaml-3.11/python2/yaml/composer.py
new file mode 100644
index 00000000..06e5ac78
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python2/yaml/composer.py
@@ -0,0 +1,139 @@
+
+__all__ = ['Composer', 'ComposerError']
+
+from error import MarkedYAMLError
+from events import *
+from nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer(object):
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+ # If there are more documents available?
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor.encode('utf-8'), event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+ raise ComposerError("found duplicate anchor %r; first occurence"
+ % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
+ "second occurence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
diff --git a/scripts/external_libs/pyyaml-3.11/python2/yaml/constructor.py b/scripts/external_libs/pyyaml-3.11/python2/yaml/constructor.py
new file mode 100644
index 00000000..635faac3
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python2/yaml/constructor.py
@@ -0,0 +1,675 @@
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor(object):
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+ # If there are more documents available?
+ return self.check_node()
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = generator.next()
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ try:
+ hash(key)
+ except TypeError, exc:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unacceptable key (%s)" % exc, key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ def add_constructor(cls, tag, constructor):
+ if not 'yaml_constructors' in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+ add_constructor = classmethod(add_constructor)
+
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if not 'yaml_multi_constructors' in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+ add_multi_constructor = classmethod(add_multi_constructor)
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == u'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return BaseConstructor.construct_scalar(self, node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == u'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == u'tag:yaml.org,2002:value':
+ key_node.tag = u'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return BaseConstructor.construct_mapping(self, node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ u'yes': True,
+ u'no': False,
+ u'true': True,
+ u'false': False,
+ u'on': True,
+ u'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
+
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return str(value).decode('base64')
+ except (binascii.Error, UnicodeEncodeError), exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ ur'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return value.encode('ascii')
+ except UnicodeEncodeError:
+ return value
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class Constructor(SafeConstructor):
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node).encode('utf-8')
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_long(self, node):
+ return long(self.construct_yaml_int(node))
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ try:
+ __import__(name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if u'.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = '__builtin__'
+ object_name = name
+ try:
+ __import__(module_name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r" % (object_name.encode('utf-8'),
+ module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ class classobj: pass
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if newobj and isinstance(cls, type(self.classobj)) \
+ and not args and not kwds:
+ instance = self.classobj()
+ instance.__class__ = cls
+ return instance
+ elif newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ setattr(object, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created, check make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/none',
+ Constructor.construct_yaml_null)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/bool',
+ Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/str',
+ Constructor.construct_python_str)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/unicode',
+ Constructor.construct_python_unicode)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/int',
+ Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/long',
+ Constructor.construct_python_long)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/float',
+ Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/complex',
+ Constructor.construct_python_complex)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/list',
+ Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/tuple',
+ Constructor.construct_python_tuple)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/dict',
+ Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/name:',
+ Constructor.construct_python_name)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/module:',
+ Constructor.construct_python_module)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object:',
+ Constructor.construct_python_object)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/apply:',
+ Constructor.construct_python_object_apply)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/new:',
+ Constructor.construct_python_object_new)
+
diff --git a/scripts/external_libs/pyyaml-3.11/python2/yaml/cyaml.py b/scripts/external_libs/pyyaml-3.11/python2/yaml/cyaml.py
new file mode 100644
index 00000000..68dcd751
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python2/yaml/cyaml.py
@@ -0,0 +1,85 @@
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+from _yaml import CParser, CEmitter
+
+from constructor import *
+
+from serializer import *
+from representer import *
+
+from resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/scripts/external_libs/pyyaml-3.11/python2/yaml/dumper.py b/scripts/external_libs/pyyaml-3.11/python2/yaml/dumper.py
new file mode 100644
index 00000000..f811d2c9
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python2/yaml/dumper.py
@@ -0,0 +1,62 @@
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from emitter import *
+from serializer import *
+from representer import *
+from resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/scripts/external_libs/pyyaml-3.11/python2/yaml/emitter.py b/scripts/external_libs/pyyaml-3.11/python2/yaml/emitter.py
new file mode 100644
index 00000000..e5bcdccc
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python2/yaml/emitter.py
@@ -0,0 +1,1140 @@
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
+
+__all__ = ['Emitter', 'EmitterError']
+
+from error import YAMLError
+from events import *
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis(object):
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter(object):
+
+ DEFAULT_TAG_PREFIXES = {
+ u'!' : u'!',
+ u'tag:yaml.org,2002:' : u'!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+ # Encoding can be overriden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = u'\n'
+ if line_break in [u'\r', u'\n', u'\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for a few next events before emitting.
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
+
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not getattr(self.stream, 'encoding', None):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = self.event.tags.keys()
+ handles.sort()
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator(u'---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor(u'&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor(u'*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator(u'[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator(u'{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(u':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator(u'-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(u':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == u'')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = u'!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return u'%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != u'!' or handle[-1] != u'!':
+ raise EmitterError("tag handle must start and end with '!': %r"
+ % (handle.encode('utf-8')))
+ for ch in handle[1:-1]:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch.encode('utf-8'), handle.encode('utf-8')))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == u'!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(prefix[start:end])
+ return u''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == u'!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = self.tag_prefixes.keys()
+ prefixes.sort()
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == u'!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == u'!' and handle != u'!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = u''.join(chunks)
+ if handle:
+ return u'%s%s' % (handle, suffix_text)
+ else:
+ return u'!<%s>' % suffix_text
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch.encode('utf-8'), anchor.encode('utf-8')))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith(u'---') or scalar.startswith(u'...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+ preceeded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in u'#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in u'?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+ # Some indicators cannot appear within a scalar as well.
+ if ch in u',?[]{}':
+ flow_indicators = True
+ if ch == u':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'#' and preceeded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in u'\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
+ if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == u' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in u'\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+ # Spaces followed by breaks, as well as special character are only
+ # allowed for double quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
+
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write(u'\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = u' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = u' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = u'%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = u'%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator(u'\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != u' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == u'\'':
+ data = u'\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator(u'\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ u'\0': u'0',
+ u'\x07': u'a',
+ u'\x08': u'b',
+ u'\x09': u't',
+ u'\x0A': u'n',
+ u'\x0B': u'v',
+ u'\x0C': u'f',
+ u'\x0D': u'r',
+ u'\x1B': u'e',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'\x85': u'N',
+ u'\xA0': u'_',
+ u'\u2028': u'L',
+ u'\u2029': u'P',
+ }
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator(u'"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
+ or not (u'\x20' <= ch <= u'\x7E'
+ or (self.allow_unicode
+ and (u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= u'\xFF':
+ data = u'\\x%02X' % ord(ch)
+ elif ch <= u'\uFFFF':
+ data = u'\\u%04X' % ord(ch)
+ else:
+ data = u'\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+u'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == u' ':
+ data = u'\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator(u'"', False)
+
+ def determine_block_hints(self, text):
+ hints = u''
+ if text:
+ if text[0] in u' \n\x85\u2028\u2029':
+ hints += unicode(self.best_indent)
+ if text[-1] not in u'\n\x85\u2028\u2029':
+ hints += u'-'
+ elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
+ hints += u'+'
+ return hints
+
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'>'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != u' ' \
+ and text[start] == u'\n':
+ self.write_line_break()
+ leading_space = (ch == u' ')
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ spaces = (ch == u' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'|'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u'\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = u' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
diff --git a/scripts/external_libs/pyyaml-3.11/python2/yaml/error.py b/scripts/external_libs/pyyaml-3.11/python2/yaml/error.py
new file mode 100644
index 00000000..577686db
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python2/yaml/error.py
@@ -0,0 +1,75 @@
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark(object):
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end].encode('utf-8')
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
diff --git a/scripts/external_libs/pyyaml-3.11/python2/yaml/events.py b/scripts/external_libs/pyyaml-3.11/python2/yaml/events.py
new file mode 100644
index 00000000..f79ad389
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python2/yaml/events.py
@@ -0,0 +1,86 @@
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
diff --git a/scripts/external_libs/pyyaml-3.11/python2/yaml/loader.py b/scripts/external_libs/pyyaml-3.11/python2/yaml/loader.py
new file mode 100644
index 00000000..293ff467
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python2/yaml/loader.py
@@ -0,0 +1,40 @@
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
+
+from reader import *
+from scanner import *
+from parser import *
+from composer import *
+from constructor import *
+from resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
diff --git a/scripts/external_libs/pyyaml-3.11/python2/yaml/nodes.py b/scripts/external_libs/pyyaml-3.11/python2/yaml/nodes.py
new file mode 100644
index 00000000..c4f070c4
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python2/yaml/nodes.py
@@ -0,0 +1,49 @@
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
diff --git a/scripts/external_libs/pyyaml-3.11/python2/yaml/parser.py b/scripts/external_libs/pyyaml-3.11/python2/yaml/parser.py
new file mode 100644
index 00000000..f9e3057f
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python2/yaml/parser.py
@@ -0,0 +1,589 @@
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
+__all__ = ['Parser', 'ParserError']
+
+from error import MarkedYAMLError
+from tokens import *
+from events import *
+from scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser(object):
+ # Since writing a recursive-descendant parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ u'!': u'!',
+ u'!!': u'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == u'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == u'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle.encode('utf-8'),
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle.encode('utf-8'),
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == u'!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == u'!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == u'!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), u'',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while production rules for both flow_sequence_entry and
+ # flow_mapping_entry are equal, their interpretations are different.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+ # generate an inline mapping (set syntax).
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), u'', mark, mark)
+
diff --git a/scripts/external_libs/pyyaml-3.11/python2/yaml/reader.py b/scripts/external_libs/pyyaml-3.11/python2/yaml/reader.py
new file mode 100644
index 00000000..3249e6b9
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python2/yaml/reader.py
@@ -0,0 +1,190 @@
+# This module contains abstractions for the input stream. You don't have to
+# looks further, there are no pretty code.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(length=1) - return the next `length` characters
+# reader.forward(length=1) - move the current position to `length` characters.
+# reader.index - the number of the current character.
+# reader.line, stream.column - the line and the column of the current character.
+
+__all__ = ['Reader', 'ReaderError']
+
+from error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, str):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to unicode,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `str` object,
+ # - a `unicode` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `unicode`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = u''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, unicode):
+ self.name = "<unicode string>"
+ self.check_printable(stream)
+ self.buffer = stream+u'\0'
+ elif isinstance(stream, str):
+ self.name = "<string>"
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = ''
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in u'\n\x85\u2028\u2029' \
+ or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != u'\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and len(self.raw_buffer) < 2:
+ self.update_raw()
+ if not isinstance(self.raw_buffer, unicode):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError, exc:
+ character = exc.object[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += u'\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=1024):
+ data = self.stream.read(size)
+ if data:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ else:
+ self.eof = True
+
+#try:
+# import psyco
+# psyco.bind(Reader)
+#except ImportError:
+# pass
+
diff --git a/scripts/external_libs/pyyaml-3.11/python2/yaml/representer.py b/scripts/external_libs/pyyaml-3.11/python2/yaml/representer.py
new file mode 100644
index 00000000..5f4fc70d
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python2/yaml/representer.py
@@ -0,0 +1,484 @@
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import sys, copy_reg, types
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter(object):
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=None):
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def get_classobj_bases(self, cls):
+ bases = [cls]
+ for base in cls.__bases__:
+ bases.extend(self.get_classobj_bases(base))
+ return bases
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if type(data) is types.InstanceType:
+ data_types = self.get_classobj_bases(data.__class__)+list(data_types)
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, unicode(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ def add_representer(cls, data_type, representer):
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+ add_representer = classmethod(add_representer)
+
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+ add_multi_representer = classmethod(add_multi_representer)
+
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = mapping.items()
+ mapping.sort()
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data in [None, ()]:
+ return True
+ if isinstance(data, (str, unicode, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:null',
+ u'null')
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:str', data)
+
+ def represent_bool(self, data):
+ if data:
+ value = u'true'
+ else:
+ value = u'false'
+ return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ def represent_long(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+ if data != data or (data == 0.0 and data == 1.0):
+ value = u'.nan'
+ elif data == self.inf_value:
+ value = u'.inf'
+ elif data == -self.inf_value:
+ value = u'-.inf'
+ else:
+ value = unicode(repr(data)).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if u'.' not in value and u'e' in value:
+ value = value.replace(u'e', u'.0e', 1)
+ return self.represent_scalar(u'tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping(u'tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping(u'tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = unicode(data.isoformat())
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = unicode(data.isoformat(' '))
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object: %s" % data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(unicode,
+ SafeRepresenter.represent_unicode)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(long,
+ SafeRepresenter.represent_long)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:python/str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ tag = None
+ try:
+ data.encode('ascii')
+ tag = u'tag:yaml.org,2002:python/unicode'
+ except UnicodeEncodeError:
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data)
+
+ def represent_long(self, data):
+ tag = u'tag:yaml.org,2002:int'
+ if int(data) is not data:
+ tag = u'tag:yaml.org,2002:python/long'
+ return self.represent_scalar(tag, unicode(data))
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = u'%r' % data.real
+ elif data.real == 0.0:
+ data = u'%rj' % data.imag
+ elif data.imag > 0:
+ data = u'%r+%rj' % (data.real, data.imag)
+ else:
+ data = u'%r%rj' % (data.real, data.imag)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = u'%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
+
+ def represent_instance(self, data):
+ # For instances of classic classes, we use __getinitargs__ and
+ # __getstate__ to serialize the data.
+
+ # If data.__getinitargs__ exists, the object must be reconstructed by
+ # calling cls(**args), where args is a tuple returned by
+ # __getinitargs__. Otherwise, the cls.__init__ method should never be
+ # called and the class instance is created by instantiating a trivial
+ # class and assigning to the instance's __class__ variable.
+
+ # If data.__getstate__ exists, it returns the state of the object.
+ # Otherwise, the state of the object is data.__dict__.
+
+ # We produce either a !!python/object or !!python/object/new node.
+ # If data.__getinitargs__ does not exist and state is a dictionary, we
+ # produce a !!python/object node . Otherwise we produce a
+ # !!python/object/new node.
+
+ cls = data.__class__
+ class_name = u'%s.%s' % (cls.__module__, cls.__name__)
+ args = None
+ state = None
+ if hasattr(data, '__getinitargs__'):
+ args = list(data.__getinitargs__())
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__
+ if args is None and isinstance(state, dict):
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+class_name, state)
+ if isinstance(state, dict) and not state:
+ return self.represent_sequence(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ value['state'] = state
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, value)
+
+ def represent_object(self, data):
+ # We use __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstructing, we calls function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
+
+ cls = type(data)
+ if cls in copy_reg.dispatch_table:
+ reduce = copy_reg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent object: %r" % data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = u'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = u'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = u'%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+Representer.add_representer(str,
+ Representer.represent_str)
+
+Representer.add_representer(unicode,
+ Representer.represent_unicode)
+
+Representer.add_representer(long,
+ Representer.represent_long)
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(types.ClassType,
+ Representer.represent_name)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(types.InstanceType,
+ Representer.represent_instance)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
diff --git a/scripts/external_libs/pyyaml-3.11/python2/yaml/resolver.py b/scripts/external_libs/pyyaml-3.11/python2/yaml/resolver.py
new file mode 100644
index 00000000..6b5ab875
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python2/yaml/resolver.py
@@ -0,0 +1,224 @@
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from error import *
+from nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver(object):
+
+ DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if not 'yaml_implicit_resolvers' in cls.__dict__:
+ cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+ add_implicit_resolver = classmethod(add_implicit_resolver)
+
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+ # `new_path` is a pattern that is matched against the path from the
+ # root to the node that is being considered. `node_path` elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of a node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+ # a mapping value that corresponds to a scalar key which content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value with the index equal to `index_check`.
+ if not 'yaml_path_resolvers' in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, basestring) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (basestring, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+ add_path_resolver = classmethod(add_path_resolver)
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, basestring):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, basestring):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == u'':
+ resolvers = self.yaml_implicit_resolvers.get(u'', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ resolvers += self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:bool',
+ re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list(u'yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:float',
+ re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list(u'-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:int',
+ re.compile(ur'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list(u'-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:merge',
+ re.compile(ur'^(?:<<)$'),
+ [u'<'])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:null',
+ re.compile(ur'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ [u'~', u'n', u'N', u''])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:timestamp',
+ re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list(u'0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:value',
+ re.compile(ur'^(?:=)$'),
+ [u'='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:yaml',
+ re.compile(ur'^(?:!|&|\*)$'),
+ list(u'!&*'))
+
diff --git a/scripts/external_libs/pyyaml-3.11/python2/yaml/scanner.py b/scripts/external_libs/pyyaml-3.11/python2/yaml/scanner.py
new file mode 100644
index 00000000..5228fad6
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python2/yaml/scanner.py
@@ -0,0 +1,1457 @@
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
+
+__all__ = ['Scanner', 'ScannerError']
+
+from error import MarkedYAMLError
+from tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey(object):
+ # See below simple keys treatment.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner(object):
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader do the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+ # Had we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Example of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more that one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # Return the next token, but do not delete if from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == u'\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == u'%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == u'-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == u'.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == u'\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == u'[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == u'{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == u']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == u'}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == u',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == u'-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == u'?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == u':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == u'*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == u'&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == u'!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == u'|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == u'>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == u'\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == u'\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token"
+ % ch.encode('utf-8'), self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in self.possible_simple_keys.keys():
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # A simple key is required only if it is the first token in the current
+ # line. Therefore it is always allowed.
+ assert self.allow_simple_key or not required
+
+ # The next token might be a simple key. Let's save it's number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+    def unwind_indent(self, column):
+        # Pop indentation levels above `column`, emitting BLOCK-END tokens.
+
+        ## In flow context, tokens should respect indentation.
+        ## Actually the condition should be `self.indent >= column` according to
+        ## the spec. But this condition will prohibit intuitively correct
+        ## constructions such as
+        ## key : {
+        ## }
+        #if self.flow_level and self.indent > column:
+        #    raise ScannerError(None, None,
+        #            "invalid indentation or unclosed '[' or '{'",
+        #            self.get_mark())
+
+        # In the flow context, indentation is ignored. We make the scanner less
+        # restrictive than the specification requires.
+        if self.flow_level:
+            return
+
+        # In block context, we may need to issue the BLOCK-END tokens.
+        while self.indent > column:
+            mark = self.get_mark()
+            self.indent = self.indents.pop()
+            self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+    def fetch_stream_end(self):
+
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+        self.possible_simple_keys = {}
+
+        # Read the token.
+        mark = self.get_mark()
+
+        # Add STREAM-END.
+        self.tokens.append(StreamEndToken(mark, mark))
+
+        # The stream is finished.
+        self.done = True
+
+    def fetch_directive(self):
+
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+
+        # Scan and add DIRECTIVE.
+        self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+    def fetch_document_indicator(self, TokenClass):
+
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys. Note that there could not be a block collection
+        # after '---'.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+
+        # Add DOCUMENT-START or DOCUMENT-END.
+        start_mark = self.get_mark()
+        self.forward(3)
+        end_mark = self.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+    def fetch_key(self):
+
+        # Block context needs additional checks.
+        if not self.flow_level:
+
+            # Are we allowed to start a key (not necessarily a simple one)?
+            if not self.allow_simple_key:
+                raise ScannerError(None, None,
+                        "mapping keys are not allowed here",
+                        self.get_mark())
+
+            # We may need to add BLOCK-MAPPING-START.
+            if self.add_indent(self.column):
+                mark = self.get_mark()
+                self.tokens.append(BlockMappingStartToken(mark, mark))
+
+        # Simple keys are allowed after '?' in the block context.
+        self.allow_simple_key = not self.flow_level
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add KEY.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(KeyToken(start_mark, end_mark))
+
+    def fetch_value(self):
+
+        # Do we determine a simple key?
+        if self.flow_level in self.possible_simple_keys:
+
+            # Add KEY.
+            key = self.possible_simple_keys[self.flow_level]
+            del self.possible_simple_keys[self.flow_level]
+            self.tokens.insert(key.token_number-self.tokens_taken,
+                    KeyToken(key.mark, key.mark))
+
+            # If this key starts a new block mapping, we need to add
+            # BLOCK-MAPPING-START.
+            if not self.flow_level:
+                if self.add_indent(key.column):
+                    self.tokens.insert(key.token_number-self.tokens_taken,
+                            BlockMappingStartToken(key.mark, key.mark))
+
+            # There cannot be two simple keys one after another.
+            self.allow_simple_key = False
+
+        # It must be a part of a complex key.
+        else:
+
+            # Block context needs additional checks.
+            # (Do we really need them? They will be caught by the parser
+            # anyway.)
+            if not self.flow_level:
+
+                # We are allowed to start a complex value if and only if
+                # we can start a simple key.
+                if not self.allow_simple_key:
+                    raise ScannerError(None, None,
+                            "mapping values are not allowed here",
+                            self.get_mark())
+
+            # If this value starts a new block mapping, we need to add
+            # BLOCK-MAPPING-START. It will be detected as an error later by
+            # the parser.
+            if not self.flow_level:
+                if self.add_indent(self.column):
+                    mark = self.get_mark()
+                    self.tokens.append(BlockMappingStartToken(mark, mark))
+
+        # Simple keys are allowed after ':' in the block context.
+        self.allow_simple_key = not self.flow_level
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add VALUE.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'---' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'...' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
+ and (ch == u'-' or (not self.flow_level and ch in u'?:')))
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == u'\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == u'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == u'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not (u'0' <= ch <= u'9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 0
+ while u'0' <= self.peek(length) <= u'9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == u' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != u' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+    def scan_anchor(self, TokenClass):
+        # The specification does not restrict characters for anchors and
+        # aliases. This may lead to problems, for instance, the document:
+        #   [ *alias, value ]
+        # can be interpreted in two ways, as
+        #   [ "value" ]
+        # and
+        #   [ *alias , "value" ]
+        # Therefore we restrict aliases to numbers and ASCII letters.
+        start_mark = self.get_mark()
+        indicator = self.peek()
+        if indicator == u'*':
+            name = 'alias'
+        else:
+            name = 'anchor'
+        self.forward()
+        length = 0
+        ch = self.peek(length)
+        while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+                or ch in u'-_':
+            length += 1
+            ch = self.peek(length)
+        if not length:
+            raise ScannerError("while scanning an %s" % name, start_mark,
+                    "expected alphabetic or numeric character, but found %r"
+                    % ch.encode('utf-8'), self.get_mark())
+        value = self.prefix(length)
+        self.forward(length)
+        ch = self.peek()
+        if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+            raise ScannerError("while scanning an %s" % name, start_mark,
+                    "expected alphabetic or numeric character, but found %r"
+                    % ch.encode('utf-8'), self.get_mark())
+        end_mark = self.get_mark()
+        return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == u'<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != u'>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ elif ch in u'\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = u'!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in u'\0 \r\n\x85\u2028\u2029':
+ if ch == u'!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ handle = u'!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = u'!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = u''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != u'\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in u' \t'
+ length = 0
+ while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != u'\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == u'\n' \
+ and leading_non_space and self.peek() not in u' \t':
+ if not breaks:
+ chunks.append(u' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == u'\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(u' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return chomping, increment
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() != u' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ while self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ return chunks, end_mark
+
+    def scan_flow_scalar(self, style):
+        # See the specification for details.
+        # Note that we lose indentation rules for quoted scalars. Quoted
+        # scalars don't need to adhere to indentation because " and ' clearly
+        # mark the beginning and the end of them. Therefore we are less
+        # restrictive than the specification requires. We only need to check
+        # that document separators are not included in scalars.
+        if style == '"':
+            double = True
+        else:
+            double = False
+        chunks = []
+        start_mark = self.get_mark()
+        quote = self.peek()
+        self.forward()
+        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+        while self.peek() != quote:
+            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+        self.forward()
+        end_mark = self.get_mark()
+        return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+                style)
+
+ ESCAPE_REPLACEMENTS = {
+ u'0': u'\0',
+ u'a': u'\x07',
+ u'b': u'\x08',
+ u't': u'\x09',
+ u'\t': u'\x09',
+ u'n': u'\x0A',
+ u'v': u'\x0B',
+ u'f': u'\x0C',
+ u'r': u'\x0D',
+ u'e': u'\x1B',
+ u' ': u'\x20',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'N': u'\x85',
+ u'_': u'\xA0',
+ u'L': u'\u2028',
+ u'P': u'\u2029',
+ }
+
+ ESCAPE_CODES = {
+ u'x': 2,
+ u'u': 4,
+ u'U': 8,
+ }
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == u'\'' and self.peek(1) == u'\'':
+ chunks.append(u'\'')
+ self.forward(2)
+ elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == u'\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "expected escape sequence of %d hexdecimal numbers, but found %r" %
+ (length, self.peek(k).encode('utf-8')), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(unichr(code))
+ self.forward(length)
+ elif ch in u'\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in u' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == u'\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in u' \t':
+ self.forward()
+ if self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+    def scan_plain(self):
+        # See the specification for details.
+        # We add an additional restriction for the flow context:
+        #   plain scalars in the flow context cannot contain ',', ':' and '?'.
+        # We also keep track of the `allow_simple_key` flag here.
+        # Indentation rules are loosened for the flow context.
+        chunks = []
+        start_mark = self.get_mark()
+        end_mark = start_mark
+        indent = self.indent+1
+        # We allow zero indentation for scalars, but then we need to check for
+        # document separators at the beginning of the line.
+        #if indent == 0:
+        #    indent = 1
+        spaces = []
+        while True:
+            length = 0
+            if self.peek() == u'#':
+                break
+            while True:
+                ch = self.peek(length)
+                if ch in u'\0 \t\r\n\x85\u2028\u2029' \
+                        or (not self.flow_level and ch == u':' and
+                        self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
+                        or (self.flow_level and ch in u',:?[]{}'):
+                    break
+                length += 1
+            # It's not clear what we should do with ':' in the flow context.
+            if (self.flow_level and ch == u':'
+                    and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
+                self.forward(length)
+                raise ScannerError("while scanning a plain scalar", start_mark,
+                    "found unexpected ':'", self.get_mark(),
+                    "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+            if length == 0:
+                break
+            self.allow_simple_key = False
+            chunks.extend(spaces)
+            chunks.append(self.prefix(length))
+            self.forward(length)
+            end_mark = self.get_mark()
+            spaces = self.scan_plain_spaces(indent, start_mark)
+            if not spaces or self.peek() == u'#' \
+                    or (not self.flow_level and self.column < indent):
+                break
+        return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in u' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+ # For some strange reasons, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != u'!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != u' ':
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != u'!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == u'%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return u''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ bytes = []
+ mark = self.get_mark()
+ while self.peek() == u'%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
+ (self.peek(k).encode('utf-8')), self.get_mark())
+ bytes.append(chr(int(self.prefix(2), 16)))
+ self.forward(2)
+ try:
+ value = unicode(''.join(bytes), 'utf-8')
+ except UnicodeDecodeError, exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029 : '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in u'\r\n\x85':
+ if self.prefix(2) == u'\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return u'\n'
+ elif ch in u'\u2028\u2029':
+ self.forward()
+ return ch
+ return u''
+
+#try:
+# import psyco
+# psyco.bind(Scanner)
+#except ImportError:
+# pass
+
diff --git a/scripts/external_libs/pyyaml-3.11/python2/yaml/serializer.py b/scripts/external_libs/pyyaml-3.11/python2/yaml/serializer.py
new file mode 100644
index 00000000..0bf1e96d
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python2/yaml/serializer.py
@@ -0,0 +1,111 @@
+
+__all__ = ['Serializer', 'SerializerError']
+
+from error import YAMLError
+from events import *
+from nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer(object):
+
+ ANCHOR_TEMPLATE = u'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
diff --git a/scripts/external_libs/pyyaml-3.11/python2/yaml/tokens.py b/scripts/external_libs/pyyaml-3.11/python2/yaml/tokens.py
new file mode 100644
index 00000000..4d0b48a3
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python2/yaml/tokens.py
@@ -0,0 +1,104 @@
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
diff --git a/scripts/external_libs/pyyaml-3.11/python3/yaml/__init__.py b/scripts/external_libs/pyyaml-3.11/python3/yaml/__init__.py
new file mode 100644
index 00000000..a5e20f94
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python3/yaml/__init__.py
@@ -0,0 +1,312 @@
+
+from .error import *
+
+from .tokens import *
+from .events import *
+from .nodes import *
+
+from .loader import *
+from .dumper import *
+
+__version__ = '3.11'
+try:
+ from .cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+import io
+
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
+def load(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+def load_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ return load(stream, SafeLoader)
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ return load_all(stream, SafeLoader)
+
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ stream = io.StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = io.StringIO()
+ else:
+ stream = io.BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = io.StringIO()
+ else:
+ stream = io.BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=Loader, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=Loader):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_constructor(tag, constructor)
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(metaclass=YAMLObjectMetaclass):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = Loader
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ @classmethod
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+
+ @classmethod
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+
diff --git a/scripts/external_libs/pyyaml-3.11/python3/yaml/composer.py b/scripts/external_libs/pyyaml-3.11/python3/yaml/composer.py
new file mode 100644
index 00000000..d5c6a7ac
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python3/yaml/composer.py
@@ -0,0 +1,139 @@
+
+__all__ = ['Composer', 'ComposerError']
+
+from .error import MarkedYAMLError
+from .events import *
+from .nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer:
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+ # If there are more documents available?
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor, event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+ raise ComposerError("found duplicate anchor %r; first occurence"
+ % anchor, self.anchors[anchor].start_mark,
+ "second occurence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
diff --git a/scripts/external_libs/pyyaml-3.11/python3/yaml/constructor.py b/scripts/external_libs/pyyaml-3.11/python3/yaml/constructor.py
new file mode 100644
index 00000000..981543ae
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python3/yaml/constructor.py
@@ -0,0 +1,686 @@
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError']
+
+from .error import *
+from .nodes import *
+
+import collections, datetime, base64, binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor:
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+ # If there are more documents available?
+ return self.check_node()
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = next(generator)
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ if not isinstance(key, collections.Hashable):
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unhashable key", key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ @classmethod
+ def add_constructor(cls, tag, constructor):
+ if not 'yaml_constructors' in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+
+ @classmethod
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if not 'yaml_multi_constructors' in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == 'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return super().construct_scalar(node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == 'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == 'tag:yaml.org,2002:value':
+ key_node.tag = 'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return super().construct_mapping(node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ 'yes': True,
+ 'no': False,
+ 'true': True,
+ 'false': False,
+ 'on': True,
+ 'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ value = self.construct_scalar(node)
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
+
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = self.construct_scalar(node)
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ r'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag,
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class Constructor(SafeConstructor):
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_bytes(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ def construct_python_long(self, node):
+ return self.construct_yaml_int(node)
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ try:
+ __import__(name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name, exc), mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if '.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = 'builtins'
+ object_name = name
+ try:
+ __import__(module_name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name, exc), mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r"
+ % (object_name, module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ setattr(object, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created, check make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/none',
+ Constructor.construct_yaml_null)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/bool',
+ Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/str',
+ Constructor.construct_python_str)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/unicode',
+ Constructor.construct_python_unicode)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/bytes',
+ Constructor.construct_python_bytes)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/int',
+ Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/long',
+ Constructor.construct_python_long)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/float',
+ Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/complex',
+ Constructor.construct_python_complex)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/list',
+ Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/tuple',
+ Constructor.construct_python_tuple)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/dict',
+ Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/name:',
+ Constructor.construct_python_name)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/module:',
+ Constructor.construct_python_module)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object:',
+ Constructor.construct_python_object)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/apply:',
+ Constructor.construct_python_object_apply)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/new:',
+ Constructor.construct_python_object_new)
+
diff --git a/scripts/external_libs/pyyaml-3.11/python3/yaml/cyaml.py b/scripts/external_libs/pyyaml-3.11/python3/yaml/cyaml.py
new file mode 100644
index 00000000..d5cb87e9
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python3/yaml/cyaml.py
@@ -0,0 +1,85 @@
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+from _yaml import CParser, CEmitter
+
+from .constructor import *
+
+from .serializer import *
+from .representer import *
+
+from .resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/scripts/external_libs/pyyaml-3.11/python3/yaml/dumper.py b/scripts/external_libs/pyyaml-3.11/python3/yaml/dumper.py
new file mode 100644
index 00000000..0b691287
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python3/yaml/dumper.py
@@ -0,0 +1,62 @@
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from .emitter import *
+from .serializer import *
+from .representer import *
+from .resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/scripts/external_libs/pyyaml-3.11/python3/yaml/emitter.py b/scripts/external_libs/pyyaml-3.11/python3/yaml/emitter.py
new file mode 100644
index 00000000..34cb145a
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python3/yaml/emitter.py
@@ -0,0 +1,1137 @@
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
+
+__all__ = ['Emitter', 'EmitterError']
+
+from .error import YAMLError
+from .events import *
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis:
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter:
+
+ DEFAULT_TAG_PREFIXES = {
+ '!' : '!',
+ 'tag:yaml.org,2002:' : '!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+ # Encoding can be overriden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = '\n'
+ if line_break in ['\r', '\n', '\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for a few next events before emitting.
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
+
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not hasattr(self.stream, 'encoding'):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = sorted(self.event.tags.keys())
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator('---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor('&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor('*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator('[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ self.write_indicator(']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator('{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator('}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ self.write_indicator('}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator('-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == '')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = '!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return '%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != '!' or handle[-1] != '!':
+ raise EmitterError("tag handle must start and end with '!': %r" % handle)
+ for ch in handle[1:-1]:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch, handle))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == '!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+                chunks.append('%%%02X' % ch)
+ if start < end:
+ chunks.append(prefix[start:end])
+ return ''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == '!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = sorted(self.tag_prefixes.keys())
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == '!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == '!' and handle != '!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+                chunks.append('%%%02X' % ch)
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = ''.join(chunks)
+ if handle:
+ return '%s%s' % (handle, suffix_text)
+ else:
+ return '!<%s>' % suffix_text
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch, anchor))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith('---') or scalar.startswith('...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+ preceeded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in '#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in '?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == '-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+ # Some indicators cannot appear within a scalar as well.
+ if ch in ',?[]{}':
+ flow_indicators = True
+ if ch == ':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == '#' and preceeded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in '\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
+ if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
+ or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == ' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in '\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceeded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+ # Spaces followed by breaks, as well as special character are only
+ # allowed for double quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
+
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write('\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = ' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = ' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = '%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = '%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator('\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != ' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == '\'':
+ data = '\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == ' ')
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator('\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ '\0': '0',
+ '\x07': 'a',
+ '\x08': 'b',
+ '\x09': 't',
+ '\x0A': 'n',
+ '\x0B': 'v',
+ '\x0C': 'f',
+ '\x0D': 'r',
+ '\x1B': 'e',
+ '\"': '\"',
+ '\\': '\\',
+ '\x85': 'N',
+ '\xA0': '_',
+ '\u2028': 'L',
+ '\u2029': 'P',
+ }
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator('"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
+ or not ('\x20' <= ch <= '\x7E'
+ or (self.allow_unicode
+ and ('\xA0' <= ch <= '\uD7FF'
+ or '\uE000' <= ch <= '\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= '\xFF':
+ data = '\\x%02X' % ord(ch)
+ elif ch <= '\uFFFF':
+ data = '\\u%04X' % ord(ch)
+ else:
+ data = '\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == ' ':
+ data = '\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator('"', False)
+
+ def determine_block_hints(self, text):
+ hints = ''
+ if text:
+ if text[0] in ' \n\x85\u2028\u2029':
+ hints += str(self.best_indent)
+ if text[-1] not in '\n\x85\u2028\u2029':
+ hints += '-'
+ elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
+ hints += '+'
+ return hints
+
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator('>'+hints, True)
+ if hints[-1:] == '+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != ' ' \
+ and text[start] == '\n':
+ self.write_line_break()
+ leading_space = (ch == ' ')
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != ' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in '\n\x85\u2028\u2029')
+ spaces = (ch == ' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator('|'+hints, True)
+ if hints[-1:] == '+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in '\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = ' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != ' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in '\n\x85\u2028\u2029':
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == ' ')
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+
diff --git a/scripts/external_libs/pyyaml-3.11/python3/yaml/error.py b/scripts/external_libs/pyyaml-3.11/python3/yaml/error.py
new file mode 100644
index 00000000..b796b4dc
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python3/yaml/error.py
@@ -0,0 +1,75 @@
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark:
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end]
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
diff --git a/scripts/external_libs/pyyaml-3.11/python3/yaml/events.py b/scripts/external_libs/pyyaml-3.11/python3/yaml/events.py
new file mode 100644
index 00000000..f79ad389
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python3/yaml/events.py
@@ -0,0 +1,86 @@
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
diff --git a/scripts/external_libs/pyyaml-3.11/python3/yaml/loader.py b/scripts/external_libs/pyyaml-3.11/python3/yaml/loader.py
new file mode 100644
index 00000000..08c8f01b
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python3/yaml/loader.py
@@ -0,0 +1,40 @@
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
+
+from .reader import *
+from .scanner import *
+from .parser import *
+from .composer import *
+from .constructor import *
+from .resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
diff --git a/scripts/external_libs/pyyaml-3.11/python3/yaml/nodes.py b/scripts/external_libs/pyyaml-3.11/python3/yaml/nodes.py
new file mode 100644
index 00000000..c4f070c4
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python3/yaml/nodes.py
@@ -0,0 +1,49 @@
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
diff --git a/scripts/external_libs/pyyaml-3.11/python3/yaml/parser.py b/scripts/external_libs/pyyaml-3.11/python3/yaml/parser.py
new file mode 100644
index 00000000..13a5995d
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python3/yaml/parser.py
@@ -0,0 +1,589 @@
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
+__all__ = ['Parser', 'ParserError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+from .events import *
+from .scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser:
+ # Since writing a recursive-descendant parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ '!': '!',
+ '!!': 'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == 'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == 'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle,
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle,
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == '!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == '!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == '!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), '',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while production rules for both flow_sequence_entry and
+ # flow_mapping_entry are equal, their interpretations are different.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+ # generate an inline mapping (set syntax).
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), '', mark, mark)
+
diff --git a/scripts/external_libs/pyyaml-3.11/python3/yaml/reader.py b/scripts/external_libs/pyyaml-3.11/python3/yaml/reader.py
new file mode 100644
index 00000000..f70e920f
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python3/yaml/reader.py
@@ -0,0 +1,192 @@
+# This module contains abstractions for the input stream. You don't have to
+# looks further, there are no pretty code.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(length=1) - return the next `length` characters
+# reader.forward(length=1) - move the current position to `length` characters.
+# reader.index - the number of the current character.
+# reader.line, stream.column - the line and the column of the current character.
+
+__all__ = ['Reader', 'ReaderError']
+
+from .error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, bytes):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to a unicode string,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `bytes` object,
+ # - a `str` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `unicode`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = ''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, str):
+ self.name = "<unicode string>"
+ self.check_printable(stream)
+ self.buffer = stream+'\0'
+ elif isinstance(stream, bytes):
+ self.name = "<byte string>"
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = None
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in '\n\x85\u2028\u2029' \
+ or (ch == '\r' and self.buffer[self.pointer] != '\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != '\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
+ self.update_raw()
+ if isinstance(self.raw_buffer, bytes):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError as exc:
+ character = self.raw_buffer[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += '\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=4096):
+ data = self.stream.read(size)
+ if self.raw_buffer is None:
+ self.raw_buffer = data
+ else:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ if not data:
+ self.eof = True
+
+#try:
+# import psyco
+# psyco.bind(Reader)
+#except ImportError:
+# pass
+
diff --git a/scripts/external_libs/pyyaml-3.11/python3/yaml/representer.py b/scripts/external_libs/pyyaml-3.11/python3/yaml/representer.py
new file mode 100644
index 00000000..67cd6fd2
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python3/yaml/representer.py
@@ -0,0 +1,374 @@
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from .error import *
+from .nodes import *
+
+import datetime, sys, copyreg, types, base64
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter:
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=None):
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, str(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ @classmethod
+ def add_representer(cls, data_type, representer):
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+
+ @classmethod
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = list(mapping.items())
+ try:
+ mapping = sorted(mapping)
+ except TypeError:
+ pass
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data in [None, ()]:
+ return True
+ if isinstance(data, (str, bytes, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:null', 'null')
+
+ def represent_str(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:str', data)
+
+ def represent_binary(self, data):
+ if hasattr(base64, 'encodebytes'):
+ data = base64.encodebytes(data).decode('ascii')
+ else:
+ data = base64.encodestring(data).decode('ascii')
+ return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
+
+ def represent_bool(self, data):
+ if data:
+ value = 'true'
+ else:
+ value = 'false'
+ return self.represent_scalar('tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:int', str(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+ if data != data or (data == 0.0 and data == 1.0):
+ value = '.nan'
+ elif data == self.inf_value:
+ value = '.inf'
+ elif data == -self.inf_value:
+ value = '-.inf'
+ else:
+ value = repr(data).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if '.' not in value and 'e' in value:
+ value = value.replace('e', '.0e', 1)
+ return self.represent_scalar('tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence('tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping('tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping('tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = data.isoformat()
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = data.isoformat(' ')
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object: %s" % data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(bytes,
+ SafeRepresenter.represent_binary)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = '%r' % data.real
+ elif data.real == 0.0:
+ data = '%rj' % data.imag
+ elif data.imag > 0:
+ data = '%r+%rj' % (data.real, data.imag)
+ else:
+ data = '%r%rj' % (data.real, data.imag)
+ return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = '%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ 'tag:yaml.org,2002:python/module:'+data.__name__, '')
+
+ def represent_object(self, data):
+ # We use __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstructing, we calls function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
+
+ cls = type(data)
+ if cls in copyreg.dispatch_table:
+ reduce = copyreg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent object: %r" % data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = 'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = 'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = '%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ 'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
diff --git a/scripts/external_libs/pyyaml-3.11/python3/yaml/resolver.py b/scripts/external_libs/pyyaml-3.11/python3/yaml/resolver.py
new file mode 100644
index 00000000..0eece258
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python3/yaml/resolver.py
@@ -0,0 +1,224 @@
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from .error import *
+from .nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver:
+
+ DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ @classmethod
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if not 'yaml_implicit_resolvers' in cls.__dict__:
+ cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+
+ @classmethod
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+ # `new_path` is a pattern that is matched against the path from the
+ # root to the node that is being considered. `node_path` elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of a node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+ # a mapping value that corresponds to a scalar key which content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value with the index equal to `index_check`.
+ if not 'yaml_path_resolvers' in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, str) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (str, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, str):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, str):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == '':
+ resolvers = self.yaml_implicit_resolvers.get('', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ resolvers += self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:bool',
+ re.compile(r'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list('yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:float',
+ re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list('-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:int',
+ re.compile(r'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list('-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:merge',
+ re.compile(r'^(?:<<)$'),
+ ['<'])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:null',
+ re.compile(r'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ ['~', 'n', 'N', ''])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:timestamp',
+ re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list('0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:value',
+ re.compile(r'^(?:=)$'),
+ ['='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:yaml',
+ re.compile(r'^(?:!|&|\*)$'),
+ list('!&*'))
+
diff --git a/scripts/external_libs/pyyaml-3.11/python3/yaml/scanner.py b/scripts/external_libs/pyyaml-3.11/python3/yaml/scanner.py
new file mode 100644
index 00000000..494d975b
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python3/yaml/scanner.py
@@ -0,0 +1,1448 @@
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
+
+__all__ = ['Scanner', 'ScannerError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey:
+ # See below simple keys treatment.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner:
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader do the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+ # Had we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Example of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more that one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # Return the next token, but do not delete if from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == '\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == '%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == '-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == '.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == '\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == '[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == '{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == ']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == '}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == ',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == '-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == '?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == ':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == '*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == '&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == '!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == '|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == '>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == '\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == '\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token" % ch,
+ self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in list(self.possible_simple_keys):
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # A simple key is required only if it is the first token in the current
+ # line. Therefore it is always allowed.
+ assert self.allow_simple_key or not required
+
+ # The next token might be a simple key. Let's save it's number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid intendation or unclosed '[' or '{'",
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive then specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
+ # Set the current intendation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+ # The steam is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+ # Set the current intendation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+ # Set the current intendation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there could not be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a key (not nessesary a simple)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+ # Do we determine a simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be catched by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '---' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '...' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
+ and (ch == '-' or (not self.flow_level and ch in '?:')))
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == '\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == 'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == 'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r" % self.peek(),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not ('0' <= ch <= '9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch, self.get_mark())
+ length = 0
+ while '0' <= self.peek(length) <= '9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == ' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != ' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch, self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+ # can be interpteted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == '*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == '<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != '>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ elif ch in '\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = '!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in '\0 \r\n\x85\u2028\u2029':
+ if ch == '!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ handle = '!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = '!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = ''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != '\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in ' \t'
+ length = 0
+ while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != '\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == '\n' \
+ and leading_non_space and self.peek() not in ' \t':
+ if not breaks:
+ chunks.append(' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == '\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch, self.get_mark())
+ return chomping, increment
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r" % ch,
+ self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() != ' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ while self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+        # Note that we lose indentation rules for quoted scalars. Quoted
+ # scalars don't need to adhere indentation because " and ' clearly
+ # mark the beginning and the end of them. Therefore we are less
+        # restrictive than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ '0': '\0',
+ 'a': '\x07',
+ 'b': '\x08',
+ 't': '\x09',
+ '\t': '\x09',
+ 'n': '\x0A',
+ 'v': '\x0B',
+ 'f': '\x0C',
+ 'r': '\x0D',
+ 'e': '\x1B',
+ ' ': '\x20',
+ '\"': '\"',
+ '\\': '\\',
+ 'N': '\x85',
+ '_': '\xA0',
+ 'L': '\u2028',
+ 'P': '\u2029',
+ }
+
+ ESCAPE_CODES = {
+ 'x': 2,
+ 'u': 4,
+ 'U': 8,
+ }
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == '\'' and self.peek(1) == '\'':
+ chunks.append('\'')
+ self.forward(2)
+ elif (double and ch == '\'') or (not double and ch in '\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == '\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "expected escape sequence of %d hexdecimal numbers, but found %r" %
+ (length, self.peek(k)), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(chr(code))
+ self.forward(length)
+ elif ch in '\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch, self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in ' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == '\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in ' \t':
+ self.forward()
+ if self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',', ':' and '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+        # Indentation rules are loosened for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == '#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in '\0 \t\r\n\x85\u2028\u2029' \
+ or (not self.flow_level and ch == ':' and
+ self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
+ or (self.flow_level and ch in ',:?[]{}'):
+ break
+ length += 1
+ # It's not clear what we should do with ':' in the flow context.
+ if (self.flow_level and ch == ':'
+ and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
+ self.forward(length)
+ raise ScannerError("while scanning a plain scalar", start_mark,
+ "found unexpected ':'", self.get_mark(),
+ "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == '#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in ' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+ # For some strange reasons, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != '!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != ' ':
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != '!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == '%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch, self.get_mark())
+ return ''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ codes = []
+ mark = self.get_mark()
+ while self.peek() == '%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected URI escape sequence of 2 hexdecimal numbers, but found %r"
+ % self.peek(k), self.get_mark())
+ codes.append(int(self.prefix(2), 16))
+ self.forward(2)
+ try:
+ value = bytes(codes).decode('utf-8')
+ except UnicodeDecodeError as exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029 : '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in '\r\n\x85':
+ if self.prefix(2) == '\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return '\n'
+ elif ch in '\u2028\u2029':
+ self.forward()
+ return ch
+ return ''
+
+#try:
+# import psyco
+# psyco.bind(Scanner)
+#except ImportError:
+# pass
+
diff --git a/scripts/external_libs/pyyaml-3.11/python3/yaml/serializer.py b/scripts/external_libs/pyyaml-3.11/python3/yaml/serializer.py
new file mode 100644
index 00000000..fe911e67
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python3/yaml/serializer.py
@@ -0,0 +1,111 @@
+
+__all__ = ['Serializer', 'SerializerError']
+
+from .error import YAMLError
+from .events import *
+from .nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer:
+
+ ANCHOR_TEMPLATE = 'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
diff --git a/scripts/external_libs/pyyaml-3.11/python3/yaml/tokens.py b/scripts/external_libs/pyyaml-3.11/python3/yaml/tokens.py
new file mode 100644
index 00000000..4d0b48a3
--- /dev/null
+++ b/scripts/external_libs/pyyaml-3.11/python3/yaml/tokens.py
@@ -0,0 +1,104 @@
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/__init__.py
index 3408b3ba..3408b3ba 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/__init__.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/auth/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/auth/__init__.py
index 11d3ad6b..11d3ad6b 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/auth/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/auth/__init__.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/auth/base.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/auth/base.py
index 9b4aaed7..9b4aaed7 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/auth/base.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/auth/base.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/auth/certs.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/auth/certs.py
index 4d26ad7b..4d26ad7b 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/auth/certs.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/auth/certs.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/auth/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/auth/ioloop.py
index 1f448b47..1f448b47 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/auth/ioloop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/auth/ioloop.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/auth/thread.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/auth/thread.py
index 8c3355a9..8c3355a9 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/auth/thread.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/auth/thread.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/__init__.py
index 7cac725c..7cac725c 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/__init__.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/__init__.py
index ca3164d3..ca3164d3 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/__init__.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/_cdefs.h b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/_cdefs.h
index d3300575..d3300575 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/_cdefs.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/_cdefs.h
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/_cffi.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/_cffi.py
index c73ebf83..c73ebf83 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/_cffi.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/_cffi.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/_poll.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/_poll.py
index 9bca34ca..9bca34ca 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/_poll.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/_poll.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/_verify.c b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/_verify.c
index 547840eb..547840eb 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/_verify.c
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/_verify.c
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/constants.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/constants.py
index ee293e74..ee293e74 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/constants.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/constants.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/context.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/context.py
index 16a7b257..16a7b257 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/context.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/context.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/devices.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/devices.py
index c7a514a8..c7a514a8 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/devices.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/devices.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/error.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/error.py
index 3bb64de0..3bb64de0 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/error.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/error.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/message.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/message.py
index c35decb6..c35decb6 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/message.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/message.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/socket.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/socket.py
index 3c427739..3c427739 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/socket.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/socket.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/utils.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/utils.py
index fde7827b..fde7827b 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cffi/utils.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cffi/utils.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/__init__.py
index e5358185..e5358185 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/__init__.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/_device.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/_device.so
index b375648a..b375648a 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/_device.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/_device.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/_poll.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/_poll.so
index 4516898f..4516898f 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/_poll.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/_poll.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/_version.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/_version.so
index 82732213..82732213 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/_version.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/_version.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/checkrc.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/checkrc.pxd
index 3bf69fc3..3bf69fc3 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/checkrc.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/checkrc.pxd
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/constants.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/constants.so
index ee126822..ee126822 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/constants.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/constants.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/context.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/context.pxd
index 9c9267a5..9c9267a5 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/context.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/context.pxd
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/context.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/context.so
index f8d8799f..f8d8799f 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/context.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/context.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/error.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/error.so
index 26ef58ad..26ef58ad 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/error.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/error.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/libzmq.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/libzmq.pxd
index e42f6d6b..e42f6d6b 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/libzmq.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/libzmq.pxd
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/message.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/message.pxd
index 4781195f..4781195f 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/message.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/message.pxd
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/message.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/message.so
index ceea2307..ceea2307 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/message.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/message.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/socket.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/socket.pxd
index b8a331e2..b8a331e2 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/socket.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/socket.pxd
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/socket.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/socket.so
index 00c87d63..00c87d63 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/socket.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/socket.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/utils.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/utils.pxd
index 1d7117f1..1d7117f1 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/utils.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/utils.pxd
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/utils.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/utils.so
index 99d0b839..99d0b839 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/cython/utils.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/cython/utils.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/backend/select.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/select.py
index 0a2e09a2..0a2e09a2 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/backend/select.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/backend/select.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/devices/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/__init__.py
index 23715963..23715963 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/devices/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/__init__.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/devices/basedevice.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/basedevice.py
index 7ba1b7ac..7ba1b7ac 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/devices/basedevice.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/basedevice.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/devices/monitoredqueue.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/monitoredqueue.pxd
index 1e26ed86..1e26ed86 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/devices/monitoredqueue.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/monitoredqueue.pxd
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/devices/monitoredqueue.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/monitoredqueue.py
index c6d91429..c6d91429 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/devices/monitoredqueue.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/monitoredqueue.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/devices/monitoredqueue.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/monitoredqueue.so
index 46b057fd..46b057fd 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/devices/monitoredqueue.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/monitoredqueue.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/devices/monitoredqueuedevice.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/monitoredqueuedevice.py
index 9723f866..9723f866 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/devices/monitoredqueuedevice.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/monitoredqueuedevice.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/devices/proxydevice.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/proxydevice.py
index 68be3f15..68be3f15 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/devices/proxydevice.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/devices/proxydevice.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/error.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/error.py
index 48cdaafa..48cdaafa 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/error.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/error.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/__init__.py
index 568e8e8d..568e8e8d 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/__init__.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/ioloop.py
index 35f4c418..35f4c418 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/ioloop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/ioloop.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/__init__.py
index e69de29b..e69de29b 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/__init__.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/concurrent.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/concurrent.py
index 519b23d5..519b23d5 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/concurrent.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/concurrent.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/ioloop.py
index 710a3ecb..710a3ecb 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/ioloop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/ioloop.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/log.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/log.py
index 49051e89..49051e89 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/log.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/log.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/__init__.py
index e69de29b..e69de29b 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/__init__.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/auto.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/auto.py
index b40ccd94..b40ccd94 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/auto.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/auto.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/common.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/common.py
index 2d75dc1e..2d75dc1e 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/common.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/common.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/interface.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/interface.py
index 07da6bab..07da6bab 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/interface.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/interface.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/posix.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/posix.py
index ccffbb66..ccffbb66 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/posix.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/posix.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/windows.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/windows.py
index 817bdca1..817bdca1 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/platform/windows.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/platform/windows.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/stack_context.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/stack_context.py
index 226d8042..226d8042 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/stack_context.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/stack_context.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/util.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/util.py
index c1e2eb95..c1e2eb95 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/minitornado/util.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/minitornado/util.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/zmqstream.py
index 86a97e44..86a97e44 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/eventloop/zmqstream.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/eventloop/zmqstream.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/green/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/__init__.py
index ff7e5965..ff7e5965 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/green/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/__init__.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/green/core.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/core.py
index 9fc73e32..9fc73e32 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/green/core.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/core.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/green/device.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/device.py
index 4b070237..4b070237 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/green/device.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/device.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/green/eventloop/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/eventloop/__init__.py
index c5150efe..c5150efe 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/green/eventloop/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/eventloop/__init__.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/green/eventloop/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/eventloop/ioloop.py
index e12fd5e9..e12fd5e9 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/green/eventloop/ioloop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/eventloop/ioloop.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/green/eventloop/zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/eventloop/zmqstream.py
index 90fbd1f5..90fbd1f5 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/green/eventloop/zmqstream.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/eventloop/zmqstream.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/green/poll.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/poll.py
index 8f016129..8f016129 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/green/poll.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/green/poll.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/libzmq.so.3 b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/libzmq.so.3
index ed940931..ed940931 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/libzmq.so.3
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/libzmq.so.3
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/log/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/log/__init__.py
index e69de29b..e69de29b 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/log/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/log/__init__.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/log/handlers.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/log/handlers.py
index 5ff21bf3..5ff21bf3 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/log/handlers.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/log/handlers.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/ssh/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/ssh/__init__.py
index 57f09568..57f09568 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/ssh/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/ssh/__init__.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/ssh/forward.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/ssh/forward.py
index 2d619462..2d619462 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/ssh/forward.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/ssh/forward.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/ssh/tunnel.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/ssh/tunnel.py
index 5a0c5433..5a0c5433 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/ssh/tunnel.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/ssh/tunnel.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/__init__.py
index d0510a44..d0510a44 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/__init__.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/attrsettr.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/attrsettr.py
index 4bbd36d6..4bbd36d6 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/attrsettr.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/attrsettr.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/constants.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/constants.py
index 88281176..88281176 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/constants.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/constants.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/context.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/context.py
index 86a9c5dc..86a9c5dc 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/context.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/context.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/frame.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/frame.py
index 9f556c86..9f556c86 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/frame.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/frame.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/poll.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/poll.py
index c7b1d1bb..c7b1d1bb 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/poll.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/poll.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/socket.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/socket.py
index c91589d7..c91589d7 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/socket.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/socket.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/tracker.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/tracker.py
index fb8c007f..fb8c007f 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/tracker.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/tracker.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/version.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/version.py
index ea8fbbc4..ea8fbbc4 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/sugar/version.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/version.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/__init__.py
index 325a3f19..325a3f19 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/__init__.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_auth.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_auth.py
index d350f61f..d350f61f 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_auth.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_auth.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_cffi_backend.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_cffi_backend.py
index 1f85eebf..1f85eebf 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_cffi_backend.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_cffi_backend.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_constants.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_constants.py
index d32b2b48..d32b2b48 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_constants.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_constants.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_context.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_context.py
index e3280778..e3280778 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_context.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_context.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_device.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_device.py
index f8305074..f8305074 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_device.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_device.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_error.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_error.py
index a2eee14a..a2eee14a 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_error.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_error.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_etc.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_etc.py
index ad224064..ad224064 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_etc.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_etc.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_imports.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_imports.py
index c0ddfaac..c0ddfaac 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_imports.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_imports.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_ioloop.py
index 2a8b1153..2a8b1153 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_ioloop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_ioloop.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_log.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_log.py
index 9206f095..9206f095 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_log.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_log.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_message.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_message.py
index d8770bdf..d8770bdf 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_message.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_message.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_monitor.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_monitor.py
index 4f035388..4f035388 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_monitor.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_monitor.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_monqueue.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_monqueue.py
index e855602e..e855602e 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_monqueue.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_monqueue.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_multipart.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_multipart.py
index 24d41be0..24d41be0 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_multipart.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_multipart.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_pair.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_pair.py
index e88c1e8b..e88c1e8b 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_pair.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_pair.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_poll.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_poll.py
index 57346c89..57346c89 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_poll.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_poll.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_pubsub.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_pubsub.py
index a3ee22aa..a3ee22aa 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_pubsub.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_pubsub.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_reqrep.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_reqrep.py
index de17f2b3..de17f2b3 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_reqrep.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_reqrep.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_security.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_security.py
index 687b7e0f..687b7e0f 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_security.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_security.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_socket.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_socket.py
index 5c842edc..5c842edc 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_socket.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_socket.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_stopwatch.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_stopwatch.py
index 49fb79f2..49fb79f2 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_stopwatch.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_stopwatch.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_version.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_version.py
index 6ebebf30..6ebebf30 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_version.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_version.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_win32_shim.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_win32_shim.py
index 55657bda..55657bda 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_win32_shim.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_win32_shim.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_z85.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_z85.py
index 8a73cb4d..8a73cb4d 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_z85.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_z85.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_zmqstream.py
index cdb3a171..cdb3a171 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/tests/test_zmqstream.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/tests/test_zmqstream.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/__init__.py
index e69de29b..e69de29b 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/__init__.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/buffers.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/buffers.pxd
index 998aa551..998aa551 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/buffers.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/buffers.pxd
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/compiler.json b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/compiler.json
index 773affda..773affda 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/compiler.json
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/compiler.json
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/config.json b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/config.json
index 672d80f0..672d80f0 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/config.json
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/config.json
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/constant_names.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/constant_names.py
index 47da9dc2..47da9dc2 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/constant_names.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/constant_names.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/garbage.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/garbage.py
index 80a8725a..80a8725a 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/garbage.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/garbage.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/getpid_compat.h b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/getpid_compat.h
index 47ce90fa..47ce90fa 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/getpid_compat.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/getpid_compat.h
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/interop.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/interop.py
index 26c01969..26c01969 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/interop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/interop.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/ipcmaxlen.h b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/ipcmaxlen.h
index 7218db78..7218db78 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/ipcmaxlen.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/ipcmaxlen.h
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/jsonapi.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/jsonapi.py
index 865ca6d5..865ca6d5 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/jsonapi.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/jsonapi.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/monitor.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/monitor.py
index 734d54b1..734d54b1 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/monitor.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/monitor.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/pyversion_compat.h b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/pyversion_compat.h
index fac09046..fac09046 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/pyversion_compat.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/pyversion_compat.h
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/sixcerpt.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/sixcerpt.py
index 5492fd59..5492fd59 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/sixcerpt.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/sixcerpt.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/strtypes.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/strtypes.py
index 548410dc..548410dc 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/strtypes.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/strtypes.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/win32.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/win32.py
index ea758299..ea758299 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/win32.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/win32.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/z85.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/z85.py
index 1bb1784e..1bb1784e 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/z85.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/z85.py
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/zmq_compat.h b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/zmq_compat.h
index 81c57b69..81c57b69 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/zmq_compat.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/zmq_compat.h
diff --git a/scripts/external_libs/platform/cel59/32bit/zmq/utils/zmq_constants.h b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/zmq_constants.h
index 97683022..97683022 100644
--- a/scripts/external_libs/platform/cel59/32bit/zmq/utils/zmq_constants.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/utils/zmq_constants.h
diff --git a/scripts/external_libs/platform/cel59/zmq/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/__init__.py
index 3408b3ba..3408b3ba 100644
--- a/scripts/external_libs/platform/cel59/zmq/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/__init__.py
diff --git a/scripts/external_libs/platform/cel59/zmq/auth/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/auth/__init__.py
index 11d3ad6b..11d3ad6b 100644
--- a/scripts/external_libs/platform/cel59/zmq/auth/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/auth/__init__.py
diff --git a/scripts/external_libs/platform/cel59/zmq/auth/base.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/auth/base.py
index 9b4aaed7..9b4aaed7 100644
--- a/scripts/external_libs/platform/cel59/zmq/auth/base.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/auth/base.py
diff --git a/scripts/external_libs/platform/cel59/zmq/auth/certs.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/auth/certs.py
index 4d26ad7b..4d26ad7b 100644
--- a/scripts/external_libs/platform/cel59/zmq/auth/certs.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/auth/certs.py
diff --git a/scripts/external_libs/platform/cel59/zmq/auth/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/auth/ioloop.py
index 1f448b47..1f448b47 100644
--- a/scripts/external_libs/platform/cel59/zmq/auth/ioloop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/auth/ioloop.py
diff --git a/scripts/external_libs/platform/cel59/zmq/auth/thread.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/auth/thread.py
index 8c3355a9..8c3355a9 100644
--- a/scripts/external_libs/platform/cel59/zmq/auth/thread.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/auth/thread.py
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/__init__.py
index 7cac725c..7cac725c 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/__init__.py
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cffi/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/__init__.py
index ca3164d3..ca3164d3 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cffi/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/__init__.py
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cffi/_cdefs.h b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/_cdefs.h
index d3300575..d3300575 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cffi/_cdefs.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/_cdefs.h
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cffi/_cffi.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/_cffi.py
index c73ebf83..c73ebf83 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cffi/_cffi.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/_cffi.py
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cffi/_poll.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/_poll.py
index 9bca34ca..9bca34ca 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cffi/_poll.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/_poll.py
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cffi/_verify.c b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/_verify.c
index 547840eb..547840eb 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cffi/_verify.c
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/_verify.c
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cffi/constants.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/constants.py
index ee293e74..ee293e74 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cffi/constants.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/constants.py
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cffi/context.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/context.py
index 16a7b257..16a7b257 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cffi/context.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/context.py
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cffi/devices.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/devices.py
index c7a514a8..c7a514a8 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cffi/devices.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/devices.py
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cffi/error.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/error.py
index 3bb64de0..3bb64de0 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cffi/error.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/error.py
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cffi/message.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/message.py
index c35decb6..c35decb6 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cffi/message.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/message.py
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cffi/socket.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/socket.py
index 3c427739..3c427739 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cffi/socket.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/socket.py
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cffi/utils.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/utils.py
index fde7827b..fde7827b 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cffi/utils.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cffi/utils.py
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/__init__.py
index e5358185..e5358185 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/__init__.py
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/_device.c b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_device.c
index 9c8777ea..9c8777ea 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/_device.c
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_device.c
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/_device.pyx b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_device.pyx
index eea0a006..eea0a006 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/_device.pyx
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_device.pyx
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/_device.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_device.so
index 9f453219..9f453219 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/_device.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_device.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/_poll.c b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_poll.c
index b54c94eb..b54c94eb 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/_poll.c
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_poll.c
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/_poll.pyx b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_poll.pyx
index 5bed46b6..5bed46b6 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/_poll.pyx
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_poll.pyx
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/_poll.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_poll.so
index 61c407cb..61c407cb 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/_poll.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_poll.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/_version.c b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_version.c
index 431df002..431df002 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/_version.c
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_version.c
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/_version.pyx b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_version.pyx
index 02cf6fcc..02cf6fcc 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/_version.pyx
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_version.pyx
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/_version.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_version.so
index 451aec83..451aec83 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/_version.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/_version.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/checkrc.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/checkrc.pxd
index 3bf69fc3..3bf69fc3 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/checkrc.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/checkrc.pxd
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/constant_enums.pxi b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/constant_enums.pxi
index 3d0efd9f..3d0efd9f 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/constant_enums.pxi
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/constant_enums.pxi
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/constants.c b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/constants.c
index 24c1fbc5..24c1fbc5 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/constants.c
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/constants.c
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/constants.pxi b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/constants.pxi
index 606e6cbf..606e6cbf 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/constants.pxi
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/constants.pxi
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/constants.pyx b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/constants.pyx
index f924f030..f924f030 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/constants.pyx
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/constants.pyx
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/constants.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/constants.so
index bce66fa0..bce66fa0 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/constants.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/constants.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/context.c b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/context.c
index 11eb40c9..11eb40c9 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/context.c
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/context.c
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/context.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/context.pxd
index 9c9267a5..9c9267a5 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/context.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/context.pxd
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/context.pyx b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/context.pyx
index b527e5d6..b527e5d6 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/context.pyx
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/context.pyx
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/context.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/context.so
index 982c1fa4..982c1fa4 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/context.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/context.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/error.c b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/error.c
index 925f5fd4..925f5fd4 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/error.c
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/error.c
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/error.pyx b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/error.pyx
index 85e785f6..85e785f6 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/error.pyx
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/error.pyx
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/error.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/error.so
index 6aa7f8e9..6aa7f8e9 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/error.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/error.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/libzmq.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/libzmq.pxd
index e42f6d6b..e42f6d6b 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/libzmq.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/libzmq.pxd
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/message.c b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/message.c
index bedf453f..bedf453f 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/message.c
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/message.c
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/message.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/message.pxd
index 4781195f..4781195f 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/message.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/message.pxd
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/message.pyx b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/message.pyx
index 312ae120..312ae120 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/message.pyx
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/message.pyx
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/message.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/message.so
index f2966bd6..f2966bd6 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/message.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/message.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/rebuffer.pyx b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/rebuffer.pyx
index 402e3b6e..402e3b6e 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/rebuffer.pyx
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/rebuffer.pyx
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/socket.c b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/socket.c
index c52e2e31..c52e2e31 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/socket.c
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/socket.c
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/socket.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/socket.pxd
index b8a331e2..b8a331e2 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/socket.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/socket.pxd
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/socket.pyx b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/socket.pyx
index 9b9ec36e..9b9ec36e 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/socket.pyx
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/socket.pyx
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/socket.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/socket.so
index 48660233..48660233 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/socket.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/socket.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/utils.c b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/utils.c
index f96eb4e6..f96eb4e6 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/utils.c
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/utils.c
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/utils.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/utils.pxd
index 1d7117f1..1d7117f1 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/utils.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/utils.pxd
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/utils.pyx b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/utils.pyx
index 68976e3b..68976e3b 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/utils.pyx
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/utils.pyx
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/cython/utils.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/utils.so
index e124c74e..e124c74e 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/cython/utils.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/cython/utils.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/zmq/backend/select.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/select.py
index 0a2e09a2..0a2e09a2 100644
--- a/scripts/external_libs/platform/cel59/zmq/backend/select.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/backend/select.py
diff --git a/scripts/external_libs/platform/cel59/zmq/devices/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/__init__.py
index 23715963..23715963 100644
--- a/scripts/external_libs/platform/cel59/zmq/devices/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/__init__.py
diff --git a/scripts/external_libs/platform/cel59/zmq/devices/basedevice.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/basedevice.py
index 7ba1b7ac..7ba1b7ac 100644
--- a/scripts/external_libs/platform/cel59/zmq/devices/basedevice.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/basedevice.py
diff --git a/scripts/external_libs/platform/cel59/zmq/devices/monitoredqueue.c b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueue.c
index 92c2448f..92c2448f 100644
--- a/scripts/external_libs/platform/cel59/zmq/devices/monitoredqueue.c
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueue.c
diff --git a/scripts/external_libs/platform/cel59/zmq/devices/monitoredqueue.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueue.pxd
index 1e26ed86..1e26ed86 100644
--- a/scripts/external_libs/platform/cel59/zmq/devices/monitoredqueue.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueue.pxd
diff --git a/scripts/external_libs/platform/cel59/zmq/devices/monitoredqueue.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueue.py
index c6d91429..c6d91429 100644
--- a/scripts/external_libs/platform/cel59/zmq/devices/monitoredqueue.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueue.py
diff --git a/scripts/external_libs/platform/cel59/zmq/devices/monitoredqueue.pyx b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueue.pyx
index d5fec64d..d5fec64d 100644
--- a/scripts/external_libs/platform/cel59/zmq/devices/monitoredqueue.pyx
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueue.pyx
diff --git a/scripts/external_libs/platform/cel59/zmq/devices/monitoredqueue.so b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueue.so
index 87681943..87681943 100644
--- a/scripts/external_libs/platform/cel59/zmq/devices/monitoredqueue.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueue.so
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/zmq/devices/monitoredqueuedevice.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueuedevice.py
index 9723f866..9723f866 100644
--- a/scripts/external_libs/platform/cel59/zmq/devices/monitoredqueuedevice.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/monitoredqueuedevice.py
diff --git a/scripts/external_libs/platform/cel59/zmq/devices/proxydevice.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/proxydevice.py
index 68be3f15..68be3f15 100644
--- a/scripts/external_libs/platform/cel59/zmq/devices/proxydevice.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/devices/proxydevice.py
diff --git a/scripts/external_libs/platform/cel59/zmq/error.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/error.py
index 48cdaafa..48cdaafa 100644
--- a/scripts/external_libs/platform/cel59/zmq/error.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/error.py
diff --git a/scripts/external_libs/platform/cel59/zmq/eventloop/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/__init__.py
index 568e8e8d..568e8e8d 100644
--- a/scripts/external_libs/platform/cel59/zmq/eventloop/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/__init__.py
diff --git a/scripts/external_libs/platform/cel59/zmq/eventloop/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/ioloop.py
index 35f4c418..35f4c418 100644
--- a/scripts/external_libs/platform/cel59/zmq/eventloop/ioloop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/ioloop.py
diff --git a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/__init__.py
index e69de29b..e69de29b 100644
--- a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/__init__.py
diff --git a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/concurrent.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/concurrent.py
index 519b23d5..519b23d5 100644
--- a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/concurrent.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/concurrent.py
diff --git a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/ioloop.py
index 710a3ecb..710a3ecb 100644
--- a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/ioloop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/ioloop.py
diff --git a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/log.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/log.py
index 49051e89..49051e89 100644
--- a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/log.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/log.py
diff --git a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/__init__.py
index e69de29b..e69de29b 100644
--- a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/__init__.py
diff --git a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/auto.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/auto.py
index b40ccd94..b40ccd94 100644
--- a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/auto.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/auto.py
diff --git a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/common.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/common.py
index 2d75dc1e..2d75dc1e 100644
--- a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/common.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/common.py
diff --git a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/interface.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/interface.py
index 07da6bab..07da6bab 100644
--- a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/interface.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/interface.py
diff --git a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/posix.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/posix.py
index ccffbb66..ccffbb66 100644
--- a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/posix.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/posix.py
diff --git a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/windows.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/windows.py
index 817bdca1..817bdca1 100644
--- a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/platform/windows.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/platform/windows.py
diff --git a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/stack_context.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/stack_context.py
index 226d8042..226d8042 100644
--- a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/stack_context.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/stack_context.py
diff --git a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/util.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/util.py
index c1e2eb95..c1e2eb95 100644
--- a/scripts/external_libs/platform/cel59/zmq/eventloop/minitornado/util.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/minitornado/util.py
diff --git a/scripts/external_libs/platform/cel59/zmq/eventloop/zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/zmqstream.py
index 86a97e44..86a97e44 100644
--- a/scripts/external_libs/platform/cel59/zmq/eventloop/zmqstream.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/eventloop/zmqstream.py
diff --git a/scripts/external_libs/platform/cel59/zmq/green/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/__init__.py
index ff7e5965..ff7e5965 100644
--- a/scripts/external_libs/platform/cel59/zmq/green/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/__init__.py
diff --git a/scripts/external_libs/platform/cel59/zmq/green/core.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/core.py
index 9fc73e32..9fc73e32 100644
--- a/scripts/external_libs/platform/cel59/zmq/green/core.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/core.py
diff --git a/scripts/external_libs/platform/cel59/zmq/green/device.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/device.py
index 4b070237..4b070237 100644
--- a/scripts/external_libs/platform/cel59/zmq/green/device.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/device.py
diff --git a/scripts/external_libs/platform/cel59/zmq/green/eventloop/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/eventloop/__init__.py
index c5150efe..c5150efe 100644
--- a/scripts/external_libs/platform/cel59/zmq/green/eventloop/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/eventloop/__init__.py
diff --git a/scripts/external_libs/platform/cel59/zmq/green/eventloop/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/eventloop/ioloop.py
index e12fd5e9..e12fd5e9 100644
--- a/scripts/external_libs/platform/cel59/zmq/green/eventloop/ioloop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/eventloop/ioloop.py
diff --git a/scripts/external_libs/platform/cel59/zmq/green/eventloop/zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/eventloop/zmqstream.py
index 90fbd1f5..90fbd1f5 100644
--- a/scripts/external_libs/platform/cel59/zmq/green/eventloop/zmqstream.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/eventloop/zmqstream.py
diff --git a/scripts/external_libs/platform/cel59/zmq/green/poll.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/poll.py
index 8f016129..8f016129 100644
--- a/scripts/external_libs/platform/cel59/zmq/green/poll.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/green/poll.py
diff --git a/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/libzmq.so.3 b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/libzmq.so.3
new file mode 100644
index 00000000..b5e3bab2
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/libzmq.so.3
Binary files differ
diff --git a/scripts/external_libs/platform/cel59/zmq/log/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/log/__init__.py
index e69de29b..e69de29b 100644
--- a/scripts/external_libs/platform/cel59/zmq/log/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/log/__init__.py
diff --git a/scripts/external_libs/platform/cel59/zmq/log/handlers.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/log/handlers.py
index 5ff21bf3..5ff21bf3 100644
--- a/scripts/external_libs/platform/cel59/zmq/log/handlers.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/log/handlers.py
diff --git a/scripts/external_libs/platform/cel59/zmq/ssh/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/ssh/__init__.py
index 57f09568..57f09568 100644
--- a/scripts/external_libs/platform/cel59/zmq/ssh/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/ssh/__init__.py
diff --git a/scripts/external_libs/platform/cel59/zmq/ssh/forward.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/ssh/forward.py
index 2d619462..2d619462 100644
--- a/scripts/external_libs/platform/cel59/zmq/ssh/forward.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/ssh/forward.py
diff --git a/scripts/external_libs/platform/cel59/zmq/ssh/tunnel.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/ssh/tunnel.py
index 5a0c5433..5a0c5433 100644
--- a/scripts/external_libs/platform/cel59/zmq/ssh/tunnel.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/ssh/tunnel.py
diff --git a/scripts/external_libs/platform/cel59/zmq/sugar/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/__init__.py
index d0510a44..d0510a44 100644
--- a/scripts/external_libs/platform/cel59/zmq/sugar/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/__init__.py
diff --git a/scripts/external_libs/platform/cel59/zmq/sugar/attrsettr.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/attrsettr.py
index 4bbd36d6..4bbd36d6 100644
--- a/scripts/external_libs/platform/cel59/zmq/sugar/attrsettr.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/attrsettr.py
diff --git a/scripts/external_libs/platform/cel59/zmq/sugar/constants.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/constants.py
index 88281176..88281176 100644
--- a/scripts/external_libs/platform/cel59/zmq/sugar/constants.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/constants.py
diff --git a/scripts/external_libs/platform/cel59/zmq/sugar/context.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/context.py
index 86a9c5dc..86a9c5dc 100644
--- a/scripts/external_libs/platform/cel59/zmq/sugar/context.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/context.py
diff --git a/scripts/external_libs/platform/cel59/zmq/sugar/frame.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/frame.py
index 9f556c86..9f556c86 100644
--- a/scripts/external_libs/platform/cel59/zmq/sugar/frame.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/frame.py
diff --git a/scripts/external_libs/platform/cel59/zmq/sugar/poll.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/poll.py
index c7b1d1bb..c7b1d1bb 100644
--- a/scripts/external_libs/platform/cel59/zmq/sugar/poll.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/poll.py
diff --git a/scripts/external_libs/platform/cel59/zmq/sugar/socket.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/socket.py
index c91589d7..c91589d7 100644
--- a/scripts/external_libs/platform/cel59/zmq/sugar/socket.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/socket.py
diff --git a/scripts/external_libs/platform/cel59/zmq/sugar/tracker.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/tracker.py
index fb8c007f..fb8c007f 100644
--- a/scripts/external_libs/platform/cel59/zmq/sugar/tracker.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/tracker.py
diff --git a/scripts/external_libs/platform/cel59/zmq/sugar/version.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/version.py
index ea8fbbc4..ea8fbbc4 100644
--- a/scripts/external_libs/platform/cel59/zmq/sugar/version.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/version.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/__init__.py
index 325a3f19..325a3f19 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/__init__.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_auth.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_auth.py
index d350f61f..d350f61f 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_auth.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_auth.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_cffi_backend.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_cffi_backend.py
index 1f85eebf..1f85eebf 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_cffi_backend.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_cffi_backend.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_constants.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_constants.py
index d32b2b48..d32b2b48 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_constants.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_constants.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_context.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_context.py
index e3280778..e3280778 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_context.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_context.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_device.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_device.py
index f8305074..f8305074 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_device.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_device.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_error.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_error.py
index a2eee14a..a2eee14a 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_error.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_error.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_etc.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_etc.py
index ad224064..ad224064 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_etc.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_etc.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_imports.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_imports.py
index c0ddfaac..c0ddfaac 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_imports.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_imports.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_ioloop.py
index 2a8b1153..2a8b1153 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_ioloop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_ioloop.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_log.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_log.py
index 9206f095..9206f095 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_log.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_log.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_message.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_message.py
index d8770bdf..d8770bdf 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_message.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_message.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_monitor.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_monitor.py
index 4f035388..4f035388 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_monitor.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_monitor.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_monqueue.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_monqueue.py
index e855602e..e855602e 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_monqueue.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_monqueue.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_multipart.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_multipart.py
index 24d41be0..24d41be0 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_multipart.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_multipart.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_pair.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_pair.py
index e88c1e8b..e88c1e8b 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_pair.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_pair.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_poll.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_poll.py
index 57346c89..57346c89 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_poll.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_poll.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_pubsub.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_pubsub.py
index a3ee22aa..a3ee22aa 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_pubsub.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_pubsub.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_reqrep.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_reqrep.py
index de17f2b3..de17f2b3 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_reqrep.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_reqrep.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_security.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_security.py
index 687b7e0f..687b7e0f 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_security.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_security.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_socket.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_socket.py
index 5c842edc..5c842edc 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_socket.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_socket.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_stopwatch.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_stopwatch.py
index 49fb79f2..49fb79f2 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_stopwatch.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_stopwatch.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_version.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_version.py
index 6ebebf30..6ebebf30 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_version.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_version.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_win32_shim.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_win32_shim.py
index 55657bda..55657bda 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_win32_shim.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_win32_shim.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_z85.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_z85.py
index 8a73cb4d..8a73cb4d 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_z85.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_z85.py
diff --git a/scripts/external_libs/platform/cel59/zmq/tests/test_zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_zmqstream.py
index cdb3a171..cdb3a171 100644
--- a/scripts/external_libs/platform/cel59/zmq/tests/test_zmqstream.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/tests/test_zmqstream.py
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/__init__.py
index e69de29b..e69de29b 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/__init__.py
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/buffers.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/buffers.pxd
index 998aa551..998aa551 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/buffers.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/buffers.pxd
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/compiler.json b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/compiler.json
index 2394c4c4..2394c4c4 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/compiler.json
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/compiler.json
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/config.json b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/config.json
index e5b09891..e5b09891 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/config.json
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/config.json
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/constant_names.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/constant_names.py
index 47da9dc2..47da9dc2 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/constant_names.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/constant_names.py
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/garbage.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/garbage.py
index 80a8725a..80a8725a 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/garbage.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/garbage.py
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/getpid_compat.h b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/getpid_compat.h
index 47ce90fa..47ce90fa 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/getpid_compat.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/getpid_compat.h
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/interop.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/interop.py
index 26c01969..26c01969 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/interop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/interop.py
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/ipcmaxlen.h b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/ipcmaxlen.h
index 7218db78..7218db78 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/ipcmaxlen.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/ipcmaxlen.h
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/jsonapi.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/jsonapi.py
index 865ca6d5..865ca6d5 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/jsonapi.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/jsonapi.py
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/monitor.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/monitor.py
index 734d54b1..734d54b1 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/monitor.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/monitor.py
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/pyversion_compat.h b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/pyversion_compat.h
index fac09046..fac09046 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/pyversion_compat.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/pyversion_compat.h
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/sixcerpt.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/sixcerpt.py
index 5492fd59..5492fd59 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/sixcerpt.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/sixcerpt.py
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/strtypes.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/strtypes.py
index 548410dc..548410dc 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/strtypes.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/strtypes.py
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/win32.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/win32.py
index ea758299..ea758299 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/win32.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/win32.py
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/z85.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/z85.py
index 1bb1784e..1bb1784e 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/z85.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/z85.py
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/zmq_compat.h b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/zmq_compat.h
index 81c57b69..81c57b69 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/zmq_compat.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/zmq_compat.h
diff --git a/scripts/external_libs/platform/cel59/zmq/utils/zmq_constants.h b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/zmq_constants.h
index 97683022..97683022 100644
--- a/scripts/external_libs/platform/cel59/zmq/utils/zmq_constants.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/utils/zmq_constants.h
diff --git a/scripts/external_libs/platform/fedora18/zmq/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/__init__.py
index 3408b3ba..3408b3ba 100644
--- a/scripts/external_libs/platform/fedora18/zmq/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/__init__.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/auth/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/auth/__init__.py
index 11d3ad6b..11d3ad6b 100644
--- a/scripts/external_libs/platform/fedora18/zmq/auth/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/auth/__init__.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/auth/base.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/auth/base.py
index 9b4aaed7..9b4aaed7 100644
--- a/scripts/external_libs/platform/fedora18/zmq/auth/base.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/auth/base.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/auth/certs.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/auth/certs.py
index 4d26ad7b..4d26ad7b 100644
--- a/scripts/external_libs/platform/fedora18/zmq/auth/certs.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/auth/certs.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/auth/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/auth/ioloop.py
index 1f448b47..1f448b47 100644
--- a/scripts/external_libs/platform/fedora18/zmq/auth/ioloop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/auth/ioloop.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/auth/thread.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/auth/thread.py
index 8c3355a9..8c3355a9 100644
--- a/scripts/external_libs/platform/fedora18/zmq/auth/thread.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/auth/thread.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/__init__.py
index 7cac725c..7cac725c 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/__init__.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/__init__.py
index ca3164d3..ca3164d3 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/__init__.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/_cdefs.h b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/_cdefs.h
index d3300575..d3300575 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/_cdefs.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/_cdefs.h
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/_cffi.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/_cffi.py
index c73ebf83..c73ebf83 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/_cffi.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/_cffi.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/_poll.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/_poll.py
index 9bca34ca..9bca34ca 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/_poll.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/_poll.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/_verify.c b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/_verify.c
index 547840eb..547840eb 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/_verify.c
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/_verify.c
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/constants.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/constants.py
index ee293e74..ee293e74 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/constants.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/constants.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/context.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/context.py
index 16a7b257..16a7b257 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/context.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/context.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/devices.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/devices.py
index c7a514a8..c7a514a8 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/devices.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/devices.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/error.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/error.py
index 3bb64de0..3bb64de0 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/error.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/error.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/message.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/message.py
index c35decb6..c35decb6 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/message.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/message.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/socket.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/socket.py
index 3c427739..3c427739 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/socket.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/socket.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/utils.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/utils.py
index fde7827b..fde7827b 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cffi/utils.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cffi/utils.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/__init__.py
index e5358185..e5358185 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/__init__.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/_device.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_device.py
index 3368ca2c..3368ca2c 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/_device.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_device.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/_device.so b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_device.so
index 5957f8e0..5957f8e0 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/_device.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_device.so
Binary files differ
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/_poll.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_poll.py
index cb1d5d77..cb1d5d77 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/_poll.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_poll.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/_poll.so b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_poll.so
index d93d0f10..d93d0f10 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/_poll.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_poll.so
Binary files differ
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/_version.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_version.py
index 08262706..08262706 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/_version.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_version.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/_version.so b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_version.so
index 40dd3dc8..40dd3dc8 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/_version.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/_version.so
Binary files differ
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/checkrc.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/checkrc.pxd
index 3bf69fc3..3bf69fc3 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/checkrc.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/checkrc.pxd
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/constants.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/constants.py
index ea772ac0..ea772ac0 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/constants.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/constants.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/constants.so b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/constants.so
index cf44c07e..cf44c07e 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/constants.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/constants.so
Binary files differ
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/context.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/context.pxd
index 9c9267a5..9c9267a5 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/context.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/context.pxd
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/context.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/context.py
index 19f8ec7c..19f8ec7c 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/context.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/context.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/context.so b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/context.so
index ef9b9699..ef9b9699 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/context.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/context.so
Binary files differ
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/error.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/error.py
index d3a4ea0e..d3a4ea0e 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/error.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/error.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/error.so b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/error.so
index 360da9dd..360da9dd 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/error.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/error.so
Binary files differ
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/libzmq.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/libzmq.pxd
index e42f6d6b..e42f6d6b 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/libzmq.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/libzmq.pxd
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/message.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/message.pxd
index 4781195f..4781195f 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/message.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/message.pxd
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/message.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/message.py
index 5e423b62..5e423b62 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/message.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/message.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/message.so b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/message.so
index f674489f..f674489f 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/message.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/message.so
Binary files differ
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/socket.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/socket.pxd
index b8a331e2..b8a331e2 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/socket.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/socket.pxd
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/socket.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/socket.py
index faef8bee..faef8bee 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/socket.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/socket.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/socket.so b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/socket.so
index 1c927042..1c927042 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/socket.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/socket.so
Binary files differ
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/utils.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/utils.pxd
index 1d7117f1..1d7117f1 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/utils.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/utils.pxd
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/utils.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/utils.py
index fe928300..fe928300 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/utils.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/utils.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/cython/utils.so b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/utils.so
index b4e5b283..b4e5b283 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/cython/utils.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/cython/utils.so
Binary files differ
diff --git a/scripts/external_libs/platform/fedora18/zmq/backend/select.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/select.py
index 0a2e09a2..0a2e09a2 100644
--- a/scripts/external_libs/platform/fedora18/zmq/backend/select.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/backend/select.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/devices/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/__init__.py
index 23715963..23715963 100644
--- a/scripts/external_libs/platform/fedora18/zmq/devices/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/__init__.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/devices/basedevice.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/basedevice.py
index 7ba1b7ac..7ba1b7ac 100644
--- a/scripts/external_libs/platform/fedora18/zmq/devices/basedevice.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/basedevice.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/devices/monitoredqueue.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/monitoredqueue.pxd
index 1e26ed86..1e26ed86 100644
--- a/scripts/external_libs/platform/fedora18/zmq/devices/monitoredqueue.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/monitoredqueue.pxd
diff --git a/scripts/external_libs/platform/fedora18/zmq/devices/monitoredqueue.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/monitoredqueue.py
index 6d714e51..6d714e51 100644
--- a/scripts/external_libs/platform/fedora18/zmq/devices/monitoredqueue.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/monitoredqueue.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/devices/monitoredqueue.so b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/monitoredqueue.so
index edca8a4b..edca8a4b 100644
--- a/scripts/external_libs/platform/fedora18/zmq/devices/monitoredqueue.so
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/monitoredqueue.so
Binary files differ
diff --git a/scripts/external_libs/platform/fedora18/zmq/devices/monitoredqueuedevice.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/monitoredqueuedevice.py
index 9723f866..9723f866 100644
--- a/scripts/external_libs/platform/fedora18/zmq/devices/monitoredqueuedevice.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/monitoredqueuedevice.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/devices/proxydevice.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/proxydevice.py
index 68be3f15..68be3f15 100644
--- a/scripts/external_libs/platform/fedora18/zmq/devices/proxydevice.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/devices/proxydevice.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/error.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/error.py
index 48cdaafa..48cdaafa 100644
--- a/scripts/external_libs/platform/fedora18/zmq/error.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/error.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/eventloop/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/__init__.py
index 568e8e8d..568e8e8d 100644
--- a/scripts/external_libs/platform/fedora18/zmq/eventloop/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/__init__.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/eventloop/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/ioloop.py
index 35f4c418..35f4c418 100644
--- a/scripts/external_libs/platform/fedora18/zmq/eventloop/ioloop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/ioloop.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/__init__.py
index e69de29b..e69de29b 100644
--- a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/__init__.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/concurrent.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/concurrent.py
index 519b23d5..519b23d5 100644
--- a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/concurrent.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/concurrent.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/ioloop.py
index 710a3ecb..710a3ecb 100644
--- a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/ioloop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/ioloop.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/log.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/log.py
index 49051e89..49051e89 100644
--- a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/log.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/log.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/__init__.py
index e69de29b..e69de29b 100644
--- a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/__init__.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/auto.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/auto.py
index b40ccd94..b40ccd94 100644
--- a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/auto.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/auto.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/common.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/common.py
index 2d75dc1e..2d75dc1e 100644
--- a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/common.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/common.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/interface.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/interface.py
index 07da6bab..07da6bab 100644
--- a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/interface.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/interface.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/posix.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/posix.py
index ccffbb66..ccffbb66 100644
--- a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/posix.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/posix.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/windows.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/windows.py
index 817bdca1..817bdca1 100644
--- a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/platform/windows.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/platform/windows.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/stack_context.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/stack_context.py
index 226d8042..226d8042 100644
--- a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/stack_context.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/stack_context.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/util.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/util.py
index c1e2eb95..c1e2eb95 100644
--- a/scripts/external_libs/platform/fedora18/zmq/eventloop/minitornado/util.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/minitornado/util.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/eventloop/zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/zmqstream.py
index 86a97e44..86a97e44 100644
--- a/scripts/external_libs/platform/fedora18/zmq/eventloop/zmqstream.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/eventloop/zmqstream.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/green/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/__init__.py
index ff7e5965..ff7e5965 100644
--- a/scripts/external_libs/platform/fedora18/zmq/green/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/__init__.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/green/core.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/core.py
index 9fc73e32..9fc73e32 100644
--- a/scripts/external_libs/platform/fedora18/zmq/green/core.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/core.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/green/device.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/device.py
index 4b070237..4b070237 100644
--- a/scripts/external_libs/platform/fedora18/zmq/green/device.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/device.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/green/eventloop/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/eventloop/__init__.py
index c5150efe..c5150efe 100644
--- a/scripts/external_libs/platform/fedora18/zmq/green/eventloop/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/eventloop/__init__.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/green/eventloop/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/eventloop/ioloop.py
index e12fd5e9..e12fd5e9 100644
--- a/scripts/external_libs/platform/fedora18/zmq/green/eventloop/ioloop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/eventloop/ioloop.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/green/eventloop/zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/eventloop/zmqstream.py
index 90fbd1f5..90fbd1f5 100644
--- a/scripts/external_libs/platform/fedora18/zmq/green/eventloop/zmqstream.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/eventloop/zmqstream.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/green/poll.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/poll.py
index 8f016129..8f016129 100644
--- a/scripts/external_libs/platform/fedora18/zmq/green/poll.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/green/poll.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/libzmq.so.3 b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/libzmq.so.3
index 16980c27..16980c27 100644
--- a/scripts/external_libs/platform/fedora18/zmq/libzmq.so.3
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/libzmq.so.3
Binary files differ
diff --git a/scripts/external_libs/platform/fedora18/zmq/log/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/log/__init__.py
index e69de29b..e69de29b 100644
--- a/scripts/external_libs/platform/fedora18/zmq/log/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/log/__init__.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/log/handlers.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/log/handlers.py
index 5ff21bf3..5ff21bf3 100644
--- a/scripts/external_libs/platform/fedora18/zmq/log/handlers.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/log/handlers.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/ssh/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/ssh/__init__.py
index 57f09568..57f09568 100644
--- a/scripts/external_libs/platform/fedora18/zmq/ssh/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/ssh/__init__.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/ssh/forward.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/ssh/forward.py
index 2d619462..2d619462 100644
--- a/scripts/external_libs/platform/fedora18/zmq/ssh/forward.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/ssh/forward.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/ssh/tunnel.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/ssh/tunnel.py
index 5a0c5433..5a0c5433 100644
--- a/scripts/external_libs/platform/fedora18/zmq/ssh/tunnel.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/ssh/tunnel.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/sugar/.version.py.swp b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/.version.py.swp
index 803e5049..803e5049 100644
--- a/scripts/external_libs/platform/fedora18/zmq/sugar/.version.py.swp
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/.version.py.swp
Binary files differ
diff --git a/scripts/external_libs/platform/fedora18/zmq/sugar/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/__init__.py
index d0510a44..d0510a44 100644
--- a/scripts/external_libs/platform/fedora18/zmq/sugar/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/__init__.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/sugar/attrsettr.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/attrsettr.py
index 4bbd36d6..4bbd36d6 100644
--- a/scripts/external_libs/platform/fedora18/zmq/sugar/attrsettr.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/attrsettr.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/sugar/constants.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/constants.py
index 88281176..88281176 100644
--- a/scripts/external_libs/platform/fedora18/zmq/sugar/constants.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/constants.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/sugar/context.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/context.py
index 86a9c5dc..86a9c5dc 100644
--- a/scripts/external_libs/platform/fedora18/zmq/sugar/context.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/context.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/sugar/frame.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/frame.py
index 9f556c86..9f556c86 100644
--- a/scripts/external_libs/platform/fedora18/zmq/sugar/frame.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/frame.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/sugar/poll.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/poll.py
index c7b1d1bb..c7b1d1bb 100644
--- a/scripts/external_libs/platform/fedora18/zmq/sugar/poll.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/poll.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/sugar/socket.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/socket.py
index c91589d7..c91589d7 100644
--- a/scripts/external_libs/platform/fedora18/zmq/sugar/socket.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/socket.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/sugar/tracker.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/tracker.py
index fb8c007f..fb8c007f 100644
--- a/scripts/external_libs/platform/fedora18/zmq/sugar/tracker.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/tracker.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/sugar/version.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/version.py
index ea8fbbc4..ea8fbbc4 100644
--- a/scripts/external_libs/platform/fedora18/zmq/sugar/version.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/version.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/__init__.py
index 325a3f19..325a3f19 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/__init__.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_auth.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_auth.py
index d350f61f..d350f61f 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_auth.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_auth.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_cffi_backend.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_cffi_backend.py
index 1f85eebf..1f85eebf 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_cffi_backend.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_cffi_backend.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_constants.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_constants.py
index d32b2b48..d32b2b48 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_constants.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_constants.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_context.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_context.py
index e3280778..e3280778 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_context.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_context.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_device.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_device.py
index f8305074..f8305074 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_device.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_device.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_error.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_error.py
index a2eee14a..a2eee14a 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_error.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_error.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_etc.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_etc.py
index ad224064..ad224064 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_etc.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_etc.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_imports.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_imports.py
index c0ddfaac..c0ddfaac 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_imports.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_imports.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_ioloop.py
index 2a8b1153..2a8b1153 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_ioloop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_ioloop.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_log.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_log.py
index 9206f095..9206f095 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_log.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_log.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_message.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_message.py
index d8770bdf..d8770bdf 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_message.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_message.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_monitor.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_monitor.py
index 4f035388..4f035388 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_monitor.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_monitor.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_monqueue.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_monqueue.py
index e855602e..e855602e 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_monqueue.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_monqueue.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_multipart.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_multipart.py
index 24d41be0..24d41be0 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_multipart.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_multipart.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_pair.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_pair.py
index e88c1e8b..e88c1e8b 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_pair.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_pair.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_poll.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_poll.py
index 57346c89..57346c89 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_poll.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_poll.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_pubsub.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_pubsub.py
index a3ee22aa..a3ee22aa 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_pubsub.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_pubsub.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_reqrep.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_reqrep.py
index de17f2b3..de17f2b3 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_reqrep.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_reqrep.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_security.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_security.py
index 687b7e0f..687b7e0f 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_security.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_security.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_socket.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_socket.py
index 5c842edc..5c842edc 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_socket.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_socket.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_stopwatch.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_stopwatch.py
index 49fb79f2..49fb79f2 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_stopwatch.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_stopwatch.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_version.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_version.py
index 6ebebf30..6ebebf30 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_version.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_version.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_win32_shim.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_win32_shim.py
index 55657bda..55657bda 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_win32_shim.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_win32_shim.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_z85.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_z85.py
index 8a73cb4d..8a73cb4d 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_z85.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_z85.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/tests/test_zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_zmqstream.py
index cdb3a171..cdb3a171 100644
--- a/scripts/external_libs/platform/fedora18/zmq/tests/test_zmqstream.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/tests/test_zmqstream.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/__init__.py
index e69de29b..e69de29b 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/__init__.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/__init__.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/buffers.pxd b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/buffers.pxd
index 998aa551..998aa551 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/buffers.pxd
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/buffers.pxd
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/compiler.json b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/compiler.json
index e58fc130..e58fc130 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/compiler.json
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/compiler.json
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/config.json b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/config.json
index 1e4611f9..1e4611f9 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/config.json
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/config.json
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/constant_names.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/constant_names.py
index 47da9dc2..47da9dc2 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/constant_names.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/constant_names.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/garbage.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/garbage.py
index 80a8725a..80a8725a 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/garbage.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/garbage.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/getpid_compat.h b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/getpid_compat.h
index 47ce90fa..47ce90fa 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/getpid_compat.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/getpid_compat.h
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/interop.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/interop.py
index 26c01969..26c01969 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/interop.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/interop.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/ipcmaxlen.h b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/ipcmaxlen.h
index 7218db78..7218db78 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/ipcmaxlen.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/ipcmaxlen.h
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/jsonapi.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/jsonapi.py
index 865ca6d5..865ca6d5 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/jsonapi.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/jsonapi.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/monitor.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/monitor.py
index 734d54b1..734d54b1 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/monitor.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/monitor.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/pyversion_compat.h b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/pyversion_compat.h
index fac09046..fac09046 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/pyversion_compat.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/pyversion_compat.h
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/sixcerpt.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/sixcerpt.py
index 5492fd59..5492fd59 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/sixcerpt.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/sixcerpt.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/strtypes.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/strtypes.py
index 548410dc..548410dc 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/strtypes.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/strtypes.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/win32.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/win32.py
index ea758299..ea758299 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/win32.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/win32.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/z85.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/z85.py
index 1bb1784e..1bb1784e 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/z85.py
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/z85.py
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/zmq_compat.h b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/zmq_compat.h
index 81c57b69..81c57b69 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/zmq_compat.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/zmq_compat.h
diff --git a/scripts/external_libs/platform/fedora18/zmq/utils/zmq_constants.h b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/zmq_constants.h
index 97683022..97683022 100644
--- a/scripts/external_libs/platform/fedora18/zmq/utils/zmq_constants.h
+++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/utils/zmq_constants.h
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/__init__.py
new file mode 100644
index 00000000..3408b3ba
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/__init__.py
@@ -0,0 +1,64 @@
+"""Python bindings for 0MQ."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import os
+import sys
+import glob
+
+# load bundled libzmq, if there is one:
+
+here = os.path.dirname(__file__)
+
+bundled = []
+bundled_sodium = []
+for ext in ('pyd', 'so', 'dll', 'dylib'):
+ bundled_sodium.extend(glob.glob(os.path.join(here, 'libsodium*.%s*' % ext)))
+ bundled.extend(glob.glob(os.path.join(here, 'libzmq*.%s*' % ext)))
+
+if bundled:
+ import ctypes
+ if bundled_sodium:
+ if bundled[0].endswith('.pyd'):
+ # a Windows Extension
+ _libsodium = ctypes.cdll.LoadLibrary(bundled_sodium[0])
+ else:
+ _libsodium = ctypes.CDLL(bundled_sodium[0], mode=ctypes.RTLD_GLOBAL)
+ if bundled[0].endswith('.pyd'):
+ # a Windows Extension
+ _libzmq = ctypes.cdll.LoadLibrary(bundled[0])
+ else:
+ _libzmq = ctypes.CDLL(bundled[0], mode=ctypes.RTLD_GLOBAL)
+ del ctypes
+else:
+ import zipimport
+ try:
+ if isinstance(__loader__, zipimport.zipimporter):
+ # a zipped pyzmq egg
+ from zmq import libzmq as _libzmq
+ except (NameError, ImportError):
+ pass
+ finally:
+ del zipimport
+
+del os, sys, glob, here, bundled, bundled_sodium, ext
+
+# zmq top-level imports
+
+from zmq import backend
+from zmq.backend import *
+from zmq import sugar
+from zmq.sugar import *
+from zmq import devices
+
+def get_includes():
+ """Return a list of directories to include for linking against pyzmq with cython."""
+ from os.path import join, dirname, abspath, pardir
+ base = dirname(__file__)
+ parent = abspath(join(base, pardir))
+ return [ parent ] + [ join(parent, base, subdir) for subdir in ('utils',) ]
+
+
+__all__ = ['get_includes'] + sugar.__all__ + backend.__all__
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/__init__.py
new file mode 100644
index 00000000..11d3ad6b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/__init__.py
@@ -0,0 +1,10 @@
+"""Utilities for ZAP authentication.
+
+To run authentication in a background thread, see :mod:`zmq.auth.thread`.
+For integration with the tornado eventloop, see :mod:`zmq.auth.ioloop`.
+
+.. versionadded:: 14.1
+"""
+
+from .base import *
+from .certs import *
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/base.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/base.py
new file mode 100644
index 00000000..9b4aaed7
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/base.py
@@ -0,0 +1,272 @@
+"""Base implementation of 0MQ authentication."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import logging
+
+import zmq
+from zmq.utils import z85
+from zmq.utils.strtypes import bytes, unicode, b, u
+from zmq.error import _check_version
+
+from .certs import load_certificates
+
+
+CURVE_ALLOW_ANY = '*'
+VERSION = b'1.0'
+
+class Authenticator(object):
+ """Implementation of ZAP authentication for zmq connections.
+
+ Note:
+ - libzmq provides four levels of security: default NULL (which the Authenticator does
+ not see), and authenticated NULL, PLAIN, and CURVE, which the Authenticator can see.
+ - until you add policies, all incoming NULL connections are allowed
+ (classic ZeroMQ behavior), and all PLAIN and CURVE connections are denied.
+ """
+
+ def __init__(self, context=None, encoding='utf-8', log=None):
+ _check_version((4,0), "security")
+ self.context = context or zmq.Context.instance()
+ self.encoding = encoding
+ self.allow_any = False
+ self.zap_socket = None
+ self.whitelist = set()
+ self.blacklist = set()
+ # passwords is a dict keyed by domain and contains values
+ # of dicts with username:password pairs.
+ self.passwords = {}
+ # certs is dict keyed by domain and contains values
+ # of dicts keyed by the public keys from the specified location.
+ self.certs = {}
+ self.log = log or logging.getLogger('zmq.auth')
+
+ def start(self):
+ """Create and bind the ZAP socket"""
+ self.zap_socket = self.context.socket(zmq.REP)
+ self.zap_socket.linger = 1
+ self.zap_socket.bind("inproc://zeromq.zap.01")
+
+ def stop(self):
+ """Close the ZAP socket"""
+ if self.zap_socket:
+ self.zap_socket.close()
+ self.zap_socket = None
+
+ def allow(self, *addresses):
+ """Allow (whitelist) IP address(es).
+
+ Connections from addresses not in the whitelist will be rejected.
+
+ - For NULL, all clients from this address will be accepted.
+ - For PLAIN and CURVE, they will be allowed to continue with authentication.
+
+ whitelist is mutually exclusive with blacklist.
+ """
+ if self.blacklist:
+ raise ValueError("Only use a whitelist or a blacklist, not both")
+ self.whitelist.update(addresses)
+
+ def deny(self, *addresses):
+ """Deny (blacklist) IP address(es).
+
+ Addresses not in the blacklist will be allowed to continue with authentication.
+
+ Blacklist is mutually exclusive with whitelist.
+ """
+ if self.whitelist:
+ raise ValueError("Only use a whitelist or a blacklist, not both")
+ self.blacklist.update(addresses)
+
+ def configure_plain(self, domain='*', passwords=None):
+ """Configure PLAIN authentication for a given domain.
+
+ PLAIN authentication uses a plain-text password file.
+ To cover all domains, use "*".
+ You can modify the password file at any time; it is reloaded automatically.
+ """
+ if passwords:
+ self.passwords[domain] = passwords
+
+ def configure_curve(self, domain='*', location=None):
+ """Configure CURVE authentication for a given domain.
+
+ CURVE authentication uses a directory that holds all public client certificates,
+ i.e. their public keys.
+
+ To cover all domains, use "*".
+
+ You can add and remove certificates in that directory at any time.
+
+ To allow all client keys without checking, specify CURVE_ALLOW_ANY for the location.
+ """
+ # If location is CURVE_ALLOW_ANY then allow all clients. Otherwise
+ # treat location as a directory that holds the certificates.
+ if location == CURVE_ALLOW_ANY:
+ self.allow_any = True
+ else:
+ self.allow_any = False
+ try:
+ self.certs[domain] = load_certificates(location)
+ except Exception as e:
+ self.log.error("Failed to load CURVE certs from %s: %s", location, e)
+
+ def handle_zap_message(self, msg):
+ """Perform ZAP authentication"""
+ if len(msg) < 6:
+ self.log.error("Invalid ZAP message, not enough frames: %r", msg)
+ if len(msg) < 2:
+ self.log.error("Not enough information to reply")
+ else:
+ self._send_zap_reply(msg[1], b"400", b"Not enough frames")
+ return
+
+ version, request_id, domain, address, identity, mechanism = msg[:6]
+ credentials = msg[6:]
+
+ domain = u(domain, self.encoding, 'replace')
+ address = u(address, self.encoding, 'replace')
+
+ if (version != VERSION):
+ self.log.error("Invalid ZAP version: %r", msg)
+ self._send_zap_reply(request_id, b"400", b"Invalid version")
+ return
+
+ self.log.debug("version: %r, request_id: %r, domain: %r,"
+ " address: %r, identity: %r, mechanism: %r",
+ version, request_id, domain,
+ address, identity, mechanism,
+ )
+
+
+ # Is address is explicitly whitelisted or blacklisted?
+ allowed = False
+ denied = False
+ reason = b"NO ACCESS"
+
+ if self.whitelist:
+ if address in self.whitelist:
+ allowed = True
+ self.log.debug("PASSED (whitelist) address=%s", address)
+ else:
+ denied = True
+ reason = b"Address not in whitelist"
+ self.log.debug("DENIED (not in whitelist) address=%s", address)
+
+ elif self.blacklist:
+ if address in self.blacklist:
+ denied = True
+ reason = b"Address is blacklisted"
+ self.log.debug("DENIED (blacklist) address=%s", address)
+ else:
+ allowed = True
+ self.log.debug("PASSED (not in blacklist) address=%s", address)
+
+ # Perform authentication mechanism-specific checks if necessary
+ username = u("user")
+ if not denied:
+
+ if mechanism == b'NULL' and not allowed:
+ # For NULL, we allow if the address wasn't blacklisted
+ self.log.debug("ALLOWED (NULL)")
+ allowed = True
+
+ elif mechanism == b'PLAIN':
+ # For PLAIN, even a whitelisted address must authenticate
+ if len(credentials) != 2:
+ self.log.error("Invalid PLAIN credentials: %r", credentials)
+ self._send_zap_reply(request_id, b"400", b"Invalid credentials")
+ return
+ username, password = [ u(c, self.encoding, 'replace') for c in credentials ]
+ allowed, reason = self._authenticate_plain(domain, username, password)
+
+ elif mechanism == b'CURVE':
+ # For CURVE, even a whitelisted address must authenticate
+ if len(credentials) != 1:
+ self.log.error("Invalid CURVE credentials: %r", credentials)
+ self._send_zap_reply(request_id, b"400", b"Invalid credentials")
+ return
+ key = credentials[0]
+ allowed, reason = self._authenticate_curve(domain, key)
+
+ if allowed:
+ self._send_zap_reply(request_id, b"200", b"OK", username)
+ else:
+ self._send_zap_reply(request_id, b"400", reason)
+
+ def _authenticate_plain(self, domain, username, password):
+ """PLAIN ZAP authentication"""
+ allowed = False
+ reason = b""
+ if self.passwords:
+ # If no domain is not specified then use the default domain
+ if not domain:
+ domain = '*'
+
+ if domain in self.passwords:
+ if username in self.passwords[domain]:
+ if password == self.passwords[domain][username]:
+ allowed = True
+ else:
+ reason = b"Invalid password"
+ else:
+ reason = b"Invalid username"
+ else:
+ reason = b"Invalid domain"
+
+ if allowed:
+ self.log.debug("ALLOWED (PLAIN) domain=%s username=%s password=%s",
+ domain, username, password,
+ )
+ else:
+ self.log.debug("DENIED %s", reason)
+
+ else:
+ reason = b"No passwords defined"
+ self.log.debug("DENIED (PLAIN) %s", reason)
+
+ return allowed, reason
+
+ def _authenticate_curve(self, domain, client_key):
+ """CURVE ZAP authentication"""
+ allowed = False
+ reason = b""
+ if self.allow_any:
+ allowed = True
+ reason = b"OK"
+ self.log.debug("ALLOWED (CURVE allow any client)")
+ else:
+ # If no explicit domain is specified then use the default domain
+ if not domain:
+ domain = '*'
+
+ if domain in self.certs:
+ # The certs dict stores keys in z85 format, convert binary key to z85 bytes
+ z85_client_key = z85.encode(client_key)
+ if z85_client_key in self.certs[domain] or self.certs[domain] == b'OK':
+ allowed = True
+ reason = b"OK"
+ else:
+ reason = b"Unknown key"
+
+ status = "ALLOWED" if allowed else "DENIED"
+ self.log.debug("%s (CURVE) domain=%s client_key=%s",
+ status, domain, z85_client_key,
+ )
+ else:
+ reason = b"Unknown domain"
+
+ return allowed, reason
+
+ def _send_zap_reply(self, request_id, status_code, status_text, user_id='user'):
+ """Send a ZAP reply to finish the authentication."""
+ user_id = user_id if status_code == b'200' else b''
+ if isinstance(user_id, unicode):
+ user_id = user_id.encode(self.encoding, 'replace')
+ metadata = b'' # not currently used
+ self.log.debug("ZAP reply code=%s text=%s", status_code, status_text)
+ reply = [VERSION, request_id, status_code, status_text, user_id, metadata]
+ self.zap_socket.send_multipart(reply)
+
+__all__ = ['Authenticator', 'CURVE_ALLOW_ANY']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/certs.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/certs.py
new file mode 100644
index 00000000..4d26ad7b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/certs.py
@@ -0,0 +1,119 @@
+"""0MQ authentication related functions and classes."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import datetime
+import glob
+import io
+import os
+import zmq
+from zmq.utils.strtypes import bytes, unicode, b, u
+
+
+_cert_secret_banner = u("""# **** Generated on {0} by pyzmq ****
+# ZeroMQ CURVE **Secret** Certificate
+# DO NOT PROVIDE THIS FILE TO OTHER USERS nor change its permissions.
+
+""")
+
+_cert_public_banner = u("""# **** Generated on {0} by pyzmq ****
+# ZeroMQ CURVE Public Certificate
+# Exchange securely, or use a secure mechanism to verify the contents
+# of this file after exchange. Store public certificates in your home
+# directory, in the .curve subdirectory.
+
+""")
+
+def _write_key_file(key_filename, banner, public_key, secret_key=None, metadata=None, encoding='utf-8'):
+ """Create a certificate file"""
+ if isinstance(public_key, bytes):
+ public_key = public_key.decode(encoding)
+ if isinstance(secret_key, bytes):
+ secret_key = secret_key.decode(encoding)
+ with io.open(key_filename, 'w', encoding='utf8') as f:
+ f.write(banner.format(datetime.datetime.now()))
+
+ f.write(u('metadata\n'))
+ if metadata:
+ for k, v in metadata.items():
+ if isinstance(v, bytes):
+ v = v.decode(encoding)
+ f.write(u(" {0} = {1}\n").format(k, v))
+
+ f.write(u('curve\n'))
+ f.write(u(" public-key = \"{0}\"\n").format(public_key))
+
+ if secret_key:
+ f.write(u(" secret-key = \"{0}\"\n").format(secret_key))
+
+
+def create_certificates(key_dir, name, metadata=None):
+ """Create zmq certificates.
+
+ Returns the file paths to the public and secret certificate files.
+ """
+ public_key, secret_key = zmq.curve_keypair()
+ base_filename = os.path.join(key_dir, name)
+ secret_key_file = "{0}.key_secret".format(base_filename)
+ public_key_file = "{0}.key".format(base_filename)
+ now = datetime.datetime.now()
+
+ _write_key_file(public_key_file,
+ _cert_public_banner.format(now),
+ public_key)
+
+ _write_key_file(secret_key_file,
+ _cert_secret_banner.format(now),
+ public_key,
+ secret_key=secret_key,
+ metadata=metadata)
+
+ return public_key_file, secret_key_file
+
+
+def load_certificate(filename):
+ """Load public and secret key from a zmq certificate.
+
+ Returns (public_key, secret_key)
+
+ If the certificate file only contains the public key,
+ secret_key will be None.
+ """
+ public_key = None
+ secret_key = None
+ if not os.path.exists(filename):
+ raise IOError("Invalid certificate file: {0}".format(filename))
+
+ with open(filename, 'rb') as f:
+ for line in f:
+ line = line.strip()
+ if line.startswith(b'#'):
+ continue
+ if line.startswith(b'public-key'):
+ public_key = line.split(b"=", 1)[1].strip(b' \t\'"')
+ if line.startswith(b'secret-key'):
+ secret_key = line.split(b"=", 1)[1].strip(b' \t\'"')
+ if public_key and secret_key:
+ break
+
+ return public_key, secret_key
+
+
+def load_certificates(directory='.'):
+ """Load public keys from all certificates in a directory"""
+ certs = {}
+ if not os.path.isdir(directory):
+ raise IOError("Invalid certificate directory: {0}".format(directory))
+ # Follow czmq pattern of public keys stored in *.key files.
+ glob_string = os.path.join(directory, "*.key")
+
+ cert_files = glob.glob(glob_string)
+ for cert_file in cert_files:
+ public_key, _ = load_certificate(cert_file)
+ if public_key:
+ certs[public_key] = 'OK'
+ return certs
+
+__all__ = ['create_certificates', 'load_certificate', 'load_certificates']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/ioloop.py
new file mode 100644
index 00000000..1f448b47
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/ioloop.py
@@ -0,0 +1,34 @@
+"""ZAP Authenticator integrated with the tornado IOLoop.
+
+.. versionadded:: 14.1
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from zmq.eventloop import ioloop, zmqstream
+from .base import Authenticator
+
+
+class IOLoopAuthenticator(Authenticator):
+ """ZAP authentication for use in the tornado IOLoop"""
+
+ def __init__(self, context=None, encoding='utf-8', log=None, io_loop=None):
+ super(IOLoopAuthenticator, self).__init__(context)
+ self.zap_stream = None
+ self.io_loop = io_loop or ioloop.IOLoop.instance()
+
+ def start(self):
+ """Start ZAP authentication"""
+ super(IOLoopAuthenticator, self).start()
+ self.zap_stream = zmqstream.ZMQStream(self.zap_socket, self.io_loop)
+ self.zap_stream.on_recv(self.handle_zap_message)
+
+ def stop(self):
+ """Stop ZAP authentication"""
+ if self.zap_stream:
+ self.zap_stream.close()
+ self.zap_stream = None
+ super(IOLoopAuthenticator, self).stop()
+
+__all__ = ['IOLoopAuthenticator']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/thread.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/thread.py
new file mode 100644
index 00000000..8c3355a9
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/auth/thread.py
@@ -0,0 +1,184 @@
+"""ZAP Authenticator in a Python Thread.
+
+.. versionadded:: 14.1
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import logging
+from threading import Thread
+
+import zmq
+from zmq.utils import jsonapi
+from zmq.utils.strtypes import bytes, unicode, b, u
+
+from .base import Authenticator
+
+class AuthenticationThread(Thread):
+ """A Thread for running a zmq Authenticator
+
+ This is run in the background by ThreadedAuthenticator
+ """
+
+ def __init__(self, context, endpoint, encoding='utf-8', log=None):
+ super(AuthenticationThread, self).__init__()
+ self.context = context or zmq.Context.instance()
+ self.encoding = encoding
+ self.log = log = log or logging.getLogger('zmq.auth')
+ self.authenticator = Authenticator(context, encoding=encoding, log=log)
+
+ # create a socket to communicate back to main thread.
+ self.pipe = context.socket(zmq.PAIR)
+ self.pipe.linger = 1
+ self.pipe.connect(endpoint)
+
+ def run(self):
+ """ Start the Authentication Agent thread task """
+ self.authenticator.start()
+ zap = self.authenticator.zap_socket
+ poller = zmq.Poller()
+ poller.register(self.pipe, zmq.POLLIN)
+ poller.register(zap, zmq.POLLIN)
+ while True:
+ try:
+ socks = dict(poller.poll())
+ except zmq.ZMQError:
+ break # interrupted
+
+ if self.pipe in socks and socks[self.pipe] == zmq.POLLIN:
+ terminate = self._handle_pipe()
+ if terminate:
+ break
+
+ if zap in socks and socks[zap] == zmq.POLLIN:
+ self._handle_zap()
+
+ self.pipe.close()
+ self.authenticator.stop()
+
+ def _handle_zap(self):
+ """
+ Handle a message from the ZAP socket.
+ """
+ msg = self.authenticator.zap_socket.recv_multipart()
+ if not msg: return
+ self.authenticator.handle_zap_message(msg)
+
+ def _handle_pipe(self):
+ """
+ Handle a message from front-end API.
+ """
+ terminate = False
+
+ # Get the whole message off the pipe in one go
+ msg = self.pipe.recv_multipart()
+
+ if msg is None:
+ terminate = True
+ return terminate
+
+ command = msg[0]
+ self.log.debug("auth received API command %r", command)
+
+ if command == b'ALLOW':
+ addresses = [u(m, self.encoding) for m in msg[1:]]
+ try:
+ self.authenticator.allow(*addresses)
+ except Exception as e:
+ self.log.exception("Failed to allow %s", addresses)
+
+ elif command == b'DENY':
+ addresses = [u(m, self.encoding) for m in msg[1:]]
+ try:
+ self.authenticator.deny(*addresses)
+ except Exception as e:
+ self.log.exception("Failed to deny %s", addresses)
+
+ elif command == b'PLAIN':
+ domain = u(msg[1], self.encoding)
+ json_passwords = msg[2]
+ self.authenticator.configure_plain(domain, jsonapi.loads(json_passwords))
+
+ elif command == b'CURVE':
+ # For now we don't do anything with domains
+ domain = u(msg[1], self.encoding)
+
+ # If location is CURVE_ALLOW_ANY, allow all clients. Otherwise
+ # treat location as a directory that holds the certificates.
+ location = u(msg[2], self.encoding)
+ self.authenticator.configure_curve(domain, location)
+
+ elif command == b'TERMINATE':
+ terminate = True
+
+ else:
+ self.log.error("Invalid auth command from API: %r", command)
+
+ return terminate
+
+def _inherit_docstrings(cls):
+ """inherit docstrings from Authenticator, so we don't duplicate them"""
+ for name, method in cls.__dict__.items():
+ if name.startswith('_'):
+ continue
+ upstream_method = getattr(Authenticator, name, None)
+ if not method.__doc__:
+ method.__doc__ = upstream_method.__doc__
+ return cls
+
+@_inherit_docstrings
+class ThreadAuthenticator(object):
+ """Run ZAP authentication in a background thread"""
+
+ def __init__(self, context=None, encoding='utf-8', log=None):
+ self.context = context or zmq.Context.instance()
+ self.log = log
+ self.encoding = encoding
+ self.pipe = None
+ self.pipe_endpoint = "inproc://{0}.inproc".format(id(self))
+ self.thread = None
+
+ def allow(self, *addresses):
+ self.pipe.send_multipart([b'ALLOW'] + [b(a, self.encoding) for a in addresses])
+
+ def deny(self, *addresses):
+ self.pipe.send_multipart([b'DENY'] + [b(a, self.encoding) for a in addresses])
+
+ def configure_plain(self, domain='*', passwords=None):
+ self.pipe.send_multipart([b'PLAIN', b(domain, self.encoding), jsonapi.dumps(passwords or {})])
+
+ def configure_curve(self, domain='*', location=''):
+ domain = b(domain, self.encoding)
+ location = b(location, self.encoding)
+ self.pipe.send_multipart([b'CURVE', domain, location])
+
+ def start(self):
+ """Start the authentication thread"""
+ # create a socket to communicate with auth thread.
+ self.pipe = self.context.socket(zmq.PAIR)
+ self.pipe.linger = 1
+ self.pipe.bind(self.pipe_endpoint)
+ self.thread = AuthenticationThread(self.context, self.pipe_endpoint, encoding=self.encoding, log=self.log)
+ self.thread.start()
+
+ def stop(self):
+ """Stop the authentication thread"""
+ if self.pipe:
+ self.pipe.send(b'TERMINATE')
+ if self.is_alive():
+ self.thread.join()
+ self.thread = None
+ self.pipe.close()
+ self.pipe = None
+
+ def is_alive(self):
+ """Is the ZAP thread currently running?"""
+ if self.thread and self.thread.is_alive():
+ return True
+ return False
+
+ def __del__(self):
+ self.stop()
+
+__all__ = ['ThreadAuthenticator']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/__init__.py
new file mode 100644
index 00000000..7cac725c
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/__init__.py
@@ -0,0 +1,45 @@
+"""Import basic exposure of libzmq C API as a backend"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import os
+import platform
+import sys
+
+from zmq.utils.sixcerpt import reraise
+
+from .select import public_api, select_backend
+
+if 'PYZMQ_BACKEND' in os.environ:
+ backend = os.environ['PYZMQ_BACKEND']
+ if backend in ('cython', 'cffi'):
+ backend = 'zmq.backend.%s' % backend
+ _ns = select_backend(backend)
+else:
+ # default to cython, fallback to cffi
+ # (reverse on PyPy)
+ if platform.python_implementation() == 'PyPy':
+ first, second = ('zmq.backend.cffi', 'zmq.backend.cython')
+ else:
+ first, second = ('zmq.backend.cython', 'zmq.backend.cffi')
+
+ try:
+ _ns = select_backend(first)
+ except Exception:
+ exc_info = sys.exc_info()
+ exc = exc_info[1]
+ try:
+ _ns = select_backend(second)
+ except ImportError:
+ # prevent 'During handling of the above exception...' on py3
+ # can't use `raise ... from` on Python 2
+ if hasattr(exc, '__cause__'):
+ exc.__cause__ = None
+ # raise the *first* error, not the fallback
+ reraise(*exc_info)
+
+globals().update(_ns)
+
+__all__ = public_api
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/__init__.py
new file mode 100644
index 00000000..ca3164d3
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/__init__.py
@@ -0,0 +1,22 @@
+"""CFFI backend (for PyPy)"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from zmq.backend.cffi import (constants, error, message, context, socket,
+ _poll, devices, utils)
+
+__all__ = []
+for submod in (constants, error, message, context, socket,
+ _poll, devices, utils):
+ __all__.extend(submod.__all__)
+
+from .constants import *
+from .error import *
+from .message import *
+from .context import *
+from .socket import *
+from .devices import *
+from ._poll import *
+from ._cffi import zmq_version_info, ffi
+from .utils import *
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/_cdefs.h b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/_cdefs.h
new file mode 100644
index 00000000..d3300575
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/_cdefs.h
@@ -0,0 +1,68 @@
+void zmq_version(int *major, int *minor, int *patch);
+
+void* zmq_socket(void *context, int type);
+int zmq_close(void *socket);
+
+int zmq_bind(void *socket, const char *endpoint);
+int zmq_connect(void *socket, const char *endpoint);
+
+int zmq_errno(void);
+const char * zmq_strerror(int errnum);
+
+void* zmq_stopwatch_start(void);
+unsigned long zmq_stopwatch_stop(void *watch);
+void zmq_sleep(int seconds_);
+int zmq_device(int device, void *frontend, void *backend);
+
+int zmq_unbind(void *socket, const char *endpoint);
+int zmq_disconnect(void *socket, const char *endpoint);
+void* zmq_ctx_new();
+int zmq_ctx_destroy(void *context);
+int zmq_ctx_get(void *context, int opt);
+int zmq_ctx_set(void *context, int opt, int optval);
+int zmq_proxy(void *frontend, void *backend, void *capture);
+int zmq_socket_monitor(void *socket, const char *addr, int events);
+
+int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key);
+int zmq_has (const char *capability);
+
+typedef struct { ...; } zmq_msg_t;
+typedef ... zmq_free_fn;
+
+int zmq_msg_init(zmq_msg_t *msg);
+int zmq_msg_init_size(zmq_msg_t *msg, size_t size);
+int zmq_msg_init_data(zmq_msg_t *msg,
+ void *data,
+ size_t size,
+ zmq_free_fn *ffn,
+ void *hint);
+
+size_t zmq_msg_size(zmq_msg_t *msg);
+void *zmq_msg_data(zmq_msg_t *msg);
+int zmq_msg_close(zmq_msg_t *msg);
+
+int zmq_msg_send(zmq_msg_t *msg, void *socket, int flags);
+int zmq_msg_recv(zmq_msg_t *msg, void *socket, int flags);
+
+int zmq_getsockopt(void *socket,
+ int option_name,
+ void *option_value,
+ size_t *option_len);
+
+int zmq_setsockopt(void *socket,
+ int option_name,
+ const void *option_value,
+ size_t option_len);
+typedef struct
+{
+ void *socket;
+ int fd;
+ short events;
+ short revents;
+} zmq_pollitem_t;
+
+int zmq_poll(zmq_pollitem_t *items, int nitems, long timeout);
+
+// miscellany
+void * memcpy(void *restrict s1, const void *restrict s2, size_t n);
+int get_ipc_path_max_len(void);
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/_cffi.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/_cffi.py
new file mode 100644
index 00000000..c73ebf83
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/_cffi.py
@@ -0,0 +1,127 @@
+# coding: utf-8
+"""The main CFFI wrapping of libzmq"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import json
+import os
+from os.path import dirname, join
+from cffi import FFI
+
+from zmq.utils.constant_names import all_names, no_prefix
+
+
+base_zmq_version = (3,2,2)
+
+def load_compiler_config():
+ """load pyzmq compiler arguments"""
+ import zmq
+ zmq_dir = dirname(zmq.__file__)
+ zmq_parent = dirname(zmq_dir)
+
+ fname = join(zmq_dir, 'utils', 'compiler.json')
+ if os.path.exists(fname):
+ with open(fname) as f:
+ cfg = json.load(f)
+ else:
+ cfg = {}
+
+ cfg.setdefault("include_dirs", [])
+ cfg.setdefault("library_dirs", [])
+ cfg.setdefault("runtime_library_dirs", [])
+ cfg.setdefault("libraries", ["zmq"])
+
+ # cast to str, because cffi can't handle unicode paths (?!)
+ cfg['libraries'] = [str(lib) for lib in cfg['libraries']]
+ for key in ("include_dirs", "library_dirs", "runtime_library_dirs"):
+ # interpret paths relative to parent of zmq (like source tree)
+ abs_paths = []
+ for p in cfg[key]:
+ if p.startswith('zmq'):
+ p = join(zmq_parent, p)
+ abs_paths.append(str(p))
+ cfg[key] = abs_paths
+ return cfg
+
+
+def zmq_version_info():
+ """Get libzmq version as tuple of ints"""
+ major = ffi.new('int*')
+ minor = ffi.new('int*')
+ patch = ffi.new('int*')
+
+ C.zmq_version(major, minor, patch)
+
+ return (int(major[0]), int(minor[0]), int(patch[0]))
+
+
+cfg = load_compiler_config()
+ffi = FFI()
+
+def _make_defines(names):
+ _names = []
+ for name in names:
+ define_line = "#define %s ..." % (name)
+ _names.append(define_line)
+
+ return "\n".join(_names)
+
+c_constant_names = []
+for name in all_names:
+ if no_prefix(name):
+ c_constant_names.append(name)
+ else:
+ c_constant_names.append("ZMQ_" + name)
+
+# load ffi definitions
+here = os.path.dirname(__file__)
+with open(os.path.join(here, '_cdefs.h')) as f:
+ _cdefs = f.read()
+
+with open(os.path.join(here, '_verify.c')) as f:
+ _verify = f.read()
+
+ffi.cdef(_cdefs)
+ffi.cdef(_make_defines(c_constant_names))
+
+try:
+ C = ffi.verify(_verify,
+ modulename='_cffi_ext',
+ libraries=cfg['libraries'],
+ include_dirs=cfg['include_dirs'],
+ library_dirs=cfg['library_dirs'],
+ runtime_library_dirs=cfg['runtime_library_dirs'],
+ )
+ _version_info = zmq_version_info()
+except Exception as e:
+ raise ImportError("PyZMQ CFFI backend couldn't find zeromq: %s\n"
+ "Please check that you have zeromq headers and libraries." % e)
+
+if _version_info < (3,2,2):
+ raise ImportError("PyZMQ CFFI backend requires zeromq >= 3.2.2,"
+ " but found %i.%i.%i" % _version_info
+ )
+
+nsp = new_sizet_pointer = lambda length: ffi.new('size_t*', length)
+
+new_uint64_pointer = lambda: (ffi.new('uint64_t*'),
+ nsp(ffi.sizeof('uint64_t')))
+new_int64_pointer = lambda: (ffi.new('int64_t*'),
+ nsp(ffi.sizeof('int64_t')))
+new_int_pointer = lambda: (ffi.new('int*'),
+ nsp(ffi.sizeof('int')))
+new_binary_data = lambda length: (ffi.new('char[%d]' % (length)),
+ nsp(ffi.sizeof('char') * length))
+
+value_uint64_pointer = lambda val : (ffi.new('uint64_t*', val),
+ ffi.sizeof('uint64_t'))
+value_int64_pointer = lambda val: (ffi.new('int64_t*', val),
+ ffi.sizeof('int64_t'))
+value_int_pointer = lambda val: (ffi.new('int*', val),
+ ffi.sizeof('int'))
+value_binary_data = lambda val, length: (ffi.new('char[%d]' % (length + 1), val),
+ ffi.sizeof('char') * length)
+
+IPC_PATH_MAX_LEN = C.get_ipc_path_max_len()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/_poll.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/_poll.py
new file mode 100644
index 00000000..9bca34ca
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/_poll.py
@@ -0,0 +1,56 @@
+# coding: utf-8
+"""zmq poll function"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from ._cffi import C, ffi, zmq_version_info
+
+from .constants import *
+
+from zmq.error import _check_rc
+
+
+def _make_zmq_pollitem(socket, flags):
+ zmq_socket = socket._zmq_socket
+ zmq_pollitem = ffi.new('zmq_pollitem_t*')
+ zmq_pollitem.socket = zmq_socket
+ zmq_pollitem.fd = 0
+ zmq_pollitem.events = flags
+ zmq_pollitem.revents = 0
+ return zmq_pollitem[0]
+
+def _make_zmq_pollitem_fromfd(socket_fd, flags):
+ zmq_pollitem = ffi.new('zmq_pollitem_t*')
+ zmq_pollitem.socket = ffi.NULL
+ zmq_pollitem.fd = socket_fd
+ zmq_pollitem.events = flags
+ zmq_pollitem.revents = 0
+ return zmq_pollitem[0]
+
+def zmq_poll(sockets, timeout):
+ cffi_pollitem_list = []
+ low_level_to_socket_obj = {}
+ for item in sockets:
+ if isinstance(item[0], int):
+ low_level_to_socket_obj[item[0]] = item
+ cffi_pollitem_list.append(_make_zmq_pollitem_fromfd(item[0], item[1]))
+ else:
+ low_level_to_socket_obj[item[0]._zmq_socket] = item
+ cffi_pollitem_list.append(_make_zmq_pollitem(item[0], item[1]))
+ items = ffi.new('zmq_pollitem_t[]', cffi_pollitem_list)
+ list_length = ffi.cast('int', len(cffi_pollitem_list))
+ c_timeout = ffi.cast('long', timeout)
+ rc = C.zmq_poll(items, list_length, c_timeout)
+ _check_rc(rc)
+ result = []
+ for index in range(len(items)):
+ if not items[index].socket == ffi.NULL:
+ if items[index].revents > 0:
+ result.append((low_level_to_socket_obj[items[index].socket][0],
+ items[index].revents))
+ else:
+ result.append((items[index].fd, items[index].revents))
+ return result
+
+__all__ = ['zmq_poll']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/_verify.c b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/_verify.c
new file mode 100644
index 00000000..547840eb
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/_verify.c
@@ -0,0 +1,12 @@
+#include <stdio.h>
+#include <sys/un.h>
+#include <string.h>
+
+#include <zmq.h>
+#include <zmq_utils.h>
+#include "zmq_compat.h"
+
+int get_ipc_path_max_len(void) {
+ struct sockaddr_un *dummy;
+ return sizeof(dummy->sun_path) - 1;
+}
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/constants.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/constants.py
new file mode 100644
index 00000000..ee293e74
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/constants.py
@@ -0,0 +1,15 @@
+# coding: utf-8
+"""zmq constants"""
+
+from ._cffi import C, c_constant_names
+from zmq.utils.constant_names import all_names
+
+g = globals()
+for cname in c_constant_names:
+ if cname.startswith("ZMQ_"):
+ name = cname[4:]
+ else:
+ name = cname
+ g[name] = getattr(C, cname)
+
+__all__ = all_names
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/context.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/context.py
new file mode 100644
index 00000000..16a7b257
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/context.py
@@ -0,0 +1,100 @@
+# coding: utf-8
+"""zmq Context class"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import weakref
+
+from ._cffi import C, ffi
+
+from .socket import *
+from .constants import *
+
+from zmq.error import ZMQError, _check_rc
+
+class Context(object):
+ _zmq_ctx = None
+ _iothreads = None
+ _closed = None
+ _sockets = None
+ _shadow = False
+
+ def __init__(self, io_threads=1, shadow=None):
+
+ if shadow:
+ self._zmq_ctx = ffi.cast("void *", shadow)
+ self._shadow = True
+ else:
+ self._shadow = False
+ if not io_threads >= 0:
+ raise ZMQError(EINVAL)
+
+ self._zmq_ctx = C.zmq_ctx_new()
+ if self._zmq_ctx == ffi.NULL:
+ raise ZMQError(C.zmq_errno())
+ if not shadow:
+ C.zmq_ctx_set(self._zmq_ctx, IO_THREADS, io_threads)
+ self._closed = False
+ self._sockets = set()
+
+ @property
+ def underlying(self):
+ """The address of the underlying libzmq context"""
+ return int(ffi.cast('size_t', self._zmq_ctx))
+
+ @property
+ def closed(self):
+ return self._closed
+
+ def _add_socket(self, socket):
+ ref = weakref.ref(socket)
+ self._sockets.add(ref)
+ return ref
+
+ def _rm_socket(self, ref):
+ if ref in self._sockets:
+ self._sockets.remove(ref)
+
+ def set(self, option, value):
+ """set a context option
+
+ see zmq_ctx_set
+ """
+ rc = C.zmq_ctx_set(self._zmq_ctx, option, value)
+ _check_rc(rc)
+
+ def get(self, option):
+ """get context option
+
+ see zmq_ctx_get
+ """
+ rc = C.zmq_ctx_get(self._zmq_ctx, option)
+ _check_rc(rc)
+ return rc
+
+ def term(self):
+ if self.closed:
+ return
+
+ C.zmq_ctx_destroy(self._zmq_ctx)
+
+ self._zmq_ctx = None
+ self._closed = True
+
+ def destroy(self, linger=None):
+ if self.closed:
+ return
+
+ sockets = self._sockets
+ self._sockets = set()
+ for s in sockets:
+ s = s()
+ if s and not s.closed:
+ if linger:
+ s.setsockopt(LINGER, linger)
+ s.close()
+
+ self.term()
+
+__all__ = ['Context']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/devices.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/devices.py
new file mode 100644
index 00000000..c7a514a8
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/devices.py
@@ -0,0 +1,24 @@
+# coding: utf-8
+"""zmq device functions"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from ._cffi import C, ffi, zmq_version_info
+from .socket import Socket
+from zmq.error import ZMQError, _check_rc
+
+def device(device_type, frontend, backend):
+ rc = C.zmq_proxy(frontend._zmq_socket, backend._zmq_socket, ffi.NULL)
+ _check_rc(rc)
+
+def proxy(frontend, backend, capture=None):
+ if isinstance(capture, Socket):
+ capture = capture._zmq_socket
+ else:
+ capture = ffi.NULL
+
+ rc = C.zmq_proxy(frontend._zmq_socket, backend._zmq_socket, capture)
+ _check_rc(rc)
+
+__all__ = ['device', 'proxy']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/error.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/error.py
new file mode 100644
index 00000000..3bb64de0
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/error.py
@@ -0,0 +1,13 @@
+"""zmq error functions"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from ._cffi import C, ffi
+
+def strerror(errno):
+ return ffi.string(C.zmq_strerror(errno))
+
+zmq_errno = C.zmq_errno
+
+__all__ = ['strerror', 'zmq_errno']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/message.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/message.py
new file mode 100644
index 00000000..c35decb6
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/message.py
@@ -0,0 +1,69 @@
+"""Dummy Frame object"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from ._cffi import ffi, C
+
+import zmq
+from zmq.utils.strtypes import unicode
+
+try:
+ view = memoryview
+except NameError:
+ view = buffer
+
+_content = lambda x: x.tobytes() if type(x) == memoryview else x
+
+class Frame(object):
+ _data = None
+ tracker = None
+ closed = False
+ more = False
+ buffer = None
+
+
+ def __init__(self, data, track=False):
+ try:
+ view(data)
+ except TypeError:
+ raise
+
+ self._data = data
+
+ if isinstance(data, unicode):
+ raise TypeError("Unicode objects not allowed. Only: str/bytes, " +
+ "buffer interfaces.")
+
+ self.more = False
+ self.tracker = None
+ self.closed = False
+ if track:
+ self.tracker = zmq.MessageTracker()
+
+ self.buffer = view(self.bytes)
+
+ @property
+ def bytes(self):
+ data = _content(self._data)
+ return data
+
+ def __len__(self):
+ return len(self.bytes)
+
+ def __eq__(self, other):
+ return self.bytes == _content(other)
+
+ def __str__(self):
+ if str is unicode:
+ return self.bytes.decode()
+ else:
+ return self.bytes
+
+ @property
+ def done(self):
+ return True
+
+Message = Frame
+
+__all__ = ['Frame', 'Message']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/socket.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/socket.py
new file mode 100644
index 00000000..3c427739
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/socket.py
@@ -0,0 +1,244 @@
+# coding: utf-8
+"""zmq Socket class"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import random
+import codecs
+
+import errno as errno_mod
+
+from ._cffi import (C, ffi, new_uint64_pointer, new_int64_pointer,
+ new_int_pointer, new_binary_data, value_uint64_pointer,
+ value_int64_pointer, value_int_pointer, value_binary_data,
+ IPC_PATH_MAX_LEN)
+
+from .message import Frame
+from .constants import *
+
+import zmq
+from zmq.error import ZMQError, _check_rc, _check_version
+from zmq.utils.strtypes import unicode
+
+
+def new_pointer_from_opt(option, length=0):
+ from zmq.sugar.constants import (
+ int64_sockopts, bytes_sockopts,
+ )
+ if option in int64_sockopts:
+ return new_int64_pointer()
+ elif option in bytes_sockopts:
+ return new_binary_data(length)
+ else:
+ # default
+ return new_int_pointer()
+
+def value_from_opt_pointer(option, opt_pointer, length=0):
+ from zmq.sugar.constants import (
+ int64_sockopts, bytes_sockopts,
+ )
+ if option in int64_sockopts:
+ return int(opt_pointer[0])
+ elif option in bytes_sockopts:
+ return ffi.buffer(opt_pointer, length)[:]
+ else:
+ return int(opt_pointer[0])
+
+def initialize_opt_pointer(option, value, length=0):
+ from zmq.sugar.constants import (
+ int64_sockopts, bytes_sockopts,
+ )
+ if option in int64_sockopts:
+ return value_int64_pointer(value)
+ elif option in bytes_sockopts:
+ return value_binary_data(value, length)
+ else:
+ return value_int_pointer(value)
+
+
+class Socket(object):
+ context = None
+ socket_type = None
+ _zmq_socket = None
+ _closed = None
+ _ref = None
+ _shadow = False
+
+ def __init__(self, context=None, socket_type=None, shadow=None):
+ self.context = context
+ if shadow is not None:
+ self._zmq_socket = ffi.cast("void *", shadow)
+ self._shadow = True
+ else:
+ self._shadow = False
+ self._zmq_socket = C.zmq_socket(context._zmq_ctx, socket_type)
+ if self._zmq_socket == ffi.NULL:
+ raise ZMQError()
+ self._closed = False
+ if context:
+ self._ref = context._add_socket(self)
+
+ @property
+ def underlying(self):
+ """The address of the underlying libzmq socket"""
+ return int(ffi.cast('size_t', self._zmq_socket))
+
+ @property
+ def closed(self):
+ return self._closed
+
+ def close(self, linger=None):
+ rc = 0
+ if not self._closed and hasattr(self, '_zmq_socket'):
+ if self._zmq_socket is not None:
+ rc = C.zmq_close(self._zmq_socket)
+ self._closed = True
+ if self.context:
+ self.context._rm_socket(self._ref)
+ return rc
+
+ def bind(self, address):
+ if isinstance(address, unicode):
+ address = address.encode('utf8')
+ rc = C.zmq_bind(self._zmq_socket, address)
+ if rc < 0:
+ if IPC_PATH_MAX_LEN and C.zmq_errno() == errno_mod.ENAMETOOLONG:
+ # py3compat: address is bytes, but msg wants str
+ if str is unicode:
+ address = address.decode('utf-8', 'replace')
+ path = address.split('://', 1)[-1]
+ msg = ('ipc path "{0}" is longer than {1} '
+ 'characters (sizeof(sockaddr_un.sun_path)).'
+ .format(path, IPC_PATH_MAX_LEN))
+ raise ZMQError(C.zmq_errno(), msg=msg)
+ else:
+ _check_rc(rc)
+
+ def unbind(self, address):
+ _check_version((3,2), "unbind")
+ if isinstance(address, unicode):
+ address = address.encode('utf8')
+ rc = C.zmq_unbind(self._zmq_socket, address)
+ _check_rc(rc)
+
+ def connect(self, address):
+ if isinstance(address, unicode):
+ address = address.encode('utf8')
+ rc = C.zmq_connect(self._zmq_socket, address)
+ _check_rc(rc)
+
+ def disconnect(self, address):
+ _check_version((3,2), "disconnect")
+ if isinstance(address, unicode):
+ address = address.encode('utf8')
+ rc = C.zmq_disconnect(self._zmq_socket, address)
+ _check_rc(rc)
+
+ def set(self, option, value):
+ length = None
+ if isinstance(value, unicode):
+ raise TypeError("unicode not allowed, use bytes")
+
+ if isinstance(value, bytes):
+ if option not in zmq.constants.bytes_sockopts:
+ raise TypeError("not a bytes sockopt: %s" % option)
+ length = len(value)
+
+ c_data = initialize_opt_pointer(option, value, length)
+
+ c_value_pointer = c_data[0]
+ c_sizet = c_data[1]
+
+ rc = C.zmq_setsockopt(self._zmq_socket,
+ option,
+ ffi.cast('void*', c_value_pointer),
+ c_sizet)
+ _check_rc(rc)
+
+ def get(self, option):
+ c_data = new_pointer_from_opt(option, length=255)
+
+ c_value_pointer = c_data[0]
+ c_sizet_pointer = c_data[1]
+
+ rc = C.zmq_getsockopt(self._zmq_socket,
+ option,
+ c_value_pointer,
+ c_sizet_pointer)
+ _check_rc(rc)
+
+ sz = c_sizet_pointer[0]
+ v = value_from_opt_pointer(option, c_value_pointer, sz)
+ if option != zmq.IDENTITY and option in zmq.constants.bytes_sockopts and v.endswith(b'\0'):
+ v = v[:-1]
+ return v
+
+ def send(self, message, flags=0, copy=False, track=False):
+ if isinstance(message, unicode):
+ raise TypeError("Message must be in bytes, not an unicode Object")
+
+ if isinstance(message, Frame):
+ message = message.bytes
+
+ zmq_msg = ffi.new('zmq_msg_t*')
+ c_message = ffi.new('char[]', message)
+ rc = C.zmq_msg_init_size(zmq_msg, len(message))
+ C.memcpy(C.zmq_msg_data(zmq_msg), c_message, len(message))
+
+ rc = C.zmq_msg_send(zmq_msg, self._zmq_socket, flags)
+ C.zmq_msg_close(zmq_msg)
+ _check_rc(rc)
+
+ if track:
+ return zmq.MessageTracker()
+
+ def recv(self, flags=0, copy=True, track=False):
+ zmq_msg = ffi.new('zmq_msg_t*')
+ C.zmq_msg_init(zmq_msg)
+
+ rc = C.zmq_msg_recv(zmq_msg, self._zmq_socket, flags)
+
+ if rc < 0:
+ C.zmq_msg_close(zmq_msg)
+ _check_rc(rc)
+
+ _buffer = ffi.buffer(C.zmq_msg_data(zmq_msg), C.zmq_msg_size(zmq_msg))
+ value = _buffer[:]
+ C.zmq_msg_close(zmq_msg)
+
+ frame = Frame(value, track=track)
+ frame.more = self.getsockopt(RCVMORE)
+
+ if copy:
+ return frame.bytes
+ else:
+ return frame
+
+ def monitor(self, addr, events=-1):
+ """s.monitor(addr, flags)
+
+ Start publishing socket events on inproc.
+ See libzmq docs for zmq_monitor for details.
+
+ Note: requires libzmq >= 3.2
+
+ Parameters
+ ----------
+ addr : str
+ The inproc url used for monitoring. Passing None as
+ the addr will cause an existing socket monitor to be
+ deregistered.
+ events : int [default: zmq.EVENT_ALL]
+ The zmq event bitmask for which events will be sent to the monitor.
+ """
+
+ _check_version((3,2), "monitor")
+ if events < 0:
+ events = zmq.EVENT_ALL
+ if addr is None:
+ addr = ffi.NULL
+ rc = C.zmq_socket_monitor(self._zmq_socket, addr, events)
+
+
+__all__ = ['Socket', 'IPC_PATH_MAX_LEN']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/utils.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/utils.py
new file mode 100644
index 00000000..fde7827b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cffi/utils.py
@@ -0,0 +1,62 @@
+# coding: utf-8
+"""miscellaneous zmq_utils wrapping"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from ._cffi import ffi, C
+
+from zmq.error import ZMQError, _check_rc, _check_version
+from zmq.utils.strtypes import unicode
+
+def has(capability):
+ """Check for zmq capability by name (e.g. 'ipc', 'curve')
+
+ .. versionadded:: libzmq-4.1
+ .. versionadded:: 14.1
+ """
+ _check_version((4,1), 'zmq.has')
+ if isinstance(capability, unicode):
+ capability = capability.encode('utf8')
+ return bool(C.zmq_has(capability))
+
+def curve_keypair():
+ """generate a Z85 keypair for use with zmq.CURVE security
+
+ Requires libzmq (≥ 4.0) to have been linked with libsodium.
+
+ Returns
+ -------
+ (public, secret) : two bytestrings
+ The public and private keypair as 40 byte z85-encoded bytestrings.
+ """
+ _check_version((3,2), "monitor")
+ public = ffi.new('char[64]')
+ private = ffi.new('char[64]')
+ rc = C.zmq_curve_keypair(public, private)
+ _check_rc(rc)
+ return ffi.buffer(public)[:40], ffi.buffer(private)[:40]
+
+
+class Stopwatch(object):
+ def __init__(self):
+ self.watch = ffi.NULL
+
+ def start(self):
+ if self.watch == ffi.NULL:
+ self.watch = C.zmq_stopwatch_start()
+ else:
+ raise ZMQError('Stopwatch is already runing.')
+
+ def stop(self):
+ if self.watch == ffi.NULL:
+ raise ZMQError('Must start the Stopwatch before calling stop.')
+ else:
+ time = C.zmq_stopwatch_stop(self.watch)
+ self.watch = ffi.NULL
+ return time
+
+ def sleep(self, seconds):
+ C.zmq_sleep(seconds)
+
+__all__ = ['has', 'curve_keypair', 'Stopwatch']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/__init__.py
new file mode 100644
index 00000000..e5358185
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/__init__.py
@@ -0,0 +1,23 @@
+"""Python bindings for core 0MQ objects."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Lesser GNU Public License (LGPL).
+
+from . import (constants, error, message, context,
+ socket, utils, _poll, _version, _device )
+
+__all__ = []
+for submod in (constants, error, message, context,
+ socket, utils, _poll, _version, _device):
+ __all__.extend(submod.__all__)
+
+from .constants import *
+from .error import *
+from .message import *
+from .context import *
+from .socket import *
+from ._poll import *
+from .utils import *
+from ._device import *
+from ._version import *
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/_device.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/_device.cpython-34m.so
new file mode 100644
index 00000000..a7ede3aa
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/_device.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/_poll.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/_poll.cpython-34m.so
new file mode 100644
index 00000000..e1238a2b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/_poll.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/_version.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/_version.cpython-34m.so
new file mode 100644
index 00000000..be91b95a
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/_version.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/checkrc.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/checkrc.pxd
new file mode 100644
index 00000000..3bf69fc3
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/checkrc.pxd
@@ -0,0 +1,23 @@
+from libc.errno cimport EINTR, EAGAIN
+from cpython cimport PyErr_CheckSignals
+from libzmq cimport zmq_errno, ZMQ_ETERM
+
+cdef inline int _check_rc(int rc) except -1:
+ """internal utility for checking zmq return condition
+
+ and raising the appropriate Exception class
+ """
+ cdef int errno = zmq_errno()
+ PyErr_CheckSignals()
+ if rc < 0:
+ if errno == EAGAIN:
+ from zmq.error import Again
+ raise Again(errno)
+ elif errno == ZMQ_ETERM:
+ from zmq.error import ContextTerminated
+ raise ContextTerminated(errno)
+ else:
+ from zmq.error import ZMQError
+ raise ZMQError(errno)
+ # return -1
+ return 0
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/constants.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/constants.cpython-34m.so
new file mode 100644
index 00000000..6c6f4b5f
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/constants.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/context.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/context.cpython-34m.so
new file mode 100644
index 00000000..1f64c52f
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/context.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/context.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/context.pxd
new file mode 100644
index 00000000..9c9267a5
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/context.pxd
@@ -0,0 +1,41 @@
+"""0MQ Context class declaration."""
+
+#
+# Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley
+#
+# This file is part of pyzmq.
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+cdef class Context:
+
+ cdef object __weakref__ # enable weakref
+ cdef void *handle # The C handle for the underlying zmq object.
+ cdef bint _shadow # whether the Context is a shadow wrapper of another
+ cdef void **_sockets # A C-array containing socket handles
+ cdef size_t _n_sockets # the number of sockets
+ cdef size_t _max_sockets # the size of the _sockets array
+ cdef int _pid # the pid of the process which created me (for fork safety)
+
+ cdef public bint closed # bool property for a closed context.
+ cdef inline int _term(self)
+ # helpers for events on _sockets in Socket.__cinit__()/close()
+ cdef inline void _add_socket(self, void* handle)
+ cdef inline void _remove_socket(self, void* handle)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/error.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/error.cpython-34m.so
new file mode 100644
index 00000000..1e4a4ecd
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/error.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/libzmq.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/libzmq.pxd
new file mode 100644
index 00000000..e42f6d6b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/libzmq.pxd
@@ -0,0 +1,110 @@
+"""All the C imports for 0MQ"""
+
+#
+# Copyright (c) 2010 Brian E. Granger & Min Ragan-Kelley
+#
+# This file is part of pyzmq.
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Import the C header files
+#-----------------------------------------------------------------------------
+
+cdef extern from *:
+ ctypedef void* const_void_ptr "const void *"
+ ctypedef char* const_char_ptr "const char *"
+
+cdef extern from "zmq_compat.h":
+ ctypedef signed long long int64_t "pyzmq_int64_t"
+
+include "constant_enums.pxi"
+
+cdef extern from "zmq.h" nogil:
+
+ void _zmq_version "zmq_version"(int *major, int *minor, int *patch)
+
+ ctypedef int fd_t "ZMQ_FD_T"
+
+ enum: errno
+ char *zmq_strerror (int errnum)
+ int zmq_errno()
+
+ void *zmq_ctx_new ()
+ int zmq_ctx_destroy (void *context)
+ int zmq_ctx_set (void *context, int option, int optval)
+ int zmq_ctx_get (void *context, int option)
+ void *zmq_init (int io_threads)
+ int zmq_term (void *context)
+
+ # blackbox def for zmq_msg_t
+ ctypedef void * zmq_msg_t "zmq_msg_t"
+
+ ctypedef void zmq_free_fn(void *data, void *hint)
+
+ int zmq_msg_init (zmq_msg_t *msg)
+ int zmq_msg_init_size (zmq_msg_t *msg, size_t size)
+ int zmq_msg_init_data (zmq_msg_t *msg, void *data,
+ size_t size, zmq_free_fn *ffn, void *hint)
+ int zmq_msg_send (zmq_msg_t *msg, void *s, int flags)
+ int zmq_msg_recv (zmq_msg_t *msg, void *s, int flags)
+ int zmq_msg_close (zmq_msg_t *msg)
+ int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src)
+ int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src)
+ void *zmq_msg_data (zmq_msg_t *msg)
+ size_t zmq_msg_size (zmq_msg_t *msg)
+ int zmq_msg_more (zmq_msg_t *msg)
+ int zmq_msg_get (zmq_msg_t *msg, int option)
+ int zmq_msg_set (zmq_msg_t *msg, int option, int optval)
+ const_char_ptr zmq_msg_gets (zmq_msg_t *msg, const_char_ptr property)
+ int zmq_has (const_char_ptr capability)
+
+ void *zmq_socket (void *context, int type)
+ int zmq_close (void *s)
+ int zmq_setsockopt (void *s, int option, void *optval, size_t optvallen)
+ int zmq_getsockopt (void *s, int option, void *optval, size_t *optvallen)
+ int zmq_bind (void *s, char *addr)
+ int zmq_connect (void *s, char *addr)
+ int zmq_unbind (void *s, char *addr)
+ int zmq_disconnect (void *s, char *addr)
+
+ int zmq_socket_monitor (void *s, char *addr, int flags)
+
+ # send/recv
+ int zmq_sendbuf (void *s, const_void_ptr buf, size_t n, int flags)
+ int zmq_recvbuf (void *s, void *buf, size_t n, int flags)
+
+ ctypedef struct zmq_pollitem_t:
+ void *socket
+ int fd
+ short events
+ short revents
+
+ int zmq_poll (zmq_pollitem_t *items, int nitems, long timeout)
+
+ int zmq_device (int device_, void *insocket_, void *outsocket_)
+ int zmq_proxy (void *frontend, void *backend, void *capture)
+
+cdef extern from "zmq_utils.h" nogil:
+
+ void *zmq_stopwatch_start ()
+ unsigned long zmq_stopwatch_stop (void *watch_)
+ void zmq_sleep (int seconds_)
+ int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/message.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/message.cpython-34m.so
new file mode 100644
index 00000000..2db58e42
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/message.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/message.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/message.pxd
new file mode 100644
index 00000000..4781195f
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/message.pxd
@@ -0,0 +1,63 @@
+"""0MQ Message related class declarations."""
+
+#
+# Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley
+#
+# This file is part of pyzmq.
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from cpython cimport PyBytes_FromStringAndSize
+
+from libzmq cimport zmq_msg_t, zmq_msg_data, zmq_msg_size
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+cdef class MessageTracker(object):
+
+ cdef set events # Message Event objects to track.
+ cdef set peers # Other Message or MessageTracker objects.
+
+
+cdef class Frame:
+
+ cdef zmq_msg_t zmq_msg
+ cdef object _data # The actual message data as a Python object.
+ cdef object _buffer # A Python Buffer/View of the message contents
+ cdef object _bytes # A bytes/str copy of the message.
+ cdef bint _failed_init # Flag to handle failed zmq_msg_init
+ cdef public object tracker_event # Event for use with zmq_free_fn.
+ cdef public object tracker # MessageTracker object.
+ cdef public bint more # whether RCVMORE was set
+
+ cdef Frame fast_copy(self) # Create shallow copy of Message object.
+ cdef object _getbuffer(self) # Construct self._buffer.
+
+
+cdef inline object copy_zmq_msg_bytes(zmq_msg_t *zmq_msg):
+ """ Copy the data from a zmq_msg_t """
+ cdef char *data_c = NULL
+ cdef Py_ssize_t data_len_c
+ data_c = <char *>zmq_msg_data(zmq_msg)
+ data_len_c = zmq_msg_size(zmq_msg)
+ return PyBytes_FromStringAndSize(data_c, data_len_c)
+
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/socket.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/socket.cpython-34m.so
new file mode 100644
index 00000000..5421b769
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/socket.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/socket.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/socket.pxd
new file mode 100644
index 00000000..b8a331e2
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/socket.pxd
@@ -0,0 +1,47 @@
+"""0MQ Socket class declaration."""
+
+#
+# Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley
+#
+# This file is part of pyzmq.
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from context cimport Context
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+
+cdef class Socket:
+
+ cdef object __weakref__ # enable weakref
+ cdef void *handle # The C handle for the underlying zmq object.
+ cdef bint _shadow # whether the Socket is a shadow wrapper of another
+ # Hold on to a reference to the context to make sure it is not garbage
+ # collected until the socket is done with it.
+ cdef public Context context # The zmq Context object that owns this.
+ cdef public bint _closed # bool property for a closed socket.
+ cdef int _pid # the pid of the process which created me (for fork safety)
+
+ # cpdef methods for direct-cython access:
+ cpdef object send(self, object data, int flags=*, copy=*, track=*)
+ cpdef object recv(self, int flags=*, copy=*, track=*)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/utils.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/utils.cpython-34m.so
new file mode 100644
index 00000000..d9cd485d
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/utils.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/utils.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/utils.pxd
new file mode 100644
index 00000000..1d7117f1
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/cython/utils.pxd
@@ -0,0 +1,29 @@
+"""Wrap zmq_utils.h"""
+
+#
+# Copyright (c) 2010 Brian E. Granger & Min Ragan-Kelley
+#
+# This file is part of pyzmq.
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+
+cdef class Stopwatch:
+ cdef void *watch # The C handle for the underlying zmq object
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/select.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/select.py
new file mode 100644
index 00000000..0a2e09a2
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/backend/select.py
@@ -0,0 +1,39 @@
+"""Import basic exposure of libzmq C API as a backend"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+public_api = [
+ 'Context',
+ 'Socket',
+ 'Frame',
+ 'Message',
+ 'Stopwatch',
+ 'device',
+ 'proxy',
+ 'zmq_poll',
+ 'strerror',
+ 'zmq_errno',
+ 'has',
+ 'curve_keypair',
+ 'constants',
+ 'zmq_version_info',
+ 'IPC_PATH_MAX_LEN',
+]
+
+def select_backend(name):
+ """Select the pyzmq backend"""
+ try:
+ mod = __import__(name, fromlist=public_api)
+ except ImportError:
+ raise
+ except Exception as e:
+ import sys
+ from zmq.utils.sixcerpt import reraise
+ exc_info = sys.exc_info()
+ reraise(ImportError, ImportError("Importing %s failed with %s" % (name, e)), exc_info[2])
+
+ ns = {}
+ for key in public_api:
+ ns[key] = getattr(mod, key)
+ return ns
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/__init__.py
new file mode 100644
index 00000000..23715963
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/__init__.py
@@ -0,0 +1,16 @@
+"""0MQ Device classes for running in background threads or processes."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from zmq import device
+from zmq.devices import basedevice, proxydevice, monitoredqueue, monitoredqueuedevice
+
+from zmq.devices.basedevice import *
+from zmq.devices.proxydevice import *
+from zmq.devices.monitoredqueue import *
+from zmq.devices.monitoredqueuedevice import *
+
+__all__ = ['device']
+for submod in (basedevice, proxydevice, monitoredqueue, monitoredqueuedevice):
+ __all__.extend(submod.__all__)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/basedevice.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/basedevice.py
new file mode 100644
index 00000000..7ba1b7ac
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/basedevice.py
@@ -0,0 +1,229 @@
+"""Classes for running 0MQ Devices in the background."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import time
+from threading import Thread
+from multiprocessing import Process
+
+from zmq import device, QUEUE, Context, ETERM, ZMQError
+
+
+class Device:
+ """A 0MQ Device to be run in the background.
+
+ You do not pass Socket instances to this, but rather Socket types::
+
+ Device(device_type, in_socket_type, out_socket_type)
+
+ For instance::
+
+ dev = Device(zmq.QUEUE, zmq.DEALER, zmq.ROUTER)
+
+ Similar to zmq.device, but socket types instead of sockets themselves are
+ passed, and the sockets are created in the work thread, to avoid issues
+ with thread safety. As a result, additional bind_{in|out} and
+ connect_{in|out} methods and setsockopt_{in|out} allow users to specify
+ connections for the sockets.
+
+ Parameters
+ ----------
+ device_type : int
+ The 0MQ Device type
+ {in|out}_type : int
+ zmq socket types, to be passed later to context.socket(). e.g.
+ zmq.PUB, zmq.SUB, zmq.REQ. If out_type is < 0, then in_socket is used
+ for both in_socket and out_socket.
+
+ Methods
+ -------
+ bind_{in_out}(iface)
+ passthrough for ``{in|out}_socket.bind(iface)``, to be called in the thread
+ connect_{in_out}(iface)
+ passthrough for ``{in|out}_socket.connect(iface)``, to be called in the
+ thread
+ setsockopt_{in_out}(opt,value)
+ passthrough for ``{in|out}_socket.setsockopt(opt, value)``, to be called in
+ the thread
+
+ Attributes
+ ----------
+ daemon : int
+ sets whether the thread should be run as a daemon
+ Default is true, because if it is false, the thread will not
+ exit unless it is killed
+ context_factory : callable (class attribute)
+ Function for creating the Context. This will be Context.instance
+ in ThreadDevices, and Context in ProcessDevices. The only reason
+ it is not instance() in ProcessDevices is that there may be a stale
+ Context instance already initialized, and the forked environment
+ should *never* try to use it.
+ """
+
+ context_factory = Context.instance
+ """Callable that returns a context. Typically either Context.instance or Context,
+ depending on whether the device should share the global instance or not.
+ """
+
+ def __init__(self, device_type=QUEUE, in_type=None, out_type=None):
+ self.device_type = device_type
+ if in_type is None:
+ raise TypeError("in_type must be specified")
+ if out_type is None:
+ raise TypeError("out_type must be specified")
+ self.in_type = in_type
+ self.out_type = out_type
+ self._in_binds = []
+ self._in_connects = []
+ self._in_sockopts = []
+ self._out_binds = []
+ self._out_connects = []
+ self._out_sockopts = []
+ self.daemon = True
+ self.done = False
+
+ def bind_in(self, addr):
+ """Enqueue ZMQ address for binding on in_socket.
+
+ See zmq.Socket.bind for details.
+ """
+ self._in_binds.append(addr)
+
+ def connect_in(self, addr):
+ """Enqueue ZMQ address for connecting on in_socket.
+
+ See zmq.Socket.connect for details.
+ """
+ self._in_connects.append(addr)
+
+ def setsockopt_in(self, opt, value):
+ """Enqueue setsockopt(opt, value) for in_socket
+
+ See zmq.Socket.setsockopt for details.
+ """
+ self._in_sockopts.append((opt, value))
+
+ def bind_out(self, addr):
+ """Enqueue ZMQ address for binding on out_socket.
+
+ See zmq.Socket.bind for details.
+ """
+ self._out_binds.append(addr)
+
+ def connect_out(self, addr):
+ """Enqueue ZMQ address for connecting on out_socket.
+
+ See zmq.Socket.connect for details.
+ """
+ self._out_connects.append(addr)
+
+ def setsockopt_out(self, opt, value):
+ """Enqueue setsockopt(opt, value) for out_socket
+
+ See zmq.Socket.setsockopt for details.
+ """
+ self._out_sockopts.append((opt, value))
+
+ def _setup_sockets(self):
+ ctx = self.context_factory()
+
+ self._context = ctx
+
+ # create the sockets
+ ins = ctx.socket(self.in_type)
+ if self.out_type < 0:
+ outs = ins
+ else:
+ outs = ctx.socket(self.out_type)
+
+ # set sockopts (must be done first, in case of zmq.IDENTITY)
+ for opt,value in self._in_sockopts:
+ ins.setsockopt(opt, value)
+ for opt,value in self._out_sockopts:
+ outs.setsockopt(opt, value)
+
+ for iface in self._in_binds:
+ ins.bind(iface)
+ for iface in self._out_binds:
+ outs.bind(iface)
+
+ for iface in self._in_connects:
+ ins.connect(iface)
+ for iface in self._out_connects:
+ outs.connect(iface)
+
+ return ins,outs
+
+ def run_device(self):
+ """The runner method.
+
+ Do not call me directly, instead call ``self.start()``, just like a Thread.
+ """
+ ins,outs = self._setup_sockets()
+ device(self.device_type, ins, outs)
+
+ def run(self):
+ """wrap run_device in try/catch ETERM"""
+ try:
+ self.run_device()
+ except ZMQError as e:
+ if e.errno == ETERM:
+ # silence TERM errors, because this should be a clean shutdown
+ pass
+ else:
+ raise
+ finally:
+ self.done = True
+
+ def start(self):
+ """Start the device. Override me in subclass for other launchers."""
+ return self.run()
+
+ def join(self,timeout=None):
+ """wait for me to finish, like Thread.join.
+
+ Reimplemented appropriately by subclasses."""
+ tic = time.time()
+ toc = tic
+ while not self.done and not (timeout is not None and toc-tic > timeout):
+ time.sleep(.001)
+ toc = time.time()
+
+
+class BackgroundDevice(Device):
+ """Base class for launching Devices in background processes and threads."""
+
+ launcher=None
+ _launch_class=None
+
+ def start(self):
+ self.launcher = self._launch_class(target=self.run)
+ self.launcher.daemon = self.daemon
+ return self.launcher.start()
+
+ def join(self, timeout=None):
+ return self.launcher.join(timeout=timeout)
+
+
+class ThreadDevice(BackgroundDevice):
+ """A Device that will be run in a background Thread.
+
+ See Device for details.
+ """
+ _launch_class=Thread
+
+class ProcessDevice(BackgroundDevice):
+ """A Device that will be run in a background Process.
+
+ See Device for details.
+ """
+ _launch_class=Process
+ context_factory = Context
+ """Callable that returns a context. Typically either Context.instance or Context,
+ depending on whether the device should share the global instance or not.
+ """
+
+
+__all__ = ['Device', 'ThreadDevice', 'ProcessDevice']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/monitoredqueue.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/monitoredqueue.cpython-34m.so
new file mode 100644
index 00000000..724d265e
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/monitoredqueue.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/monitoredqueue.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/monitoredqueue.pxd
new file mode 100644
index 00000000..1e26ed86
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/monitoredqueue.pxd
@@ -0,0 +1,177 @@
+"""MonitoredQueue class declarations.
+
+Authors
+-------
+* MinRK
+* Brian Granger
+"""
+
+#
+# Copyright (c) 2010 Min Ragan-Kelley, Brian Granger
+#
+# This file is part of pyzmq, but is derived and adapted from zmq_queue.cpp
+# originally from libzmq-2.1.6, used under LGPLv3
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from libzmq cimport *
+
+#-----------------------------------------------------------------------------
+# MonitoredQueue C functions
+#-----------------------------------------------------------------------------
+
+cdef inline int _relay(void *insocket_, void *outsocket_, void *sidesocket_,
+ zmq_msg_t msg, zmq_msg_t side_msg, zmq_msg_t id_msg,
+ bint swap_ids) nogil:
+ cdef int rc
+ cdef int64_t flag_2
+ cdef int flag_3
+ cdef int flags
+ cdef bint more
+ cdef size_t flagsz
+ cdef void * flag_ptr
+
+ if ZMQ_VERSION_MAJOR < 3:
+ flagsz = sizeof (int64_t)
+ flag_ptr = &flag_2
+ else:
+ flagsz = sizeof (int)
+ flag_ptr = &flag_3
+
+ if swap_ids:# both router, must send second identity first
+ # recv two ids into msg, id_msg
+ rc = zmq_msg_recv(&msg, insocket_, 0)
+ if rc < 0: return rc
+
+ rc = zmq_msg_recv(&id_msg, insocket_, 0)
+ if rc < 0: return rc
+
+ # send second id (id_msg) first
+ #!!!! always send a copy before the original !!!!
+ rc = zmq_msg_copy(&side_msg, &id_msg)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&side_msg, outsocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&id_msg, sidesocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ # send first id (msg) second
+ rc = zmq_msg_copy(&side_msg, &msg)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&side_msg, outsocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&msg, sidesocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ while (True):
+ rc = zmq_msg_recv(&msg, insocket_, 0)
+ if rc < 0: return rc
+ # assert (rc == 0)
+ rc = zmq_getsockopt (insocket_, ZMQ_RCVMORE, flag_ptr, &flagsz)
+ if rc < 0: return rc
+ flags = 0
+ if ZMQ_VERSION_MAJOR < 3:
+ if flag_2:
+ flags |= ZMQ_SNDMORE
+ else:
+ if flag_3:
+ flags |= ZMQ_SNDMORE
+ # LABEL has been removed:
+ # rc = zmq_getsockopt (insocket_, ZMQ_RCVLABEL, flag_ptr, &flagsz)
+ # if flag_3:
+ # flags |= ZMQ_SNDLABEL
+ # assert (rc == 0)
+
+ rc = zmq_msg_copy(&side_msg, &msg)
+ if rc < 0: return rc
+ if flags:
+ rc = zmq_msg_send(&side_msg, outsocket_, flags)
+ if rc < 0: return rc
+ # only SNDMORE for side-socket
+ rc = zmq_msg_send(&msg, sidesocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ else:
+ rc = zmq_msg_send(&side_msg, outsocket_, 0)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&msg, sidesocket_, 0)
+ if rc < 0: return rc
+ break
+ return rc
+
+# the MonitoredQueue C function, adapted from zmq::queue.cpp :
+cdef inline int c_monitored_queue (void *insocket_, void *outsocket_,
+ void *sidesocket_, zmq_msg_t *in_msg_ptr,
+ zmq_msg_t *out_msg_ptr, int swap_ids) nogil:
+ """The actual C function for a monitored queue device.
+
+ See ``monitored_queue()`` for details.
+ """
+
+ cdef zmq_msg_t msg
+ cdef int rc = zmq_msg_init (&msg)
+ cdef zmq_msg_t id_msg
+ rc = zmq_msg_init (&id_msg)
+ if rc < 0: return rc
+ cdef zmq_msg_t side_msg
+ rc = zmq_msg_init (&side_msg)
+ if rc < 0: return rc
+
+ cdef zmq_pollitem_t items [2]
+ items [0].socket = insocket_
+ items [0].fd = 0
+ items [0].events = ZMQ_POLLIN
+ items [0].revents = 0
+ items [1].socket = outsocket_
+ items [1].fd = 0
+ items [1].events = ZMQ_POLLIN
+ items [1].revents = 0
+ # I don't think sidesocket should be polled?
+ # items [2].socket = sidesocket_
+ # items [2].fd = 0
+ # items [2].events = ZMQ_POLLIN
+ # items [2].revents = 0
+
+ while (True):
+
+ # // Wait while there are either requests or replies to process.
+ rc = zmq_poll (&items [0], 2, -1)
+ if rc < 0: return rc
+ # // The algorithm below assumes ratio of request and replies processed
+ # // under full load to be 1:1. Although processing requests replies
+ # // first is tempting it is susceptible to DoS attacks (overloading
+ # // the system with unsolicited replies).
+ #
+ # // Process a request.
+ if (items [0].revents & ZMQ_POLLIN):
+ # send in_prefix to side socket
+ rc = zmq_msg_copy(&side_msg, in_msg_ptr)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&side_msg, sidesocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ # relay the rest of the message
+ rc = _relay(insocket_, outsocket_, sidesocket_, msg, side_msg, id_msg, swap_ids)
+ if rc < 0: return rc
+ if (items [1].revents & ZMQ_POLLIN):
+ # send out_prefix to side socket
+ rc = zmq_msg_copy(&side_msg, out_msg_ptr)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&side_msg, sidesocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ # relay the rest of the message
+ rc = _relay(outsocket_, insocket_, sidesocket_, msg, side_msg, id_msg, swap_ids)
+ if rc < 0: return rc
+ return rc
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/monitoredqueue.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/monitoredqueue.py
new file mode 100644
index 00000000..c6d91429
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/monitoredqueue.py
@@ -0,0 +1,37 @@
+"""pure Python monitored_queue function
+
+For use when Cython extension is unavailable (PyPy).
+
+Authors
+-------
+* MinRK
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import zmq
+
+def _relay(ins, outs, sides, prefix, swap_ids):
+ msg = ins.recv_multipart()
+ if swap_ids:
+ msg[:2] = msg[:2][::-1]
+ outs.send_multipart(msg)
+ sides.send_multipart([prefix] + msg)
+
+def monitored_queue(in_socket, out_socket, mon_socket,
+ in_prefix=b'in', out_prefix=b'out'):
+
+ swap_ids = in_socket.type == zmq.ROUTER and out_socket.type == zmq.ROUTER
+
+ poller = zmq.Poller()
+ poller.register(in_socket, zmq.POLLIN)
+ poller.register(out_socket, zmq.POLLIN)
+ while True:
+ events = dict(poller.poll())
+ if in_socket in events:
+ _relay(in_socket, out_socket, mon_socket, in_prefix, swap_ids)
+ if out_socket in events:
+ _relay(out_socket, in_socket, mon_socket, out_prefix, swap_ids)
+
+__all__ = ['monitored_queue']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/monitoredqueuedevice.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/monitoredqueuedevice.py
new file mode 100644
index 00000000..9723f866
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/monitoredqueuedevice.py
@@ -0,0 +1,66 @@
+"""MonitoredQueue classes and functions."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from zmq import ZMQError, PUB
+from zmq.devices.proxydevice import ProxyBase, Proxy, ThreadProxy, ProcessProxy
+from zmq.devices.monitoredqueue import monitored_queue
+
+
+class MonitoredQueueBase(ProxyBase):
+ """Base class for overriding methods."""
+
+ _in_prefix = b''
+ _out_prefix = b''
+
+ def __init__(self, in_type, out_type, mon_type=PUB, in_prefix=b'in', out_prefix=b'out'):
+
+ ProxyBase.__init__(self, in_type=in_type, out_type=out_type, mon_type=mon_type)
+
+ self._in_prefix = in_prefix
+ self._out_prefix = out_prefix
+
+ def run_device(self):
+ ins,outs,mons = self._setup_sockets()
+ monitored_queue(ins, outs, mons, self._in_prefix, self._out_prefix)
+
+
+class MonitoredQueue(MonitoredQueueBase, Proxy):
+ """Class for running monitored_queue in the background.
+
+ See zmq.devices.Device for most of the spec. MonitoredQueue differs from Proxy,
+ only in that it adds a ``prefix`` to messages sent on the monitor socket,
+ with a different prefix for each direction.
+
+ MQ also supports ROUTER on both sides, which zmq.proxy does not.
+
+ If a message arrives on `in_sock`, it will be prefixed with `in_prefix` on the monitor socket.
+ If it arrives on out_sock, it will be prefixed with `out_prefix`.
+
+ A PUB socket is the most logical choice for the mon_socket, but it is not required.
+ """
+ pass
+
+
+class ThreadMonitoredQueue(MonitoredQueueBase, ThreadProxy):
+ """Run zmq.monitored_queue in a background thread.
+
+ See MonitoredQueue and Proxy for details.
+ """
+ pass
+
+
+class ProcessMonitoredQueue(MonitoredQueueBase, ProcessProxy):
+ """Run zmq.monitored_queue in a background process.
+
+ See MonitoredQueue and Proxy for details.
+ """
+
+
+__all__ = [
+ 'MonitoredQueue',
+ 'ThreadMonitoredQueue',
+ 'ProcessMonitoredQueue'
+]
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/proxydevice.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/proxydevice.py
new file mode 100644
index 00000000..68be3f15
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/devices/proxydevice.py
@@ -0,0 +1,90 @@
+"""Proxy classes and functions."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import zmq
+from zmq.devices.basedevice import Device, ThreadDevice, ProcessDevice
+
+
+class ProxyBase(object):
+ """Base class for overriding methods."""
+
+ def __init__(self, in_type, out_type, mon_type=zmq.PUB):
+
+ Device.__init__(self, in_type=in_type, out_type=out_type)
+ self.mon_type = mon_type
+ self._mon_binds = []
+ self._mon_connects = []
+ self._mon_sockopts = []
+
+ def bind_mon(self, addr):
+ """Enqueue ZMQ address for binding on mon_socket.
+
+ See zmq.Socket.bind for details.
+ """
+ self._mon_binds.append(addr)
+
+ def connect_mon(self, addr):
+ """Enqueue ZMQ address for connecting on mon_socket.
+
+ See zmq.Socket.connect for details.
+ """
+ self._mon_connects.append(addr)
+
+ def setsockopt_mon(self, opt, value):
+ """Enqueue setsockopt(opt, value) for mon_socket
+
+ See zmq.Socket.setsockopt for details.
+ """
+ self._mon_sockopts.append((opt, value))
+
+ def _setup_sockets(self):
+ ins,outs = Device._setup_sockets(self)
+ ctx = self._context
+ mons = ctx.socket(self.mon_type)
+
+ # set sockopts (must be done first, in case of zmq.IDENTITY)
+ for opt,value in self._mon_sockopts:
+ mons.setsockopt(opt, value)
+
+ for iface in self._mon_binds:
+ mons.bind(iface)
+
+ for iface in self._mon_connects:
+ mons.connect(iface)
+
+ return ins,outs,mons
+
+ def run_device(self):
+ ins,outs,mons = self._setup_sockets()
+ zmq.proxy(ins, outs, mons)
+
+class Proxy(ProxyBase, Device):
+ """Threadsafe Proxy object.
+
+ See zmq.devices.Device for most of the spec. This subclass adds a
+ <method>_mon version of each <method>_{in|out} method, for configuring the
+ monitor socket.
+
+ A Proxy is a 3-socket ZMQ Device that functions just like a
+ QUEUE, except each message is also sent out on the monitor socket.
+
+ A PUB socket is the most logical choice for the mon_socket, but it is not required.
+ """
+ pass
+
+class ThreadProxy(ProxyBase, ThreadDevice):
+ """Proxy in a Thread. See Proxy for more."""
+ pass
+
+class ProcessProxy(ProxyBase, ProcessDevice):
+ """Proxy in a Process. See Proxy for more."""
+ pass
+
+
+__all__ = [
+ 'Proxy',
+ 'ThreadProxy',
+ 'ProcessProxy',
+]
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/error.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/error.py
new file mode 100644
index 00000000..48cdaafa
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/error.py
@@ -0,0 +1,164 @@
+"""0MQ Error classes and functions."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+class ZMQBaseError(Exception):
+ """Base exception class for 0MQ errors in Python."""
+ pass
+
+class ZMQError(ZMQBaseError):
+ """Wrap an errno style error.
+
+ Parameters
+ ----------
+ errno : int
+ The ZMQ errno or None. If None, then ``zmq_errno()`` is called and
+ used.
+ msg : string
+ Description of the error or None.
+ """
+ errno = None
+
+ def __init__(self, errno=None, msg=None):
+ """Wrap an errno style error.
+
+ Parameters
+ ----------
+ errno : int
+ The ZMQ errno or None. If None, then ``zmq_errno()`` is called and
+ used.
+ msg : string
+ Description of the error or None.
+ """
+ from zmq.backend import strerror, zmq_errno
+ if errno is None:
+ errno = zmq_errno()
+ if isinstance(errno, int):
+ self.errno = errno
+ if msg is None:
+ self.strerror = strerror(errno)
+ else:
+ self.strerror = msg
+ else:
+ if msg is None:
+ self.strerror = str(errno)
+ else:
+ self.strerror = msg
+ # flush signals, because there could be a SIGINT
+ # waiting to pounce, resulting in uncaught exceptions.
+ # Doing this here means getting SIGINT during a blocking
+ # libzmq call will raise a *catchable* KeyboardInterrupt
+ # PyErr_CheckSignals()
+
+ def __str__(self):
+ return self.strerror
+
+ def __repr__(self):
+ return "ZMQError('%s')"%self.strerror
+
+
+class ZMQBindError(ZMQBaseError):
+ """An error for ``Socket.bind_to_random_port()``.
+
+ See Also
+ --------
+ .Socket.bind_to_random_port
+ """
+ pass
+
+
+class NotDone(ZMQBaseError):
+ """Raised when timeout is reached while waiting for 0MQ to finish with a Message
+
+ See Also
+ --------
+ .MessageTracker.wait : object for tracking when ZeroMQ is done
+ """
+ pass
+
+
+class ContextTerminated(ZMQError):
+ """Wrapper for zmq.ETERM
+
+ .. versionadded:: 13.0
+ """
+ pass
+
+
+class Again(ZMQError):
+ """Wrapper for zmq.EAGAIN
+
+ .. versionadded:: 13.0
+ """
+ pass
+
+
+def _check_rc(rc, errno=None):
+ """internal utility for checking zmq return condition
+
+ and raising the appropriate Exception class
+ """
+ if rc < 0:
+ from zmq.backend import zmq_errno
+ if errno is None:
+ errno = zmq_errno()
+ from zmq import EAGAIN, ETERM
+ if errno == EAGAIN:
+ raise Again(errno)
+ elif errno == ETERM:
+ raise ContextTerminated(errno)
+ else:
+ raise ZMQError(errno)
+
+_zmq_version_info = None
+_zmq_version = None
+
+class ZMQVersionError(NotImplementedError):
+ """Raised when a feature is not provided by the linked version of libzmq.
+
+ .. versionadded:: 14.2
+ """
+ min_version = None
+ def __init__(self, min_version, msg='Feature'):
+ global _zmq_version
+ if _zmq_version is None:
+ from zmq import zmq_version
+ _zmq_version = zmq_version()
+ self.msg = msg
+ self.min_version = min_version
+ self.version = _zmq_version
+
+ def __repr__(self):
+ return "ZMQVersionError('%s')" % str(self)
+
+ def __str__(self):
+ return "%s requires libzmq >= %s, have %s" % (self.msg, self.min_version, self.version)
+
+
+def _check_version(min_version_info, msg='Feature'):
+ """Check for libzmq
+
+ raises ZMQVersionError if current zmq version is not at least min_version
+
+ min_version_info is a tuple of integers, and will be compared against zmq.zmq_version_info().
+ """
+ global _zmq_version_info
+ if _zmq_version_info is None:
+ from zmq import zmq_version_info
+ _zmq_version_info = zmq_version_info()
+ if _zmq_version_info < min_version_info:
+ min_version = '.'.join(str(v) for v in min_version_info)
+ raise ZMQVersionError(min_version, msg)
+
+
+__all__ = [
+ 'ZMQBaseError',
+ 'ZMQBindError',
+ 'ZMQError',
+ 'NotDone',
+ 'ContextTerminated',
+ 'Again',
+ 'ZMQVersionError',
+]
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/__init__.py
new file mode 100644
index 00000000..568e8e8d
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/__init__.py
@@ -0,0 +1,5 @@
+"""A Tornado based event loop for PyZMQ."""
+
+from zmq.eventloop.ioloop import IOLoop
+
+__all__ = ['IOLoop'] \ No newline at end of file
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/ioloop.py
new file mode 100644
index 00000000..35f4c418
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/ioloop.py
@@ -0,0 +1,193 @@
+# coding: utf-8
+"""tornado IOLoop API with zmq compatibility
+
+If you have tornado ≥ 3.0, this is a subclass of tornado's IOLoop,
+otherwise we ship a minimal subset of tornado in zmq.eventloop.minitornado.
+
+The minimal shipped version of tornado's IOLoop does not include
+support for concurrent futures - this will only be available if you
+have tornado ≥ 3.0.
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import absolute_import, division, with_statement
+
+import os
+import time
+import warnings
+
+from zmq import (
+ Poller,
+ POLLIN, POLLOUT, POLLERR,
+ ZMQError, ETERM,
+)
+
+try:
+ import tornado
+ tornado_version = tornado.version_info
+except (ImportError, AttributeError):
+ tornado_version = ()
+
+try:
+ # tornado ≥ 3
+ from tornado.ioloop import PollIOLoop, PeriodicCallback
+ from tornado.log import gen_log
+except ImportError:
+ from .minitornado.ioloop import PollIOLoop, PeriodicCallback
+ from .minitornado.log import gen_log
+
+
+class DelayedCallback(PeriodicCallback):
+ """Schedules the given callback to be called once.
+
+ The callback is called once, after callback_time milliseconds.
+
+ `start` must be called after the DelayedCallback is created.
+
+ The timeout is calculated from when `start` is called.
+ """
+ def __init__(self, callback, callback_time, io_loop=None):
+ # PeriodicCallback requires callback_time to be positive
+ warnings.warn("""DelayedCallback is deprecated.
+ Use loop.add_timeout instead.""", DeprecationWarning)
+ callback_time = max(callback_time, 1e-3)
+ super(DelayedCallback, self).__init__(callback, callback_time, io_loop)
+
+ def start(self):
+ """Starts the timer."""
+ self._running = True
+ self._firstrun = True
+ self._next_timeout = time.time() + self.callback_time / 1000.0
+ self.io_loop.add_timeout(self._next_timeout, self._run)
+
+ def _run(self):
+ if not self._running: return
+ self._running = False
+ try:
+ self.callback()
+ except Exception:
+ gen_log.error("Error in delayed callback", exc_info=True)
+
+
+class ZMQPoller(object):
+ """A poller that can be used in the tornado IOLoop.
+
+ This simply wraps a regular zmq.Poller, scaling the timeout
+ by 1000, so that it is in seconds rather than milliseconds.
+ """
+
+ def __init__(self):
+ self._poller = Poller()
+
+ @staticmethod
+ def _map_events(events):
+ """translate IOLoop.READ/WRITE/ERROR event masks into zmq.POLLIN/OUT/ERR"""
+ z_events = 0
+ if events & IOLoop.READ:
+ z_events |= POLLIN
+ if events & IOLoop.WRITE:
+ z_events |= POLLOUT
+ if events & IOLoop.ERROR:
+ z_events |= POLLERR
+ return z_events
+
+ @staticmethod
+ def _remap_events(z_events):
+ """translate zmq.POLLIN/OUT/ERR event masks into IOLoop.READ/WRITE/ERROR"""
+ events = 0
+ if z_events & POLLIN:
+ events |= IOLoop.READ
+ if z_events & POLLOUT:
+ events |= IOLoop.WRITE
+ if z_events & POLLERR:
+ events |= IOLoop.ERROR
+ return events
+
+ def register(self, fd, events):
+ return self._poller.register(fd, self._map_events(events))
+
+ def modify(self, fd, events):
+ return self._poller.modify(fd, self._map_events(events))
+
+ def unregister(self, fd):
+ return self._poller.unregister(fd)
+
+ def poll(self, timeout):
+ """poll in seconds rather than milliseconds.
+
+ Event masks will be IOLoop.READ/WRITE/ERROR
+ """
+ z_events = self._poller.poll(1000*timeout)
+ return [ (fd,self._remap_events(evt)) for (fd,evt) in z_events ]
+
+ def close(self):
+ pass
+
+
+class ZMQIOLoop(PollIOLoop):
+ """ZMQ subclass of tornado's IOLoop"""
+ def initialize(self, impl=None, **kwargs):
+ impl = ZMQPoller() if impl is None else impl
+ super(ZMQIOLoop, self).initialize(impl=impl, **kwargs)
+
+ @staticmethod
+ def instance():
+ """Returns a global `IOLoop` instance.
+
+ Most applications have a single, global `IOLoop` running on the
+ main thread. Use this method to get this instance from
+ another thread. To get the current thread's `IOLoop`, use `current()`.
+ """
+ # install ZMQIOLoop as the active IOLoop implementation
+ # when using tornado 3
+ if tornado_version >= (3,):
+ PollIOLoop.configure(ZMQIOLoop)
+ return PollIOLoop.instance()
+
+ def start(self):
+ try:
+ super(ZMQIOLoop, self).start()
+ except ZMQError as e:
+ if e.errno == ETERM:
+ # quietly return on ETERM
+ pass
+ else:
+ raise e
+
+
+if tornado_version >= (3,0) and tornado_version < (3,1):
+ def backport_close(self, all_fds=False):
+ """backport IOLoop.close to 3.0 from 3.1 (supports fd.close() method)"""
+ from zmq.eventloop.minitornado.ioloop import PollIOLoop as mini_loop
+ return mini_loop.close.__get__(self)(all_fds)
+ ZMQIOLoop.close = backport_close
+
+
+# public API name
+IOLoop = ZMQIOLoop
+
+
+def install():
+ """set the tornado IOLoop instance with the pyzmq IOLoop.
+
+ After calling this function, tornado's IOLoop.instance() and pyzmq's
+ IOLoop.instance() will return the same object.
+
+ An assertion error will be raised if tornado's IOLoop has been initialized
+ prior to calling this function.
+ """
+ from tornado import ioloop
+ # check if tornado's IOLoop is already initialized to something other
+ # than the pyzmq IOLoop instance:
+ assert (not ioloop.IOLoop.initialized()) or \
+ ioloop.IOLoop.instance() is IOLoop.instance(), "tornado IOLoop already initialized"
+
+ if tornado_version >= (3,):
+ # tornado 3 has an official API for registering new defaults, yay!
+ ioloop.IOLoop.configure(ZMQIOLoop)
+ else:
+ # we have to set the global instance explicitly
+ ioloop.IOLoop._instance = IOLoop.instance()
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/__init__.py
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/concurrent.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/concurrent.py
new file mode 100644
index 00000000..519b23d5
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/concurrent.py
@@ -0,0 +1,11 @@
+"""pyzmq does not ship tornado's futures,
+this just raises informative NotImplementedErrors to avoid having to change too much code.
+"""
+
+class NotImplementedFuture(object):
+ def __init__(self, *args, **kwargs):
+ raise NotImplementedError("pyzmq does not ship tornado's Futures, "
+ "install tornado >= 3.0 for future support."
+ )
+
+Future = TracebackFuture = NotImplementedFuture
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/ioloop.py
new file mode 100644
index 00000000..710a3ecb
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/ioloop.py
@@ -0,0 +1,829 @@
+#!/usr/bin/env python
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""An I/O event loop for non-blocking sockets.
+
+Typical applications will use a single `IOLoop` object, in the
+`IOLoop.instance` singleton. The `IOLoop.start` method should usually
+be called at the end of the ``main()`` function. Atypical applications may
+use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
+case.
+
+In addition to I/O events, the `IOLoop` can also schedule time-based events.
+`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
+"""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+import datetime
+import errno
+import functools
+import heapq
+import logging
+import numbers
+import os
+import select
+import sys
+import threading
+import time
+import traceback
+
+from .concurrent import Future, TracebackFuture
+from .log import app_log, gen_log
+from . import stack_context
+from .util import Configurable
+
+try:
+ import signal
+except ImportError:
+ signal = None
+
+try:
+ import thread # py2
+except ImportError:
+ import _thread as thread # py3
+
+from .platform.auto import set_close_exec, Waker
+
+
+class TimeoutError(Exception):
+ pass
+
+
+class IOLoop(Configurable):
+ """A level-triggered I/O loop.
+
+ We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
+ are available, or else we fall back on select(). If you are
+ implementing a system that needs to handle thousands of
+ simultaneous connections, you should use a system that supports
+ either ``epoll`` or ``kqueue``.
+
+ Example usage for a simple TCP server::
+
+ import errno
+ import functools
+ import ioloop
+ import socket
+
+ def connection_ready(sock, fd, events):
+ while True:
+ try:
+ connection, address = sock.accept()
+ except socket.error, e:
+ if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
+ raise
+ return
+ connection.setblocking(0)
+ handle_connection(connection, address)
+
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.setblocking(0)
+ sock.bind(("", port))
+ sock.listen(128)
+
+ io_loop = ioloop.IOLoop.instance()
+ callback = functools.partial(connection_ready, sock)
+ io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
+ io_loop.start()
+
+ """
+ # Constants from the epoll module
+ _EPOLLIN = 0x001
+ _EPOLLPRI = 0x002
+ _EPOLLOUT = 0x004
+ _EPOLLERR = 0x008
+ _EPOLLHUP = 0x010
+ _EPOLLRDHUP = 0x2000
+ _EPOLLONESHOT = (1 << 30)
+ _EPOLLET = (1 << 31)
+
+ # Our events map exactly to the epoll events
+ NONE = 0
+ READ = _EPOLLIN
+ WRITE = _EPOLLOUT
+ ERROR = _EPOLLERR | _EPOLLHUP
+
+ # Global lock for creating global IOLoop instance
+ _instance_lock = threading.Lock()
+
+ _current = threading.local()
+
+ @staticmethod
+ def instance():
+ """Returns a global `IOLoop` instance.
+
+ Most applications have a single, global `IOLoop` running on the
+ main thread. Use this method to get this instance from
+ another thread. To get the current thread's `IOLoop`, use `current()`.
+ """
+ if not hasattr(IOLoop, "_instance"):
+ with IOLoop._instance_lock:
+ if not hasattr(IOLoop, "_instance"):
+ # New instance after double check
+ IOLoop._instance = IOLoop()
+ return IOLoop._instance
+
+ @staticmethod
+ def initialized():
+ """Returns true if the singleton instance has been created."""
+ return hasattr(IOLoop, "_instance")
+
+ def install(self):
+ """Installs this `IOLoop` object as the singleton instance.
+
+ This is normally not necessary as `instance()` will create
+ an `IOLoop` on demand, but you may want to call `install` to use
+ a custom subclass of `IOLoop`.
+ """
+ assert not IOLoop.initialized()
+ IOLoop._instance = self
+
+ @staticmethod
+ def current():
+ """Returns the current thread's `IOLoop`.
+
+ If an `IOLoop` is currently running or has been marked as current
+ by `make_current`, returns that instance. Otherwise returns
+ `IOLoop.instance()`, i.e. the main thread's `IOLoop`.
+
+ A common pattern for classes that depend on ``IOLoops`` is to use
+ a default argument to enable programs with multiple ``IOLoops``
+ but not require the argument for simpler applications::
+
+ class MyClass(object):
+ def __init__(self, io_loop=None):
+ self.io_loop = io_loop or IOLoop.current()
+
+ In general you should use `IOLoop.current` as the default when
+ constructing an asynchronous object, and use `IOLoop.instance`
+ when you mean to communicate to the main thread from a different
+ one.
+ """
+ current = getattr(IOLoop._current, "instance", None)
+ if current is None:
+ return IOLoop.instance()
+ return current
+
+ def make_current(self):
+ """Makes this the `IOLoop` for the current thread.
+
+ An `IOLoop` automatically becomes current for its thread
+ when it is started, but it is sometimes useful to call
+ `make_current` explicitly before starting the `IOLoop`,
+ so that code run at startup time can find the right
+ instance.
+ """
+ IOLoop._current.instance = self
+
+ @staticmethod
+ def clear_current():
+ IOLoop._current.instance = None
+
+ @classmethod
+ def configurable_base(cls):
+ return IOLoop
+
+ @classmethod
+ def configurable_default(cls):
+ # this is the only patch to IOLoop:
+ from zmq.eventloop.ioloop import ZMQIOLoop
+ return ZMQIOLoop
+ # the remainder of this method is unused,
+ # but left for preservation reasons
+ if hasattr(select, "epoll"):
+ from tornado.platform.epoll import EPollIOLoop
+ return EPollIOLoop
+ if hasattr(select, "kqueue"):
+ # Python 2.6+ on BSD or Mac
+ from tornado.platform.kqueue import KQueueIOLoop
+ return KQueueIOLoop
+ from tornado.platform.select import SelectIOLoop
+ return SelectIOLoop
+
+ def initialize(self):
+ pass
+
+ def close(self, all_fds=False):
+ """Closes the `IOLoop`, freeing any resources used.
+
+ If ``all_fds`` is true, all file descriptors registered on the
+ IOLoop will be closed (not just the ones created by the
+ `IOLoop` itself).
+
+ Many applications will only use a single `IOLoop` that runs for the
+ entire lifetime of the process. In that case closing the `IOLoop`
+ is not necessary since everything will be cleaned up when the
+ process exits. `IOLoop.close` is provided mainly for scenarios
+ such as unit tests, which create and destroy a large number of
+ ``IOLoops``.
+
+ An `IOLoop` must be completely stopped before it can be closed. This
+ means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
+ be allowed to return before attempting to call `IOLoop.close()`.
+ Therefore the call to `close` will usually appear just after
+ the call to `start` rather than near the call to `stop`.
+
+ .. versionchanged:: 3.1
+ If the `IOLoop` implementation supports non-integer objects
+ for "file descriptors", those objects will have their
+ ``close`` method called when ``all_fds`` is true.
+ """
+ raise NotImplementedError()
+
+ def add_handler(self, fd, handler, events):
+ """Registers the given handler to receive the given events for fd.
+
+ The ``events`` argument is a bitwise or of the constants
+ ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
+
+ When an event occurs, ``handler(fd, events)`` will be run.
+ """
+ raise NotImplementedError()
+
+ def update_handler(self, fd, events):
+ """Changes the events we listen for fd."""
+ raise NotImplementedError()
+
+ def remove_handler(self, fd):
+ """Stop listening for events on fd."""
+ raise NotImplementedError()
+
+ def set_blocking_signal_threshold(self, seconds, action):
+ """Sends a signal if the `IOLoop` is blocked for more than
+ ``s`` seconds.
+
+ Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
+ platform.
+
+ The action parameter is a Python signal handler. Read the
+ documentation for the `signal` module for more information.
+ If ``action`` is None, the process will be killed if it is
+ blocked for too long.
+ """
+ raise NotImplementedError()
+
+ def set_blocking_log_threshold(self, seconds):
+ """Logs a stack trace if the `IOLoop` is blocked for more than
+ ``s`` seconds.
+
+ Equivalent to ``set_blocking_signal_threshold(seconds,
+ self.log_stack)``
+ """
+ self.set_blocking_signal_threshold(seconds, self.log_stack)
+
+ def log_stack(self, signal, frame):
+ """Signal handler to log the stack trace of the current thread.
+
+ For use with `set_blocking_signal_threshold`.
+ """
+ gen_log.warning('IOLoop blocked for %f seconds in\n%s',
+ self._blocking_signal_threshold,
+ ''.join(traceback.format_stack(frame)))
+
+ def start(self):
+ """Starts the I/O loop.
+
+ The loop will run until one of the callbacks calls `stop()`, which
+ will make the loop stop after the current event iteration completes.
+ """
+ raise NotImplementedError()
+
+ def stop(self):
+ """Stop the I/O loop.
+
+ If the event loop is not currently running, the next call to `start()`
+ will return immediately.
+
+ To use asynchronous methods from otherwise-synchronous code (such as
+ unit tests), you can start and stop the event loop like this::
+
+ ioloop = IOLoop()
+ async_method(ioloop=ioloop, callback=ioloop.stop)
+ ioloop.start()
+
+ ``ioloop.start()`` will return after ``async_method`` has run
+ its callback, whether that callback was invoked before or
+ after ``ioloop.start``.
+
+ Note that even after `stop` has been called, the `IOLoop` is not
+ completely stopped until `IOLoop.start` has also returned.
+ Some work that was scheduled before the call to `stop` may still
+ be run before the `IOLoop` shuts down.
+ """
+ raise NotImplementedError()
+
+ def run_sync(self, func, timeout=None):
+ """Starts the `IOLoop`, runs the given function, and stops the loop.
+
+ If the function returns a `.Future`, the `IOLoop` will run
+ until the future is resolved. If it raises an exception, the
+ `IOLoop` will stop and the exception will be re-raised to the
+ caller.
+
+ The keyword-only argument ``timeout`` may be used to set
+ a maximum duration for the function. If the timeout expires,
+ a `TimeoutError` is raised.
+
+ This method is useful in conjunction with `tornado.gen.coroutine`
+ to allow asynchronous calls in a ``main()`` function::
+
+ @gen.coroutine
+ def main():
+ # do stuff...
+
+ if __name__ == '__main__':
+ IOLoop.instance().run_sync(main)
+ """
+ future_cell = [None]
+
+ def run():
+ try:
+ result = func()
+ except Exception:
+ future_cell[0] = TracebackFuture()
+ future_cell[0].set_exc_info(sys.exc_info())
+ else:
+ if isinstance(result, Future):
+ future_cell[0] = result
+ else:
+ future_cell[0] = Future()
+ future_cell[0].set_result(result)
+ self.add_future(future_cell[0], lambda future: self.stop())
+ self.add_callback(run)
+ if timeout is not None:
+ timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
+ self.start()
+ if timeout is not None:
+ self.remove_timeout(timeout_handle)
+ if not future_cell[0].done():
+ raise TimeoutError('Operation timed out after %s seconds' % timeout)
+ return future_cell[0].result()
+
+ def time(self):
+ """Returns the current time according to the `IOLoop`'s clock.
+
+ The return value is a floating-point number relative to an
+ unspecified time in the past.
+
+ By default, the `IOLoop`'s time function is `time.time`. However,
+ it may be configured to use e.g. `time.monotonic` instead.
+ Calls to `add_timeout` that pass a number instead of a
+ `datetime.timedelta` should use this function to compute the
+ appropriate time, so they can work no matter what time function
+ is chosen.
+ """
+ return time.time()
+
+ def add_timeout(self, deadline, callback):
+ """Runs the ``callback`` at the time ``deadline`` from the I/O loop.
+
+ Returns an opaque handle that may be passed to
+ `remove_timeout` to cancel.
+
+ ``deadline`` may be a number denoting a time (on the same
+ scale as `IOLoop.time`, normally `time.time`), or a
+ `datetime.timedelta` object for a deadline relative to the
+ current time.
+
+ Note that it is not safe to call `add_timeout` from other threads.
+ Instead, you must use `add_callback` to transfer control to the
+ `IOLoop`'s thread, and then call `add_timeout` from there.
+ """
+ raise NotImplementedError()
+
+ def remove_timeout(self, timeout):
+ """Cancels a pending timeout.
+
+ The argument is a handle as returned by `add_timeout`. It is
+ safe to call `remove_timeout` even if the callback has already
+ been run.
+ """
+ raise NotImplementedError()
+
+ def add_callback(self, callback, *args, **kwargs):
+ """Calls the given callback on the next I/O loop iteration.
+
+ It is safe to call this method from any thread at any time,
+ except from a signal handler. Note that this is the **only**
+ method in `IOLoop` that makes this thread-safety guarantee; all
+ other interaction with the `IOLoop` must be done from that
+ `IOLoop`'s thread. `add_callback()` may be used to transfer
+ control from other threads to the `IOLoop`'s thread.
+
+ To add a callback from a signal handler, see
+ `add_callback_from_signal`.
+ """
+ raise NotImplementedError()
+
+ def add_callback_from_signal(self, callback, *args, **kwargs):
+ """Calls the given callback on the next I/O loop iteration.
+
+ Safe for use from a Python signal handler; should not be used
+ otherwise.
+
+ Callbacks added with this method will be run without any
+ `.stack_context`, to avoid picking up the context of the function
+ that was interrupted by the signal.
+ """
+ raise NotImplementedError()
+
+ def add_future(self, future, callback):
+ """Schedules a callback on the ``IOLoop`` when the given
+ `.Future` is finished.
+
+ The callback is invoked with one argument, the
+ `.Future`.
+ """
+ assert isinstance(future, Future)
+ callback = stack_context.wrap(callback)
+ future.add_done_callback(
+ lambda future: self.add_callback(callback, future))
+
+ def _run_callback(self, callback):
+ """Runs a callback with error handling.
+
+ For use in subclasses.
+ """
+ try:
+ callback()
+ except Exception:
+ self.handle_callback_exception(callback)
+
+ def handle_callback_exception(self, callback):
+ """This method is called whenever a callback run by the `IOLoop`
+ throws an exception.
+
+ By default simply logs the exception as an error. Subclasses
+ may override this method to customize reporting of exceptions.
+
+ The exception itself is not passed explicitly, but is available
+ in `sys.exc_info`.
+ """
+ app_log.error("Exception in callback %r", callback, exc_info=True)
+
+
+class PollIOLoop(IOLoop):
+ """Base class for IOLoops built around a select-like function.
+
+ For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
+ (Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
+ `tornado.platform.select.SelectIOLoop` (all platforms).
+ """
+ def initialize(self, impl, time_func=None):
+ super(PollIOLoop, self).initialize()
+ self._impl = impl
+ if hasattr(self._impl, 'fileno'):
+ set_close_exec(self._impl.fileno())
+ self.time_func = time_func or time.time
+ self._handlers = {}
+ self._events = {}
+ self._callbacks = []
+ self._callback_lock = threading.Lock()
+ self._timeouts = []
+ self._cancellations = 0
+ self._running = False
+ self._stopped = False
+ self._closing = False
+ self._thread_ident = None
+ self._blocking_signal_threshold = None
+
+ # Create a pipe that we send bogus data to when we want to wake
+ # the I/O loop when it is idle
+ self._waker = Waker()
+ self.add_handler(self._waker.fileno(),
+ lambda fd, events: self._waker.consume(),
+ self.READ)
+
+ def close(self, all_fds=False):
+ with self._callback_lock:
+ self._closing = True
+ self.remove_handler(self._waker.fileno())
+ if all_fds:
+ for fd in self._handlers.keys():
+ try:
+ close_method = getattr(fd, 'close', None)
+ if close_method is not None:
+ close_method()
+ else:
+ os.close(fd)
+ except Exception:
+ gen_log.debug("error closing fd %s", fd, exc_info=True)
+ self._waker.close()
+ self._impl.close()
+
+ def add_handler(self, fd, handler, events):
+ self._handlers[fd] = stack_context.wrap(handler)
+ self._impl.register(fd, events | self.ERROR)
+
+ def update_handler(self, fd, events):
+ self._impl.modify(fd, events | self.ERROR)
+
+ def remove_handler(self, fd):
+ self._handlers.pop(fd, None)
+ self._events.pop(fd, None)
+ try:
+ self._impl.unregister(fd)
+ except Exception:
+ gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
+
+ def set_blocking_signal_threshold(self, seconds, action):
+ if not hasattr(signal, "setitimer"):
+ gen_log.error("set_blocking_signal_threshold requires a signal module "
+ "with the setitimer method")
+ return
+ self._blocking_signal_threshold = seconds
+ if seconds is not None:
+ signal.signal(signal.SIGALRM,
+ action if action is not None else signal.SIG_DFL)
+
+ def start(self):
+ if not logging.getLogger().handlers:
+ # The IOLoop catches and logs exceptions, so it's
+ # important that log output be visible. However, python's
+ # default behavior for non-root loggers (prior to python
+ # 3.2) is to print an unhelpful "no handlers could be
+ # found" message rather than the actual log entry, so we
+ # must explicitly configure logging if we've made it this
+ # far without anything.
+ logging.basicConfig()
+ if self._stopped:
+ self._stopped = False
+ return
+ old_current = getattr(IOLoop._current, "instance", None)
+ IOLoop._current.instance = self
+ self._thread_ident = thread.get_ident()
+ self._running = True
+
+ # signal.set_wakeup_fd closes a race condition in event loops:
+ # a signal may arrive at the beginning of select/poll/etc
+ # before it goes into its interruptible sleep, so the signal
+ # will be consumed without waking the select. The solution is
+ # for the (C, synchronous) signal handler to write to a pipe,
+ # which will then be seen by select.
+ #
+ # In python's signal handling semantics, this only matters on the
+ # main thread (fortunately, set_wakeup_fd only works on the main
+ # thread and will raise a ValueError otherwise).
+ #
+ # If someone has already set a wakeup fd, we don't want to
+ # disturb it. This is an issue for twisted, which does its
+ # SIGCHILD processing in response to its own wakeup fd being
+ # written to. As long as the wakeup fd is registered on the IOLoop,
+ # the loop will still wake up and everything should work.
+ old_wakeup_fd = None
+ if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
+ # requires python 2.6+, unix. set_wakeup_fd exists but crashes
+ # the python process on windows.
+ try:
+ old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
+ if old_wakeup_fd != -1:
+ # Already set, restore previous value. This is a little racy,
+ # but there's no clean get_wakeup_fd and in real use the
+ # IOLoop is just started once at the beginning.
+ signal.set_wakeup_fd(old_wakeup_fd)
+ old_wakeup_fd = None
+ except ValueError: # non-main thread
+ pass
+
+ while True:
+ poll_timeout = 3600.0
+
+ # Prevent IO event starvation by delaying new callbacks
+ # to the next iteration of the event loop.
+ with self._callback_lock:
+ callbacks = self._callbacks
+ self._callbacks = []
+ for callback in callbacks:
+ self._run_callback(callback)
+
+ if self._timeouts:
+ now = self.time()
+ while self._timeouts:
+ if self._timeouts[0].callback is None:
+ # the timeout was cancelled
+ heapq.heappop(self._timeouts)
+ self._cancellations -= 1
+ elif self._timeouts[0].deadline <= now:
+ timeout = heapq.heappop(self._timeouts)
+ self._run_callback(timeout.callback)
+ else:
+ seconds = self._timeouts[0].deadline - now
+ poll_timeout = min(seconds, poll_timeout)
+ break
+ if (self._cancellations > 512
+ and self._cancellations > (len(self._timeouts) >> 1)):
+ # Clean up the timeout queue when it gets large and it's
+ # more than half cancellations.
+ self._cancellations = 0
+ self._timeouts = [x for x in self._timeouts
+ if x.callback is not None]
+ heapq.heapify(self._timeouts)
+
+ if self._callbacks:
+ # If any callbacks or timeouts called add_callback,
+ # we don't want to wait in poll() before we run them.
+ poll_timeout = 0.0
+
+ if not self._running:
+ break
+
+ if self._blocking_signal_threshold is not None:
+ # clear alarm so it doesn't fire while poll is waiting for
+ # events.
+ signal.setitimer(signal.ITIMER_REAL, 0, 0)
+
+ try:
+ event_pairs = self._impl.poll(poll_timeout)
+ except Exception as e:
+ # Depending on python version and IOLoop implementation,
+ # different exception types may be thrown and there are
+ # two ways EINTR might be signaled:
+ # * e.errno == errno.EINTR
+ # * e.args is like (errno.EINTR, 'Interrupted system call')
+ if (getattr(e, 'errno', None) == errno.EINTR or
+ (isinstance(getattr(e, 'args', None), tuple) and
+ len(e.args) == 2 and e.args[0] == errno.EINTR)):
+ continue
+ else:
+ raise
+
+ if self._blocking_signal_threshold is not None:
+ signal.setitimer(signal.ITIMER_REAL,
+ self._blocking_signal_threshold, 0)
+
+ # Pop one fd at a time from the set of pending fds and run
+ # its handler. Since that handler may perform actions on
+ # other file descriptors, there may be reentrant calls to
+ # this IOLoop that update self._events
+ self._events.update(event_pairs)
+ while self._events:
+ fd, events = self._events.popitem()
+ try:
+ self._handlers[fd](fd, events)
+ except (OSError, IOError) as e:
+ if e.args[0] == errno.EPIPE:
+ # Happens when the client closes the connection
+ pass
+ else:
+ app_log.error("Exception in I/O handler for fd %s",
+ fd, exc_info=True)
+ except Exception:
+ app_log.error("Exception in I/O handler for fd %s",
+ fd, exc_info=True)
+ # reset the stopped flag so another start/stop pair can be issued
+ self._stopped = False
+ if self._blocking_signal_threshold is not None:
+ signal.setitimer(signal.ITIMER_REAL, 0, 0)
+ IOLoop._current.instance = old_current
+ if old_wakeup_fd is not None:
+ signal.set_wakeup_fd(old_wakeup_fd)
+
+ def stop(self):
+ self._running = False
+ self._stopped = True
+ self._waker.wake()
+
+ def time(self):
+ return self.time_func()
+
+ def add_timeout(self, deadline, callback):
+ timeout = _Timeout(deadline, stack_context.wrap(callback), self)
+ heapq.heappush(self._timeouts, timeout)
+ return timeout
+
+ def remove_timeout(self, timeout):
+ # Removing from a heap is complicated, so just leave the defunct
+ # timeout object in the queue (see discussion in
+ # http://docs.python.org/library/heapq.html).
+ # If this turns out to be a problem, we could add a garbage
+ # collection pass whenever there are too many dead timeouts.
+ timeout.callback = None
+ self._cancellations += 1
+
+ def add_callback(self, callback, *args, **kwargs):
+ with self._callback_lock:
+ if self._closing:
+ raise RuntimeError("IOLoop is closing")
+ list_empty = not self._callbacks
+ self._callbacks.append(functools.partial(
+ stack_context.wrap(callback), *args, **kwargs))
+ if list_empty and thread.get_ident() != self._thread_ident:
+ # If we're in the IOLoop's thread, we know it's not currently
+ # polling. If we're not, and we added the first callback to an
+ # empty list, we may need to wake it up (it may wake up on its
+ # own, but an occasional extra wake is harmless). Waking
+ # up a polling IOLoop is relatively expensive, so we try to
+ # avoid it when we can.
+ self._waker.wake()
+
+ def add_callback_from_signal(self, callback, *args, **kwargs):
+ with stack_context.NullContext():
+ if thread.get_ident() != self._thread_ident:
+ # if the signal is handled on another thread, we can add
+ # it normally (modulo the NullContext)
+ self.add_callback(callback, *args, **kwargs)
+ else:
+ # If we're on the IOLoop's thread, we cannot use
+ # the regular add_callback because it may deadlock on
+ # _callback_lock. Blindly insert into self._callbacks.
+ # This is safe because the GIL makes list.append atomic.
+ # One subtlety is that if the signal interrupted the
+ # _callback_lock block in IOLoop.start, we may modify
+ # either the old or new version of self._callbacks,
+ # but either way will work.
+ self._callbacks.append(functools.partial(
+ stack_context.wrap(callback), *args, **kwargs))
+
+
+class _Timeout(object):
+ """An IOLoop timeout, a UNIX timestamp and a callback"""
+
+ # Reduce memory overhead when there are lots of pending callbacks
+ __slots__ = ['deadline', 'callback']
+
+ def __init__(self, deadline, callback, io_loop):
+ if isinstance(deadline, numbers.Real):
+ self.deadline = deadline
+ elif isinstance(deadline, datetime.timedelta):
+ self.deadline = io_loop.time() + _Timeout.timedelta_to_seconds(deadline)
+ else:
+ raise TypeError("Unsupported deadline %r" % deadline)
+ self.callback = callback
+
+ @staticmethod
+ def timedelta_to_seconds(td):
+ """Equivalent to td.total_seconds() (introduced in python 2.7)."""
+ return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
+
+ # Comparison methods to sort by deadline, with object id as a tiebreaker
+ # to guarantee a consistent ordering. The heapq module uses __le__
+ # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
+ # use __lt__).
+ def __lt__(self, other):
+ return ((self.deadline, id(self)) <
+ (other.deadline, id(other)))
+
+ def __le__(self, other):
+ return ((self.deadline, id(self)) <=
+ (other.deadline, id(other)))
+
+
+class PeriodicCallback(object):
+ """Schedules the given callback to be called periodically.
+
+ The callback is called every ``callback_time`` milliseconds.
+
+ `start` must be called after the `PeriodicCallback` is created.
+ """
+ def __init__(self, callback, callback_time, io_loop=None):
+ self.callback = callback
+ if callback_time <= 0:
+ raise ValueError("Periodic callback must have a positive callback_time")
+ self.callback_time = callback_time
+ self.io_loop = io_loop or IOLoop.current()
+ self._running = False
+ self._timeout = None
+
+ def start(self):
+ """Starts the timer."""
+ self._running = True
+ self._next_timeout = self.io_loop.time()
+ self._schedule_next()
+
+ def stop(self):
+ """Stops the timer."""
+ self._running = False
+ if self._timeout is not None:
+ self.io_loop.remove_timeout(self._timeout)
+ self._timeout = None
+
+ def _run(self):
+ if not self._running:
+ return
+ try:
+ self.callback()
+ except Exception:
+ app_log.error("Error in periodic callback", exc_info=True)
+ self._schedule_next()
+
+ def _schedule_next(self):
+ if self._running:
+ current_time = self.io_loop.time()
+ while self._next_timeout <= current_time:
+ self._next_timeout += self.callback_time / 1000.0
+ self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/log.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/log.py
new file mode 100644
index 00000000..49051e89
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/log.py
@@ -0,0 +1,6 @@
+"""minimal subset of tornado.log for zmq.eventloop.minitornado"""
+
+import logging
+
+app_log = logging.getLogger("tornado.application")
+gen_log = logging.getLogger("tornado.general")
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/__init__.py
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/auto.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/auto.py
new file mode 100644
index 00000000..b40ccd94
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/auto.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Implementation of platform-specific functionality.
+
+For each function or class described in `tornado.platform.interface`,
+the appropriate platform-specific implementation exists in this module.
+Most code that needs access to this functionality should do e.g.::
+
+ from tornado.platform.auto import set_close_exec
+"""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+import os
+
+if os.name == 'nt':
+ from .common import Waker
+ from .windows import set_close_exec
+else:
+ from .posix import set_close_exec, Waker
+
+try:
+ # monotime monkey-patches the time module to have a monotonic function
+ # in versions of python before 3.3.
+ import monotime
+except ImportError:
+ pass
+try:
+ from time import monotonic as monotonic_time
+except ImportError:
+ monotonic_time = None
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/common.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/common.py
new file mode 100644
index 00000000..2d75dc1e
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/common.py
@@ -0,0 +1,91 @@
+"""Lowest-common-denominator implementations of platform functionality."""
+from __future__ import absolute_import, division, print_function, with_statement
+
+import errno
+import socket
+
+from . import interface
+
+
+class Waker(interface.Waker):
+ """Create an OS independent asynchronous pipe.
+
+ For use on platforms that don't have os.pipe() (or where pipes cannot
+ be passed to select()), but do have sockets. This includes Windows
+ and Jython.
+ """
+ def __init__(self):
+ # Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py
+
+ self.writer = socket.socket()
+ # Disable buffering -- pulling the trigger sends 1 byte,
+ # and we want that sent immediately, to wake up ASAP.
+ self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+
+ count = 0
+ while 1:
+ count += 1
+ # Bind to a local port; for efficiency, let the OS pick
+ # a free port for us.
+ # Unfortunately, stress tests showed that we may not
+ # be able to connect to that port ("Address already in
+ # use") despite that the OS picked it. This appears
+ # to be a race bug in the Windows socket implementation.
+ # So we loop until a connect() succeeds (almost always
+ # on the first try). See the long thread at
+ # http://mail.zope.org/pipermail/zope/2005-July/160433.html
+ # for hideous details.
+ a = socket.socket()
+ a.bind(("127.0.0.1", 0))
+ a.listen(1)
+ connect_address = a.getsockname() # assigned (host, port) pair
+ try:
+ self.writer.connect(connect_address)
+ break # success
+ except socket.error as detail:
+ if (not hasattr(errno, 'WSAEADDRINUSE') or
+ detail[0] != errno.WSAEADDRINUSE):
+ # "Address already in use" is the only error
+ # I've seen on two WinXP Pro SP2 boxes, under
+ # Pythons 2.3.5 and 2.4.1.
+ raise
+ # (10048, 'Address already in use')
+ # assert count <= 2 # never triggered in Tim's tests
+ if count >= 10: # I've never seen it go above 2
+ a.close()
+ self.writer.close()
+ raise socket.error("Cannot bind trigger!")
+ # Close `a` and try again. Note: I originally put a short
+ # sleep() here, but it didn't appear to help or hurt.
+ a.close()
+
+ self.reader, addr = a.accept()
+ self.reader.setblocking(0)
+ self.writer.setblocking(0)
+ a.close()
+ self.reader_fd = self.reader.fileno()
+
+ def fileno(self):
+ return self.reader.fileno()
+
+ def write_fileno(self):
+ return self.writer.fileno()
+
+ def wake(self):
+ try:
+ self.writer.send(b"x")
+ except (IOError, socket.error):
+ pass
+
+ def consume(self):
+ try:
+ while True:
+ result = self.reader.recv(1024)
+ if not result:
+ break
+ except (IOError, socket.error):
+ pass
+
+ def close(self):
+ self.reader.close()
+ self.writer.close()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/interface.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/interface.py
new file mode 100644
index 00000000..07da6bab
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/interface.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Interfaces for platform-specific functionality.
+
+This module exists primarily for documentation purposes and as base classes
+for other tornado.platform modules. Most code should import the appropriate
+implementation from `tornado.platform.auto`.
+"""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+
+def set_close_exec(fd):
+ """Sets the close-on-exec bit (``FD_CLOEXEC``)for a file descriptor."""
+ raise NotImplementedError()
+
+
+class Waker(object):
+ """A socket-like object that can wake another thread from ``select()``.
+
+ The `~tornado.ioloop.IOLoop` will add the Waker's `fileno()` to
+ its ``select`` (or ``epoll`` or ``kqueue``) calls. When another
+ thread wants to wake up the loop, it calls `wake`. Once it has woken
+ up, it will call `consume` to do any necessary per-wake cleanup. When
+ the ``IOLoop`` is closed, it closes its waker too.
+ """
+ def fileno(self):
+ """Returns the read file descriptor for this waker.
+
+ Must be suitable for use with ``select()`` or equivalent on the
+ local platform.
+ """
+ raise NotImplementedError()
+
+ def write_fileno(self):
+ """Returns the write file descriptor for this waker."""
+ raise NotImplementedError()
+
+ def wake(self):
+ """Triggers activity on the waker's file descriptor."""
+ raise NotImplementedError()
+
+ def consume(self):
+ """Called after the listen has woken up to do any necessary cleanup."""
+ raise NotImplementedError()
+
+ def close(self):
+ """Closes the waker's file descriptor(s)."""
+ raise NotImplementedError()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/posix.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/posix.py
new file mode 100644
index 00000000..ccffbb66
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/posix.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Posix implementations of platform-specific functionality."""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+import fcntl
+import os
+
+from . import interface
+
+
+def set_close_exec(fd):
+ flags = fcntl.fcntl(fd, fcntl.F_GETFD)
+ fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
+
+
+def _set_nonblocking(fd):
+ flags = fcntl.fcntl(fd, fcntl.F_GETFL)
+ fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
+
+
+class Waker(interface.Waker):
+ def __init__(self):
+ r, w = os.pipe()
+ _set_nonblocking(r)
+ _set_nonblocking(w)
+ set_close_exec(r)
+ set_close_exec(w)
+ self.reader = os.fdopen(r, "rb", 0)
+ self.writer = os.fdopen(w, "wb", 0)
+
+ def fileno(self):
+ return self.reader.fileno()
+
+ def write_fileno(self):
+ return self.writer.fileno()
+
+ def wake(self):
+ try:
+ self.writer.write(b"x")
+ except IOError:
+ pass
+
+ def consume(self):
+ try:
+ while True:
+ result = self.reader.read()
+ if not result:
+ break
+ except IOError:
+ pass
+
+ def close(self):
+ self.reader.close()
+ self.writer.close()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/windows.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/windows.py
new file mode 100644
index 00000000..817bdca1
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/platform/windows.py
@@ -0,0 +1,20 @@
+# NOTE: win32 support is currently experimental, and not recommended
+# for production use.
+
+
+from __future__ import absolute_import, division, print_function, with_statement
+import ctypes
+import ctypes.wintypes
+
+# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx
+SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
+SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)
+SetHandleInformation.restype = ctypes.wintypes.BOOL
+
+HANDLE_FLAG_INHERIT = 0x00000001
+
+
+def set_close_exec(fd):
+ success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 0)
+ if not success:
+ raise ctypes.GetLastError()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/stack_context.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/stack_context.py
new file mode 100644
index 00000000..226d8042
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/stack_context.py
@@ -0,0 +1,376 @@
+#!/usr/bin/env python
+#
+# Copyright 2010 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""`StackContext` allows applications to maintain threadlocal-like state
+that follows execution as it moves to other execution contexts.
+
+The motivating examples are to eliminate the need for explicit
+``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to
+allow some additional context to be kept for logging.
+
+This is slightly magic, but it's an extension of the idea that an
+exception handler is a kind of stack-local state and when that stack
+is suspended and resumed in a new context that state needs to be
+preserved. `StackContext` shifts the burden of restoring that state
+from each call site (e.g. wrapping each `.AsyncHTTPClient` callback
+in ``async_callback``) to the mechanisms that transfer control from
+one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`,
+thread pools, etc).
+
+Example usage::
+
+ @contextlib.contextmanager
+ def die_on_error():
+ try:
+ yield
+ except Exception:
+ logging.error("exception in asynchronous operation",exc_info=True)
+ sys.exit(1)
+
+ with StackContext(die_on_error):
+ # Any exception thrown here *or in callback and its desendents*
+ # will cause the process to exit instead of spinning endlessly
+ # in the ioloop.
+ http_client.fetch(url, callback)
+ ioloop.start()
+
+Most applications shouln't have to work with `StackContext` directly.
+Here are a few rules of thumb for when it's necessary:
+
+* If you're writing an asynchronous library that doesn't rely on a
+ stack_context-aware library like `tornado.ioloop` or `tornado.iostream`
+ (for example, if you're writing a thread pool), use
+ `.stack_context.wrap()` before any asynchronous operations to capture the
+ stack context from where the operation was started.
+
+* If you're writing an asynchronous library that has some shared
+ resources (such as a connection pool), create those shared resources
+ within a ``with stack_context.NullContext():`` block. This will prevent
+ ``StackContexts`` from leaking from one request to another.
+
+* If you want to write something like an exception handler that will
+ persist across asynchronous calls, create a new `StackContext` (or
+ `ExceptionStackContext`), and make your asynchronous calls in a ``with``
+ block that references your `StackContext`.
+"""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+import sys
+import threading
+
+from .util import raise_exc_info
+
+
+class StackContextInconsistentError(Exception):
+ pass
+
+
+class _State(threading.local):
+ def __init__(self):
+ self.contexts = (tuple(), None)
+_state = _State()
+
+
+class StackContext(object):
+ """Establishes the given context as a StackContext that will be transferred.
+
+ Note that the parameter is a callable that returns a context
+ manager, not the context itself. That is, where for a
+ non-transferable context manager you would say::
+
+ with my_context():
+
+ StackContext takes the function itself rather than its result::
+
+ with StackContext(my_context):
+
+ The result of ``with StackContext() as cb:`` is a deactivation
+ callback. Run this callback when the StackContext is no longer
+ needed to ensure that it is not propagated any further (note that
+ deactivating a context does not affect any instances of that
+ context that are currently pending). This is an advanced feature
+ and not necessary in most applications.
+ """
+ def __init__(self, context_factory):
+ self.context_factory = context_factory
+ self.contexts = []
+ self.active = True
+
+ def _deactivate(self):
+ self.active = False
+
+ # StackContext protocol
+ def enter(self):
+ context = self.context_factory()
+ self.contexts.append(context)
+ context.__enter__()
+
+ def exit(self, type, value, traceback):
+ context = self.contexts.pop()
+ context.__exit__(type, value, traceback)
+
+ # Note that some of this code is duplicated in ExceptionStackContext
+ # below. ExceptionStackContext is more common and doesn't need
+ # the full generality of this class.
+ def __enter__(self):
+ self.old_contexts = _state.contexts
+ self.new_contexts = (self.old_contexts[0] + (self,), self)
+ _state.contexts = self.new_contexts
+
+ try:
+ self.enter()
+ except:
+ _state.contexts = self.old_contexts
+ raise
+
+ return self._deactivate
+
+ def __exit__(self, type, value, traceback):
+ try:
+ self.exit(type, value, traceback)
+ finally:
+ final_contexts = _state.contexts
+ _state.contexts = self.old_contexts
+
+ # Generator coroutines and with-statements with non-local
+ # effects interact badly. Check here for signs of
+ # the stack getting out of sync.
+ # Note that this check comes after restoring _state.context
+ # so that if it fails things are left in a (relatively)
+ # consistent state.
+ if final_contexts is not self.new_contexts:
+ raise StackContextInconsistentError(
+ 'stack_context inconsistency (may be caused by yield '
+ 'within a "with StackContext" block)')
+
+ # Break up a reference to itself to allow for faster GC on CPython.
+ self.new_contexts = None
+
+
+class ExceptionStackContext(object):
+ """Specialization of StackContext for exception handling.
+
+ The supplied ``exception_handler`` function will be called in the
+ event of an uncaught exception in this context. The semantics are
+ similar to a try/finally clause, and intended use cases are to log
+ an error, close a socket, or similar cleanup actions. The
+ ``exc_info`` triple ``(type, value, traceback)`` will be passed to the
+ exception_handler function.
+
+ If the exception handler returns true, the exception will be
+ consumed and will not be propagated to other exception handlers.
+ """
+ def __init__(self, exception_handler):
+ self.exception_handler = exception_handler
+ self.active = True
+
+ def _deactivate(self):
+ self.active = False
+
+ def exit(self, type, value, traceback):
+ if type is not None:
+ return self.exception_handler(type, value, traceback)
+
+ def __enter__(self):
+ self.old_contexts = _state.contexts
+ self.new_contexts = (self.old_contexts[0], self)
+ _state.contexts = self.new_contexts
+
+ return self._deactivate
+
+ def __exit__(self, type, value, traceback):
+ try:
+ if type is not None:
+ return self.exception_handler(type, value, traceback)
+ finally:
+ final_contexts = _state.contexts
+ _state.contexts = self.old_contexts
+
+ if final_contexts is not self.new_contexts:
+ raise StackContextInconsistentError(
+ 'stack_context inconsistency (may be caused by yield '
+ 'within a "with StackContext" block)')
+
+ # Break up a reference to itself to allow for faster GC on CPython.
+ self.new_contexts = None
+
+
+class NullContext(object):
+ """Resets the `StackContext`.
+
+ Useful when creating a shared resource on demand (e.g. an
+ `.AsyncHTTPClient`) where the stack that caused the creating is
+ not relevant to future operations.
+ """
+ def __enter__(self):
+ self.old_contexts = _state.contexts
+ _state.contexts = (tuple(), None)
+
+ def __exit__(self, type, value, traceback):
+ _state.contexts = self.old_contexts
+
+
+def _remove_deactivated(contexts):
+ """Remove deactivated handlers from the chain"""
+ # Clean ctx handlers
+ stack_contexts = tuple([h for h in contexts[0] if h.active])
+
+ # Find new head
+ head = contexts[1]
+ while head is not None and not head.active:
+ head = head.old_contexts[1]
+
+ # Process chain
+ ctx = head
+ while ctx is not None:
+ parent = ctx.old_contexts[1]
+
+ while parent is not None:
+ if parent.active:
+ break
+ ctx.old_contexts = parent.old_contexts
+ parent = parent.old_contexts[1]
+
+ ctx = parent
+
+ return (stack_contexts, head)
+
+
+def wrap(fn):
+ """Returns a callable object that will restore the current `StackContext`
+ when executed.
+
+ Use this whenever saving a callback to be executed later in a
+ different execution context (either in a different thread or
+ asynchronously in the same thread).
+ """
+ # Check if function is already wrapped
+ if fn is None or hasattr(fn, '_wrapped'):
+ return fn
+
+ # Capture current stack head
+ # TODO: Any other better way to store contexts and update them in wrapped function?
+ cap_contexts = [_state.contexts]
+
+ def wrapped(*args, **kwargs):
+ ret = None
+ try:
+ # Capture old state
+ current_state = _state.contexts
+
+ # Remove deactivated items
+ cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0])
+
+ # Force new state
+ _state.contexts = contexts
+
+ # Current exception
+ exc = (None, None, None)
+ top = None
+
+ # Apply stack contexts
+ last_ctx = 0
+ stack = contexts[0]
+
+ # Apply state
+ for n in stack:
+ try:
+ n.enter()
+ last_ctx += 1
+ except:
+ # Exception happened. Record exception info and store top-most handler
+ exc = sys.exc_info()
+ top = n.old_contexts[1]
+
+ # Execute callback if no exception happened while restoring state
+ if top is None:
+ try:
+ ret = fn(*args, **kwargs)
+ except:
+ exc = sys.exc_info()
+ top = contexts[1]
+
+ # If there was exception, try to handle it by going through the exception chain
+ if top is not None:
+ exc = _handle_exception(top, exc)
+ else:
+ # Otherwise take shorter path and run stack contexts in reverse order
+ while last_ctx > 0:
+ last_ctx -= 1
+ c = stack[last_ctx]
+
+ try:
+ c.exit(*exc)
+ except:
+ exc = sys.exc_info()
+ top = c.old_contexts[1]
+ break
+ else:
+ top = None
+
+ # If if exception happened while unrolling, take longer exception handler path
+ if top is not None:
+ exc = _handle_exception(top, exc)
+
+ # If exception was not handled, raise it
+ if exc != (None, None, None):
+ raise_exc_info(exc)
+ finally:
+ _state.contexts = current_state
+ return ret
+
+ wrapped._wrapped = True
+ return wrapped
+
+
+def _handle_exception(tail, exc):
+ while tail is not None:
+ try:
+ if tail.exit(*exc):
+ exc = (None, None, None)
+ except:
+ exc = sys.exc_info()
+
+ tail = tail.old_contexts[1]
+
+ return exc
+
+
+def run_with_stack_context(context, func):
+ """Run a coroutine ``func`` in the given `StackContext`.
+
+ It is not safe to have a ``yield`` statement within a ``with StackContext``
+ block, so it is difficult to use stack context with `.gen.coroutine`.
+ This helper function runs the function in the correct context while
+ keeping the ``yield`` and ``with`` statements syntactically separate.
+
+ Example::
+
+ @gen.coroutine
+ def incorrect():
+ with StackContext(ctx):
+ # ERROR: this will raise StackContextInconsistentError
+ yield other_coroutine()
+
+ @gen.coroutine
+ def correct():
+ yield run_with_stack_context(StackContext(ctx), other_coroutine)
+
+ .. versionadded:: 3.1
+ """
+ with context:
+ return func()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/util.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/util.py
new file mode 100644
index 00000000..c1e2eb95
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/minitornado/util.py
@@ -0,0 +1,184 @@
+"""Miscellaneous utility functions and classes.
+
+This module is used internally by Tornado. It is not necessarily expected
+that the functions and classes defined here will be useful to other
+applications, but they are documented here in case they are.
+
+The one public-facing part of this module is the `Configurable` class
+and its `~Configurable.configure` method, which becomes a part of the
+interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
+and `.Resolver`.
+"""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+import sys
+
+
+def import_object(name):
+ """Imports an object by name.
+
+ import_object('x') is equivalent to 'import x'.
+ import_object('x.y.z') is equivalent to 'from x.y import z'.
+
+ >>> import tornado.escape
+ >>> import_object('tornado.escape') is tornado.escape
+ True
+ >>> import_object('tornado.escape.utf8') is tornado.escape.utf8
+ True
+ >>> import_object('tornado') is tornado
+ True
+ >>> import_object('tornado.missing_module')
+ Traceback (most recent call last):
+ ...
+ ImportError: No module named missing_module
+ """
+ if name.count('.') == 0:
+ return __import__(name, None, None)
+
+ parts = name.split('.')
+ obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
+ try:
+ return getattr(obj, parts[-1])
+ except AttributeError:
+ raise ImportError("No module named %s" % parts[-1])
+
+
+# Fake unicode literal support: Python 3.2 doesn't have the u'' marker for
+# literal strings, and alternative solutions like "from __future__ import
+# unicode_literals" have other problems (see PEP 414). u() can be applied
+# to ascii strings that include \u escapes (but they must not contain
+# literal non-ascii characters).
+if type('') is not type(b''):
+ def u(s):
+ return s
+ bytes_type = bytes
+ unicode_type = str
+ basestring_type = str
+else:
+ def u(s):
+ return s.decode('unicode_escape')
+ bytes_type = str
+ unicode_type = unicode
+ basestring_type = basestring
+
+
+if sys.version_info > (3,):
+ exec("""
+def raise_exc_info(exc_info):
+ raise exc_info[1].with_traceback(exc_info[2])
+
+def exec_in(code, glob, loc=None):
+ if isinstance(code, str):
+ code = compile(code, '<string>', 'exec', dont_inherit=True)
+ exec(code, glob, loc)
+""")
+else:
+ exec("""
+def raise_exc_info(exc_info):
+ raise exc_info[0], exc_info[1], exc_info[2]
+
+def exec_in(code, glob, loc=None):
+ if isinstance(code, basestring):
+ # exec(string) inherits the caller's future imports; compile
+ # the string first to prevent that.
+ code = compile(code, '<string>', 'exec', dont_inherit=True)
+ exec code in glob, loc
+""")
+
+
+class Configurable(object):
+ """Base class for configurable interfaces.
+
+ A configurable interface is an (abstract) class whose constructor
+ acts as a factory function for one of its implementation subclasses.
+ The implementation subclass as well as optional keyword arguments to
+ its initializer can be set globally at runtime with `configure`.
+
+ By using the constructor as the factory method, the interface
+ looks like a normal class, `isinstance` works as usual, etc. This
+ pattern is most useful when the choice of implementation is likely
+ to be a global decision (e.g. when `~select.epoll` is available,
+ always use it instead of `~select.select`), or when a
+ previously-monolithic class has been split into specialized
+ subclasses.
+
+ Configurable subclasses must define the class methods
+ `configurable_base` and `configurable_default`, and use the instance
+ method `initialize` instead of ``__init__``.
+ """
+ __impl_class = None
+ __impl_kwargs = None
+
+ def __new__(cls, **kwargs):
+ base = cls.configurable_base()
+ args = {}
+ if cls is base:
+ impl = cls.configured_class()
+ if base.__impl_kwargs:
+ args.update(base.__impl_kwargs)
+ else:
+ impl = cls
+ args.update(kwargs)
+ instance = super(Configurable, cls).__new__(impl)
+ # initialize vs __init__ chosen for compatibility with AsyncHTTPClient
+ # singleton magic. If we get rid of that we can switch to __init__
+ # here too.
+ instance.initialize(**args)
+ return instance
+
+ @classmethod
+ def configurable_base(cls):
+ """Returns the base class of a configurable hierarchy.
+
+ This will normally return the class in which it is defined.
+ (which is *not* necessarily the same as the cls classmethod parameter).
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def configurable_default(cls):
+ """Returns the implementation class to be used if none is configured."""
+ raise NotImplementedError()
+
+ def initialize(self):
+ """Initialize a `Configurable` subclass instance.
+
+ Configurable classes should use `initialize` instead of ``__init__``.
+ """
+
+ @classmethod
+ def configure(cls, impl, **kwargs):
+ """Sets the class to use when the base class is instantiated.
+
+ Keyword arguments will be saved and added to the arguments passed
+ to the constructor. This can be used to set global defaults for
+ some parameters.
+ """
+ base = cls.configurable_base()
+ if isinstance(impl, (unicode_type, bytes_type)):
+ impl = import_object(impl)
+ if impl is not None and not issubclass(impl, cls):
+ raise ValueError("Invalid subclass of %s" % cls)
+ base.__impl_class = impl
+ base.__impl_kwargs = kwargs
+
+ @classmethod
+ def configured_class(cls):
+ """Returns the currently configured class."""
+ base = cls.configurable_base()
+ if cls.__impl_class is None:
+ base.__impl_class = cls.configurable_default()
+ return base.__impl_class
+
+ @classmethod
+ def _save_configuration(cls):
+ base = cls.configurable_base()
+ return (base.__impl_class, base.__impl_kwargs)
+
+ @classmethod
+ def _restore_configuration(cls, saved):
+ base = cls.configurable_base()
+ base.__impl_class = saved[0]
+ base.__impl_kwargs = saved[1]
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/zmqstream.py
new file mode 100644
index 00000000..86a97e44
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/eventloop/zmqstream.py
@@ -0,0 +1,529 @@
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A utility class to send to and recv from a non-blocking socket."""
+
+from __future__ import with_statement
+
+import sys
+
+import zmq
+from zmq.utils import jsonapi
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+from .ioloop import IOLoop
+
+try:
+ # gen_log will only import from >= 3.0
+ from tornado.log import gen_log
+ from tornado import stack_context
+except ImportError:
+ from .minitornado.log import gen_log
+ from .minitornado import stack_context
+
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+
+from zmq.utils.strtypes import bytes, unicode, basestring
+
+try:
+ callable
+except NameError:
+ callable = lambda obj: hasattr(obj, '__call__')
+
+
+class ZMQStream(object):
+ """A utility class to register callbacks when a zmq socket sends and receives
+
+ For use with zmq.eventloop.ioloop
+
+ There are three main methods
+
+ Methods:
+
+ * **on_recv(callback, copy=True):**
+ register a callback to be run every time the socket has something to receive
+ * **on_send(callback):**
+ register a callback to be run every time you call send
+ * **send(self, msg, flags=0, copy=False, callback=None):**
+ perform a send that will trigger the callback
+ if callback is passed, on_send is also called.
+
+ There are also send_multipart(), send_json(), send_pyobj()
+
+ Three other methods for deactivating the callbacks:
+
+ * **stop_on_recv():**
+ turn off the recv callback
+ * **stop_on_send():**
+ turn off the send callback
+
+ which simply call ``on_<evt>(None)``.
+
+ The entire socket interface, excluding direct recv methods, is also
+ provided, primarily through direct-linking the methods.
+ e.g.
+
+ >>> stream.bind is stream.socket.bind
+ True
+
+ """
+
+ socket = None
+ io_loop = None
+ poller = None
+
+ def __init__(self, socket, io_loop=None):
+ self.socket = socket
+ self.io_loop = io_loop or IOLoop.instance()
+ self.poller = zmq.Poller()
+
+ self._send_queue = Queue()
+ self._recv_callback = None
+ self._send_callback = None
+ self._close_callback = None
+ self._recv_copy = False
+ self._flushed = False
+
+ self._state = self.io_loop.ERROR
+ self._init_io_state()
+
+ # shortcircuit some socket methods
+ self.bind = self.socket.bind
+ self.bind_to_random_port = self.socket.bind_to_random_port
+ self.connect = self.socket.connect
+ self.setsockopt = self.socket.setsockopt
+ self.getsockopt = self.socket.getsockopt
+ self.setsockopt_string = self.socket.setsockopt_string
+ self.getsockopt_string = self.socket.getsockopt_string
+ self.setsockopt_unicode = self.socket.setsockopt_unicode
+ self.getsockopt_unicode = self.socket.getsockopt_unicode
+
+
+ def stop_on_recv(self):
+ """Disable callback and automatic receiving."""
+ return self.on_recv(None)
+
+ def stop_on_send(self):
+ """Disable callback on sending."""
+ return self.on_send(None)
+
+ def stop_on_err(self):
+ """DEPRECATED, does nothing"""
+ gen_log.warn("on_err does nothing, and will be removed")
+
+ def on_err(self, callback):
+ """DEPRECATED, does nothing"""
+ gen_log.warn("on_err does nothing, and will be removed")
+
+ def on_recv(self, callback, copy=True):
+ """Register a callback for when a message is ready to recv.
+
+ There can be only one callback registered at a time, so each
+ call to `on_recv` replaces previously registered callbacks.
+
+ on_recv(None) disables recv event polling.
+
+ Use on_recv_stream(callback) instead, to register a callback that will receive
+ both this ZMQStream and the message, instead of just the message.
+
+ Parameters
+ ----------
+
+ callback : callable
+ callback must take exactly one argument, which will be a
+ list, as returned by socket.recv_multipart()
+ if callback is None, recv callbacks are disabled.
+ copy : bool
+ copy is passed directly to recv, so if copy is False,
+ callback will receive Message objects. If copy is True,
+ then callback will receive bytes/str objects.
+
+ Returns : None
+ """
+
+ self._check_closed()
+ assert callback is None or callable(callback)
+ self._recv_callback = stack_context.wrap(callback)
+ self._recv_copy = copy
+ if callback is None:
+ self._drop_io_state(self.io_loop.READ)
+ else:
+ self._add_io_state(self.io_loop.READ)
+
+ def on_recv_stream(self, callback, copy=True):
+ """Same as on_recv, but callback will get this stream as first argument
+
+ callback must take exactly two arguments, as it will be called as::
+
+ callback(stream, msg)
+
+ Useful when a single callback should be used with multiple streams.
+ """
+ if callback is None:
+ self.stop_on_recv()
+ else:
+ self.on_recv(lambda msg: callback(self, msg), copy=copy)
+
+ def on_send(self, callback):
+ """Register a callback to be called on each send
+
+ There will be two arguments::
+
+ callback(msg, status)
+
+ * `msg` will be the list of sendable objects that was just sent
+ * `status` will be the return result of socket.send_multipart(msg) -
+ MessageTracker or None.
+
+ Non-copying sends return a MessageTracker object whose
+ `done` attribute will be True when the send is complete.
+ This allows users to track when an object is safe to write to
+ again.
+
+ The second argument will always be None if copy=True
+ on the send.
+
+ Use on_send_stream(callback) to register a callback that will be passed
+ this ZMQStream as the first argument, in addition to the other two.
+
+ on_send(None) disables send event polling.
+
+ Parameters
+ ----------
+
+ callback : callable
+ callback must take exactly two arguments, which will be
+ the message being sent (always a list),
+ and the return result of socket.send_multipart(msg) -
+ MessageTracker or None.
+
+ if callback is None, send callbacks are disabled.
+ """
+
+ self._check_closed()
+ assert callback is None or callable(callback)
+ self._send_callback = stack_context.wrap(callback)
+
+
+ def on_send_stream(self, callback):
+ """Same as on_send, but callback will get this stream as first argument
+
+ Callback will be passed three arguments::
+
+ callback(stream, msg, status)
+
+ Useful when a single callback should be used with multiple streams.
+ """
+ if callback is None:
+ self.stop_on_send()
+ else:
+ self.on_send(lambda msg, status: callback(self, msg, status))
+
+
+ def send(self, msg, flags=0, copy=True, track=False, callback=None):
+ """Send a message, optionally also register a new callback for sends.
+ See zmq.socket.send for details.
+ """
+ return self.send_multipart([msg], flags=flags, copy=copy, track=track, callback=callback)
+
+ def send_multipart(self, msg, flags=0, copy=True, track=False, callback=None):
+ """Send a multipart message, optionally also register a new callback for sends.
+ See zmq.socket.send_multipart for details.
+ """
+ kwargs = dict(flags=flags, copy=copy, track=track)
+ self._send_queue.put((msg, kwargs))
+ callback = callback or self._send_callback
+ if callback is not None:
+ self.on_send(callback)
+ else:
+ # noop callback
+ self.on_send(lambda *args: None)
+ self._add_io_state(self.io_loop.WRITE)
+
+ def send_string(self, u, flags=0, encoding='utf-8', callback=None):
+ """Send a unicode message with an encoding.
+ See zmq.socket.send_unicode for details.
+ """
+ if not isinstance(u, basestring):
+ raise TypeError("unicode/str objects only")
+ return self.send(u.encode(encoding), flags=flags, callback=callback)
+
+ send_unicode = send_string
+
+ def send_json(self, obj, flags=0, callback=None):
+ """Send json-serialized version of an object.
+ See zmq.socket.send_json for details.
+ """
+ if jsonapi is None:
+ raise ImportError('jsonlib{1,2}, json or simplejson library is required.')
+ else:
+ msg = jsonapi.dumps(obj)
+ return self.send(msg, flags=flags, callback=callback)
+
+ def send_pyobj(self, obj, flags=0, protocol=-1, callback=None):
+ """Send a Python object as a message using pickle to serialize.
+
+ See zmq.socket.send_json for details.
+ """
+ msg = pickle.dumps(obj, protocol)
+ return self.send(msg, flags, callback=callback)
+
+ def _finish_flush(self):
+ """callback for unsetting _flushed flag."""
+ self._flushed = False
+
+ def flush(self, flag=zmq.POLLIN|zmq.POLLOUT, limit=None):
+ """Flush pending messages.
+
+ This method safely handles all pending incoming and/or outgoing messages,
+ bypassing the inner loop, passing them to the registered callbacks.
+
+ A limit can be specified, to prevent blocking under high load.
+
+ flush will return the first time ANY of these conditions are met:
+ * No more events matching the flag are pending.
+ * the total number of events handled reaches the limit.
+
+ Note that if ``flag|POLLIN != 0``, recv events will be flushed even if no callback
+ is registered, unlike normal IOLoop operation. This allows flush to be
+ used to remove *and ignore* incoming messages.
+
+ Parameters
+ ----------
+ flag : int, default=POLLIN|POLLOUT
+ 0MQ poll flags.
+ If flag|POLLIN, recv events will be flushed.
+ If flag|POLLOUT, send events will be flushed.
+ Both flags can be set at once, which is the default.
+ limit : None or int, optional
+ The maximum number of messages to send or receive.
+ Both send and recv count against this limit.
+
+ Returns
+ -------
+ int : count of events handled (both send and recv)
+ """
+ self._check_closed()
+ # unset self._flushed, so callbacks will execute, in case flush has
+ # already been called this iteration
+ already_flushed = self._flushed
+ self._flushed = False
+ # initialize counters
+ count = 0
+ def update_flag():
+ """Update the poll flag, to prevent registering POLLOUT events
+ if we don't have pending sends."""
+ return flag & zmq.POLLIN | (self.sending() and flag & zmq.POLLOUT)
+ flag = update_flag()
+ if not flag:
+ # nothing to do
+ return 0
+ self.poller.register(self.socket, flag)
+ events = self.poller.poll(0)
+ while events and (not limit or count < limit):
+ s,event = events[0]
+ if event & zmq.POLLIN: # receiving
+ self._handle_recv()
+ count += 1
+ if self.socket is None:
+ # break if socket was closed during callback
+ break
+ if event & zmq.POLLOUT and self.sending():
+ self._handle_send()
+ count += 1
+ if self.socket is None:
+ # break if socket was closed during callback
+ break
+
+ flag = update_flag()
+ if flag:
+ self.poller.register(self.socket, flag)
+ events = self.poller.poll(0)
+ else:
+ events = []
+ if count: # only bypass loop if we actually flushed something
+ # skip send/recv callbacks this iteration
+ self._flushed = True
+ # reregister them at the end of the loop
+ if not already_flushed: # don't need to do it again
+ self.io_loop.add_callback(self._finish_flush)
+ elif already_flushed:
+ self._flushed = True
+
+ # update ioloop poll state, which may have changed
+ self._rebuild_io_state()
+ return count
+
+ def set_close_callback(self, callback):
+ """Call the given callback when the stream is closed."""
+ self._close_callback = stack_context.wrap(callback)
+
+ def close(self, linger=None):
+ """Close this stream."""
+ if self.socket is not None:
+ self.io_loop.remove_handler(self.socket)
+ self.socket.close(linger)
+ self.socket = None
+ if self._close_callback:
+ self._run_callback(self._close_callback)
+
+ def receiving(self):
+ """Returns True if we are currently receiving from the stream."""
+ return self._recv_callback is not None
+
+ def sending(self):
+ """Returns True if we are currently sending to the stream."""
+ return not self._send_queue.empty()
+
+ def closed(self):
+ return self.socket is None
+
+ def _run_callback(self, callback, *args, **kwargs):
+ """Wrap running callbacks in try/except to allow us to
+ close our socket."""
+ try:
+ # Use a NullContext to ensure that all StackContexts are run
+ # inside our blanket exception handler rather than outside.
+ with stack_context.NullContext():
+ callback(*args, **kwargs)
+ except:
+ gen_log.error("Uncaught exception, closing connection.",
+ exc_info=True)
+ # Close the socket on an uncaught exception from a user callback
+ # (It would eventually get closed when the socket object is
+ # gc'd, but we don't want to rely on gc happening before we
+ # run out of file descriptors)
+ self.close()
+ # Re-raise the exception so that IOLoop.handle_callback_exception
+ # can see it and log the error
+ raise
+
+ def _handle_events(self, fd, events):
+ """This method is the actual handler for IOLoop, that gets called whenever
+ an event on my socket is posted. It dispatches to _handle_recv, etc."""
+ # print "handling events"
+ if not self.socket:
+ gen_log.warning("Got events for closed stream %s", fd)
+ return
+ try:
+ # dispatch events:
+ if events & IOLoop.ERROR:
+ gen_log.error("got POLLERR event on ZMQStream, which doesn't make sense")
+ return
+ if events & IOLoop.READ:
+ self._handle_recv()
+ if not self.socket:
+ return
+ if events & IOLoop.WRITE:
+ self._handle_send()
+ if not self.socket:
+ return
+
+ # rebuild the poll state
+ self._rebuild_io_state()
+ except:
+ gen_log.error("Uncaught exception, closing connection.",
+ exc_info=True)
+ self.close()
+ raise
+
+ def _handle_recv(self):
+ """Handle a recv event."""
+ if self._flushed:
+ return
+ try:
+ msg = self.socket.recv_multipart(zmq.NOBLOCK, copy=self._recv_copy)
+ except zmq.ZMQError as e:
+ if e.errno == zmq.EAGAIN:
+ # state changed since poll event
+ pass
+ else:
+ gen_log.error("RECV Error: %s"%zmq.strerror(e.errno))
+ else:
+ if self._recv_callback:
+ callback = self._recv_callback
+ # self._recv_callback = None
+ self._run_callback(callback, msg)
+
+ # self.update_state()
+
+
+ def _handle_send(self):
+ """Handle a send event."""
+ if self._flushed:
+ return
+ if not self.sending():
+ gen_log.error("Shouldn't have handled a send event")
+ return
+
+ msg, kwargs = self._send_queue.get()
+ try:
+ status = self.socket.send_multipart(msg, **kwargs)
+ except zmq.ZMQError as e:
+ gen_log.error("SEND Error: %s", e)
+ status = e
+ if self._send_callback:
+ callback = self._send_callback
+ self._run_callback(callback, msg, status)
+
+ # self.update_state()
+
+ def _check_closed(self):
+ if not self.socket:
+ raise IOError("Stream is closed")
+
+ def _rebuild_io_state(self):
+ """rebuild io state based on self.sending() and receiving()"""
+ if self.socket is None:
+ return
+ state = self.io_loop.ERROR
+ if self.receiving():
+ state |= self.io_loop.READ
+ if self.sending():
+ state |= self.io_loop.WRITE
+ if state != self._state:
+ self._state = state
+ self._update_handler(state)
+
+ def _add_io_state(self, state):
+ """Add io_state to poller."""
+ if not self._state & state:
+ self._state = self._state | state
+ self._update_handler(self._state)
+
+ def _drop_io_state(self, state):
+ """Stop poller from watching an io_state."""
+ if self._state & state:
+ self._state = self._state & (~state)
+ self._update_handler(self._state)
+
+ def _update_handler(self, state):
+ """Update IOLoop handler with state."""
+ if self.socket is None:
+ return
+ self.io_loop.update_handler(self.socket, state)
+
+ def _init_io_state(self):
+ """initialize the ioloop event handler"""
+ with stack_context.NullContext():
+ self.io_loop.add_handler(self.socket, self._handle_events, self._state)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/__init__.py
new file mode 100644
index 00000000..ff7e5965
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/__init__.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+#-----------------------------------------------------------------------------
+# Copyright (C) 2011-2012 Travis Cline
+#
+# This file is part of pyzmq
+# It is adapted from upstream project zeromq_gevent under the New BSD License
+#
+# Distributed under the terms of the New BSD License. The full license is in
+# the file COPYING.BSD, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+"""zmq.green - gevent compatibility with zeromq.
+
+Usage
+-----
+
+Instead of importing zmq directly, do so in the following manner:
+
+..
+
+ import zmq.green as zmq
+
+
+Any calls that would have blocked the current thread will now only block the
+current green thread.
+
+This compatibility is accomplished by ensuring the nonblocking flag is set
+before any blocking operation and the ØMQ file descriptor is polled internally
+to trigger needed events.
+"""
+
+from zmq import *
+from zmq.green.core import _Context, _Socket
+from zmq.green.poll import _Poller
+Context = _Context
+Socket = _Socket
+Poller = _Poller
+
+from zmq.green.device import device
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/core.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/core.py
new file mode 100644
index 00000000..9fc73e32
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/core.py
@@ -0,0 +1,287 @@
+#-----------------------------------------------------------------------------
+# Copyright (C) 2011-2012 Travis Cline
+#
+# This file is part of pyzmq
+# It is adapted from upstream project zeromq_gevent under the New BSD License
+#
+# Distributed under the terms of the New BSD License. The full license is in
+# the file COPYING.BSD, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+"""This module wraps the :class:`Socket` and :class:`Context` found in :mod:`pyzmq <zmq>` to be non blocking
+"""
+
+from __future__ import print_function
+
+import sys
+import time
+import warnings
+
+import zmq
+
+from zmq import Context as _original_Context
+from zmq import Socket as _original_Socket
+from .poll import _Poller
+
+import gevent
+from gevent.event import AsyncResult
+from gevent.hub import get_hub
+
+if hasattr(zmq, 'RCVTIMEO'):
+ TIMEOS = (zmq.RCVTIMEO, zmq.SNDTIMEO)
+else:
+ TIMEOS = ()
+
+def _stop(evt):
+ """simple wrapper for stopping an Event, allowing for method rename in gevent 1.0"""
+ try:
+ evt.stop()
+ except AttributeError as e:
+ # gevent<1.0 compat
+ evt.cancel()
+
+class _Socket(_original_Socket):
+ """Green version of :class:`zmq.Socket`
+
+ The following methods are overridden:
+
+ * send
+ * recv
+
+ To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
+ is deferred to the hub if a ``zmq.EAGAIN`` (retry) error is raised.
+
+ The `__state_changed` method is triggered when the zmq.FD for the socket is
+ marked as readable and triggers the necessary read and write events (which
+ are waited for in the recv and send methods).
+
+ Some double underscore prefixes are used to minimize pollution of
+ :class:`zmq.Socket`'s namespace.
+ """
+ __in_send_multipart = False
+ __in_recv_multipart = False
+ __writable = None
+ __readable = None
+ _state_event = None
+ _gevent_bug_timeout = 11.6 # timeout for not trusting gevent
+ _debug_gevent = False # turn on if you think gevent is missing events
+ _poller_class = _Poller
+
+ def __init__(self, context, socket_type):
+ _original_Socket.__init__(self, context, socket_type)
+ self.__in_send_multipart = False
+ self.__in_recv_multipart = False
+ self.__setup_events()
+
+
+ def __del__(self):
+ self.close()
+
+ def close(self, linger=None):
+ super(_Socket, self).close(linger)
+ self.__cleanup_events()
+
+ def __cleanup_events(self):
+ # close the _state_event event, keeps the number of active file descriptors down
+ if getattr(self, '_state_event', None):
+ _stop(self._state_event)
+ self._state_event = None
+ # if the socket has entered a close state resume any waiting greenlets
+ self.__writable.set()
+ self.__readable.set()
+
+ def __setup_events(self):
+ self.__readable = AsyncResult()
+ self.__writable = AsyncResult()
+ self.__readable.set()
+ self.__writable.set()
+
+ try:
+ self._state_event = get_hub().loop.io(self.getsockopt(zmq.FD), 1) # read state watcher
+ self._state_event.start(self.__state_changed)
+ except AttributeError:
+ # for gevent<1.0 compatibility
+ from gevent.core import read_event
+ self._state_event = read_event(self.getsockopt(zmq.FD), self.__state_changed, persist=True)
+
+ def __state_changed(self, event=None, _evtype=None):
+ if self.closed:
+ self.__cleanup_events()
+ return
+ try:
+ # avoid triggering __state_changed from inside __state_changed
+ events = super(_Socket, self).getsockopt(zmq.EVENTS)
+ except zmq.ZMQError as exc:
+ self.__writable.set_exception(exc)
+ self.__readable.set_exception(exc)
+ else:
+ if events & zmq.POLLOUT:
+ self.__writable.set()
+ if events & zmq.POLLIN:
+ self.__readable.set()
+
+ def _wait_write(self):
+ assert self.__writable.ready(), "Only one greenlet can be waiting on this event"
+ self.__writable = AsyncResult()
+ # timeout is because libzmq cannot be trusted to properly signal a new send event:
+ # this is effectively a maximum poll interval of 1s
+ tic = time.time()
+ dt = self._gevent_bug_timeout
+ if dt:
+ timeout = gevent.Timeout(seconds=dt)
+ else:
+ timeout = None
+ try:
+ if timeout:
+ timeout.start()
+ self.__writable.get(block=True)
+ except gevent.Timeout as t:
+ if t is not timeout:
+ raise
+ toc = time.time()
+ # gevent bug: get can raise timeout even on clean return
+ # don't display zmq bug warning for gevent bug (this is getting ridiculous)
+ if self._debug_gevent and timeout and toc-tic > dt and \
+ self.getsockopt(zmq.EVENTS) & zmq.POLLOUT:
+ print("BUG: gevent may have missed a libzmq send event on %i!" % self.FD, file=sys.stderr)
+ finally:
+ if timeout:
+ timeout.cancel()
+ self.__writable.set()
+
+ def _wait_read(self):
+ assert self.__readable.ready(), "Only one greenlet can be waiting on this event"
+ self.__readable = AsyncResult()
+ # timeout is because libzmq cannot always be trusted to play nice with libevent.
+ # I can only confirm that this actually happens for send, but lets be symmetrical
+ # with our dirty hacks.
+ # this is effectively a maximum poll interval of 1s
+ tic = time.time()
+ dt = self._gevent_bug_timeout
+ if dt:
+ timeout = gevent.Timeout(seconds=dt)
+ else:
+ timeout = None
+ try:
+ if timeout:
+ timeout.start()
+ self.__readable.get(block=True)
+ except gevent.Timeout as t:
+ if t is not timeout:
+ raise
+ toc = time.time()
+ # gevent bug: get can raise timeout even on clean return
+ # don't display zmq bug warning for gevent bug (this is getting ridiculous)
+ if self._debug_gevent and timeout and toc-tic > dt and \
+ self.getsockopt(zmq.EVENTS) & zmq.POLLIN:
+ print("BUG: gevent may have missed a libzmq recv event on %i!" % self.FD, file=sys.stderr)
+ finally:
+ if timeout:
+ timeout.cancel()
+ self.__readable.set()
+
+ def send(self, data, flags=0, copy=True, track=False):
+ """send, which will only block current greenlet
+
+ state_changed always fires exactly once (success or fail) at the
+ end of this method.
+ """
+
+ # if we're given the NOBLOCK flag act as normal and let the EAGAIN get raised
+ if flags & zmq.NOBLOCK:
+ try:
+ msg = super(_Socket, self).send(data, flags, copy, track)
+ finally:
+ if not self.__in_send_multipart:
+ self.__state_changed()
+ return msg
+ # ensure the zmq.NOBLOCK flag is part of flags
+ flags |= zmq.NOBLOCK
+ while True: # Attempt to complete this operation indefinitely, blocking the current greenlet
+ try:
+ # attempt the actual call
+ msg = super(_Socket, self).send(data, flags, copy, track)
+ except zmq.ZMQError as e:
+ # if the raised ZMQError is not EAGAIN, reraise
+ if e.errno != zmq.EAGAIN:
+ if not self.__in_send_multipart:
+ self.__state_changed()
+ raise
+ else:
+ if not self.__in_send_multipart:
+ self.__state_changed()
+ return msg
+ # defer to the event loop until we're notified the socket is writable
+ self._wait_write()
+
+ def recv(self, flags=0, copy=True, track=False):
+ """recv, which will only block current greenlet
+
+ state_changed always fires exactly once (success or fail) at the
+ end of this method.
+ """
+ if flags & zmq.NOBLOCK:
+ try:
+ msg = super(_Socket, self).recv(flags, copy, track)
+ finally:
+ if not self.__in_recv_multipart:
+ self.__state_changed()
+ return msg
+
+ flags |= zmq.NOBLOCK
+ while True:
+ try:
+ msg = super(_Socket, self).recv(flags, copy, track)
+ except zmq.ZMQError as e:
+ if e.errno != zmq.EAGAIN:
+ if not self.__in_recv_multipart:
+ self.__state_changed()
+ raise
+ else:
+ if not self.__in_recv_multipart:
+ self.__state_changed()
+ return msg
+ self._wait_read()
+
+ def send_multipart(self, *args, **kwargs):
+ """wrap send_multipart to prevent state_changed on each partial send"""
+ self.__in_send_multipart = True
+ try:
+ msg = super(_Socket, self).send_multipart(*args, **kwargs)
+ finally:
+ self.__in_send_multipart = False
+ self.__state_changed()
+ return msg
+
+ def recv_multipart(self, *args, **kwargs):
+ """wrap recv_multipart to prevent state_changed on each partial recv"""
+ self.__in_recv_multipart = True
+ try:
+ msg = super(_Socket, self).recv_multipart(*args, **kwargs)
+ finally:
+ self.__in_recv_multipart = False
+ self.__state_changed()
+ return msg
+
+ def get(self, opt):
+ """trigger state_changed on getsockopt(EVENTS)"""
+ if opt in TIMEOS:
+ warnings.warn("TIMEO socket options have no effect in zmq.green", UserWarning)
+ optval = super(_Socket, self).get(opt)
+ if opt == zmq.EVENTS:
+ self.__state_changed()
+ return optval
+
+ def set(self, opt, val):
+ """set socket option"""
+ if opt in TIMEOS:
+ warnings.warn("TIMEO socket options have no effect in zmq.green", UserWarning)
+ return super(_Socket, self).set(opt, val)
+
+
+class _Context(_original_Context):
+ """Replacement for :class:`zmq.Context`
+
+ Ensures that the greened Socket above is used in calls to `socket`.
+ """
+ _socket_class = _Socket
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/device.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/device.py
new file mode 100644
index 00000000..4b070237
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/device.py
@@ -0,0 +1,32 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import zmq
+from zmq.green import Poller
+
+def device(device_type, isocket, osocket):
+ """Start a zeromq device (gevent-compatible).
+
+ Unlike the true zmq.device, this does not release the GIL.
+
+ Parameters
+ ----------
+ device_type : (QUEUE, FORWARDER, STREAMER)
+ The type of device to start (ignored).
+ isocket : Socket
+ The Socket instance for the incoming traffic.
+ osocket : Socket
+ The Socket instance for the outbound traffic.
+ """
+ p = Poller()
+ if osocket == -1:
+ osocket = isocket
+ p.register(isocket, zmq.POLLIN)
+ p.register(osocket, zmq.POLLIN)
+
+ while True:
+ events = dict(p.poll())
+ if isocket in events:
+ osocket.send_multipart(isocket.recv_multipart())
+ if osocket in events:
+ isocket.send_multipart(osocket.recv_multipart())
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/eventloop/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/eventloop/__init__.py
new file mode 100644
index 00000000..c5150efe
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/eventloop/__init__.py
@@ -0,0 +1,3 @@
+from zmq.green.eventloop.ioloop import IOLoop
+
+__all__ = ['IOLoop'] \ No newline at end of file
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/eventloop/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/eventloop/ioloop.py
new file mode 100644
index 00000000..e12fd5e9
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/eventloop/ioloop.py
@@ -0,0 +1,33 @@
+from zmq.eventloop.ioloop import *
+from zmq.green import Poller
+
+RealIOLoop = IOLoop
+RealZMQPoller = ZMQPoller
+
+class IOLoop(RealIOLoop):
+
+ def initialize(self, impl=None):
+ impl = _poll() if impl is None else impl
+ super(IOLoop, self).initialize(impl)
+
+ @staticmethod
+ def instance():
+ """Returns a global `IOLoop` instance.
+
+ Most applications have a single, global `IOLoop` running on the
+ main thread. Use this method to get this instance from
+ another thread. To get the current thread's `IOLoop`, use `current()`.
+ """
+ # install this class as the active IOLoop implementation
+ # when using tornado 3
+ if tornado_version >= (3,):
+ PollIOLoop.configure(IOLoop)
+ return PollIOLoop.instance()
+
+
+class ZMQPoller(RealZMQPoller):
+ """gevent-compatible version of ioloop.ZMQPoller"""
+ def __init__(self):
+ self._poller = Poller()
+
+_poll = ZMQPoller
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/eventloop/zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/eventloop/zmqstream.py
new file mode 100644
index 00000000..90fbd1f5
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/eventloop/zmqstream.py
@@ -0,0 +1,11 @@
+from zmq.eventloop.zmqstream import *
+
+from zmq.green.eventloop.ioloop import IOLoop
+
+RealZMQStream = ZMQStream
+
+class ZMQStream(RealZMQStream):
+
+ def __init__(self, socket, io_loop=None):
+ io_loop = io_loop or IOLoop.instance()
+ super(ZMQStream, self).__init__(socket, io_loop=io_loop)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/poll.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/poll.py
new file mode 100644
index 00000000..8f016129
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/green/poll.py
@@ -0,0 +1,95 @@
+import zmq
+import gevent
+from gevent import select
+
+from zmq import Poller as _original_Poller
+
+
+class _Poller(_original_Poller):
+ """Replacement for :class:`zmq.Poller`
+
+ Ensures that the greened Poller below is used in calls to
+ :meth:`zmq.Poller.poll`.
+ """
+ _gevent_bug_timeout = 1.33 # minimum poll interval, for working around gevent bug
+
+ def _get_descriptors(self):
+ """Returns three elements tuple with socket descriptors ready
+ for gevent.select.select
+ """
+ rlist = []
+ wlist = []
+ xlist = []
+
+ for socket, flags in self.sockets:
+ if isinstance(socket, zmq.Socket):
+ rlist.append(socket.getsockopt(zmq.FD))
+ continue
+ elif isinstance(socket, int):
+ fd = socket
+ elif hasattr(socket, 'fileno'):
+ try:
+ fd = int(socket.fileno())
+ except:
+ raise ValueError('fileno() must return an valid integer fd')
+ else:
+ raise TypeError('Socket must be a 0MQ socket, an integer fd '
+ 'or have a fileno() method: %r' % socket)
+
+ if flags & zmq.POLLIN:
+ rlist.append(fd)
+ if flags & zmq.POLLOUT:
+ wlist.append(fd)
+ if flags & zmq.POLLERR:
+ xlist.append(fd)
+
+ return (rlist, wlist, xlist)
+
+ def poll(self, timeout=-1):
+ """Overridden method to ensure that the green version of
+ Poller is used.
+
+ Behaves the same as :meth:`zmq.core.Poller.poll`
+ """
+
+ if timeout is None:
+ timeout = -1
+
+ if timeout < 0:
+ timeout = -1
+
+ rlist = None
+ wlist = None
+ xlist = None
+
+ if timeout > 0:
+ tout = gevent.Timeout.start_new(timeout/1000.0)
+
+ try:
+ # Loop until timeout or events available
+ rlist, wlist, xlist = self._get_descriptors()
+ while True:
+ events = super(_Poller, self).poll(0)
+ if events or timeout == 0:
+ return events
+
+ # wait for activity on sockets in a green way
+ # set a minimum poll frequency,
+ # because gevent < 1.0 cannot be trusted to catch edge-triggered FD events
+ _bug_timeout = gevent.Timeout.start_new(self._gevent_bug_timeout)
+ try:
+ select.select(rlist, wlist, xlist)
+ except gevent.Timeout as t:
+ if t is not _bug_timeout:
+ raise
+ finally:
+ _bug_timeout.cancel()
+
+ except gevent.Timeout as t:
+ if t is not tout:
+ raise
+ return []
+ finally:
+ if timeout > 0:
+ tout.cancel()
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/libzmq.so.3 b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/libzmq.so.3
new file mode 100644
index 00000000..ed940931
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/libzmq.so.3
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/log/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/log/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/log/__init__.py
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/log/handlers.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/log/handlers.py
new file mode 100644
index 00000000..5ff21bf3
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/log/handlers.py
@@ -0,0 +1,146 @@
+"""pyzmq logging handlers.
+
+This mainly defines the PUBHandler object for publishing logging messages over
+a zmq.PUB socket.
+
+The PUBHandler can be used with the regular logging module, as in::
+
+ >>> import logging
+ >>> handler = PUBHandler('tcp://127.0.0.1:12345')
+ >>> handler.root_topic = 'foo'
+ >>> logger = logging.getLogger('foobar')
+ >>> logger.setLevel(logging.DEBUG)
+ >>> logger.addHandler(handler)
+
+After this point, all messages logged by ``logger`` will be published on the
+PUB socket.
+
+Code adapted from StarCluster:
+
+ http://github.com/jtriley/StarCluster/blob/master/starcluster/logger.py
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import logging
+from logging import INFO, DEBUG, WARN, ERROR, FATAL
+
+import zmq
+from zmq.utils.strtypes import bytes, unicode, cast_bytes
+
+
+TOPIC_DELIM="::" # delimiter for splitting topics on the receiving end.
+
+
+class PUBHandler(logging.Handler):
+ """A basic logging handler that emits log messages through a PUB socket.
+
+ Takes a PUB socket already bound to interfaces or an interface to bind to.
+
+ Example::
+
+ sock = context.socket(zmq.PUB)
+ sock.bind('inproc://log')
+ handler = PUBHandler(sock)
+
+ Or::
+
+ handler = PUBHandler('inproc://log')
+
+ These are equivalent.
+
+ Log messages handled by this handler are broadcast with ZMQ topics;
+ ``this.root_topic`` comes first, followed by the log level
+ (DEBUG,INFO,etc.), followed by any additional subtopics specified in the
+ message by: log.debug("subtopic.subsub::the real message")
+ """
+ root_topic=""
+ socket = None
+
+ formatters = {
+ logging.DEBUG: logging.Formatter(
+ "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"),
+ logging.INFO: logging.Formatter("%(message)s\n"),
+ logging.WARN: logging.Formatter(
+ "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"),
+ logging.ERROR: logging.Formatter(
+ "%(levelname)s %(filename)s:%(lineno)d - %(message)s - %(exc_info)s\n"),
+ logging.CRITICAL: logging.Formatter(
+ "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n")}
+
+ def __init__(self, interface_or_socket, context=None):
+ logging.Handler.__init__(self)
+ if isinstance(interface_or_socket, zmq.Socket):
+ self.socket = interface_or_socket
+ self.ctx = self.socket.context
+ else:
+ self.ctx = context or zmq.Context()
+ self.socket = self.ctx.socket(zmq.PUB)
+ self.socket.bind(interface_or_socket)
+
+ def format(self,record):
+ """Format a record."""
+ return self.formatters[record.levelno].format(record)
+
+ def emit(self, record):
+ """Emit a log message on my socket."""
+ try:
+ topic, record.msg = record.msg.split(TOPIC_DELIM,1)
+ except Exception:
+ topic = ""
+ try:
+ bmsg = cast_bytes(self.format(record))
+ except Exception:
+ self.handleError(record)
+ return
+
+ topic_list = []
+
+ if self.root_topic:
+ topic_list.append(self.root_topic)
+
+ topic_list.append(record.levelname)
+
+ if topic:
+ topic_list.append(topic)
+
+ btopic = b'.'.join(cast_bytes(t) for t in topic_list)
+
+ self.socket.send_multipart([btopic, bmsg])
+
+
+class TopicLogger(logging.Logger):
+ """A simple wrapper that takes an additional argument to log methods.
+
+ All the regular methods exist, but instead of one msg argument, two
+ arguments: topic, msg are passed.
+
+ That is::
+
+ logger.debug('msg')
+
+ Would become::
+
+ logger.debug('topic.sub', 'msg')
+ """
+ def log(self, level, topic, msg, *args, **kwargs):
+ """Log 'msg % args' with level and topic.
+
+ To pass exception information, use the keyword argument exc_info
+ with a True value::
+
+ logger.log(level, "zmq.fun", "We have a %s",
+ "mysterious problem", exc_info=1)
+ """
+ logging.Logger.log(self, level, '%s::%s'%(topic,msg), *args, **kwargs)
+
+# Generate the methods of TopicLogger, since they are just adding a
+# topic prefix to a message.
+for name in "debug warn warning error critical fatal".split():
+ meth = getattr(logging.Logger,name)
+ setattr(TopicLogger, name,
+ lambda self, level, topic, msg, *args, **kwargs:
+ meth(self, level, topic+TOPIC_DELIM+msg,*args, **kwargs))
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/ssh/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/ssh/__init__.py
new file mode 100644
index 00000000..57f09568
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/ssh/__init__.py
@@ -0,0 +1 @@
+from zmq.ssh.tunnel import *
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/ssh/forward.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/ssh/forward.py
new file mode 100644
index 00000000..2d619462
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/ssh/forward.py
@@ -0,0 +1,91 @@
+#
+# This file is adapted from a paramiko demo, and thus licensed under LGPL 2.1.
+# Original Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+# Edits Copyright (C) 2010 The IPython Team
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA.
+
+"""
+Sample script showing how to do local port forwarding over paramiko.
+
+This script connects to the requested SSH server and sets up local port
+forwarding (the openssh -L option) from a local port through a tunneled
+connection to a destination reachable from the SSH server machine.
+"""
+
+from __future__ import print_function
+
+import logging
+import select
+try: # Python 3
+ import socketserver
+except ImportError: # Python 2
+ import SocketServer as socketserver
+
+logger = logging.getLogger('ssh')
+
+class ForwardServer (socketserver.ThreadingTCPServer):
+ daemon_threads = True
+ allow_reuse_address = True
+
+
+class Handler (socketserver.BaseRequestHandler):
+
+ def handle(self):
+ try:
+ chan = self.ssh_transport.open_channel('direct-tcpip',
+ (self.chain_host, self.chain_port),
+ self.request.getpeername())
+ except Exception as e:
+ logger.debug('Incoming request to %s:%d failed: %s' % (self.chain_host,
+ self.chain_port,
+ repr(e)))
+ return
+ if chan is None:
+ logger.debug('Incoming request to %s:%d was rejected by the SSH server.' %
+ (self.chain_host, self.chain_port))
+ return
+
+ logger.debug('Connected! Tunnel open %r -> %r -> %r' % (self.request.getpeername(),
+ chan.getpeername(), (self.chain_host, self.chain_port)))
+ while True:
+ r, w, x = select.select([self.request, chan], [], [])
+ if self.request in r:
+ data = self.request.recv(1024)
+ if len(data) == 0:
+ break
+ chan.send(data)
+ if chan in r:
+ data = chan.recv(1024)
+ if len(data) == 0:
+ break
+ self.request.send(data)
+ chan.close()
+ self.request.close()
+ logger.debug('Tunnel closed ')
+
+
+def forward_tunnel(local_port, remote_host, remote_port, transport):
+ # this is a little convoluted, but lets me configure things for the Handler
+ # object. (SocketServer doesn't give Handlers any way to access the outer
+ # server normally.)
+ class SubHander (Handler):
+ chain_host = remote_host
+ chain_port = remote_port
+ ssh_transport = transport
+ ForwardServer(('127.0.0.1', local_port), SubHander).serve_forever()
+
+
+__all__ = ['forward_tunnel']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/ssh/tunnel.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/ssh/tunnel.py
new file mode 100644
index 00000000..5a0c5433
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/ssh/tunnel.py
@@ -0,0 +1,376 @@
+"""Basic ssh tunnel utilities, and convenience functions for tunneling
+zeromq connections.
+"""
+
+# Copyright (C) 2010-2011 IPython Development Team
+# Copyright (C) 2011- PyZMQ Developers
+#
+# Redistributed from IPython under the terms of the BSD License.
+
+
+from __future__ import print_function
+
+import atexit
+import os
+import signal
+import socket
+import sys
+import warnings
+from getpass import getpass, getuser
+from multiprocessing import Process
+
+try:
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore', DeprecationWarning)
+ import paramiko
+ SSHException = paramiko.ssh_exception.SSHException
+except ImportError:
+ paramiko = None
+ class SSHException(Exception):
+ pass
+else:
+ from .forward import forward_tunnel
+
+try:
+ import pexpect
+except ImportError:
+ pexpect = None
+
+
+_random_ports = set()
+
+def select_random_ports(n):
+ """Selects and return n random ports that are available."""
+ ports = []
+ for i in range(n):
+ sock = socket.socket()
+ sock.bind(('', 0))
+ while sock.getsockname()[1] in _random_ports:
+ sock.close()
+ sock = socket.socket()
+ sock.bind(('', 0))
+ ports.append(sock)
+ for i, sock in enumerate(ports):
+ port = sock.getsockname()[1]
+ sock.close()
+ ports[i] = port
+ _random_ports.add(port)
+ return ports
+
+
+#-----------------------------------------------------------------------------
+# Check for passwordless login
+#-----------------------------------------------------------------------------
+
+def try_passwordless_ssh(server, keyfile, paramiko=None):
+ """Attempt to make an ssh connection without a password.
+ This is mainly used for requiring password input only once
+ when many tunnels may be connected to the same server.
+
+ If paramiko is None, the default for the platform is chosen.
+ """
+ if paramiko is None:
+ paramiko = sys.platform == 'win32'
+ if not paramiko:
+ f = _try_passwordless_openssh
+ else:
+ f = _try_passwordless_paramiko
+ return f(server, keyfile)
+
+def _try_passwordless_openssh(server, keyfile):
+ """Try passwordless login with shell ssh command."""
+ if pexpect is None:
+ raise ImportError("pexpect unavailable, use paramiko")
+ cmd = 'ssh -f '+ server
+ if keyfile:
+ cmd += ' -i ' + keyfile
+ cmd += ' exit'
+
+ # pop SSH_ASKPASS from env
+ env = os.environ.copy()
+ env.pop('SSH_ASKPASS', None)
+
+ ssh_newkey = 'Are you sure you want to continue connecting'
+ p = pexpect.spawn(cmd, env=env)
+ while True:
+ try:
+ i = p.expect([ssh_newkey, '[Pp]assword:'], timeout=.1)
+ if i==0:
+ raise SSHException('The authenticity of the host can\'t be established.')
+ except pexpect.TIMEOUT:
+ continue
+ except pexpect.EOF:
+ return True
+ else:
+ return False
+
+def _try_passwordless_paramiko(server, keyfile):
+ """Try passwordless login with paramiko."""
+ if paramiko is None:
+ msg = "Paramiko unavaliable, "
+ if sys.platform == 'win32':
+ msg += "Paramiko is required for ssh tunneled connections on Windows."
+ else:
+ msg += "use OpenSSH."
+ raise ImportError(msg)
+ username, server, port = _split_server(server)
+ client = paramiko.SSHClient()
+ client.load_system_host_keys()
+ client.set_missing_host_key_policy(paramiko.WarningPolicy())
+ try:
+ client.connect(server, port, username=username, key_filename=keyfile,
+ look_for_keys=True)
+ except paramiko.AuthenticationException:
+ return False
+ else:
+ client.close()
+ return True
+
+
+def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
+ """Connect a socket to an address via an ssh tunnel.
+
+ This is a wrapper for socket.connect(addr), when addr is not accessible
+ from the local machine. It simply creates an ssh tunnel using the remaining args,
+ and calls socket.connect('tcp://localhost:lport') where lport is the randomly
+ selected local port of the tunnel.
+
+ """
+ new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout)
+ socket.connect(new_url)
+ return tunnel
+
+
+def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
+ """Open a tunneled connection from a 0MQ url.
+
+ For use inside tunnel_connection.
+
+ Returns
+ -------
+
+ (url, tunnel) : (str, object)
+ The 0MQ url that has been forwarded, and the tunnel object
+ """
+
+ lport = select_random_ports(1)[0]
+ transport, addr = addr.split('://')
+ ip,rport = addr.split(':')
+ rport = int(rport)
+ if paramiko is None:
+ paramiko = sys.platform == 'win32'
+ if paramiko:
+ tunnelf = paramiko_tunnel
+ else:
+ tunnelf = openssh_tunnel
+
+ tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout)
+ return 'tcp://127.0.0.1:%i'%lport, tunnel
+
+def openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
+ """Create an ssh tunnel using command-line ssh that connects port lport
+ on this machine to localhost:rport on server. The tunnel
+ will automatically close when not in use, remaining open
+ for a minimum of timeout seconds for an initial connection.
+
+ This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
+ as seen from `server`.
+
+ keyfile and password may be specified, but ssh config is checked for defaults.
+
+ Parameters
+ ----------
+
+ lport : int
+ local port for connecting to the tunnel from this machine.
+ rport : int
+ port on the remote machine to connect to.
+ server : str
+ The ssh server to connect to. The full ssh server string will be parsed.
+ user@server:port
+ remoteip : str [Default: 127.0.0.1]
+ The remote ip, specifying the destination of the tunnel.
+ Default is localhost, which means that the tunnel would redirect
+ localhost:lport on this machine to localhost:rport on the *server*.
+
+ keyfile : str; path to public key file
+ This specifies a key to be used in ssh login, default None.
+ Regular default ssh keys will be used without specifying this argument.
+ password : str;
+ Your ssh password to the ssh server. Note that if this is left None,
+ you will be prompted for it if passwordless key based login is unavailable.
+ timeout : int [default: 60]
+ The time (in seconds) after which no activity will result in the tunnel
+ closing. This prevents orphaned tunnels from running forever.
+ """
+ if pexpect is None:
+ raise ImportError("pexpect unavailable, use paramiko_tunnel")
+ ssh="ssh "
+ if keyfile:
+ ssh += "-i " + keyfile
+
+ if ':' in server:
+ server, port = server.split(':')
+ ssh += " -p %s" % port
+
+ cmd = "%s -O check %s" % (ssh, server)
+ (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
+ if not exitstatus:
+ pid = int(output[output.find("(pid=")+5:output.find(")")])
+ cmd = "%s -O forward -L 127.0.0.1:%i:%s:%i %s" % (
+ ssh, lport, remoteip, rport, server)
+ (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
+ if not exitstatus:
+ atexit.register(_stop_tunnel, cmd.replace("-O forward", "-O cancel", 1))
+ return pid
+ cmd = "%s -f -S none -L 127.0.0.1:%i:%s:%i %s sleep %i" % (
+ ssh, lport, remoteip, rport, server, timeout)
+
+ # pop SSH_ASKPASS from env
+ env = os.environ.copy()
+ env.pop('SSH_ASKPASS', None)
+
+ ssh_newkey = 'Are you sure you want to continue connecting'
+ tunnel = pexpect.spawn(cmd, env=env)
+ failed = False
+ while True:
+ try:
+ i = tunnel.expect([ssh_newkey, '[Pp]assword:'], timeout=.1)
+ if i==0:
+ raise SSHException('The authenticity of the host can\'t be established.')
+ except pexpect.TIMEOUT:
+ continue
+ except pexpect.EOF:
+ if tunnel.exitstatus:
+ print(tunnel.exitstatus)
+ print(tunnel.before)
+ print(tunnel.after)
+ raise RuntimeError("tunnel '%s' failed to start"%(cmd))
+ else:
+ return tunnel.pid
+ else:
+ if failed:
+ print("Password rejected, try again")
+ password=None
+ if password is None:
+ password = getpass("%s's password: "%(server))
+ tunnel.sendline(password)
+ failed = True
+
+def _stop_tunnel(cmd):
+ pexpect.run(cmd)
+
+def _split_server(server):
+ if '@' in server:
+ username,server = server.split('@', 1)
+ else:
+ username = getuser()
+ if ':' in server:
+ server, port = server.split(':')
+ port = int(port)
+ else:
+ port = 22
+ return username, server, port
+
+def paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
+ """launch a tunnel with paramiko in a subprocess. This should only be used
+ when shell ssh is unavailable (e.g. Windows).
+
+ This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
+ as seen from `server`.
+
+ If you are familiar with ssh tunnels, this creates the tunnel:
+
+ ssh server -L localhost:lport:remoteip:rport
+
+ keyfile and password may be specified, but ssh config is checked for defaults.
+
+
+ Parameters
+ ----------
+
+ lport : int
+ local port for connecting to the tunnel from this machine.
+ rport : int
+ port on the remote machine to connect to.
+ server : str
+ The ssh server to connect to. The full ssh server string will be parsed.
+ user@server:port
+ remoteip : str [Default: 127.0.0.1]
+ The remote ip, specifying the destination of the tunnel.
+ Default is localhost, which means that the tunnel would redirect
+ localhost:lport on this machine to localhost:rport on the *server*.
+
+ keyfile : str; path to public key file
+ This specifies a key to be used in ssh login, default None.
+ Regular default ssh keys will be used without specifying this argument.
+ password : str;
+ Your ssh password to the ssh server. Note that if this is left None,
+ you will be prompted for it if passwordless key based login is unavailable.
+ timeout : int [default: 60]
+ The time (in seconds) after which no activity will result in the tunnel
+ closing. This prevents orphaned tunnels from running forever.
+
+ """
+ if paramiko is None:
+ raise ImportError("Paramiko not available")
+
+ if password is None:
+ if not _try_passwordless_paramiko(server, keyfile):
+ password = getpass("%s's password: "%(server))
+
+ p = Process(target=_paramiko_tunnel,
+ args=(lport, rport, server, remoteip),
+ kwargs=dict(keyfile=keyfile, password=password))
+ p.daemon=False
+ p.start()
+ atexit.register(_shutdown_process, p)
+ return p
+
+def _shutdown_process(p):
+ if p.is_alive():
+ p.terminate()
+
+def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):
+ """Function for actually starting a paramiko tunnel, to be passed
+ to multiprocessing.Process(target=this), and not called directly.
+ """
+ username, server, port = _split_server(server)
+ client = paramiko.SSHClient()
+ client.load_system_host_keys()
+ client.set_missing_host_key_policy(paramiko.WarningPolicy())
+
+ try:
+ client.connect(server, port, username=username, key_filename=keyfile,
+ look_for_keys=True, password=password)
+# except paramiko.AuthenticationException:
+# if password is None:
+# password = getpass("%s@%s's password: "%(username, server))
+# client.connect(server, port, username=username, password=password)
+# else:
+# raise
+ except Exception as e:
+ print('*** Failed to connect to %s:%d: %r' % (server, port, e))
+ sys.exit(1)
+
+ # Don't let SIGINT kill the tunnel subprocess
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+ try:
+ forward_tunnel(lport, remoteip, rport, client.get_transport())
+ except KeyboardInterrupt:
+ print('SIGINT: Port forwarding stopped cleanly')
+ sys.exit(0)
+ except Exception as e:
+ print("Port forwarding stopped uncleanly: %s"%e)
+ sys.exit(255)
+
+if sys.platform == 'win32':
+ ssh_tunnel = paramiko_tunnel
+else:
+ ssh_tunnel = openssh_tunnel
+
+
+__all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh']
+
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/__init__.py
new file mode 100644
index 00000000..d0510a44
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/__init__.py
@@ -0,0 +1,27 @@
+"""pure-Python sugar wrappers for core 0MQ objects."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from zmq.sugar import (
+ constants, context, frame, poll, socket, tracker, version
+)
+from zmq import error
+
+__all__ = ['constants']
+for submod in (
+ constants, context, error, frame, poll, socket, tracker, version
+):
+ __all__.extend(submod.__all__)
+
+from zmq.error import *
+from zmq.sugar.context import *
+from zmq.sugar.tracker import *
+from zmq.sugar.socket import *
+from zmq.sugar.constants import *
+from zmq.sugar.frame import *
+from zmq.sugar.poll import *
+# from zmq.sugar.stopwatch import *
+# from zmq.sugar._device import *
+from zmq.sugar.version import *
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/attrsettr.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/attrsettr.py
new file mode 100644
index 00000000..4bbd36d6
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/attrsettr.py
@@ -0,0 +1,52 @@
+# coding: utf-8
+"""Mixin for mapping set/getattr to self.set/get"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from . import constants
+
+class AttributeSetter(object):
+
+ def __setattr__(self, key, value):
+ """set zmq options by attribute"""
+
+ # regular setattr only allowed for class-defined attributes
+ for obj in [self] + self.__class__.mro():
+ if key in obj.__dict__:
+ object.__setattr__(self, key, value)
+ return
+
+ upper_key = key.upper()
+ try:
+ opt = getattr(constants, upper_key)
+ except AttributeError:
+ raise AttributeError("%s has no such option: %s" % (
+ self.__class__.__name__, upper_key)
+ )
+ else:
+ self._set_attr_opt(upper_key, opt, value)
+
+ def _set_attr_opt(self, name, opt, value):
+ """override if setattr should do something other than call self.set"""
+ self.set(opt, value)
+
+ def __getattr__(self, key):
+ """get zmq options by attribute"""
+ upper_key = key.upper()
+ try:
+ opt = getattr(constants, upper_key)
+ except AttributeError:
+ raise AttributeError("%s has no such option: %s" % (
+ self.__class__.__name__, upper_key)
+ )
+ else:
+ return self._get_attr_opt(upper_key, opt)
+
+ def _get_attr_opt(self, name, opt):
+ """override if getattr should do something other than call self.get"""
+ return self.get(opt)
+
+
+__all__ = ['AttributeSetter']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/constants.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/constants.py
new file mode 100644
index 00000000..88281176
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/constants.py
@@ -0,0 +1,98 @@
+"""0MQ Constants."""
+
+# Copyright (c) PyZMQ Developers.
+# Distributed under the terms of the Modified BSD License.
+
+from zmq.backend import constants
+from zmq.utils.constant_names import (
+ base_names,
+ switched_sockopt_names,
+ int_sockopt_names,
+ int64_sockopt_names,
+ bytes_sockopt_names,
+ fd_sockopt_names,
+ ctx_opt_names,
+ msg_opt_names,
+)
+
+#-----------------------------------------------------------------------------
+# Python module level constants
+#-----------------------------------------------------------------------------
+
+__all__ = [
+ 'int_sockopts',
+ 'int64_sockopts',
+ 'bytes_sockopts',
+ 'ctx_opts',
+ 'ctx_opt_names',
+ ]
+
+int_sockopts = set()
+int64_sockopts = set()
+bytes_sockopts = set()
+fd_sockopts = set()
+ctx_opts = set()
+msg_opts = set()
+
+
+if constants.VERSION < 30000:
+ int64_sockopt_names.extend(switched_sockopt_names)
+else:
+ int_sockopt_names.extend(switched_sockopt_names)
+
+_UNDEFINED = -9999
+
+def _add_constant(name, container=None):
+ """add a constant to be defined
+
+ optionally add it to one of the sets for use in get/setopt checkers
+ """
+ c = getattr(constants, name, _UNDEFINED)
+ if c == _UNDEFINED:
+ return
+ globals()[name] = c
+ __all__.append(name)
+ if container is not None:
+ container.add(c)
+ return c
+
+for name in base_names:
+ _add_constant(name)
+
+for name in int_sockopt_names:
+ _add_constant(name, int_sockopts)
+
+for name in int64_sockopt_names:
+ _add_constant(name, int64_sockopts)
+
+for name in bytes_sockopt_names:
+ _add_constant(name, bytes_sockopts)
+
+for name in fd_sockopt_names:
+ _add_constant(name, fd_sockopts)
+
+for name in ctx_opt_names:
+ _add_constant(name, ctx_opts)
+
+for name in msg_opt_names:
+ _add_constant(name, msg_opts)
+
+# ensure some aliases are always defined
+aliases = [
+ ('DONTWAIT', 'NOBLOCK'),
+ ('XREQ', 'DEALER'),
+ ('XREP', 'ROUTER'),
+]
+for group in aliases:
+ undefined = set()
+ found = None
+ for name in group:
+ value = getattr(constants, name, -1)
+ if value != -1:
+ found = value
+ else:
+ undefined.add(name)
+ if found is not None:
+ for name in undefined:
+ globals()[name] = found
+ __all__.append(name)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/context.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/context.py
new file mode 100644
index 00000000..86a9c5dc
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/context.py
@@ -0,0 +1,192 @@
+# coding: utf-8
+"""Python bindings for 0MQ."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import atexit
+import weakref
+
+from zmq.backend import Context as ContextBase
+from . import constants
+from .attrsettr import AttributeSetter
+from .constants import ENOTSUP, ctx_opt_names
+from .socket import Socket
+from zmq.error import ZMQError
+
+from zmq.utils.interop import cast_int_addr
+
+
+class Context(ContextBase, AttributeSetter):
+ """Create a zmq Context
+
+ A zmq Context creates sockets via its ``ctx.socket`` method.
+ """
+ sockopts = None
+ _instance = None
+ _shadow = False
+ _exiting = False
+
+ def __init__(self, io_threads=1, **kwargs):
+ super(Context, self).__init__(io_threads=io_threads, **kwargs)
+ if kwargs.get('shadow', False):
+ self._shadow = True
+ else:
+ self._shadow = False
+ self.sockopts = {}
+
+ self._exiting = False
+ if not self._shadow:
+ ctx_ref = weakref.ref(self)
+ def _notify_atexit():
+ ctx = ctx_ref()
+ if ctx is not None:
+ ctx._exiting = True
+ atexit.register(_notify_atexit)
+
+ def __del__(self):
+ """deleting a Context should terminate it, without trying non-threadsafe destroy"""
+ if not self._shadow and not self._exiting:
+ self.term()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ self.term()
+
+ @classmethod
+ def shadow(cls, address):
+ """Shadow an existing libzmq context
+
+ address is the integer address of the libzmq context
+ or an FFI pointer to it.
+
+ .. versionadded:: 14.1
+ """
+ address = cast_int_addr(address)
+ return cls(shadow=address)
+
+ @classmethod
+ def shadow_pyczmq(cls, ctx):
+ """Shadow an existing pyczmq context
+
+ ctx is the FFI `zctx_t *` pointer
+
+ .. versionadded:: 14.1
+ """
+ from pyczmq import zctx
+
+ underlying = zctx.underlying(ctx)
+ address = cast_int_addr(underlying)
+ return cls(shadow=address)
+
+ # static method copied from tornado IOLoop.instance
+ @classmethod
+ def instance(cls, io_threads=1):
+ """Returns a global Context instance.
+
+ Most single-threaded applications have a single, global Context.
+ Use this method instead of passing around Context instances
+ throughout your code.
+
+ A common pattern for classes that depend on Contexts is to use
+ a default argument to enable programs with multiple Contexts
+ but not require the argument for simpler applications:
+
+ class MyClass(object):
+ def __init__(self, context=None):
+ self.context = context or Context.instance()
+ """
+ if cls._instance is None or cls._instance.closed:
+ cls._instance = cls(io_threads=io_threads)
+ return cls._instance
+
+ #-------------------------------------------------------------------------
+ # Hooks for ctxopt completion
+ #-------------------------------------------------------------------------
+
+ def __dir__(self):
+ keys = dir(self.__class__)
+
+ for collection in (
+ ctx_opt_names,
+ ):
+ keys.extend(collection)
+ return keys
+
+ #-------------------------------------------------------------------------
+ # Creating Sockets
+ #-------------------------------------------------------------------------
+
+ @property
+ def _socket_class(self):
+ return Socket
+
+ def socket(self, socket_type):
+ """Create a Socket associated with this Context.
+
+ Parameters
+ ----------
+ socket_type : int
+ The socket type, which can be any of the 0MQ socket types:
+ REQ, REP, PUB, SUB, PAIR, DEALER, ROUTER, PULL, PUSH, etc.
+ """
+ if self.closed:
+ raise ZMQError(ENOTSUP)
+ s = self._socket_class(self, socket_type)
+ for opt, value in self.sockopts.items():
+ try:
+ s.setsockopt(opt, value)
+ except ZMQError:
+ # ignore ZMQErrors, which are likely for socket options
+ # that do not apply to a particular socket type, e.g.
+ # SUBSCRIBE for non-SUB sockets.
+ pass
+ return s
+
+ def setsockopt(self, opt, value):
+ """set default socket options for new sockets created by this Context
+
+ .. versionadded:: 13.0
+ """
+ self.sockopts[opt] = value
+
+ def getsockopt(self, opt):
+ """get default socket options for new sockets created by this Context
+
+ .. versionadded:: 13.0
+ """
+ return self.sockopts[opt]
+
+ def _set_attr_opt(self, name, opt, value):
+ """set default sockopts as attributes"""
+ if name in constants.ctx_opt_names:
+ return self.set(opt, value)
+ else:
+ self.sockopts[opt] = value
+
+ def _get_attr_opt(self, name, opt):
+ """get default sockopts as attributes"""
+ if name in constants.ctx_opt_names:
+ return self.get(opt)
+ else:
+ if opt not in self.sockopts:
+ raise AttributeError(name)
+ else:
+ return self.sockopts[opt]
+
+ def __delattr__(self, key):
+ """delete default sockopts as attributes"""
+ key = key.upper()
+ try:
+ opt = getattr(constants, key)
+ except AttributeError:
+ raise AttributeError("no such socket option: %s" % key)
+ else:
+ if opt not in self.sockopts:
+ raise AttributeError(key)
+ else:
+ del self.sockopts[opt]
+
+__all__ = ['Context']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/frame.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/frame.py
new file mode 100644
index 00000000..9f556c86
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/frame.py
@@ -0,0 +1,19 @@
+# coding: utf-8
+"""0MQ Frame pure Python methods."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from .attrsettr import AttributeSetter
+from zmq.backend import Frame as FrameBase
+
+
+class Frame(FrameBase, AttributeSetter):
+ def __getitem__(self, key):
+ # map Frame['User-Id'] to Frame.get('User-Id')
+ return self.get(key)
+
+# keep deprecated alias
+Message = Frame
+__all__ = ['Frame', 'Message'] \ No newline at end of file
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/poll.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/poll.py
new file mode 100644
index 00000000..c7b1d1bb
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/poll.py
@@ -0,0 +1,161 @@
+"""0MQ polling related functions and classes."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import zmq
+from zmq.backend import zmq_poll
+from .constants import POLLIN, POLLOUT, POLLERR
+
+#-----------------------------------------------------------------------------
+# Polling related methods
+#-----------------------------------------------------------------------------
+
+
+class Poller(object):
+ """A stateful poll interface that mirrors Python's built-in poll."""
+ sockets = None
+ _map = {}
+
+ def __init__(self):
+ self.sockets = []
+ self._map = {}
+
+ def __contains__(self, socket):
+ return socket in self._map
+
+ def register(self, socket, flags=POLLIN|POLLOUT):
+ """p.register(socket, flags=POLLIN|POLLOUT)
+
+ Register a 0MQ socket or native fd for I/O monitoring.
+
+ register(s,0) is equivalent to unregister(s).
+
+ Parameters
+ ----------
+ socket : zmq.Socket or native socket
+ A zmq.Socket or any Python object having a ``fileno()``
+ method that returns a valid file descriptor.
+ flags : int
+ The events to watch for. Can be POLLIN, POLLOUT or POLLIN|POLLOUT.
+ If `flags=0`, socket will be unregistered.
+ """
+ if flags:
+ if socket in self._map:
+ idx = self._map[socket]
+ self.sockets[idx] = (socket, flags)
+ else:
+ idx = len(self.sockets)
+ self.sockets.append((socket, flags))
+ self._map[socket] = idx
+ elif socket in self._map:
+ # unregister sockets registered with no events
+ self.unregister(socket)
+ else:
+ # ignore new sockets with no events
+ pass
+
+ def modify(self, socket, flags=POLLIN|POLLOUT):
+ """Modify the flags for an already registered 0MQ socket or native fd."""
+ self.register(socket, flags)
+
+ def unregister(self, socket):
+ """Remove a 0MQ socket or native fd for I/O monitoring.
+
+ Parameters
+ ----------
+ socket : Socket
+ The socket instance to stop polling.
+ """
+ idx = self._map.pop(socket)
+ self.sockets.pop(idx)
+ # shift indices after deletion
+ for socket, flags in self.sockets[idx:]:
+ self._map[socket] -= 1
+
+ def poll(self, timeout=None):
+ """Poll the registered 0MQ or native fds for I/O.
+
+ Parameters
+ ----------
+ timeout : float, int
+ The timeout in milliseconds. If None, no `timeout` (infinite). This
+ is in milliseconds to be compatible with ``select.poll()``. The
+ underlying zmq_poll uses microseconds and we convert to that in
+ this function.
+
+ Returns
+ -------
+ events : list of tuples
+ The list of events that are ready to be processed.
+ This is a list of tuples of the form ``(socket, event)``, where the 0MQ Socket
+ or integer fd is the first element, and the poll event mask (POLLIN, POLLOUT) is the second.
+ It is common to call ``events = dict(poller.poll())``,
+ which turns the list of tuples into a mapping of ``socket : event``.
+ """
+ if timeout is None or timeout < 0:
+ timeout = -1
+ elif isinstance(timeout, float):
+ timeout = int(timeout)
+ return zmq_poll(self.sockets, timeout=timeout)
+
+
+def select(rlist, wlist, xlist, timeout=None):
+ """select(rlist, wlist, xlist, timeout=None) -> (rlist, wlist, xlist)
+
+ Return the result of poll as a lists of sockets ready for r/w/exception.
+
+ This has the same interface as Python's built-in ``select.select()`` function.
+
+ Parameters
+ ----------
+ timeout : float, int, optional
+ The timeout in seconds. If None, no timeout (infinite). This is in seconds to be
+ compatible with ``select.select()``. The underlying zmq_poll uses microseconds
+ and we convert to that in this function.
+ rlist : list of sockets/FDs
+ sockets/FDs to be polled for read events
+ wlist : list of sockets/FDs
+ sockets/FDs to be polled for write events
+ xlist : list of sockets/FDs
+ sockets/FDs to be polled for error events
+
+ Returns
+ -------
+ (rlist, wlist, xlist) : tuple of lists of sockets (length 3)
+ Lists correspond to sockets available for read/write/error events respectively.
+ """
+ if timeout is None:
+ timeout = -1
+ # Convert from sec -> us for zmq_poll.
+ # zmq_poll accepts 3.x style timeout in ms
+ timeout = int(timeout*1000.0)
+ if timeout < 0:
+ timeout = -1
+ sockets = []
+ for s in set(rlist + wlist + xlist):
+ flags = 0
+ if s in rlist:
+ flags |= POLLIN
+ if s in wlist:
+ flags |= POLLOUT
+ if s in xlist:
+ flags |= POLLERR
+ sockets.append((s, flags))
+ return_sockets = zmq_poll(sockets, timeout)
+ rlist, wlist, xlist = [], [], []
+ for s, flags in return_sockets:
+ if flags & POLLIN:
+ rlist.append(s)
+ if flags & POLLOUT:
+ wlist.append(s)
+ if flags & POLLERR:
+ xlist.append(s)
+ return rlist, wlist, xlist
+
+#-----------------------------------------------------------------------------
+# Symbols to export
+#-----------------------------------------------------------------------------
+
+__all__ = [ 'Poller', 'select' ]
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/socket.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/socket.py
new file mode 100644
index 00000000..c91589d7
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/socket.py
@@ -0,0 +1,495 @@
+# coding: utf-8
+"""0MQ Socket pure Python methods."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import codecs
+import random
+import warnings
+
+import zmq
+from zmq.backend import Socket as SocketBase
+from .poll import Poller
+from . import constants
+from .attrsettr import AttributeSetter
+from zmq.error import ZMQError, ZMQBindError
+from zmq.utils import jsonapi
+from zmq.utils.strtypes import bytes,unicode,basestring
+from zmq.utils.interop import cast_int_addr
+
+from .constants import (
+ SNDMORE, ENOTSUP, POLLIN,
+ int64_sockopt_names,
+ int_sockopt_names,
+ bytes_sockopt_names,
+ fd_sockopt_names,
+)
+try:
+ import cPickle
+ pickle = cPickle
+except:
+ cPickle = None
+ import pickle
+
+try:
+ DEFAULT_PROTOCOL = pickle.DEFAULT_PROTOCOL
+except AttributeError:
+ DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL
+
+
+class Socket(SocketBase, AttributeSetter):
+ """The ZMQ socket object
+
+ To create a Socket, first create a Context::
+
+ ctx = zmq.Context.instance()
+
+ then call ``ctx.socket(socket_type)``::
+
+ s = ctx.socket(zmq.ROUTER)
+
+ """
+ _shadow = False
+
+ def __del__(self):
+ if not self._shadow:
+ self.close()
+
+ # socket as context manager:
+ def __enter__(self):
+ """Sockets are context managers
+
+ .. versionadded:: 14.4
+ """
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ self.close()
+
+ #-------------------------------------------------------------------------
+ # Socket creation
+ #-------------------------------------------------------------------------
+
+ @classmethod
+ def shadow(cls, address):
+ """Shadow an existing libzmq socket
+
+ address is the integer address of the libzmq socket
+ or an FFI pointer to it.
+
+ .. versionadded:: 14.1
+ """
+ address = cast_int_addr(address)
+ return cls(shadow=address)
+
+ #-------------------------------------------------------------------------
+ # Deprecated aliases
+ #-------------------------------------------------------------------------
+
+ @property
+ def socket_type(self):
+ warnings.warn("Socket.socket_type is deprecated, use Socket.type",
+ DeprecationWarning
+ )
+ return self.type
+
+ #-------------------------------------------------------------------------
+ # Hooks for sockopt completion
+ #-------------------------------------------------------------------------
+
+ def __dir__(self):
+ keys = dir(self.__class__)
+ for collection in (
+ bytes_sockopt_names,
+ int_sockopt_names,
+ int64_sockopt_names,
+ fd_sockopt_names,
+ ):
+ keys.extend(collection)
+ return keys
+
+ #-------------------------------------------------------------------------
+ # Getting/Setting options
+ #-------------------------------------------------------------------------
+ setsockopt = SocketBase.set
+ getsockopt = SocketBase.get
+
+ def set_string(self, option, optval, encoding='utf-8'):
+ """set socket options with a unicode object
+
+ This is simply a wrapper for setsockopt to protect from encoding ambiguity.
+
+ See the 0MQ documentation for details on specific options.
+
+ Parameters
+ ----------
+ option : int
+ The name of the option to set. Can be any of: SUBSCRIBE,
+ UNSUBSCRIBE, IDENTITY
+ optval : unicode string (unicode on py2, str on py3)
+ The value of the option to set.
+ encoding : str
+ The encoding to be used, default is utf8
+ """
+ if not isinstance(optval, unicode):
+ raise TypeError("unicode strings only")
+ return self.set(option, optval.encode(encoding))
+
+ setsockopt_unicode = setsockopt_string = set_string
+
+ def get_string(self, option, encoding='utf-8'):
+ """get the value of a socket option
+
+ See the 0MQ documentation for details on specific options.
+
+ Parameters
+ ----------
+ option : int
+ The option to retrieve.
+
+ Returns
+ -------
+ optval : unicode string (unicode on py2, str on py3)
+ The value of the option as a unicode string.
+ """
+
+ if option not in constants.bytes_sockopts:
+ raise TypeError("option %i will not return a string to be decoded"%option)
+ return self.getsockopt(option).decode(encoding)
+
+ getsockopt_unicode = getsockopt_string = get_string
+
+ def bind_to_random_port(self, addr, min_port=49152, max_port=65536, max_tries=100):
+ """bind this socket to a random port in a range
+
+ Parameters
+ ----------
+ addr : str
+ The address string without the port to pass to ``Socket.bind()``.
+ min_port : int, optional
+ The minimum port in the range of ports to try (inclusive).
+ max_port : int, optional
+ The maximum port in the range of ports to try (exclusive).
+ max_tries : int, optional
+ The maximum number of bind attempts to make.
+
+ Returns
+ -------
+ port : int
+ The port the socket was bound to.
+
+ Raises
+ ------
+ ZMQBindError
+ if `max_tries` reached before successful bind
+ """
+ for i in range(max_tries):
+ try:
+ port = random.randrange(min_port, max_port)
+ self.bind('%s:%s' % (addr, port))
+ except ZMQError as exception:
+ if not exception.errno == zmq.EADDRINUSE:
+ raise
+ else:
+ return port
+ raise ZMQBindError("Could not bind socket to random port.")
+
+ def get_hwm(self):
+ """get the High Water Mark
+
+ On libzmq ≥ 3, this gets SNDHWM if available, otherwise RCVHWM
+ """
+ major = zmq.zmq_version_info()[0]
+ if major >= 3:
+ # return sndhwm, fallback on rcvhwm
+ try:
+ return self.getsockopt(zmq.SNDHWM)
+ except zmq.ZMQError as e:
+ pass
+
+ return self.getsockopt(zmq.RCVHWM)
+ else:
+ return self.getsockopt(zmq.HWM)
+
+ def set_hwm(self, value):
+ """set the High Water Mark
+
+ On libzmq ≥ 3, this sets both SNDHWM and RCVHWM
+ """
+ major = zmq.zmq_version_info()[0]
+ if major >= 3:
+ raised = None
+ try:
+ self.sndhwm = value
+ except Exception as e:
+ raised = e
+ try:
+ self.rcvhwm = value
+ except Exception as e:
+ raised = e
+
+ if raised:
+ raise raised
+ else:
+ return self.setsockopt(zmq.HWM, value)
+
+ hwm = property(get_hwm, set_hwm,
+ """property for High Water Mark
+
+ Setting hwm sets both SNDHWM and RCVHWM as appropriate.
+ It gets SNDHWM if available, otherwise RCVHWM.
+ """
+ )
+
+ #-------------------------------------------------------------------------
+ # Sending and receiving messages
+ #-------------------------------------------------------------------------
+
+ def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
+ """send a sequence of buffers as a multipart message
+
+ The zmq.SNDMORE flag is added to all msg parts before the last.
+
+ Parameters
+ ----------
+ msg_parts : iterable
+ A sequence of objects to send as a multipart message. Each element
+ can be any sendable object (Frame, bytes, buffer-providers)
+ flags : int, optional
+ SNDMORE is handled automatically for frames before the last.
+ copy : bool, optional
+ Should the frame(s) be sent in a copying or non-copying manner.
+ track : bool, optional
+ Should the frame(s) be tracked for notification that ZMQ has
+ finished with it (ignored if copy=True).
+
+ Returns
+ -------
+ None : if copy or not track
+ MessageTracker : if track and not copy
+ a MessageTracker object, whose `pending` property will
+ be True until the last send is completed.
+ """
+ for msg in msg_parts[:-1]:
+ self.send(msg, SNDMORE|flags, copy=copy, track=track)
+ # Send the last part without the extra SNDMORE flag.
+ return self.send(msg_parts[-1], flags, copy=copy, track=track)
+
+ def recv_multipart(self, flags=0, copy=True, track=False):
+ """receive a multipart message as a list of bytes or Frame objects
+
+ Parameters
+ ----------
+ flags : int, optional
+ Any supported flag: NOBLOCK. If NOBLOCK is set, this method
+ will raise a ZMQError with EAGAIN if a message is not ready.
+ If NOBLOCK is not set, then this method will block until a
+ message arrives.
+ copy : bool, optional
+ Should the message frame(s) be received in a copying or non-copying manner?
+ If False a Frame object is returned for each part, if True a copy of
+ the bytes is made for each frame.
+ track : bool, optional
+ Should the message frame(s) be tracked for notification that ZMQ has
+ finished with it? (ignored if copy=True)
+
+ Returns
+ -------
+ msg_parts : list
+ A list of frames in the multipart message; either Frames or bytes,
+ depending on `copy`.
+
+ """
+ parts = [self.recv(flags, copy=copy, track=track)]
+ # have first part already, only loop while more to receive
+ while self.getsockopt(zmq.RCVMORE):
+ part = self.recv(flags, copy=copy, track=track)
+ parts.append(part)
+
+ return parts
+
+ def send_string(self, u, flags=0, copy=True, encoding='utf-8'):
+ """send a Python unicode string as a message with an encoding
+
+ 0MQ communicates with raw bytes, so you must encode/decode
+ text (unicode on py2, str on py3) around 0MQ.
+
+ Parameters
+ ----------
+ u : Python unicode string (unicode on py2, str on py3)
+ The unicode string to send.
+ flags : int, optional
+ Any valid send flag.
+ encoding : str [default: 'utf-8']
+ The encoding to be used
+ """
+ if not isinstance(u, basestring):
+ raise TypeError("unicode/str objects only")
+ return self.send(u.encode(encoding), flags=flags, copy=copy)
+
+ send_unicode = send_string
+
+ def recv_string(self, flags=0, encoding='utf-8'):
+ """receive a unicode string, as sent by send_string
+
+ Parameters
+ ----------
+ flags : int
+ Any valid recv flag.
+ encoding : str [default: 'utf-8']
+ The encoding to be used
+
+ Returns
+ -------
+ s : unicode string (unicode on py2, str on py3)
+ The Python unicode string that arrives as encoded bytes.
+ """
+ b = self.recv(flags=flags)
+ return b.decode(encoding)
+
+ recv_unicode = recv_string
+
+ def send_pyobj(self, obj, flags=0, protocol=DEFAULT_PROTOCOL):
+ """send a Python object as a message using pickle to serialize
+
+ Parameters
+ ----------
+ obj : Python object
+ The Python object to send.
+ flags : int
+ Any valid send flag.
+ protocol : int
+ The pickle protocol number to use. The default is pickle.DEFAULT_PROTOCOL
+ where defined, and pickle.HIGHEST_PROTOCOL elsewhere.
+ """
+ msg = pickle.dumps(obj, protocol)
+ return self.send(msg, flags)
+
+ def recv_pyobj(self, flags=0):
+ """receive a Python object as a message using pickle to serialize
+
+ Parameters
+ ----------
+ flags : int
+ Any valid recv flag.
+
+ Returns
+ -------
+ obj : Python object
+ The Python object that arrives as a message.
+ """
+ s = self.recv(flags)
+ return pickle.loads(s)
+
+ def send_json(self, obj, flags=0, **kwargs):
+ """send a Python object as a message using json to serialize
+
+ Keyword arguments are passed on to json.dumps
+
+ Parameters
+ ----------
+ obj : Python object
+ The Python object to send
+ flags : int
+ Any valid send flag
+ """
+ msg = jsonapi.dumps(obj, **kwargs)
+ return self.send(msg, flags)
+
+ def recv_json(self, flags=0, **kwargs):
+ """receive a Python object as a message using json to serialize
+
+ Keyword arguments are passed on to json.loads
+
+ Parameters
+ ----------
+ flags : int
+ Any valid recv flag.
+
+ Returns
+ -------
+ obj : Python object
+ The Python object that arrives as a message.
+ """
+ msg = self.recv(flags)
+ return jsonapi.loads(msg, **kwargs)
+
+ _poller_class = Poller
+
+ def poll(self, timeout=None, flags=POLLIN):
+ """poll the socket for events
+
+ The default is to poll forever for incoming
+ events. Timeout is in milliseconds, if specified.
+
+ Parameters
+ ----------
+ timeout : int [default: None]
+ The timeout (in milliseconds) to wait for an event. If unspecified
+ (or specified None), will wait forever for an event.
+ flags : bitfield (int) [default: POLLIN]
+ The event flags to poll for (any combination of POLLIN|POLLOUT).
+ The default is to check for incoming events (POLLIN).
+
+ Returns
+ -------
+ events : bitfield (int)
+ The events that are ready and waiting. Will be 0 if no events were ready
+ by the time timeout was reached.
+ """
+
+ if self.closed:
+ raise ZMQError(ENOTSUP)
+
+ p = self._poller_class()
+ p.register(self, flags)
+ evts = dict(p.poll(timeout))
+ # return 0 if no events, otherwise return event bitfield
+ return evts.get(self, 0)
+
+ def get_monitor_socket(self, events=None, addr=None):
+ """Return a connected PAIR socket ready to receive the event notifications.
+
+ .. versionadded:: libzmq-4.0
+ .. versionadded:: 14.0
+
+ Parameters
+ ----------
+ events : bitfield (int) [default: ZMQ_EVENTS_ALL]
+ The bitmask defining which events are wanted.
+ addr : string [default: None]
+ The optional endpoint for the monitoring sockets.
+
+ Returns
+ -------
+ socket : (PAIR)
+ The socket is already connected and ready to receive messages.
+ """
+ # safe-guard, method only available on libzmq >= 4
+ if zmq.zmq_version_info() < (4,):
+ raise NotImplementedError("get_monitor_socket requires libzmq >= 4, have %s" % zmq.zmq_version())
+ if addr is None:
+ # create endpoint name from internal fd
+ addr = "inproc://monitor.s-%d" % self.FD
+ if events is None:
+ # use all events
+ events = zmq.EVENT_ALL
+ # attach monitoring socket
+ self.monitor(addr, events)
+ # create new PAIR socket and connect it
+ ret = self.context.socket(zmq.PAIR)
+ ret.connect(addr)
+ return ret
+
+ def disable_monitor(self):
+ """Shutdown the PAIR socket (created using get_monitor_socket)
+ that is serving socket events.
+
+ .. versionadded:: 14.4
+ """
+ self.monitor(None, 0)
+
+
+__all__ = ['Socket']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/tracker.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/tracker.py
new file mode 100644
index 00000000..fb8c007f
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/tracker.py
@@ -0,0 +1,120 @@
+"""Tracker for zero-copy messages with 0MQ."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import time
+
+try:
+ # below 3.3
+ from threading import _Event as Event
+except (ImportError, AttributeError):
+ # python throws ImportError, cython throws AttributeError
+ from threading import Event
+
+from zmq.error import NotDone
+from zmq.backend import Frame
+
+class MessageTracker(object):
+ """MessageTracker(*towatch)
+
+ A class for tracking if 0MQ is done using one or more messages.
+
+ When you send a 0MQ message, it is not sent immediately. The 0MQ IO thread
+ sends the message at some later time. Often you want to know when 0MQ has
+ actually sent the message though. This is complicated by the fact that
+ a single 0MQ message can be sent multiple times using different sockets.
+ This class allows you to track all of the 0MQ usages of a message.
+
+ Parameters
+ ----------
+ *towatch : tuple of Event, MessageTracker, Message instances.
+ This list of objects to track. This class can track the low-level
+ Events used by the Message class, other MessageTrackers or
+ actual Messages.
+ """
+ events = None
+ peers = None
+
+ def __init__(self, *towatch):
+ """MessageTracker(*towatch)
+
+ Create a message tracker to track a set of messages.
+
+ Parameters
+ ----------
+ *towatch : tuple of Event, MessageTracker, Message instances.
+ This list of objects to track. This class can track the low-level
+ Events used by the Message class, other MessageTrackers or
+ actual Messages.
+ """
+ self.events = set()
+ self.peers = set()
+ for obj in towatch:
+ if isinstance(obj, Event):
+ self.events.add(obj)
+ elif isinstance(obj, MessageTracker):
+ self.peers.add(obj)
+ elif isinstance(obj, Frame):
+ if not obj.tracker:
+ raise ValueError("Not a tracked message")
+ self.peers.add(obj.tracker)
+ else:
+ raise TypeError("Require Events or Message Frames, not %s"%type(obj))
+
+ @property
+ def done(self):
+ """Is 0MQ completely done with the message(s) being tracked?"""
+ for evt in self.events:
+ if not evt.is_set():
+ return False
+ for pm in self.peers:
+ if not pm.done:
+ return False
+ return True
+
+ def wait(self, timeout=-1):
+ """mt.wait(timeout=-1)
+
+ Wait for 0MQ to be done with the message or until `timeout`.
+
+ Parameters
+ ----------
+ timeout : float [default: -1, wait forever]
+ Maximum time in (s) to wait before raising NotDone.
+
+ Returns
+ -------
+ None
+ if done before `timeout`
+
+ Raises
+ ------
+ NotDone
+ if `timeout` reached before I am done.
+ """
+ tic = time.time()
+ if timeout is False or timeout < 0:
+ remaining = 3600*24*7 # a week
+ else:
+ remaining = timeout
+ done = False
+ for evt in self.events:
+ if remaining < 0:
+ raise NotDone
+ evt.wait(timeout=remaining)
+ if not evt.is_set():
+ raise NotDone
+ toc = time.time()
+ remaining -= (toc-tic)
+ tic = toc
+
+ for peer in self.peers:
+ if remaining < 0:
+ raise NotDone
+ peer.wait(timeout=remaining)
+ toc = time.time()
+ remaining -= (toc-tic)
+ tic = toc
+
+__all__ = ['MessageTracker'] \ No newline at end of file
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/version.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/version.py
new file mode 100644
index 00000000..ea8fbbc4
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/version.py
@@ -0,0 +1,48 @@
+"""PyZMQ and 0MQ version functions."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from zmq.backend import zmq_version_info
+
+
+VERSION_MAJOR = 14
+VERSION_MINOR = 5
+VERSION_PATCH = 0
+VERSION_EXTRA = ""
+__version__ = '%i.%i.%i' % (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
+
+if VERSION_EXTRA:
+ __version__ = "%s-%s" % (__version__, VERSION_EXTRA)
+ version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH, float('inf'))
+else:
+ version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
+
+__revision__ = ''
+
+def pyzmq_version():
+ """return the version of pyzmq as a string"""
+ if __revision__:
+ return '@'.join([__version__,__revision__[:6]])
+ else:
+ return __version__
+
+def pyzmq_version_info():
+ """return the pyzmq version as a tuple of at least three numbers
+
+ If pyzmq is a development version, `inf` will be appended after the third integer.
+ """
+ return version_info
+
+
+def zmq_version():
+ """return the version of libzmq as a string"""
+ return "%i.%i.%i" % zmq_version_info()
+
+
+__all__ = ['zmq_version', 'zmq_version_info',
+ 'pyzmq_version','pyzmq_version_info',
+ '__version__', '__revision__'
+]
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/__init__.py
new file mode 100644
index 00000000..325a3f19
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/__init__.py
@@ -0,0 +1,211 @@
+# Copyright (c) PyZMQ Developers.
+# Distributed under the terms of the Modified BSD License.
+
+import functools
+import sys
+import time
+from threading import Thread
+
+from unittest import TestCase
+
+import zmq
+from zmq.utils import jsonapi
+
+try:
+ import gevent
+ from zmq import green as gzmq
+ have_gevent = True
+except ImportError:
+ have_gevent = False
+
+try:
+ from unittest import SkipTest
+except ImportError:
+ try:
+ from nose import SkipTest
+ except ImportError:
+ class SkipTest(Exception):
+ pass
+
+PYPY = 'PyPy' in sys.version
+
+#-----------------------------------------------------------------------------
+# skip decorators (directly from unittest)
+#-----------------------------------------------------------------------------
+
+_id = lambda x: x
+
+def skip(reason):
+ """
+ Unconditionally skip a test.
+ """
+ def decorator(test_item):
+ if not (isinstance(test_item, type) and issubclass(test_item, TestCase)):
+ @functools.wraps(test_item)
+ def skip_wrapper(*args, **kwargs):
+ raise SkipTest(reason)
+ test_item = skip_wrapper
+
+ test_item.__unittest_skip__ = True
+ test_item.__unittest_skip_why__ = reason
+ return test_item
+ return decorator
+
+def skip_if(condition, reason="Skipped"):
+ """
+ Skip a test if the condition is true.
+ """
+ if condition:
+ return skip(reason)
+ return _id
+
+skip_pypy = skip_if(PYPY, "Doesn't work on PyPy")
+
+#-----------------------------------------------------------------------------
+# Base test class
+#-----------------------------------------------------------------------------
+
+class BaseZMQTestCase(TestCase):
+ green = False
+
+ @property
+ def Context(self):
+ if self.green:
+ return gzmq.Context
+ else:
+ return zmq.Context
+
+ def socket(self, socket_type):
+ s = self.context.socket(socket_type)
+ self.sockets.append(s)
+ return s
+
+ def setUp(self):
+ if self.green and not have_gevent:
+ raise SkipTest("requires gevent")
+ self.context = self.Context.instance()
+ self.sockets = []
+
+ def tearDown(self):
+ contexts = set([self.context])
+ while self.sockets:
+ sock = self.sockets.pop()
+ contexts.add(sock.context) # in case additional contexts are created
+ sock.close(0)
+ for ctx in contexts:
+ t = Thread(target=ctx.term)
+ t.daemon = True
+ t.start()
+ t.join(timeout=2)
+ if t.is_alive():
+ # reset Context.instance, so the failure to term doesn't corrupt subsequent tests
+ zmq.sugar.context.Context._instance = None
+ raise RuntimeError("context could not terminate, open sockets likely remain in test")
+
+ def create_bound_pair(self, type1=zmq.PAIR, type2=zmq.PAIR, interface='tcp://127.0.0.1'):
+ """Create a bound socket pair using a random port."""
+ s1 = self.context.socket(type1)
+ s1.setsockopt(zmq.LINGER, 0)
+ port = s1.bind_to_random_port(interface)
+ s2 = self.context.socket(type2)
+ s2.setsockopt(zmq.LINGER, 0)
+ s2.connect('%s:%s' % (interface, port))
+ self.sockets.extend([s1,s2])
+ return s1, s2
+
+ def ping_pong(self, s1, s2, msg):
+ s1.send(msg)
+ msg2 = s2.recv()
+ s2.send(msg2)
+ msg3 = s1.recv()
+ return msg3
+
+ def ping_pong_json(self, s1, s2, o):
+ if jsonapi.jsonmod is None:
+ raise SkipTest("No json library")
+ s1.send_json(o)
+ o2 = s2.recv_json()
+ s2.send_json(o2)
+ o3 = s1.recv_json()
+ return o3
+
+ def ping_pong_pyobj(self, s1, s2, o):
+ s1.send_pyobj(o)
+ o2 = s2.recv_pyobj()
+ s2.send_pyobj(o2)
+ o3 = s1.recv_pyobj()
+ return o3
+
+ def assertRaisesErrno(self, errno, func, *args, **kwargs):
+ try:
+ func(*args, **kwargs)
+ except zmq.ZMQError as e:
+ self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \
+got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno)))
+ else:
+ self.fail("Function did not raise any error")
+
+ def _select_recv(self, multipart, socket, **kwargs):
+ """call recv[_multipart] in a way that raises if there is nothing to receive"""
+ if zmq.zmq_version_info() >= (3,1,0):
+ # zmq 3.1 has a bug, where poll can return false positives,
+ # so we wait a little bit just in case
+ # See LIBZMQ-280 on JIRA
+ time.sleep(0.1)
+
+ r,w,x = zmq.select([socket], [], [], timeout=5)
+ assert len(r) > 0, "Should have received a message"
+ kwargs['flags'] = zmq.DONTWAIT | kwargs.get('flags', 0)
+
+ recv = socket.recv_multipart if multipart else socket.recv
+ return recv(**kwargs)
+
+ def recv(self, socket, **kwargs):
+ """call recv in a way that raises if there is nothing to receive"""
+ return self._select_recv(False, socket, **kwargs)
+
+ def recv_multipart(self, socket, **kwargs):
+ """call recv_multipart in a way that raises if there is nothing to receive"""
+ return self._select_recv(True, socket, **kwargs)
+
+
+class PollZMQTestCase(BaseZMQTestCase):
+ pass
+
+class GreenTest:
+ """Mixin for making green versions of test classes"""
+ green = True
+
+ def assertRaisesErrno(self, errno, func, *args, **kwargs):
+ if errno == zmq.EAGAIN:
+ raise SkipTest("Skipping because we're green.")
+ try:
+ func(*args, **kwargs)
+ except zmq.ZMQError:
+ e = sys.exc_info()[1]
+ self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \
+got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno)))
+ else:
+ self.fail("Function did not raise any error")
+
+ def tearDown(self):
+ contexts = set([self.context])
+ while self.sockets:
+ sock = self.sockets.pop()
+ contexts.add(sock.context) # in case additional contexts are created
+ sock.close()
+ try:
+ gevent.joinall([gevent.spawn(ctx.term) for ctx in contexts], timeout=2, raise_error=True)
+ except gevent.Timeout:
+ raise RuntimeError("context could not terminate, open sockets likely remain in test")
+
+ def skip_green(self):
+ raise SkipTest("Skipping because we are green")
+
+def skip_green(f):
+ def skipping_test(self, *args, **kwargs):
+ if self.green:
+ raise SkipTest("Skipping because we are green")
+ else:
+ return f(self, *args, **kwargs)
+ return skipping_test
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_auth.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_auth.py
new file mode 100644
index 00000000..d350f61f
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_auth.py
@@ -0,0 +1,431 @@
+# -*- coding: utf8 -*-
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import logging
+import os
+import shutil
+import sys
+import tempfile
+
+import zmq.auth
+from zmq.auth.ioloop import IOLoopAuthenticator
+from zmq.auth.thread import ThreadAuthenticator
+
+from zmq.eventloop import ioloop, zmqstream
+from zmq.tests import (BaseZMQTestCase, SkipTest)
+
+class BaseAuthTestCase(BaseZMQTestCase):
+ def setUp(self):
+ if zmq.zmq_version_info() < (4,0):
+ raise SkipTest("security is new in libzmq 4.0")
+ try:
+ zmq.curve_keypair()
+ except zmq.ZMQError:
+ raise SkipTest("security requires libzmq to be linked against libsodium")
+ super(BaseAuthTestCase, self).setUp()
+ # enable debug logging while we run tests
+ logging.getLogger('zmq.auth').setLevel(logging.DEBUG)
+ self.auth = self.make_auth()
+ self.auth.start()
+ self.base_dir, self.public_keys_dir, self.secret_keys_dir = self.create_certs()
+
+ def make_auth(self):
+ raise NotImplementedError()
+
+ def tearDown(self):
+ if self.auth:
+ self.auth.stop()
+ self.auth = None
+ self.remove_certs(self.base_dir)
+ super(BaseAuthTestCase, self).tearDown()
+
+ def create_certs(self):
+ """Create CURVE certificates for a test"""
+
+ # Create temporary CURVE keypairs for this test run. We create all keys in a
+ # temp directory and then move them into the appropriate private or public
+ # directory.
+
+ base_dir = tempfile.mkdtemp()
+ keys_dir = os.path.join(base_dir, 'certificates')
+ public_keys_dir = os.path.join(base_dir, 'public_keys')
+ secret_keys_dir = os.path.join(base_dir, 'private_keys')
+
+ os.mkdir(keys_dir)
+ os.mkdir(public_keys_dir)
+ os.mkdir(secret_keys_dir)
+
+ server_public_file, server_secret_file = zmq.auth.create_certificates(keys_dir, "server")
+ client_public_file, client_secret_file = zmq.auth.create_certificates(keys_dir, "client")
+
+ for key_file in os.listdir(keys_dir):
+ if key_file.endswith(".key"):
+ shutil.move(os.path.join(keys_dir, key_file),
+ os.path.join(public_keys_dir, '.'))
+
+ for key_file in os.listdir(keys_dir):
+ if key_file.endswith(".key_secret"):
+ shutil.move(os.path.join(keys_dir, key_file),
+ os.path.join(secret_keys_dir, '.'))
+
+ return (base_dir, public_keys_dir, secret_keys_dir)
+
+ def remove_certs(self, base_dir):
+ """Remove certificates for a test"""
+ shutil.rmtree(base_dir)
+
+ def load_certs(self, secret_keys_dir):
+ """Return server and client certificate keys"""
+ server_secret_file = os.path.join(secret_keys_dir, "server.key_secret")
+ client_secret_file = os.path.join(secret_keys_dir, "client.key_secret")
+
+ server_public, server_secret = zmq.auth.load_certificate(server_secret_file)
+ client_public, client_secret = zmq.auth.load_certificate(client_secret_file)
+
+ return server_public, server_secret, client_public, client_secret
+
+
+class TestThreadAuthentication(BaseAuthTestCase):
+ """Test authentication running in a thread"""
+
+ def make_auth(self):
+ return ThreadAuthenticator(self.context)
+
+ def can_connect(self, server, client):
+ """Check if client can connect to server using tcp transport"""
+ result = False
+ iface = 'tcp://127.0.0.1'
+ port = server.bind_to_random_port(iface)
+ client.connect("%s:%i" % (iface, port))
+ msg = [b"Hello World"]
+ server.send_multipart(msg)
+ if client.poll(1000):
+ rcvd_msg = client.recv_multipart()
+ self.assertEqual(rcvd_msg, msg)
+ result = True
+ return result
+
+ def test_null(self):
+ """threaded auth - NULL"""
+ # A default NULL connection should always succeed, and not
+ # go through our authentication infrastructure at all.
+ self.auth.stop()
+ self.auth = None
+
+ server = self.socket(zmq.PUSH)
+ client = self.socket(zmq.PULL)
+ self.assertTrue(self.can_connect(server, client))
+
+ # By setting a domain we switch on authentication for NULL sockets,
+ # though no policies are configured yet. The client connection
+ # should still be allowed.
+ server = self.socket(zmq.PUSH)
+ server.zap_domain = b'global'
+ client = self.socket(zmq.PULL)
+ self.assertTrue(self.can_connect(server, client))
+
+ def test_blacklist(self):
+ """threaded auth - Blacklist"""
+ # Blacklist 127.0.0.1, connection should fail
+ self.auth.deny('127.0.0.1')
+ server = self.socket(zmq.PUSH)
+ # By setting a domain we switch on authentication for NULL sockets,
+ # though no policies are configured yet.
+ server.zap_domain = b'global'
+ client = self.socket(zmq.PULL)
+ self.assertFalse(self.can_connect(server, client))
+
+ def test_whitelist(self):
+ """threaded auth - Whitelist"""
+ # Whitelist 127.0.0.1, connection should pass"
+ self.auth.allow('127.0.0.1')
+ server = self.socket(zmq.PUSH)
+ # By setting a domain we switch on authentication for NULL sockets,
+ # though no policies are configured yet.
+ server.zap_domain = b'global'
+ client = self.socket(zmq.PULL)
+ self.assertTrue(self.can_connect(server, client))
+
+ def test_plain(self):
+ """threaded auth - PLAIN"""
+
+ # Try PLAIN authentication - without configuring server, connection should fail
+ server = self.socket(zmq.PUSH)
+ server.plain_server = True
+ client = self.socket(zmq.PULL)
+ client.plain_username = b'admin'
+ client.plain_password = b'Password'
+ self.assertFalse(self.can_connect(server, client))
+
+ # Try PLAIN authentication - with server configured, connection should pass
+ server = self.socket(zmq.PUSH)
+ server.plain_server = True
+ client = self.socket(zmq.PULL)
+ client.plain_username = b'admin'
+ client.plain_password = b'Password'
+ self.auth.configure_plain(domain='*', passwords={'admin': 'Password'})
+ self.assertTrue(self.can_connect(server, client))
+
+ # Try PLAIN authentication - with bogus credentials, connection should fail
+ server = self.socket(zmq.PUSH)
+ server.plain_server = True
+ client = self.socket(zmq.PULL)
+ client.plain_username = b'admin'
+ client.plain_password = b'Bogus'
+ self.assertFalse(self.can_connect(server, client))
+
+ # Remove authenticator and check that a normal connection works
+ self.auth.stop()
+ self.auth = None
+
+ server = self.socket(zmq.PUSH)
+ client = self.socket(zmq.PULL)
+ self.assertTrue(self.can_connect(server, client))
+ client.close()
+ server.close()
+
+ def test_curve(self):
+ """threaded auth - CURVE"""
+ self.auth.allow('127.0.0.1')
+ certs = self.load_certs(self.secret_keys_dir)
+ server_public, server_secret, client_public, client_secret = certs
+
+ #Try CURVE authentication - without configuring server, connection should fail
+ server = self.socket(zmq.PUSH)
+ server.curve_publickey = server_public
+ server.curve_secretkey = server_secret
+ server.curve_server = True
+ client = self.socket(zmq.PULL)
+ client.curve_publickey = client_public
+ client.curve_secretkey = client_secret
+ client.curve_serverkey = server_public
+ self.assertFalse(self.can_connect(server, client))
+
+ #Try CURVE authentication - with server configured to CURVE_ALLOW_ANY, connection should pass
+ self.auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY)
+ server = self.socket(zmq.PUSH)
+ server.curve_publickey = server_public
+ server.curve_secretkey = server_secret
+ server.curve_server = True
+ client = self.socket(zmq.PULL)
+ client.curve_publickey = client_public
+ client.curve_secretkey = client_secret
+ client.curve_serverkey = server_public
+ self.assertTrue(self.can_connect(server, client))
+
+ # Try CURVE authentication - with server configured, connection should pass
+ self.auth.configure_curve(domain='*', location=self.public_keys_dir)
+ server = self.socket(zmq.PUSH)
+ server.curve_publickey = server_public
+ server.curve_secretkey = server_secret
+ server.curve_server = True
+ client = self.socket(zmq.PULL)
+ client.curve_publickey = client_public
+ client.curve_secretkey = client_secret
+ client.curve_serverkey = server_public
+ self.assertTrue(self.can_connect(server, client))
+
+ # Remove authenticator and check that a normal connection works
+ self.auth.stop()
+ self.auth = None
+
+ # Try connecting using NULL and no authentication enabled, connection should pass
+ server = self.socket(zmq.PUSH)
+ client = self.socket(zmq.PULL)
+ self.assertTrue(self.can_connect(server, client))
+
+
+def with_ioloop(method, expect_success=True):
+ """decorator for running tests with an IOLoop"""
+ def test_method(self):
+ r = method(self)
+
+ loop = self.io_loop
+ if expect_success:
+ self.pullstream.on_recv(self.on_message_succeed)
+ else:
+ self.pullstream.on_recv(self.on_message_fail)
+
+ t = loop.time()
+ loop.add_callback(self.attempt_connection)
+ loop.add_callback(self.send_msg)
+ if expect_success:
+ loop.add_timeout(t + 1, self.on_test_timeout_fail)
+ else:
+ loop.add_timeout(t + 1, self.on_test_timeout_succeed)
+
+ loop.start()
+ if self.fail_msg:
+ self.fail(self.fail_msg)
+
+ return r
+ return test_method
+
+def should_auth(method):
+ return with_ioloop(method, True)
+
+def should_not_auth(method):
+ return with_ioloop(method, False)
+
+class TestIOLoopAuthentication(BaseAuthTestCase):
+ """Test authentication running in ioloop"""
+
+ def setUp(self):
+ self.fail_msg = None
+ self.io_loop = ioloop.IOLoop()
+ super(TestIOLoopAuthentication, self).setUp()
+ self.server = self.socket(zmq.PUSH)
+ self.client = self.socket(zmq.PULL)
+ self.pushstream = zmqstream.ZMQStream(self.server, self.io_loop)
+ self.pullstream = zmqstream.ZMQStream(self.client, self.io_loop)
+
+ def make_auth(self):
+ return IOLoopAuthenticator(self.context, io_loop=self.io_loop)
+
+ def tearDown(self):
+ if self.auth:
+ self.auth.stop()
+ self.auth = None
+ self.io_loop.close(all_fds=True)
+ super(TestIOLoopAuthentication, self).tearDown()
+
+ def attempt_connection(self):
+ """Check if client can connect to server using tcp transport"""
+ iface = 'tcp://127.0.0.1'
+ port = self.server.bind_to_random_port(iface)
+ self.client.connect("%s:%i" % (iface, port))
+
+ def send_msg(self):
+ """Send a message from server to a client"""
+ msg = [b"Hello World"]
+ self.pushstream.send_multipart(msg)
+
+ def on_message_succeed(self, frames):
+ """A message was received, as expected."""
+ if frames != [b"Hello World"]:
+ self.fail_msg = "Unexpected message received"
+ self.io_loop.stop()
+
+ def on_message_fail(self, frames):
+ """A message was received, unexpectedly."""
+ self.fail_msg = 'Received messaged unexpectedly, security failed'
+ self.io_loop.stop()
+
+ def on_test_timeout_succeed(self):
+ """Test timer expired, indicates test success"""
+ self.io_loop.stop()
+
+ def on_test_timeout_fail(self):
+ """Test timer expired, indicates test failure"""
+ self.fail_msg = 'Test timed out'
+ self.io_loop.stop()
+
+ @should_auth
+ def test_none(self):
+ """ioloop auth - NONE"""
+ # A default NULL connection should always succeed, and not
+ # go through our authentication infrastructure at all.
+ # no auth should be running
+ self.auth.stop()
+ self.auth = None
+
+ @should_auth
+ def test_null(self):
+ """ioloop auth - NULL"""
+ # By setting a domain we switch on authentication for NULL sockets,
+ # though no policies are configured yet. The client connection
+ # should still be allowed.
+ self.server.zap_domain = b'global'
+
+ @should_not_auth
+ def test_blacklist(self):
+ """ioloop auth - Blacklist"""
+ # Blacklist 127.0.0.1, connection should fail
+ self.auth.deny('127.0.0.1')
+ self.server.zap_domain = b'global'
+
+ @should_auth
+ def test_whitelist(self):
+ """ioloop auth - Whitelist"""
+ # Whitelist 127.0.0.1, which overrides the blacklist, connection should pass"
+ self.auth.allow('127.0.0.1')
+
+ self.server.setsockopt(zmq.ZAP_DOMAIN, b'global')
+
+ @should_not_auth
+ def test_plain_unconfigured_server(self):
+ """ioloop auth - PLAIN, unconfigured server"""
+ self.client.plain_username = b'admin'
+ self.client.plain_password = b'Password'
+ # Try PLAIN authentication - without configuring server, connection should fail
+ self.server.plain_server = True
+
+ @should_auth
+ def test_plain_configured_server(self):
+ """ioloop auth - PLAIN, configured server"""
+ self.client.plain_username = b'admin'
+ self.client.plain_password = b'Password'
+ # Try PLAIN authentication - with server configured, connection should pass
+ self.server.plain_server = True
+ self.auth.configure_plain(domain='*', passwords={'admin': 'Password'})
+
+ @should_not_auth
+ def test_plain_bogus_credentials(self):
+ """ioloop auth - PLAIN, bogus credentials"""
+ self.client.plain_username = b'admin'
+ self.client.plain_password = b'Bogus'
+ self.server.plain_server = True
+
+ self.auth.configure_plain(domain='*', passwords={'admin': 'Password'})
+
+ @should_not_auth
+ def test_curve_unconfigured_server(self):
+ """ioloop auth - CURVE, unconfigured server"""
+ certs = self.load_certs(self.secret_keys_dir)
+ server_public, server_secret, client_public, client_secret = certs
+
+ self.auth.allow('127.0.0.1')
+
+ self.server.curve_publickey = server_public
+ self.server.curve_secretkey = server_secret
+ self.server.curve_server = True
+
+ self.client.curve_publickey = client_public
+ self.client.curve_secretkey = client_secret
+ self.client.curve_serverkey = server_public
+
+ @should_auth
+ def test_curve_allow_any(self):
+ """ioloop auth - CURVE, CURVE_ALLOW_ANY"""
+ certs = self.load_certs(self.secret_keys_dir)
+ server_public, server_secret, client_public, client_secret = certs
+
+ self.auth.allow('127.0.0.1')
+ self.auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY)
+
+ self.server.curve_publickey = server_public
+ self.server.curve_secretkey = server_secret
+ self.server.curve_server = True
+
+ self.client.curve_publickey = client_public
+ self.client.curve_secretkey = client_secret
+ self.client.curve_serverkey = server_public
+
+ @should_auth
+ def test_curve_configured_server(self):
+ """ioloop auth - CURVE, configured server"""
+ self.auth.allow('127.0.0.1')
+ certs = self.load_certs(self.secret_keys_dir)
+ server_public, server_secret, client_public, client_secret = certs
+
+ self.auth.configure_curve(domain='*', location=self.public_keys_dir)
+
+ self.server.curve_publickey = server_public
+ self.server.curve_secretkey = server_secret
+ self.server.curve_server = True
+
+ self.client.curve_publickey = client_public
+ self.client.curve_secretkey = client_secret
+ self.client.curve_serverkey = server_public
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_cffi_backend.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_cffi_backend.py
new file mode 100644
index 00000000..1f85eebf
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_cffi_backend.py
@@ -0,0 +1,310 @@
+# -*- coding: utf8 -*-
+
+import sys
+import time
+
+from unittest import TestCase
+
+from zmq.tests import BaseZMQTestCase, SkipTest
+
+try:
+ from zmq.backend.cffi import (
+ zmq_version_info,
+ PUSH, PULL, IDENTITY,
+ REQ, REP, POLLIN, POLLOUT,
+ )
+ from zmq.backend.cffi._cffi import ffi, C
+ have_ffi_backend = True
+except ImportError:
+ have_ffi_backend = False
+
+
+class TestCFFIBackend(TestCase):
+
+ def setUp(self):
+ if not have_ffi_backend or not 'PyPy' in sys.version:
+ raise SkipTest('PyPy Tests Only')
+
+ def test_zmq_version_info(self):
+ version = zmq_version_info()
+
+ assert version[0] in range(2,11)
+
+ def test_zmq_ctx_new_destroy(self):
+ ctx = C.zmq_ctx_new()
+
+ assert ctx != ffi.NULL
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_socket_open_close(self):
+ ctx = C.zmq_ctx_new()
+ socket = C.zmq_socket(ctx, PUSH)
+
+ assert ctx != ffi.NULL
+ assert ffi.NULL != socket
+ assert 0 == C.zmq_close(socket)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_setsockopt(self):
+ ctx = C.zmq_ctx_new()
+ socket = C.zmq_socket(ctx, PUSH)
+
+ identity = ffi.new('char[3]', 'zmq')
+ ret = C.zmq_setsockopt(socket, IDENTITY, ffi.cast('void*', identity), 3)
+
+ assert ret == 0
+ assert ctx != ffi.NULL
+ assert ffi.NULL != socket
+ assert 0 == C.zmq_close(socket)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_getsockopt(self):
+ ctx = C.zmq_ctx_new()
+ socket = C.zmq_socket(ctx, PUSH)
+
+ identity = ffi.new('char[]', 'zmq')
+ ret = C.zmq_setsockopt(socket, IDENTITY, ffi.cast('void*', identity), 3)
+ assert ret == 0
+
+ option_len = ffi.new('size_t*', 3)
+ option = ffi.new('char*')
+ ret = C.zmq_getsockopt(socket,
+ IDENTITY,
+ ffi.cast('void*', option),
+ option_len)
+
+ assert ret == 0
+ assert ffi.string(ffi.cast('char*', option))[0] == "z"
+ assert ffi.string(ffi.cast('char*', option))[1] == "m"
+ assert ffi.string(ffi.cast('char*', option))[2] == "q"
+ assert ctx != ffi.NULL
+ assert ffi.NULL != socket
+ assert 0 == C.zmq_close(socket)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_bind(self):
+ ctx = C.zmq_ctx_new()
+ socket = C.zmq_socket(ctx, 8)
+
+ assert 0 == C.zmq_bind(socket, 'tcp://*:4444')
+ assert ctx != ffi.NULL
+ assert ffi.NULL != socket
+ assert 0 == C.zmq_close(socket)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_bind_connect(self):
+ ctx = C.zmq_ctx_new()
+
+ socket1 = C.zmq_socket(ctx, PUSH)
+ socket2 = C.zmq_socket(ctx, PULL)
+
+ assert 0 == C.zmq_bind(socket1, 'tcp://*:4444')
+ assert 0 == C.zmq_connect(socket2, 'tcp://127.0.0.1:4444')
+ assert ctx != ffi.NULL
+ assert ffi.NULL != socket1
+ assert ffi.NULL != socket2
+ assert 0 == C.zmq_close(socket1)
+ assert 0 == C.zmq_close(socket2)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_msg_init_close(self):
+ zmq_msg = ffi.new('zmq_msg_t*')
+
+ assert ffi.NULL != zmq_msg
+ assert 0 == C.zmq_msg_init(zmq_msg)
+ assert 0 == C.zmq_msg_close(zmq_msg)
+
+ def test_zmq_msg_init_size(self):
+ zmq_msg = ffi.new('zmq_msg_t*')
+
+ assert ffi.NULL != zmq_msg
+ assert 0 == C.zmq_msg_init_size(zmq_msg, 10)
+ assert 0 == C.zmq_msg_close(zmq_msg)
+
+ def test_zmq_msg_init_data(self):
+ zmq_msg = ffi.new('zmq_msg_t*')
+ message = ffi.new('char[5]', 'Hello')
+
+ assert 0 == C.zmq_msg_init_data(zmq_msg,
+ ffi.cast('void*', message),
+ 5,
+ ffi.NULL,
+ ffi.NULL)
+
+ assert ffi.NULL != zmq_msg
+ assert 0 == C.zmq_msg_close(zmq_msg)
+
+ def test_zmq_msg_data(self):
+ zmq_msg = ffi.new('zmq_msg_t*')
+ message = ffi.new('char[]', 'Hello')
+ assert 0 == C.zmq_msg_init_data(zmq_msg,
+ ffi.cast('void*', message),
+ 5,
+ ffi.NULL,
+ ffi.NULL)
+
+ data = C.zmq_msg_data(zmq_msg)
+
+ assert ffi.NULL != zmq_msg
+ assert ffi.string(ffi.cast("char*", data)) == 'Hello'
+ assert 0 == C.zmq_msg_close(zmq_msg)
+
+
+ def test_zmq_send(self):
+ ctx = C.zmq_ctx_new()
+
+ sender = C.zmq_socket(ctx, REQ)
+ receiver = C.zmq_socket(ctx, REP)
+
+ assert 0 == C.zmq_bind(receiver, 'tcp://*:7777')
+ assert 0 == C.zmq_connect(sender, 'tcp://127.0.0.1:7777')
+
+ time.sleep(0.1)
+
+ zmq_msg = ffi.new('zmq_msg_t*')
+ message = ffi.new('char[5]', 'Hello')
+
+ C.zmq_msg_init_data(zmq_msg,
+ ffi.cast('void*', message),
+ ffi.cast('size_t', 5),
+ ffi.NULL,
+ ffi.NULL)
+
+ assert 5 == C.zmq_msg_send(zmq_msg, sender, 0)
+ assert 0 == C.zmq_msg_close(zmq_msg)
+ assert C.zmq_close(sender) == 0
+ assert C.zmq_close(receiver) == 0
+ assert C.zmq_ctx_destroy(ctx) == 0
+
+ def test_zmq_recv(self):
+ ctx = C.zmq_ctx_new()
+
+ sender = C.zmq_socket(ctx, REQ)
+ receiver = C.zmq_socket(ctx, REP)
+
+ assert 0 == C.zmq_bind(receiver, 'tcp://*:2222')
+ assert 0 == C.zmq_connect(sender, 'tcp://127.0.0.1:2222')
+
+ time.sleep(0.1)
+
+ zmq_msg = ffi.new('zmq_msg_t*')
+ message = ffi.new('char[5]', 'Hello')
+
+ C.zmq_msg_init_data(zmq_msg,
+ ffi.cast('void*', message),
+ ffi.cast('size_t', 5),
+ ffi.NULL,
+ ffi.NULL)
+
+ zmq_msg2 = ffi.new('zmq_msg_t*')
+ C.zmq_msg_init(zmq_msg2)
+
+ assert 5 == C.zmq_msg_send(zmq_msg, sender, 0)
+ assert 5 == C.zmq_msg_recv(zmq_msg2, receiver, 0)
+ assert 5 == C.zmq_msg_size(zmq_msg2)
+ assert b"Hello" == ffi.buffer(C.zmq_msg_data(zmq_msg2),
+ C.zmq_msg_size(zmq_msg2))[:]
+ assert C.zmq_close(sender) == 0
+ assert C.zmq_close(receiver) == 0
+ assert C.zmq_ctx_destroy(ctx) == 0
+
+ def test_zmq_poll(self):
+ ctx = C.zmq_ctx_new()
+
+ sender = C.zmq_socket(ctx, REQ)
+ receiver = C.zmq_socket(ctx, REP)
+
+ r1 = C.zmq_bind(receiver, 'tcp://*:3333')
+ r2 = C.zmq_connect(sender, 'tcp://127.0.0.1:3333')
+
+ zmq_msg = ffi.new('zmq_msg_t*')
+ message = ffi.new('char[5]', 'Hello')
+
+ C.zmq_msg_init_data(zmq_msg,
+ ffi.cast('void*', message),
+ ffi.cast('size_t', 5),
+ ffi.NULL,
+ ffi.NULL)
+
+ receiver_pollitem = ffi.new('zmq_pollitem_t*')
+ receiver_pollitem.socket = receiver
+ receiver_pollitem.fd = 0
+ receiver_pollitem.events = POLLIN | POLLOUT
+ receiver_pollitem.revents = 0
+
+ ret = C.zmq_poll(ffi.NULL, 0, 0)
+ assert ret == 0
+
+ ret = C.zmq_poll(receiver_pollitem, 1, 0)
+ assert ret == 0
+
+ ret = C.zmq_msg_send(zmq_msg, sender, 0)
+ print(ffi.string(C.zmq_strerror(C.zmq_errno())))
+ assert ret == 5
+
+ time.sleep(0.2)
+
+ ret = C.zmq_poll(receiver_pollitem, 1, 0)
+ assert ret == 1
+
+ assert int(receiver_pollitem.revents) & POLLIN
+ assert not int(receiver_pollitem.revents) & POLLOUT
+
+ zmq_msg2 = ffi.new('zmq_msg_t*')
+ C.zmq_msg_init(zmq_msg2)
+
+ ret_recv = C.zmq_msg_recv(zmq_msg2, receiver, 0)
+ assert ret_recv == 5
+
+ assert 5 == C.zmq_msg_size(zmq_msg2)
+ assert b"Hello" == ffi.buffer(C.zmq_msg_data(zmq_msg2),
+ C.zmq_msg_size(zmq_msg2))[:]
+
+ sender_pollitem = ffi.new('zmq_pollitem_t*')
+ sender_pollitem.socket = sender
+ sender_pollitem.fd = 0
+ sender_pollitem.events = POLLIN | POLLOUT
+ sender_pollitem.revents = 0
+
+ ret = C.zmq_poll(sender_pollitem, 1, 0)
+ assert ret == 0
+
+ zmq_msg_again = ffi.new('zmq_msg_t*')
+ message_again = ffi.new('char[11]', 'Hello Again')
+
+ C.zmq_msg_init_data(zmq_msg_again,
+ ffi.cast('void*', message_again),
+ ffi.cast('size_t', 11),
+ ffi.NULL,
+ ffi.NULL)
+
+ assert 11 == C.zmq_msg_send(zmq_msg_again, receiver, 0)
+
+ time.sleep(0.2)
+
+ assert 0 <= C.zmq_poll(sender_pollitem, 1, 0)
+ assert int(sender_pollitem.revents) & POLLIN
+ assert 11 == C.zmq_msg_recv(zmq_msg2, sender, 0)
+ assert 11 == C.zmq_msg_size(zmq_msg2)
+ assert b"Hello Again" == ffi.buffer(C.zmq_msg_data(zmq_msg2),
+ int(C.zmq_msg_size(zmq_msg2)))[:]
+ assert 0 == C.zmq_close(sender)
+ assert 0 == C.zmq_close(receiver)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+ assert 0 == C.zmq_msg_close(zmq_msg)
+ assert 0 == C.zmq_msg_close(zmq_msg2)
+ assert 0 == C.zmq_msg_close(zmq_msg_again)
+
+ def test_zmq_stopwatch_functions(self):
+ stopwatch = C.zmq_stopwatch_start()
+ ret = C.zmq_stopwatch_stop(stopwatch)
+
+ assert ffi.NULL != stopwatch
+ assert 0 < int(ret)
+
+ def test_zmq_sleep(self):
+ try:
+ C.zmq_sleep(1)
+ except Exception as e:
+ raise AssertionError("Error executing zmq_sleep(int)")
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_constants.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_constants.py
new file mode 100644
index 00000000..d32b2b48
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_constants.py
@@ -0,0 +1,104 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import json
+from unittest import TestCase
+
+import zmq
+
+from zmq.utils import constant_names
+from zmq.sugar import constants as sugar_constants
+from zmq.backend import constants as backend_constants
+
+all_set = set(constant_names.all_names)
+
+class TestConstants(TestCase):
+
+ def _duplicate_test(self, namelist, listname):
+ """test that a given list has no duplicates"""
+ dupes = {}
+ for name in set(namelist):
+ cnt = namelist.count(name)
+ if cnt > 1:
+ dupes[name] = cnt
+ if dupes:
+ self.fail("The following names occur more than once in %s: %s" % (listname, json.dumps(dupes, indent=2)))
+
+ def test_duplicate_all(self):
+ return self._duplicate_test(constant_names.all_names, "all_names")
+
+ def _change_key(self, change, version):
+ """return changed-in key"""
+ return "%s-in %d.%d.%d" % tuple([change] + list(version))
+
+ def test_duplicate_changed(self):
+ all_changed = []
+ for change in ("new", "removed"):
+ d = getattr(constant_names, change + "_in")
+ for version, namelist in d.items():
+ all_changed.extend(namelist)
+ self._duplicate_test(namelist, self._change_key(change, version))
+
+ self._duplicate_test(all_changed, "all-changed")
+
+ def test_changed_in_all(self):
+ missing = {}
+ for change in ("new", "removed"):
+ d = getattr(constant_names, change + "_in")
+ for version, namelist in d.items():
+ key = self._change_key(change, version)
+ for name in namelist:
+ if name not in all_set:
+ if key not in missing:
+ missing[key] = []
+ missing[key].append(name)
+
+ if missing:
+ self.fail(
+ "The following names are missing in `all_names`: %s" % json.dumps(missing, indent=2)
+ )
+
+ def test_no_negative_constants(self):
+ for name in sugar_constants.__all__:
+ self.assertNotEqual(getattr(zmq, name), sugar_constants._UNDEFINED)
+
+ def test_undefined_constants(self):
+ all_aliases = []
+ for alias_group in sugar_constants.aliases:
+ all_aliases.extend(alias_group)
+
+ for name in all_set.difference(all_aliases):
+ raw = getattr(backend_constants, name)
+ if raw == sugar_constants._UNDEFINED:
+ self.assertRaises(AttributeError, getattr, zmq, name)
+ else:
+ self.assertEqual(getattr(zmq, name), raw)
+
+ def test_new(self):
+ zmq_version = zmq.zmq_version_info()
+ for version, new_names in constant_names.new_in.items():
+ should_have = zmq_version >= version
+ for name in new_names:
+ try:
+ value = getattr(zmq, name)
+ except AttributeError:
+ if should_have:
+ self.fail("AttributeError: zmq.%s" % name)
+ else:
+ if not should_have:
+ self.fail("Shouldn't have: zmq.%s=%s" % (name, value))
+
+ def test_removed(self):
+ zmq_version = zmq.zmq_version_info()
+ for version, new_names in constant_names.removed_in.items():
+ should_have = zmq_version < version
+ for name in new_names:
+ try:
+ value = getattr(zmq, name)
+ except AttributeError:
+ if should_have:
+ self.fail("AttributeError: zmq.%s" % name)
+ else:
+ if not should_have:
+ self.fail("Shouldn't have: zmq.%s=%s" % (name, value))
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_context.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_context.py
new file mode 100644
index 00000000..e3280778
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_context.py
@@ -0,0 +1,257 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import gc
+import sys
+import time
+from threading import Thread, Event
+
+import zmq
+from zmq.tests import (
+ BaseZMQTestCase, have_gevent, GreenTest, skip_green, PYPY, SkipTest,
+)
+
+
+class TestContext(BaseZMQTestCase):
+
+ def test_init(self):
+ c1 = self.Context()
+ self.assert_(isinstance(c1, self.Context))
+ del c1
+ c2 = self.Context()
+ self.assert_(isinstance(c2, self.Context))
+ del c2
+ c3 = self.Context()
+ self.assert_(isinstance(c3, self.Context))
+ del c3
+
+ def test_dir(self):
+ ctx = self.Context()
+ self.assertTrue('socket' in dir(ctx))
+ if zmq.zmq_version_info() > (3,):
+ self.assertTrue('IO_THREADS' in dir(ctx))
+ ctx.term()
+
+ def test_term(self):
+ c = self.Context()
+ c.term()
+ self.assert_(c.closed)
+
+ def test_context_manager(self):
+ with self.Context() as c:
+ pass
+ self.assert_(c.closed)
+
+ def test_fail_init(self):
+ self.assertRaisesErrno(zmq.EINVAL, self.Context, -1)
+
+ def test_term_hang(self):
+ rep,req = self.create_bound_pair(zmq.ROUTER, zmq.DEALER)
+ req.setsockopt(zmq.LINGER, 0)
+ req.send(b'hello', copy=False)
+ req.close()
+ rep.close()
+ self.context.term()
+
+ def test_instance(self):
+ ctx = self.Context.instance()
+ c2 = self.Context.instance(io_threads=2)
+ self.assertTrue(c2 is ctx)
+ c2.term()
+ c3 = self.Context.instance()
+ c4 = self.Context.instance()
+ self.assertFalse(c3 is c2)
+ self.assertFalse(c3.closed)
+ self.assertTrue(c3 is c4)
+
+ def test_many_sockets(self):
+ """opening and closing many sockets shouldn't cause problems"""
+ ctx = self.Context()
+ for i in range(16):
+ sockets = [ ctx.socket(zmq.REP) for i in range(65) ]
+ [ s.close() for s in sockets ]
+ # give the reaper a chance
+ time.sleep(1e-2)
+ ctx.term()
+
+ def test_sockopts(self):
+ """setting socket options with ctx attributes"""
+ ctx = self.Context()
+ ctx.linger = 5
+ self.assertEqual(ctx.linger, 5)
+ s = ctx.socket(zmq.REQ)
+ self.assertEqual(s.linger, 5)
+ self.assertEqual(s.getsockopt(zmq.LINGER), 5)
+ s.close()
+ # check that subscribe doesn't get set on sockets that don't subscribe:
+ ctx.subscribe = b''
+ s = ctx.socket(zmq.REQ)
+ s.close()
+
+ ctx.term()
+
+
+ def test_destroy(self):
+ """Context.destroy should close sockets"""
+ ctx = self.Context()
+ sockets = [ ctx.socket(zmq.REP) for i in range(65) ]
+
+ # close half of the sockets
+ [ s.close() for s in sockets[::2] ]
+
+ ctx.destroy()
+ # reaper is not instantaneous
+ time.sleep(1e-2)
+ for s in sockets:
+ self.assertTrue(s.closed)
+
+ def test_destroy_linger(self):
+ """Context.destroy should set linger on closing sockets"""
+ req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)
+ req.send(b'hi')
+ time.sleep(1e-2)
+ self.context.destroy(linger=0)
+ # reaper is not instantaneous
+ time.sleep(1e-2)
+ for s in (req,rep):
+ self.assertTrue(s.closed)
+
+ def test_term_noclose(self):
+ """Context.term won't close sockets"""
+ ctx = self.Context()
+ s = ctx.socket(zmq.REQ)
+ self.assertFalse(s.closed)
+ t = Thread(target=ctx.term)
+ t.start()
+ t.join(timeout=0.1)
+ self.assertTrue(t.is_alive(), "Context should be waiting")
+ s.close()
+ t.join(timeout=0.1)
+ self.assertFalse(t.is_alive(), "Context should have closed")
+
+ def test_gc(self):
+ """test close&term by garbage collection alone"""
+ if PYPY:
+ raise SkipTest("GC doesn't work ")
+
+ # test credit @dln (GH #137):
+ def gcf():
+ def inner():
+ ctx = self.Context()
+ s = ctx.socket(zmq.PUSH)
+ inner()
+ gc.collect()
+ t = Thread(target=gcf)
+ t.start()
+ t.join(timeout=1)
+ self.assertFalse(t.is_alive(), "Garbage collection should have cleaned up context")
+
+ def test_cyclic_destroy(self):
+ """ctx.destroy should succeed when cyclic ref prevents gc"""
+ # test credit @dln (GH #137):
+ class CyclicReference(object):
+ def __init__(self, parent=None):
+ self.parent = parent
+
+ def crash(self, sock):
+ self.sock = sock
+ self.child = CyclicReference(self)
+
+ def crash_zmq():
+ ctx = self.Context()
+ sock = ctx.socket(zmq.PULL)
+ c = CyclicReference()
+ c.crash(sock)
+ ctx.destroy()
+
+ crash_zmq()
+
+ def test_term_thread(self):
+ """ctx.term should not crash active threads (#139)"""
+ ctx = self.Context()
+ evt = Event()
+ evt.clear()
+
+ def block():
+ s = ctx.socket(zmq.REP)
+ s.bind_to_random_port('tcp://127.0.0.1')
+ evt.set()
+ try:
+ s.recv()
+ except zmq.ZMQError as e:
+ self.assertEqual(e.errno, zmq.ETERM)
+ return
+ finally:
+ s.close()
+ self.fail("recv should have been interrupted with ETERM")
+ t = Thread(target=block)
+ t.start()
+
+ evt.wait(1)
+ self.assertTrue(evt.is_set(), "sync event never fired")
+ time.sleep(0.01)
+ ctx.term()
+ t.join(timeout=1)
+ self.assertFalse(t.is_alive(), "term should have interrupted s.recv()")
+
+ def test_destroy_no_sockets(self):
+ ctx = self.Context()
+ s = ctx.socket(zmq.PUB)
+ s.bind_to_random_port('tcp://127.0.0.1')
+ s.close()
+ ctx.destroy()
+ assert s.closed
+ assert ctx.closed
+
+ def test_ctx_opts(self):
+ if zmq.zmq_version_info() < (3,):
+ raise SkipTest("context options require libzmq 3")
+ ctx = self.Context()
+ ctx.set(zmq.MAX_SOCKETS, 2)
+ self.assertEqual(ctx.get(zmq.MAX_SOCKETS), 2)
+ ctx.max_sockets = 100
+ self.assertEqual(ctx.max_sockets, 100)
+ self.assertEqual(ctx.get(zmq.MAX_SOCKETS), 100)
+
+ def test_shadow(self):
+ ctx = self.Context()
+ ctx2 = self.Context.shadow(ctx.underlying)
+ self.assertEqual(ctx.underlying, ctx2.underlying)
+ s = ctx.socket(zmq.PUB)
+ s.close()
+ del ctx2
+ self.assertFalse(ctx.closed)
+ s = ctx.socket(zmq.PUB)
+ ctx2 = self.Context.shadow(ctx.underlying)
+ s2 = ctx2.socket(zmq.PUB)
+ s.close()
+ s2.close()
+ ctx.term()
+ self.assertRaisesErrno(zmq.EFAULT, ctx2.socket, zmq.PUB)
+ del ctx2
+
+ def test_shadow_pyczmq(self):
+ try:
+ from pyczmq import zctx, zsocket, zstr
+ except Exception:
+ raise SkipTest("Requires pyczmq")
+
+ ctx = zctx.new()
+ a = zsocket.new(ctx, zmq.PUSH)
+ zsocket.bind(a, "inproc://a")
+ ctx2 = self.Context.shadow_pyczmq(ctx)
+ b = ctx2.socket(zmq.PULL)
+ b.connect("inproc://a")
+ zstr.send(a, b'hi')
+ rcvd = self.recv(b)
+ self.assertEqual(rcvd, b'hi')
+ b.close()
+
+
+if False: # disable green context tests
+ class TestContextGreen(GreenTest, TestContext):
+ """gevent subclass of context tests"""
+ # skip tests that use real threads:
+ test_gc = GreenTest.skip_green
+ test_term_thread = GreenTest.skip_green
+ test_destroy_linger = GreenTest.skip_green
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_device.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_device.py
new file mode 100644
index 00000000..f8305074
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_device.py
@@ -0,0 +1,146 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import time
+
+import zmq
+from zmq import devices
+from zmq.tests import BaseZMQTestCase, SkipTest, have_gevent, GreenTest, PYPY
+from zmq.utils.strtypes import (bytes,unicode,basestring)
+
+if PYPY:
+ # cleanup of shared Context doesn't work on PyPy
+ devices.Device.context_factory = zmq.Context
+
+class TestDevice(BaseZMQTestCase):
+
+ def test_device_types(self):
+ for devtype in (zmq.STREAMER, zmq.FORWARDER, zmq.QUEUE):
+ dev = devices.Device(devtype, zmq.PAIR, zmq.PAIR)
+ self.assertEqual(dev.device_type, devtype)
+ del dev
+
+ def test_device_attributes(self):
+ dev = devices.Device(zmq.QUEUE, zmq.SUB, zmq.PUB)
+ self.assertEqual(dev.in_type, zmq.SUB)
+ self.assertEqual(dev.out_type, zmq.PUB)
+ self.assertEqual(dev.device_type, zmq.QUEUE)
+ self.assertEqual(dev.daemon, True)
+ del dev
+
+ def test_tsdevice_attributes(self):
+ dev = devices.Device(zmq.QUEUE, zmq.SUB, zmq.PUB)
+ self.assertEqual(dev.in_type, zmq.SUB)
+ self.assertEqual(dev.out_type, zmq.PUB)
+ self.assertEqual(dev.device_type, zmq.QUEUE)
+ self.assertEqual(dev.daemon, True)
+ del dev
+
+
+ def test_single_socket_forwarder_connect(self):
+ dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1)
+ req = self.context.socket(zmq.REQ)
+ port = req.bind_to_random_port('tcp://127.0.0.1')
+ dev.connect_in('tcp://127.0.0.1:%i'%port)
+ dev.start()
+ time.sleep(.25)
+ msg = b'hello'
+ req.send(msg)
+ self.assertEqual(msg, self.recv(req))
+ del dev
+ req.close()
+ dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1)
+ req = self.context.socket(zmq.REQ)
+ port = req.bind_to_random_port('tcp://127.0.0.1')
+ dev.connect_out('tcp://127.0.0.1:%i'%port)
+ dev.start()
+ time.sleep(.25)
+ msg = b'hello again'
+ req.send(msg)
+ self.assertEqual(msg, self.recv(req))
+ del dev
+ req.close()
+
+ def test_single_socket_forwarder_bind(self):
+ dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1)
+ # select random port:
+ binder = self.context.socket(zmq.REQ)
+ port = binder.bind_to_random_port('tcp://127.0.0.1')
+ binder.close()
+ time.sleep(0.1)
+ req = self.context.socket(zmq.REQ)
+ req.connect('tcp://127.0.0.1:%i'%port)
+ dev.bind_in('tcp://127.0.0.1:%i'%port)
+ dev.start()
+ time.sleep(.25)
+ msg = b'hello'
+ req.send(msg)
+ self.assertEqual(msg, self.recv(req))
+ del dev
+ req.close()
+ dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1)
+ # select random port:
+ binder = self.context.socket(zmq.REQ)
+ port = binder.bind_to_random_port('tcp://127.0.0.1')
+ binder.close()
+ time.sleep(0.1)
+ req = self.context.socket(zmq.REQ)
+ req.connect('tcp://127.0.0.1:%i'%port)
+ dev.bind_in('tcp://127.0.0.1:%i'%port)
+ dev.start()
+ time.sleep(.25)
+ msg = b'hello again'
+ req.send(msg)
+ self.assertEqual(msg, self.recv(req))
+ del dev
+ req.close()
+
+ def test_proxy(self):
+ if zmq.zmq_version_info() < (3,2):
+ raise SkipTest("Proxies only in libzmq >= 3")
+ dev = devices.ThreadProxy(zmq.PULL, zmq.PUSH, zmq.PUSH)
+ binder = self.context.socket(zmq.REQ)
+ iface = 'tcp://127.0.0.1'
+ port = binder.bind_to_random_port(iface)
+ port2 = binder.bind_to_random_port(iface)
+ port3 = binder.bind_to_random_port(iface)
+ binder.close()
+ time.sleep(0.1)
+ dev.bind_in("%s:%i" % (iface, port))
+ dev.bind_out("%s:%i" % (iface, port2))
+ dev.bind_mon("%s:%i" % (iface, port3))
+ dev.start()
+ time.sleep(0.25)
+ msg = b'hello'
+ push = self.context.socket(zmq.PUSH)
+ push.connect("%s:%i" % (iface, port))
+ pull = self.context.socket(zmq.PULL)
+ pull.connect("%s:%i" % (iface, port2))
+ mon = self.context.socket(zmq.PULL)
+ mon.connect("%s:%i" % (iface, port3))
+ push.send(msg)
+ self.sockets.extend([push, pull, mon])
+ self.assertEqual(msg, self.recv(pull))
+ self.assertEqual(msg, self.recv(mon))
+
+if have_gevent:
+ import gevent
+ import zmq.green
+
+ class TestDeviceGreen(GreenTest, BaseZMQTestCase):
+
+ def test_green_device(self):
+ rep = self.context.socket(zmq.REP)
+ req = self.context.socket(zmq.REQ)
+ self.sockets.extend([req, rep])
+ port = rep.bind_to_random_port('tcp://127.0.0.1')
+ g = gevent.spawn(zmq.green.device, zmq.QUEUE, rep, rep)
+ req.connect('tcp://127.0.0.1:%i' % port)
+ req.send(b'hi')
+ timeout = gevent.Timeout(3)
+ timeout.start()
+ receiver = gevent.spawn(req.recv)
+ self.assertEqual(receiver.get(2), b'hi')
+ timeout.cancel()
+ g.kill(block=True)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_error.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_error.py
new file mode 100644
index 00000000..a2eee14a
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_error.py
@@ -0,0 +1,43 @@
+# -*- coding: utf8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+import time
+
+import zmq
+from zmq import ZMQError, strerror, Again, ContextTerminated
+from zmq.tests import BaseZMQTestCase
+
+if sys.version_info[0] >= 3:
+ long = int
+
+class TestZMQError(BaseZMQTestCase):
+
+ def test_strerror(self):
+ """test that strerror gets the right type."""
+ for i in range(10):
+ e = strerror(i)
+ self.assertTrue(isinstance(e, str))
+
+ def test_zmqerror(self):
+ for errno in range(10):
+ e = ZMQError(errno)
+ self.assertEqual(e.errno, errno)
+ self.assertEqual(str(e), strerror(errno))
+
+ def test_again(self):
+ s = self.context.socket(zmq.REP)
+ self.assertRaises(Again, s.recv, zmq.NOBLOCK)
+ self.assertRaisesErrno(zmq.EAGAIN, s.recv, zmq.NOBLOCK)
+ s.close()
+
+ def atest_ctxterm(self):
+ s = self.context.socket(zmq.REP)
+ t = Thread(target=self.context.term)
+ t.start()
+ self.assertRaises(ContextTerminated, s.recv, zmq.NOBLOCK)
+ self.assertRaisesErrno(zmq.TERM, s.recv, zmq.NOBLOCK)
+ s.close()
+ t.join()
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_etc.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_etc.py
new file mode 100644
index 00000000..ad224064
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_etc.py
@@ -0,0 +1,15 @@
+# Copyright (c) PyZMQ Developers.
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+
+import zmq
+
+from . import skip_if
+
+@skip_if(zmq.zmq_version_info() < (4,1), "libzmq < 4.1")
+def test_has():
+ assert not zmq.has('something weird')
+ has_ipc = zmq.has('ipc')
+ not_windows = not sys.platform.startswith('win')
+ assert has_ipc == not_windows
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_imports.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_imports.py
new file mode 100644
index 00000000..c0ddfaac
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_imports.py
@@ -0,0 +1,62 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+from unittest import TestCase
+
+class TestImports(TestCase):
+ """Test Imports - the quickest test to ensure that we haven't
+ introduced version-incompatible syntax errors."""
+
+ def test_toplevel(self):
+ """test toplevel import"""
+ import zmq
+
+ def test_core(self):
+ """test core imports"""
+ from zmq import Context
+ from zmq import Socket
+ from zmq import Poller
+ from zmq import Frame
+ from zmq import constants
+ from zmq import device, proxy
+ from zmq import Stopwatch
+ from zmq import (
+ zmq_version,
+ zmq_version_info,
+ pyzmq_version,
+ pyzmq_version_info,
+ )
+
+ def test_devices(self):
+ """test device imports"""
+ import zmq.devices
+ from zmq.devices import basedevice
+ from zmq.devices import monitoredqueue
+ from zmq.devices import monitoredqueuedevice
+
+ def test_log(self):
+ """test log imports"""
+ import zmq.log
+ from zmq.log import handlers
+
+ def test_eventloop(self):
+ """test eventloop imports"""
+ import zmq.eventloop
+ from zmq.eventloop import ioloop
+ from zmq.eventloop import zmqstream
+ from zmq.eventloop.minitornado.platform import auto
+ from zmq.eventloop.minitornado import ioloop
+
+ def test_utils(self):
+ """test util imports"""
+ import zmq.utils
+ from zmq.utils import strtypes
+ from zmq.utils import jsonapi
+
+ def test_ssh(self):
+ """test ssh imports"""
+ from zmq.ssh import tunnel
+
+
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_ioloop.py
new file mode 100644
index 00000000..2a8b1153
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_ioloop.py
@@ -0,0 +1,113 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import time
+import os
+import threading
+
+import zmq
+from zmq.tests import BaseZMQTestCase
+from zmq.eventloop import ioloop
+from zmq.eventloop.minitornado.ioloop import _Timeout
+try:
+ from tornado.ioloop import PollIOLoop, IOLoop as BaseIOLoop
+except ImportError:
+ from zmq.eventloop.minitornado.ioloop import IOLoop as BaseIOLoop
+
+
+def printer():
+ os.system("say hello")
+ raise Exception
+ print (time.time())
+
+
+class Delay(threading.Thread):
+ def __init__(self, f, delay=1):
+ self.f=f
+ self.delay=delay
+ self.aborted=False
+ self.cond=threading.Condition()
+ super(Delay, self).__init__()
+
+ def run(self):
+ self.cond.acquire()
+ self.cond.wait(self.delay)
+ self.cond.release()
+ if not self.aborted:
+ self.f()
+
+ def abort(self):
+ self.aborted=True
+ self.cond.acquire()
+ self.cond.notify()
+ self.cond.release()
+
+
+class TestIOLoop(BaseZMQTestCase):
+
+ def test_simple(self):
+ """simple IOLoop creation test"""
+ loop = ioloop.IOLoop()
+ dc = ioloop.PeriodicCallback(loop.stop, 200, loop)
+ pc = ioloop.PeriodicCallback(lambda : None, 10, loop)
+ pc.start()
+ dc.start()
+ t = Delay(loop.stop,1)
+ t.start()
+ loop.start()
+ if t.isAlive():
+ t.abort()
+ else:
+ self.fail("IOLoop failed to exit")
+
+ def test_timeout_compare(self):
+ """test timeout comparisons"""
+ loop = ioloop.IOLoop()
+ t = _Timeout(1, 2, loop)
+ t2 = _Timeout(1, 3, loop)
+ self.assertEqual(t < t2, id(t) < id(t2))
+ t2 = _Timeout(2,1, loop)
+ self.assertTrue(t < t2)
+
+ def test_poller_events(self):
+ """Tornado poller implementation maps events correctly"""
+ req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)
+ poller = ioloop.ZMQPoller()
+ poller.register(req, ioloop.IOLoop.READ)
+ poller.register(rep, ioloop.IOLoop.READ)
+ events = dict(poller.poll(0))
+ self.assertEqual(events.get(rep), None)
+ self.assertEqual(events.get(req), None)
+
+ poller.register(req, ioloop.IOLoop.WRITE)
+ poller.register(rep, ioloop.IOLoop.WRITE)
+ events = dict(poller.poll(1))
+ self.assertEqual(events.get(req), ioloop.IOLoop.WRITE)
+ self.assertEqual(events.get(rep), None)
+
+ poller.register(rep, ioloop.IOLoop.READ)
+ req.send(b'hi')
+ events = dict(poller.poll(1))
+ self.assertEqual(events.get(rep), ioloop.IOLoop.READ)
+ self.assertEqual(events.get(req), None)
+
+ def test_instance(self):
+ """Test IOLoop.instance returns the right object"""
+ loop = ioloop.IOLoop.instance()
+ self.assertEqual(loop.__class__, ioloop.IOLoop)
+ loop = BaseIOLoop.instance()
+ self.assertEqual(loop.__class__, ioloop.IOLoop)
+
+ def test_close_all(self):
+ """Test close(all_fds=True)"""
+ loop = ioloop.IOLoop.instance()
+ req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)
+ loop.add_handler(req, lambda msg: msg, ioloop.IOLoop.READ)
+ loop.add_handler(rep, lambda msg: msg, ioloop.IOLoop.READ)
+ self.assertEqual(req.closed, False)
+ self.assertEqual(rep.closed, False)
+ loop.close(all_fds=True)
+ self.assertEqual(req.closed, True)
+ self.assertEqual(rep.closed, True)
+
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_log.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_log.py
new file mode 100644
index 00000000..9206f095
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_log.py
@@ -0,0 +1,116 @@
+# encoding: utf-8
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import logging
+import time
+from unittest import TestCase
+
+import zmq
+from zmq.log import handlers
+from zmq.utils.strtypes import b, u
+from zmq.tests import BaseZMQTestCase
+
+
+class TestPubLog(BaseZMQTestCase):
+
+ iface = 'inproc://zmqlog'
+ topic= 'zmq'
+
+ @property
+ def logger(self):
+ # print dir(self)
+ logger = logging.getLogger('zmqtest')
+ logger.setLevel(logging.DEBUG)
+ return logger
+
+ def connect_handler(self, topic=None):
+ topic = self.topic if topic is None else topic
+ logger = self.logger
+ pub,sub = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ handler = handlers.PUBHandler(pub)
+ handler.setLevel(logging.DEBUG)
+ handler.root_topic = topic
+ logger.addHandler(handler)
+ sub.setsockopt(zmq.SUBSCRIBE, b(topic))
+ time.sleep(0.1)
+ return logger, handler, sub
+
+ def test_init_iface(self):
+ logger = self.logger
+ ctx = self.context
+ handler = handlers.PUBHandler(self.iface)
+ self.assertFalse(handler.ctx is ctx)
+ self.sockets.append(handler.socket)
+ # handler.ctx.term()
+ handler = handlers.PUBHandler(self.iface, self.context)
+ self.sockets.append(handler.socket)
+ self.assertTrue(handler.ctx is ctx)
+ handler.setLevel(logging.DEBUG)
+ handler.root_topic = self.topic
+ logger.addHandler(handler)
+ sub = ctx.socket(zmq.SUB)
+ self.sockets.append(sub)
+ sub.setsockopt(zmq.SUBSCRIBE, b(self.topic))
+ sub.connect(self.iface)
+ import time; time.sleep(0.25)
+ msg1 = 'message'
+ logger.info(msg1)
+
+ (topic, msg2) = sub.recv_multipart()
+ self.assertEqual(topic, b'zmq.INFO')
+ self.assertEqual(msg2, b(msg1)+b'\n')
+ logger.removeHandler(handler)
+
+ def test_init_socket(self):
+ pub,sub = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ logger = self.logger
+ handler = handlers.PUBHandler(pub)
+ handler.setLevel(logging.DEBUG)
+ handler.root_topic = self.topic
+ logger.addHandler(handler)
+
+ self.assertTrue(handler.socket is pub)
+ self.assertTrue(handler.ctx is pub.context)
+ self.assertTrue(handler.ctx is self.context)
+ sub.setsockopt(zmq.SUBSCRIBE, b(self.topic))
+ import time; time.sleep(0.1)
+ msg1 = 'message'
+ logger.info(msg1)
+
+ (topic, msg2) = sub.recv_multipart()
+ self.assertEqual(topic, b'zmq.INFO')
+ self.assertEqual(msg2, b(msg1)+b'\n')
+ logger.removeHandler(handler)
+
+ def test_root_topic(self):
+ logger, handler, sub = self.connect_handler()
+ handler.socket.bind(self.iface)
+ sub2 = sub.context.socket(zmq.SUB)
+ self.sockets.append(sub2)
+ sub2.connect(self.iface)
+ sub2.setsockopt(zmq.SUBSCRIBE, b'')
+ handler.root_topic = b'twoonly'
+ msg1 = 'ignored'
+ logger.info(msg1)
+ self.assertRaisesErrno(zmq.EAGAIN, sub.recv, zmq.NOBLOCK)
+ topic,msg2 = sub2.recv_multipart()
+ self.assertEqual(topic, b'twoonly.INFO')
+ self.assertEqual(msg2, b(msg1)+b'\n')
+
+ logger.removeHandler(handler)
+
+ def test_unicode_message(self):
+ logger, handler, sub = self.connect_handler()
+ base_topic = b(self.topic + '.INFO')
+ for msg, expected in [
+ (u('hello'), [base_topic, b('hello\n')]),
+ (u('héllo'), [base_topic, b('héllo\n')]),
+ (u('tøpic::héllo'), [base_topic + b('.tøpic'), b('héllo\n')]),
+ ]:
+ logger.info(msg)
+ received = sub.recv_multipart()
+ self.assertEqual(received, expected)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_message.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_message.py
new file mode 100644
index 00000000..d8770bdf
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_message.py
@@ -0,0 +1,362 @@
+# -*- coding: utf8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import copy
+import sys
+try:
+ from sys import getrefcount as grc
+except ImportError:
+ grc = None
+
+import time
+from pprint import pprint
+from unittest import TestCase
+
+import zmq
+from zmq.tests import BaseZMQTestCase, SkipTest, skip_pypy, PYPY
+from zmq.utils.strtypes import unicode, bytes, b, u
+
+
+# some useful constants:
+
+x = b'x'
+
+try:
+ view = memoryview
+except NameError:
+ view = buffer
+
+if grc:
+ rc0 = grc(x)
+ v = view(x)
+ view_rc = grc(x) - rc0
+
+def await_gc(obj, rc):
+ """wait for refcount on an object to drop to an expected value
+
+ Necessary because of the zero-copy gc thread,
+ which can take some time to receive its DECREF message.
+ """
+ for i in range(50):
+ # rc + 2 because of the refs in this function
+ if grc(obj) <= rc + 2:
+ return
+ time.sleep(0.05)
+
+class TestFrame(BaseZMQTestCase):
+
+ @skip_pypy
+ def test_above_30(self):
+ """Message above 30 bytes are never copied by 0MQ."""
+ for i in range(5, 16): # 32, 64,..., 65536
+ s = (2**i)*x
+ self.assertEqual(grc(s), 2)
+ m = zmq.Frame(s)
+ self.assertEqual(grc(s), 4)
+ del m
+ await_gc(s, 2)
+ self.assertEqual(grc(s), 2)
+ del s
+
+ def test_str(self):
+ """Test the str representations of the Frames."""
+ for i in range(16):
+ s = (2**i)*x
+ m = zmq.Frame(s)
+ m_str = str(m)
+ m_str_b = b(m_str) # py3compat
+ self.assertEqual(s, m_str_b)
+
+ def test_bytes(self):
+ """Test the Frame.bytes property."""
+ for i in range(1,16):
+ s = (2**i)*x
+ m = zmq.Frame(s)
+ b = m.bytes
+ self.assertEqual(s, m.bytes)
+ if not PYPY:
+ # check that it copies
+ self.assert_(b is not s)
+ # check that it copies only once
+ self.assert_(b is m.bytes)
+
+ def test_unicode(self):
+ """Test the unicode representations of the Frames."""
+ s = u('asdf')
+ self.assertRaises(TypeError, zmq.Frame, s)
+ for i in range(16):
+ s = (2**i)*u('§')
+ m = zmq.Frame(s.encode('utf8'))
+ self.assertEqual(s, unicode(m.bytes,'utf8'))
+
+ def test_len(self):
+ """Test the len of the Frames."""
+ for i in range(16):
+ s = (2**i)*x
+ m = zmq.Frame(s)
+ self.assertEqual(len(s), len(m))
+
+ @skip_pypy
+ def test_lifecycle1(self):
+ """Run through a ref counting cycle with a copy."""
+ for i in range(5, 16): # 32, 64,..., 65536
+ s = (2**i)*x
+ rc = 2
+ self.assertEqual(grc(s), rc)
+ m = zmq.Frame(s)
+ rc += 2
+ self.assertEqual(grc(s), rc)
+ m2 = copy.copy(m)
+ rc += 1
+ self.assertEqual(grc(s), rc)
+ buf = m2.buffer
+
+ rc += view_rc
+ self.assertEqual(grc(s), rc)
+
+ self.assertEqual(s, b(str(m)))
+ self.assertEqual(s, bytes(m2))
+ self.assertEqual(s, m.bytes)
+ # self.assert_(s is str(m))
+ # self.assert_(s is str(m2))
+ del m2
+ rc -= 1
+ self.assertEqual(grc(s), rc)
+ rc -= view_rc
+ del buf
+ self.assertEqual(grc(s), rc)
+ del m
+ rc -= 2
+ await_gc(s, rc)
+ self.assertEqual(grc(s), rc)
+ self.assertEqual(rc, 2)
+ del s
+
+ @skip_pypy
+ def test_lifecycle2(self):
+ """Run through a different ref counting cycle with a copy."""
+ for i in range(5, 16): # 32, 64,..., 65536
+ s = (2**i)*x
+ rc = 2
+ self.assertEqual(grc(s), rc)
+ m = zmq.Frame(s)
+ rc += 2
+ self.assertEqual(grc(s), rc)
+ m2 = copy.copy(m)
+ rc += 1
+ self.assertEqual(grc(s), rc)
+ buf = m.buffer
+ rc += view_rc
+ self.assertEqual(grc(s), rc)
+ self.assertEqual(s, b(str(m)))
+ self.assertEqual(s, bytes(m2))
+ self.assertEqual(s, m2.bytes)
+ self.assertEqual(s, m.bytes)
+ # self.assert_(s is str(m))
+ # self.assert_(s is str(m2))
+ del buf
+ self.assertEqual(grc(s), rc)
+ del m
+ # m.buffer is kept until m is del'd
+ rc -= view_rc
+ rc -= 1
+ self.assertEqual(grc(s), rc)
+ del m2
+ rc -= 2
+ await_gc(s, rc)
+ self.assertEqual(grc(s), rc)
+ self.assertEqual(rc, 2)
+ del s
+
+ @skip_pypy
+ def test_tracker(self):
+ m = zmq.Frame(b'asdf', track=True)
+ self.assertFalse(m.tracker.done)
+ pm = zmq.MessageTracker(m)
+ self.assertFalse(pm.done)
+ del m
+ for i in range(10):
+ if pm.done:
+ break
+ time.sleep(0.1)
+ self.assertTrue(pm.done)
+
+ def test_no_tracker(self):
+ m = zmq.Frame(b'asdf', track=False)
+ self.assertEqual(m.tracker, None)
+ m2 = copy.copy(m)
+ self.assertEqual(m2.tracker, None)
+ self.assertRaises(ValueError, zmq.MessageTracker, m)
+
+ @skip_pypy
+ def test_multi_tracker(self):
+ m = zmq.Frame(b'asdf', track=True)
+ m2 = zmq.Frame(b'whoda', track=True)
+ mt = zmq.MessageTracker(m,m2)
+ self.assertFalse(m.tracker.done)
+ self.assertFalse(mt.done)
+ self.assertRaises(zmq.NotDone, mt.wait, 0.1)
+ del m
+ time.sleep(0.1)
+ self.assertRaises(zmq.NotDone, mt.wait, 0.1)
+ self.assertFalse(mt.done)
+ del m2
+ self.assertTrue(mt.wait() is None)
+ self.assertTrue(mt.done)
+
+
+ def test_buffer_in(self):
+ """test using a buffer as input"""
+ ins = b("§§¶•ªº˜µ¬˚…∆˙åß∂©œ∑´†≈ç√")
+ m = zmq.Frame(view(ins))
+
+ def test_bad_buffer_in(self):
+ """test using a bad object"""
+ self.assertRaises(TypeError, zmq.Frame, 5)
+ self.assertRaises(TypeError, zmq.Frame, object())
+
+ def test_buffer_out(self):
+ """receiving buffered output"""
+ ins = b("§§¶•ªº˜µ¬˚…∆˙åß∂©œ∑´†≈ç√")
+ m = zmq.Frame(ins)
+ outb = m.buffer
+ self.assertTrue(isinstance(outb, view))
+ self.assert_(outb is m.buffer)
+ self.assert_(m.buffer is m.buffer)
+
+ def test_multisend(self):
+ """ensure that a message remains intact after multiple sends"""
+ a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ s = b"message"
+ m = zmq.Frame(s)
+ self.assertEqual(s, m.bytes)
+
+ a.send(m, copy=False)
+ time.sleep(0.1)
+ self.assertEqual(s, m.bytes)
+ a.send(m, copy=False)
+ time.sleep(0.1)
+ self.assertEqual(s, m.bytes)
+ a.send(m, copy=True)
+ time.sleep(0.1)
+ self.assertEqual(s, m.bytes)
+ a.send(m, copy=True)
+ time.sleep(0.1)
+ self.assertEqual(s, m.bytes)
+ for i in range(4):
+ r = b.recv()
+ self.assertEqual(s,r)
+ self.assertEqual(s, m.bytes)
+
+ def test_buffer_numpy(self):
+ """test non-copying numpy array messages"""
+ try:
+ import numpy
+ except ImportError:
+ raise SkipTest("numpy required")
+ rand = numpy.random.randint
+ shapes = [ rand(2,16) for i in range(5) ]
+ for i in range(1,len(shapes)+1):
+ shape = shapes[:i]
+ A = numpy.random.random(shape)
+ m = zmq.Frame(A)
+ if view.__name__ == 'buffer':
+ self.assertEqual(A.data, m.buffer)
+ B = numpy.frombuffer(m.buffer,dtype=A.dtype).reshape(A.shape)
+ else:
+ self.assertEqual(memoryview(A), m.buffer)
+ B = numpy.array(m.buffer,dtype=A.dtype).reshape(A.shape)
+ self.assertEqual((A==B).all(), True)
+
+ def test_memoryview(self):
+ """test messages from memoryview"""
+ major,minor = sys.version_info[:2]
+ if not (major >= 3 or (major == 2 and minor >= 7)):
+ raise SkipTest("memoryviews only in python >= 2.7")
+
+ s = b'carrotjuice'
+ v = memoryview(s)
+ m = zmq.Frame(s)
+ buf = m.buffer
+ s2 = buf.tobytes()
+ self.assertEqual(s2,s)
+ self.assertEqual(m.bytes,s)
+
+ def test_noncopying_recv(self):
+ """check for clobbering message buffers"""
+ null = b'\0'*64
+ sa,sb = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ for i in range(32):
+ # try a few times
+ sb.send(null, copy=False)
+ m = sa.recv(copy=False)
+ mb = m.bytes
+ # buf = view(m)
+ buf = m.buffer
+ del m
+ for i in range(5):
+ ff=b'\xff'*(40 + i*10)
+ sb.send(ff, copy=False)
+ m2 = sa.recv(copy=False)
+ if view.__name__ == 'buffer':
+ b = bytes(buf)
+ else:
+ b = buf.tobytes()
+ self.assertEqual(b, null)
+ self.assertEqual(mb, null)
+ self.assertEqual(m2.bytes, ff)
+
+ @skip_pypy
+ def test_buffer_numpy(self):
+ """test non-copying numpy array messages"""
+ try:
+ import numpy
+ except ImportError:
+ raise SkipTest("requires numpy")
+ if sys.version_info < (2,7):
+ raise SkipTest("requires new-style buffer interface (py >= 2.7)")
+ rand = numpy.random.randint
+ shapes = [ rand(2,5) for i in range(5) ]
+ a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ dtypes = [int, float, '>i4', 'B']
+ for i in range(1,len(shapes)+1):
+ shape = shapes[:i]
+ for dt in dtypes:
+ A = numpy.empty(shape, dtype=dt)
+ while numpy.isnan(A).any():
+ # don't let nan sneak in
+ A = numpy.ndarray(shape, dtype=dt)
+ a.send(A, copy=False)
+ msg = b.recv(copy=False)
+
+ B = numpy.frombuffer(msg, A.dtype).reshape(A.shape)
+ self.assertEqual(A.shape, B.shape)
+ self.assertTrue((A==B).all())
+ A = numpy.empty(shape, dtype=[('a', int), ('b', float), ('c', 'a32')])
+ A['a'] = 1024
+ A['b'] = 1e9
+ A['c'] = 'hello there'
+ a.send(A, copy=False)
+ msg = b.recv(copy=False)
+
+ B = numpy.frombuffer(msg, A.dtype).reshape(A.shape)
+ self.assertEqual(A.shape, B.shape)
+ self.assertTrue((A==B).all())
+
+ def test_frame_more(self):
+ """test Frame.more attribute"""
+ frame = zmq.Frame(b"hello")
+ self.assertFalse(frame.more)
+ sa,sb = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ sa.send_multipart([b'hi', b'there'])
+ frame = self.recv(sb, copy=False)
+ self.assertTrue(frame.more)
+ if zmq.zmq_version_info()[0] >= 3 and not PYPY:
+ self.assertTrue(frame.get(zmq.MORE))
+ frame = self.recv(sb, copy=False)
+ self.assertFalse(frame.more)
+ if zmq.zmq_version_info()[0] >= 3 and not PYPY:
+ self.assertFalse(frame.get(zmq.MORE))
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_monitor.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_monitor.py
new file mode 100644
index 00000000..4f035388
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_monitor.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import sys
+import time
+import struct
+
+from unittest import TestCase
+
+import zmq
+from zmq.tests import BaseZMQTestCase, skip_if, skip_pypy
+from zmq.utils.monitor import recv_monitor_message
+
+skip_lt_4 = skip_if(zmq.zmq_version_info() < (4,), "requires zmq >= 4")
+
+class TestSocketMonitor(BaseZMQTestCase):
+    """Tests for the socket-monitoring interface (requires libzmq >= 4)."""
+
+    @skip_lt_4
+    def test_monitor(self):
+        """Test monitoring interface for sockets."""
+        s_rep = self.context.socket(zmq.REP)
+        s_req = self.context.socket(zmq.REQ)
+        self.sockets.extend([s_rep, s_req])
+        s_req.bind("tcp://127.0.0.1:6666")
+        # try monitoring the REP socket
+
+        s_rep.monitor("inproc://monitor.rep", zmq.EVENT_ALL)
+        # create listening socket for monitor
+        s_event = self.context.socket(zmq.PAIR)
+        self.sockets.append(s_event)
+        s_event.connect("inproc://monitor.rep")
+        s_event.linger = 0
+        # test receive event for connect event
+        s_rep.connect("tcp://127.0.0.1:6666")
+        m = recv_monitor_message(s_event)
+        # CONNECT_DELAYED may or may not precede CONNECTED depending on timing
+        if m['event'] == zmq.EVENT_CONNECT_DELAYED:
+            self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6666")
+            # test receive event for connected event
+            m = recv_monitor_message(s_event)
+        self.assertEqual(m['event'], zmq.EVENT_CONNECTED)
+        self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6666")
+
+        # test monitor can be disabled.
+        s_rep.disable_monitor()
+        m = recv_monitor_message(s_event)
+        self.assertEqual(m['event'], zmq.EVENT_MONITOR_STOPPED)
+
+
+    @skip_lt_4
+    def test_monitor_connected(self):
+        """Test connected monitoring socket."""
+        s_rep = self.context.socket(zmq.REP)
+        s_req = self.context.socket(zmq.REQ)
+        self.sockets.extend([s_rep, s_req])
+        s_req.bind("tcp://127.0.0.1:6667")
+        # try monitoring the REP socket
+        # create listening socket for monitor
+        # get_monitor_socket() sets up the monitor endpoint automatically
+        s_event = s_rep.get_monitor_socket()
+        s_event.linger = 0
+        self.sockets.append(s_event)
+        # test receive event for connect event
+        s_rep.connect("tcp://127.0.0.1:6667")
+        m = recv_monitor_message(s_event)
+        if m['event'] == zmq.EVENT_CONNECT_DELAYED:
+            self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6667")
+            # test receive event for connected event
+            m = recv_monitor_message(s_event)
+        self.assertEqual(m['event'], zmq.EVENT_CONNECTED)
+        self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6667")
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_monqueue.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_monqueue.py
new file mode 100644
index 00000000..e855602e
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_monqueue.py
@@ -0,0 +1,227 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import time
+from unittest import TestCase
+
+import zmq
+from zmq import devices
+
+from zmq.tests import BaseZMQTestCase, SkipTest, PYPY
+from zmq.utils.strtypes import unicode
+
+
+if PYPY or zmq.zmq_version_info() >= (4,1):
+    # cleanup of shared Context doesn't work on PyPy
+    # there also seems to be a bug in cleanup in libzmq-4.1 (zeromq/libzmq#1052)
+    # so give each device its own Context rather than the shared default
+    devices.Device.context_factory = zmq.Context
+
+
+class TestMonitoredQueue(BaseZMQTestCase):
+    """Tests for devices.ThreadMonitoredQueue: forwarding plus a PUB monitor
+    that re-publishes traffic tagged with an in/out prefix frame."""
+
+    # NOTE(review): mutable class-level attribute shared by all instances;
+    # presumably shadowed per-instance by BaseZMQTestCase.setUp — confirm.
+    sockets = []
+
+    def build_device(self, mon_sub=b"", in_prefix=b'in', out_prefix=b'out'):
+        # PAIR <-> PAIR queue with a PUB monitor; mon_sub filters which
+        # direction(s) the monitor SUB socket subscribes to
+        self.device = devices.ThreadMonitoredQueue(zmq.PAIR, zmq.PAIR, zmq.PUB,
+                                            in_prefix, out_prefix)
+        alice = self.context.socket(zmq.PAIR)
+        bob = self.context.socket(zmq.PAIR)
+        mon = self.context.socket(zmq.SUB)
+
+        aport = alice.bind_to_random_port('tcp://127.0.0.1')
+        bport = bob.bind_to_random_port('tcp://127.0.0.1')
+        mport = mon.bind_to_random_port('tcp://127.0.0.1')
+        mon.setsockopt(zmq.SUBSCRIBE, mon_sub)
+
+        self.device.connect_in("tcp://127.0.0.1:%i"%aport)
+        self.device.connect_out("tcp://127.0.0.1:%i"%bport)
+        self.device.connect_mon("tcp://127.0.0.1:%i"%mport)
+        self.device.start()
+        time.sleep(.2)
+        try:
+            # this is currenlty necessary to ensure no dropped monitor messages
+            # see LIBZMQ-248 for more info
+            mon.recv_multipart(zmq.NOBLOCK)
+        except zmq.ZMQError:
+            pass
+        self.sockets.extend([alice, bob, mon])
+        return alice, bob, mon
+
+
+    def teardown_device(self):
+        # close every socket opened by build_device, then drop the device
+        for socket in self.sockets:
+            socket.close()
+            del socket
+        del self.device
+
+    def test_reply(self):
+        # single message each way through the queue
+        alice, bob, mon = self.build_device()
+        alices = b"hello bob".split()
+        alice.send_multipart(alices)
+        bobs = self.recv_multipart(bob)
+        self.assertEqual(alices, bobs)
+        bobs = b"hello alice".split()
+        bob.send_multipart(bobs)
+        alices = self.recv_multipart(alice)
+        self.assertEqual(alices, bobs)
+        self.teardown_device()
+
+    def test_queue(self):
+        # several queued messages must arrive in FIFO order
+        alice, bob, mon = self.build_device()
+        alices = b"hello bob".split()
+        alice.send_multipart(alices)
+        alices2 = b"hello again".split()
+        alice.send_multipart(alices2)
+        alices3 = b"hello again and again".split()
+        alice.send_multipart(alices3)
+        bobs = self.recv_multipart(bob)
+        self.assertEqual(alices, bobs)
+        bobs = self.recv_multipart(bob)
+        self.assertEqual(alices2, bobs)
+        bobs = self.recv_multipart(bob)
+        self.assertEqual(alices3, bobs)
+        bobs = b"hello alice".split()
+        bob.send_multipart(bobs)
+        alices = self.recv_multipart(alice)
+        self.assertEqual(alices, bobs)
+        self.teardown_device()
+
+    def test_monitor(self):
+        # monitor republishes each message with the b'in'/b'out' prefix frame
+        alice, bob, mon = self.build_device()
+        alices = b"hello bob".split()
+        alice.send_multipart(alices)
+        alices2 = b"hello again".split()
+        alice.send_multipart(alices2)
+        alices3 = b"hello again and again".split()
+        alice.send_multipart(alices3)
+        bobs = self.recv_multipart(bob)
+        self.assertEqual(alices, bobs)
+        mons = self.recv_multipart(mon)
+        self.assertEqual([b'in']+bobs, mons)
+        bobs = self.recv_multipart(bob)
+        self.assertEqual(alices2, bobs)
+        bobs = self.recv_multipart(bob)
+        self.assertEqual(alices3, bobs)
+        mons = self.recv_multipart(mon)
+        self.assertEqual([b'in']+alices2, mons)
+        bobs = b"hello alice".split()
+        bob.send_multipart(bobs)
+        alices = self.recv_multipart(alice)
+        self.assertEqual(alices, bobs)
+        mons = self.recv_multipart(mon)
+        self.assertEqual([b'in']+alices3, mons)
+        mons = self.recv_multipart(mon)
+        self.assertEqual([b'out']+bobs, mons)
+        self.teardown_device()
+
+    def test_prefix(self):
+        # custom in/out prefixes (b'foo'/b'bar') replace the defaults
+        alice, bob, mon = self.build_device(b"", b'foo', b'bar')
+        alices = b"hello bob".split()
+        alice.send_multipart(alices)
+        alices2 = b"hello again".split()
+        alice.send_multipart(alices2)
+        alices3 = b"hello again and again".split()
+        alice.send_multipart(alices3)
+        bobs = self.recv_multipart(bob)
+        self.assertEqual(alices, bobs)
+        mons = self.recv_multipart(mon)
+        self.assertEqual([b'foo']+bobs, mons)
+        bobs = self.recv_multipart(bob)
+        self.assertEqual(alices2, bobs)
+        bobs = self.recv_multipart(bob)
+        self.assertEqual(alices3, bobs)
+        mons = self.recv_multipart(mon)
+        self.assertEqual([b'foo']+alices2, mons)
+        bobs = b"hello alice".split()
+        bob.send_multipart(bobs)
+        alices = self.recv_multipart(alice)
+        self.assertEqual(alices, bobs)
+        mons = self.recv_multipart(mon)
+        self.assertEqual([b'foo']+alices3, mons)
+        mons = self.recv_multipart(mon)
+        self.assertEqual([b'bar']+bobs, mons)
+        self.teardown_device()
+
+    def test_monitor_subscribe(self):
+        # SUB filter b'out' means only bob->alice traffic reaches the monitor
+        alice, bob, mon = self.build_device(b"out")
+        alices = b"hello bob".split()
+        alice.send_multipart(alices)
+        alices2 = b"hello again".split()
+        alice.send_multipart(alices2)
+        alices3 = b"hello again and again".split()
+        alice.send_multipart(alices3)
+        bobs = self.recv_multipart(bob)
+        self.assertEqual(alices, bobs)
+        bobs = self.recv_multipart(bob)
+        self.assertEqual(alices2, bobs)
+        bobs = self.recv_multipart(bob)
+        self.assertEqual(alices3, bobs)
+        bobs = b"hello alice".split()
+        bob.send_multipart(bobs)
+        alices = self.recv_multipart(alice)
+        self.assertEqual(alices, bobs)
+        mons = self.recv_multipart(mon)
+        self.assertEqual([b'out']+bobs, mons)
+        self.teardown_device()
+
+    def test_router_router(self):
+        """test router-router MQ devices"""
+        dev = devices.ThreadMonitoredQueue(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'in', b'out')
+        self.device = dev
+        dev.setsockopt_in(zmq.LINGER, 0)
+        dev.setsockopt_out(zmq.LINGER, 0)
+        dev.setsockopt_mon(zmq.LINGER, 0)
+
+        # grab two free ports, then release them for the device to bind
+        binder = self.context.socket(zmq.DEALER)
+        porta = binder.bind_to_random_port('tcp://127.0.0.1')
+        portb = binder.bind_to_random_port('tcp://127.0.0.1')
+        binder.close()
+        time.sleep(0.1)
+        a = self.context.socket(zmq.DEALER)
+        a.identity = b'a'
+        b = self.context.socket(zmq.DEALER)
+        b.identity = b'b'
+        self.sockets.extend([a, b])
+
+        a.connect('tcp://127.0.0.1:%i'%porta)
+        dev.bind_in('tcp://127.0.0.1:%i'%porta)
+        b.connect('tcp://127.0.0.1:%i'%portb)
+        dev.bind_out('tcp://127.0.0.1:%i'%portb)
+        dev.start()
+        time.sleep(0.2)
+        if zmq.zmq_version_info() >= (3,1,0):
+            # flush erroneous poll state, due to LIBZMQ-280
+            ping_msg = [ b'ping', b'pong' ]
+            for s in (a,b):
+                s.send_multipart(ping_msg)
+                try:
+                    s.recv(zmq.NOBLOCK)
+                except zmq.ZMQError:
+                    pass
+        # ROUTER expects the destination identity as the leading frame
+        msg = [ b'hello', b'there' ]
+        a.send_multipart([b'b']+msg)
+        bmsg = self.recv_multipart(b)
+        self.assertEqual(bmsg, [b'a']+msg)
+        b.send_multipart(bmsg)
+        amsg = self.recv_multipart(a)
+        self.assertEqual(amsg, [b'b']+msg)
+        self.teardown_device()
+
+    def test_default_mq_args(self):
+        self.device = dev = devices.ThreadMonitoredQueue(zmq.ROUTER, zmq.DEALER, zmq.PUB)
+        dev.setsockopt_in(zmq.LINGER, 0)
+        dev.setsockopt_out(zmq.LINGER, 0)
+        dev.setsockopt_mon(zmq.LINGER, 0)
+        # this will raise if default args are wrong
+        dev.start()
+        self.teardown_device()
+
+    def test_mq_check_prefix(self):
+        ins = self.context.socket(zmq.ROUTER)
+        outs = self.context.socket(zmq.DEALER)
+        mons = self.context.socket(zmq.PUB)
+        self.sockets.extend([ins, outs, mons])
+
+        # deliberately replace sockets with str prefixes:
+        # monitoredqueue must reject non-bytes prefix arguments
+        ins = unicode('in')
+        outs = unicode('out')
+        self.assertRaises(TypeError, devices.monitoredqueue, ins, outs, mons)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_multipart.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_multipart.py
new file mode 100644
index 00000000..24d41be0
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_multipart.py
@@ -0,0 +1,35 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import zmq
+
+
+from zmq.tests import BaseZMQTestCase, SkipTest, have_gevent, GreenTest
+
+
+class TestMultipart(BaseZMQTestCase):
+    """Multipart send/recv and the rcvmore flag."""
+
+    def test_router_dealer(self):
+        # ROUTER prepends the sender's identity as an extra leading frame
+        router, dealer = self.create_bound_pair(zmq.ROUTER, zmq.DEALER)
+
+        msg1 = b'message1'
+        dealer.send(msg1)
+        ident = self.recv(router)
+        # identity frame arrives first, with the payload still pending
+        more = router.rcvmore
+        self.assertEqual(more, True)
+        msg2 = self.recv(router)
+        self.assertEqual(msg1, msg2)
+        # payload was the final frame
+        more = router.rcvmore
+        self.assertEqual(more, False)
+
+    def test_basic_multipart(self):
+        # a multipart message round-trips frame-for-frame
+        a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+        msg = [ b'hi', b'there', b'b']
+        a.send_multipart(msg)
+        recvd = b.recv_multipart()
+        self.assertEqual(msg, recvd)
+
+if have_gevent:
+    # re-run the multipart suite on gevent green sockets
+    class TestMultipartGreen(GreenTest, TestMultipart):
+        pass
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_pair.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_pair.py
new file mode 100644
index 00000000..e88c1e8b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_pair.py
@@ -0,0 +1,53 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import zmq
+
+
+from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest
+
+
+x = b' '
+class TestPair(BaseZMQTestCase):
+    """Exclusive PAIR <-> PAIR socket tests."""
+
+    def test_basic(self):
+        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+
+        msg1 = b'message1'
+        msg2 = self.ping_pong(s1, s2, msg1)
+        self.assertEqual(msg1, msg2)
+
+    def test_multiple(self):
+        # queue ten messages in each direction before draining either side;
+        # lengths 0..9 (i*x) also exercise the empty message
+        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+
+        for i in range(10):
+            msg = i*x
+            s1.send(msg)
+
+        for i in range(10):
+            msg = i*x
+            s2.send(msg)
+
+        for i in range(10):
+            msg = s1.recv()
+            self.assertEqual(msg, i*x)
+
+        for i in range(10):
+            msg = s2.recv()
+            self.assertEqual(msg, i*x)
+
+    def test_json(self):
+        # smoke test: JSON round-trip must not raise
+        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+        o = dict(a=10,b=list(range(10)))
+        o2 = self.ping_pong_json(s1, s2, o)
+
+    def test_pyobj(self):
+        # smoke test: pickle round-trip must not raise
+        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+        o = dict(a=10,b=range(10))
+        o2 = self.ping_pong_pyobj(s1, s2, o)
+
+if have_gevent:
+    # NOTE(review): name looks copy-pasted from test_reqrep.py — this green
+    # suite actually re-runs TestPair; consider renaming to TestPairGreen.
+    class TestReqRepGreen(GreenTest, TestPair):
+        pass
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_poll.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_poll.py
new file mode 100644
index 00000000..57346c89
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_poll.py
@@ -0,0 +1,229 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import time
+from unittest import TestCase
+
+import zmq
+
+from zmq.tests import PollZMQTestCase, have_gevent, GreenTest
+
+def wait():
+    # brief sleep so sockets finish connecting before we poll them
+    time.sleep(.25)
+
+
+class TestPoll(PollZMQTestCase):
+    """zmq.Poller state transitions across socket types, plus timeout units.
+
+    Timing-sensitive: relies on wait()/sleep for connection setup and delivery.
+    """
+
+    # Poller class under test; overridden by the gevent subclass below
+    Poller = zmq.Poller
+
+    # This test is failing due to this issue:
+    # http://github.com/sustrik/zeromq2/issues#issue/26
+    def test_pair(self):
+        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+
+        # Sleep to allow sockets to connect.
+        wait()
+
+        poller = self.Poller()
+        poller.register(s1, zmq.POLLIN|zmq.POLLOUT)
+        poller.register(s2, zmq.POLLIN|zmq.POLLOUT)
+        # Poll result should contain both sockets
+        socks = dict(poller.poll())
+        # Now make sure that both are send ready.
+        self.assertEqual(socks[s1], zmq.POLLOUT)
+        self.assertEqual(socks[s2], zmq.POLLOUT)
+        # Now do a send on both, wait and test for zmq.POLLOUT|zmq.POLLIN
+        s1.send(b'msg1')
+        s2.send(b'msg2')
+        wait()
+        socks = dict(poller.poll())
+        self.assertEqual(socks[s1], zmq.POLLOUT|zmq.POLLIN)
+        self.assertEqual(socks[s2], zmq.POLLOUT|zmq.POLLIN)
+        # Make sure that both are in POLLOUT after recv.
+        s1.recv()
+        s2.recv()
+        socks = dict(poller.poll())
+        self.assertEqual(socks[s1], zmq.POLLOUT)
+        self.assertEqual(socks[s2], zmq.POLLOUT)
+
+        poller.unregister(s1)
+        poller.unregister(s2)
+
+        # Wait for everything to finish.
+        wait()
+
+    def test_reqrep(self):
+        # REQ/REP enforce strict send/recv alternation; the poller must
+        # reflect each state of that lockstep protocol
+        s1, s2 = self.create_bound_pair(zmq.REP, zmq.REQ)
+
+        # Sleep to allow sockets to connect.
+        wait()
+
+        poller = self.Poller()
+        poller.register(s1, zmq.POLLIN|zmq.POLLOUT)
+        poller.register(s2, zmq.POLLIN|zmq.POLLOUT)
+
+        # Make sure that s1 is in state 0 and s2 is in POLLOUT
+        socks = dict(poller.poll())
+        self.assertEqual(s1 in socks, 0)
+        self.assertEqual(socks[s2], zmq.POLLOUT)
+
+        # Make sure that s2 goes immediately into state 0 after send.
+        s2.send(b'msg1')
+        socks = dict(poller.poll())
+        self.assertEqual(s2 in socks, 0)
+
+        # Make sure that s1 goes into POLLIN state after a time.sleep().
+        time.sleep(0.5)
+        socks = dict(poller.poll())
+        self.assertEqual(socks[s1], zmq.POLLIN)
+
+        # Make sure that s1 goes into POLLOUT after recv.
+        s1.recv()
+        socks = dict(poller.poll())
+        self.assertEqual(socks[s1], zmq.POLLOUT)
+
+        # Make sure s1 goes into state 0 after send.
+        s1.send(b'msg2')
+        socks = dict(poller.poll())
+        self.assertEqual(s1 in socks, 0)
+
+        # Wait and then see that s2 is in POLLIN.
+        time.sleep(0.5)
+        socks = dict(poller.poll())
+        self.assertEqual(socks[s2], zmq.POLLIN)
+
+        # Make sure that s2 is in POLLOUT after recv.
+        s2.recv()
+        socks = dict(poller.poll())
+        self.assertEqual(socks[s2], zmq.POLLOUT)
+
+        poller.unregister(s1)
+        poller.unregister(s2)
+
+        # Wait for everything to finish.
+        wait()
+
+    def test_no_events(self):
+        # re-registering with 0 events removes the socket from the poller
+        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+        poller = self.Poller()
+        poller.register(s1, zmq.POLLIN|zmq.POLLOUT)
+        poller.register(s2, 0)
+        self.assertTrue(s1 in poller)
+        self.assertFalse(s2 in poller)
+        poller.register(s1, 0)
+        self.assertFalse(s1 in poller)
+
+    def test_pubsub(self):
+        s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
+        s2.setsockopt(zmq.SUBSCRIBE, b'')
+
+        # Sleep to allow sockets to connect.
+        wait()
+
+        poller = self.Poller()
+        poller.register(s1, zmq.POLLIN|zmq.POLLOUT)
+        poller.register(s2, zmq.POLLIN)
+
+        # Now make sure that both are send ready.
+        socks = dict(poller.poll())
+        self.assertEqual(socks[s1], zmq.POLLOUT)
+        self.assertEqual(s2 in socks, 0)
+        # Make sure that s1 stays in POLLOUT after a send.
+        s1.send(b'msg1')
+        socks = dict(poller.poll())
+        self.assertEqual(socks[s1], zmq.POLLOUT)
+
+        # Make sure that s2 is POLLIN after waiting.
+        wait()
+        socks = dict(poller.poll())
+        self.assertEqual(socks[s2], zmq.POLLIN)
+
+        # Make sure that s2 goes into 0 after recv.
+        s2.recv()
+        socks = dict(poller.poll())
+        self.assertEqual(s2 in socks, 0)
+
+        poller.unregister(s1)
+        poller.unregister(s2)
+
+        # Wait for everything to finish.
+        wait()
+
+    def test_timeout(self):
+        """make sure Poller.poll timeout has the right units (milliseconds)."""
+        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+        poller = self.Poller()
+        poller.register(s1, zmq.POLLIN)
+        tic = time.time()
+        evt = poller.poll(.005)
+        toc = time.time()
+        self.assertTrue(toc-tic < 0.1)
+        tic = time.time()
+        evt = poller.poll(5)
+        toc = time.time()
+        self.assertTrue(toc-tic < 0.1)
+        self.assertTrue(toc-tic > .001)
+        tic = time.time()
+        evt = poller.poll(500)
+        toc = time.time()
+        self.assertTrue(toc-tic < 1)
+        self.assertTrue(toc-tic > 0.1)
+
+class TestSelect(PollZMQTestCase):
+    """Tests for zmq.select (select()-style interface over zmq polling)."""
+
+    def test_pair(self):
+        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+
+        # Sleep to allow sockets to connect.
+        wait()
+
+        rlist, wlist, xlist = zmq.select([s1, s2], [s1, s2], [s1, s2])
+        # freshly connected PAIR sockets are writable, with nothing to read
+        # NOTE(review): assert_ is a deprecated unittest alias of assertTrue
+        self.assert_(s1 in wlist)
+        self.assert_(s2 in wlist)
+        self.assert_(s1 not in rlist)
+        self.assert_(s2 not in rlist)
+
+    def test_timeout(self):
+        """make sure select timeout has the right units (seconds)."""
+        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+        tic = time.time()
+        r,w,x = zmq.select([s1,s2],[],[],.005)
+        toc = time.time()
+        self.assertTrue(toc-tic < 1)
+        self.assertTrue(toc-tic > 0.001)
+        tic = time.time()
+        r,w,x = zmq.select([s1,s2],[],[],.25)
+        toc = time.time()
+        self.assertTrue(toc-tic < 1)
+        self.assertTrue(toc-tic > 0.1)
+
+
+if have_gevent:
+    import gevent
+    from zmq import green as gzmq
+
+    class TestPollGreen(GreenTest, TestPoll):
+        # run the full polling suite against gevent's green Poller
+        Poller = gzmq.Poller
+
+        def test_wakeup(self):
+            # a green poll blocked on s2 must wake promptly when another
+            # greenlet sends on s1, well before the 10s timeout
+            s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+            poller = self.Poller()
+            poller.register(s2, zmq.POLLIN)
+
+            tic = time.time()
+            r = gevent.spawn(lambda: poller.poll(10000))
+            s = gevent.spawn(lambda: s1.send(b'msg1'))
+            r.join()
+            toc = time.time()
+            self.assertTrue(toc-tic < 1)
+
+        def test_socket_poll(self):
+            # same wakeup guarantee for Socket.poll on a green socket
+            s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+
+            tic = time.time()
+            r = gevent.spawn(lambda: s2.poll(10000))
+            s = gevent.spawn(lambda: s1.send(b'msg1'))
+            r.join()
+            toc = time.time()
+            self.assertTrue(toc-tic < 1)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_pubsub.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_pubsub.py
new file mode 100644
index 00000000..a3ee22aa
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_pubsub.py
@@ -0,0 +1,41 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import time
+from unittest import TestCase
+
+import zmq
+
+from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest
+
+
+class TestPubSub(BaseZMQTestCase):
+    """Basic PUB/SUB delivery and topic filtering.
+
+    Both tests sleep briefly after subscribing so the subscription has time
+    to propagate to the publisher before anything is sent.
+    """
+
+    def test_basic(self):
+        # a SUB socket subscribed to everything receives a published message
+        s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
+        s2.setsockopt(zmq.SUBSCRIBE,b'')
+        time.sleep(0.1)
+        msg1 = b'message'
+        s1.send(msg1)
+        msg2 = s2.recv()  # This is blocking!
+        self.assertEqual(msg1, msg2)
+
+    def test_topic(self):
+        # topic filter b'x' is a prefix match: b'message' is dropped,
+        # b'xmessage' is delivered
+        s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
+        s2.setsockopt(zmq.SUBSCRIBE, b'x')
+        time.sleep(0.1)
+        msg1 = b'message'
+        s1.send(msg1)
+        self.assertRaisesErrno(zmq.EAGAIN, s2.recv, zmq.NOBLOCK)
+        msg1 = b'xmessage'
+        s1.send(msg1)
+        msg2 = s2.recv()
+        self.assertEqual(msg1, msg2)
+
+if have_gevent:
+    # re-run the PUB/SUB suite on gevent green sockets
+    class TestPubSubGreen(GreenTest, TestPubSub):
+        pass
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_reqrep.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_reqrep.py
new file mode 100644
index 00000000..de17f2b3
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_reqrep.py
@@ -0,0 +1,62 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from unittest import TestCase
+
+import zmq
+from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest
+
+
+class TestReqRep(BaseZMQTestCase):
+    """REQ <-> REP lockstep protocol tests."""
+
+    def test_basic(self):
+        s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+
+        msg1 = b'message 1'
+        msg2 = self.ping_pong(s1, s2, msg1)
+        self.assertEqual(msg1, msg2)
+
+    def test_multiple(self):
+        # message lengths 0..9 (i*b' ') including the empty message
+        s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+
+        for i in range(10):
+            msg1 = i*b' '
+            msg2 = self.ping_pong(s1, s2, msg1)
+            self.assertEqual(msg1, msg2)
+
+    def test_bad_send_recv(self):
+        # out-of-turn recv on REQ / send on REP must fail with EFSM
+        s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+
+        if zmq.zmq_version() != '2.1.8':
+            # this doesn't work on 2.1.8
+            for copy in (True,False):
+                self.assertRaisesErrno(zmq.EFSM, s1.recv, copy=copy)
+                self.assertRaisesErrno(zmq.EFSM, s2.send, b'asdf', copy=copy)
+
+        # I have to have this or we die on an Abort trap.
+        msg1 = b'asdf'
+        msg2 = self.ping_pong(s1, s2, msg1)
+        self.assertEqual(msg1, msg2)
+
+    def test_json(self):
+        # smoke test: JSON round-trip must not raise
+        s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+        o = dict(a=10,b=list(range(10)))
+        o2 = self.ping_pong_json(s1, s2, o)
+
+    def test_pyobj(self):
+        # smoke test: pickle round-trip must not raise
+        s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+        o = dict(a=10,b=range(10))
+        o2 = self.ping_pong_pyobj(s1, s2, o)
+
+    def test_large_msg(self):
+        # 10kB payload, round-tripped ten times
+        s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+        msg1 = 10000*b'X'
+
+        for i in range(10):
+            msg2 = self.ping_pong(s1, s2, msg1)
+            self.assertEqual(msg1, msg2)
+
+if have_gevent:
+    # re-run the REQ/REP suite on gevent green sockets
+    class TestReqRepGreen(GreenTest, TestReqRep):
+        pass
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_security.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_security.py
new file mode 100644
index 00000000..687b7e0f
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_security.py
@@ -0,0 +1,212 @@
+"""Test libzmq security (libzmq >= 3.3.0)"""
+# -*- coding: utf8 -*-
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import os
+from threading import Thread
+
+import zmq
+from zmq.tests import (
+ BaseZMQTestCase, SkipTest, PYPY
+)
+from zmq.utils import z85
+
+
+USER = b"admin"
+PASS = b"password"
+
+class TestSecurity(BaseZMQTestCase):
+    """NULL / PLAIN / CURVE security mechanism tests (libzmq >= 4).
+
+    A one-shot ZAP handler thread answers exactly one authentication
+    request per test that starts it.
+    """
+
+    def setUp(self):
+        if zmq.zmq_version_info() < (4,0):
+            raise SkipTest("security is new in libzmq 4.0")
+        try:
+            zmq.curve_keypair()
+        except zmq.ZMQError:
+            raise SkipTest("security requires libzmq to be linked against libsodium")
+        super(TestSecurity, self).setUp()
+
+
+    def zap_handler(self):
+        # serve a single ZAP request on the well-known inproc endpoint,
+        # approving CURVE and NULL always, PLAIN only for USER/PASS
+        socket = self.context.socket(zmq.REP)
+        socket.bind("inproc://zeromq.zap.01")
+        try:
+            msg = self.recv_multipart(socket)
+
+            version, sequence, domain, address, identity, mechanism = msg[:6]
+            if mechanism == b'PLAIN':
+                username, password = msg[6:]
+            elif mechanism == b'CURVE':
+                key = msg[6]
+
+            self.assertEqual(version, b"1.0")
+            self.assertEqual(identity, b"IDENT")
+            reply = [version, sequence]
+            if mechanism == b'CURVE' or \
+                (mechanism == b'PLAIN' and username == USER and password == PASS) or \
+                (mechanism == b'NULL'):
+                reply.extend([
+                    b"200",
+                    b"OK",
+                    b"anonymous",
+                    # ZAP metadata blob: Hello=World property
+                    b"\5Hello\0\0\0\5World",
+                ])
+            else:
+                reply.extend([
+                    b"400",
+                    b"Invalid username or password",
+                    b"",
+                    b"",
+                ])
+            socket.send_multipart(reply)
+        finally:
+            socket.close()
+
+    def start_zap(self):
+        self.zap_thread = Thread(target=self.zap_handler)
+        self.zap_thread.start()
+
+    def stop_zap(self):
+        self.zap_thread.join()
+
+    def bounce(self, server, client, test_metadata=True):
+        # round-trip two random frames client->server->client, optionally
+        # checking the ZAP-supplied metadata on the received frames
+        msg = [os.urandom(64), os.urandom(64)]
+        client.send_multipart(msg)
+        frames = self.recv_multipart(server, copy=False)
+        recvd = list(map(lambda x: x.bytes, frames))
+
+        try:
+            if test_metadata and not PYPY:
+                for frame in frames:
+                    self.assertEqual(frame.get('User-Id'), 'anonymous')
+                    self.assertEqual(frame.get('Hello'), 'World')
+                    self.assertEqual(frame['Socket-Type'], 'DEALER')
+        except zmq.ZMQVersionError:
+            # frame metadata requires a newer libzmq; skip silently
+            pass
+
+        self.assertEqual(recvd, msg)
+        server.send_multipart(recvd)
+        msg2 = self.recv_multipart(client)
+        self.assertEqual(msg2, msg)
+
+    def test_null(self):
+        """test NULL (default) security"""
+        server = self.socket(zmq.DEALER)
+        client = self.socket(zmq.DEALER)
+        self.assertEqual(client.MECHANISM, zmq.NULL)
+        self.assertEqual(server.mechanism, zmq.NULL)
+        self.assertEqual(client.plain_server, 0)
+        self.assertEqual(server.plain_server, 0)
+        iface = 'tcp://127.0.0.1'
+        port = server.bind_to_random_port(iface)
+        client.connect("%s:%i" % (iface, port))
+        self.bounce(server, client, False)
+
+    def test_plain(self):
+        """test PLAIN authentication"""
+        server = self.socket(zmq.DEALER)
+        server.identity = b'IDENT'
+        client = self.socket(zmq.DEALER)
+        self.assertEqual(client.plain_username, b'')
+        self.assertEqual(client.plain_password, b'')
+        client.plain_username = USER
+        client.plain_password = PASS
+        self.assertEqual(client.getsockopt(zmq.PLAIN_USERNAME), USER)
+        self.assertEqual(client.getsockopt(zmq.PLAIN_PASSWORD), PASS)
+        self.assertEqual(client.plain_server, 0)
+        self.assertEqual(server.plain_server, 0)
+        server.plain_server = True
+        self.assertEqual(server.mechanism, zmq.PLAIN)
+        self.assertEqual(client.mechanism, zmq.PLAIN)
+
+        assert not client.plain_server
+        assert server.plain_server
+
+        self.start_zap()
+
+        iface = 'tcp://127.0.0.1'
+        port = server.bind_to_random_port(iface)
+        client.connect("%s:%i" % (iface, port))
+        self.bounce(server, client)
+        self.stop_zap()
+
+    def skip_plain_inauth(self):
+        """test PLAIN failed authentication"""
+        # NOTE: named skip_* so the runner does not collect it
+        server = self.socket(zmq.DEALER)
+        server.identity = b'IDENT'
+        client = self.socket(zmq.DEALER)
+        self.sockets.extend([server, client])
+        client.plain_username = USER
+        client.plain_password = b'incorrect'
+        server.plain_server = True
+        self.assertEqual(server.mechanism, zmq.PLAIN)
+        self.assertEqual(client.mechanism, zmq.PLAIN)
+
+        self.start_zap()
+
+        iface = 'tcp://127.0.0.1'
+        port = server.bind_to_random_port(iface)
+        client.connect("%s:%i" % (iface, port))
+        client.send(b'ping')
+        server.rcvtimeo = 250
+        self.assertRaisesErrno(zmq.EAGAIN, server.recv)
+        self.stop_zap()
+
+    def test_keypair(self):
+        """test curve_keypair"""
+        try:
+            public, secret = zmq.curve_keypair()
+        except zmq.ZMQError:
+            raise SkipTest("CURVE unsupported")
+
+        self.assertEqual(type(secret), bytes)
+        self.assertEqual(type(public), bytes)
+        self.assertEqual(len(secret), 40)
+        self.assertEqual(len(public), 40)
+
+        # verify that it is indeed Z85
+        # (fixed: names previously received the decoded keys swapped)
+        bpublic, bsecret = [ z85.decode(key) for key in (public, secret) ]
+        self.assertEqual(type(bsecret), bytes)
+        self.assertEqual(type(bpublic), bytes)
+        self.assertEqual(len(bsecret), 32)
+        self.assertEqual(len(bpublic), 32)
+
+
+    def test_curve(self):
+        """test CURVE encryption"""
+        server = self.socket(zmq.DEALER)
+        server.identity = b'IDENT'
+        client = self.socket(zmq.DEALER)
+        self.sockets.extend([server, client])
+        try:
+            server.curve_server = True
+        except zmq.ZMQError as e:
+            # will raise EINVAL if not linked against libsodium
+            if e.errno == zmq.EINVAL:
+                raise SkipTest("CURVE unsupported")
+
+        server_public, server_secret = zmq.curve_keypair()
+        client_public, client_secret = zmq.curve_keypair()
+
+        server.curve_secretkey = server_secret
+        server.curve_publickey = server_public
+        client.curve_serverkey = server_public
+        client.curve_publickey = client_public
+        client.curve_secretkey = client_secret
+
+        self.assertEqual(server.mechanism, zmq.CURVE)
+        self.assertEqual(client.mechanism, zmq.CURVE)
+
+        self.assertEqual(server.get(zmq.CURVE_SERVER), True)
+        self.assertEqual(client.get(zmq.CURVE_SERVER), False)
+
+        self.start_zap()
+
+        iface = 'tcp://127.0.0.1'
+        port = server.bind_to_random_port(iface)
+        client.connect("%s:%i" % (iface, port))
+        self.bounce(server, client)
+        self.stop_zap()
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_socket.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_socket.py
new file mode 100644
index 00000000..5c842edc
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_socket.py
@@ -0,0 +1,450 @@
+# -*- coding: utf8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import time
+import warnings
+
+import zmq
+from zmq.tests import (
+ BaseZMQTestCase, SkipTest, have_gevent, GreenTest, skip_pypy, skip_if
+)
+from zmq.utils.strtypes import bytes, unicode
+
+
+class TestSocket(BaseZMQTestCase):
+
+ def test_create(self):
+ ctx = self.Context()
+ s = ctx.socket(zmq.PUB)
+ # Superluminal protocol not yet implemented
+ self.assertRaisesErrno(zmq.EPROTONOSUPPORT, s.bind, 'ftl://a')
+ self.assertRaisesErrno(zmq.EPROTONOSUPPORT, s.connect, 'ftl://a')
+ self.assertRaisesErrno(zmq.EINVAL, s.bind, 'tcp://')
+ s.close()
+ del ctx
+
+ def test_context_manager(self):
+ url = 'inproc://a'
+ with self.Context() as ctx:
+ with ctx.socket(zmq.PUSH) as a:
+ a.bind(url)
+ with ctx.socket(zmq.PULL) as b:
+ b.connect(url)
+ msg = b'hi'
+ a.send(msg)
+ rcvd = self.recv(b)
+ self.assertEqual(rcvd, msg)
+ self.assertEqual(b.closed, True)
+ self.assertEqual(a.closed, True)
+ self.assertEqual(ctx.closed, True)
+
+ def test_dir(self):
+ ctx = self.Context()
+ s = ctx.socket(zmq.PUB)
+ self.assertTrue('send' in dir(s))
+ self.assertTrue('IDENTITY' in dir(s))
+ self.assertTrue('AFFINITY' in dir(s))
+ self.assertTrue('FD' in dir(s))
+ s.close()
+ ctx.term()
+
+ def test_bind_unicode(self):
+ s = self.socket(zmq.PUB)
+ p = s.bind_to_random_port(unicode("tcp://*"))
+
+ def test_connect_unicode(self):
+ s = self.socket(zmq.PUB)
+ s.connect(unicode("tcp://127.0.0.1:5555"))
+
+ def test_bind_to_random_port(self):
+ # Check that bind_to_random_port do not hide usefull exception
+ ctx = self.Context()
+ c = ctx.socket(zmq.PUB)
+ # Invalid format
+ try:
+ c.bind_to_random_port('tcp:*')
+ except zmq.ZMQError as e:
+ self.assertEqual(e.errno, zmq.EINVAL)
+ # Invalid protocol
+ try:
+ c.bind_to_random_port('rand://*')
+ except zmq.ZMQError as e:
+ self.assertEqual(e.errno, zmq.EPROTONOSUPPORT)
+
+ def test_identity(self):
+ s = self.context.socket(zmq.PULL)
+ self.sockets.append(s)
+ ident = b'identity\0\0'
+ s.identity = ident
+ self.assertEqual(s.get(zmq.IDENTITY), ident)
+
+ def test_unicode_sockopts(self):
+ """test setting/getting sockopts with unicode strings"""
+ topic = "tést"
+ if str is not unicode:
+ topic = topic.decode('utf8')
+ p,s = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ self.assertEqual(s.send_unicode, s.send_unicode)
+ self.assertEqual(p.recv_unicode, p.recv_unicode)
+ self.assertRaises(TypeError, s.setsockopt, zmq.SUBSCRIBE, topic)
+ self.assertRaises(TypeError, s.setsockopt, zmq.IDENTITY, topic)
+ s.setsockopt_unicode(zmq.IDENTITY, topic, 'utf16')
+ self.assertRaises(TypeError, s.setsockopt, zmq.AFFINITY, topic)
+ s.setsockopt_unicode(zmq.SUBSCRIBE, topic)
+ self.assertRaises(TypeError, s.getsockopt_unicode, zmq.AFFINITY)
+ self.assertRaisesErrno(zmq.EINVAL, s.getsockopt_unicode, zmq.SUBSCRIBE)
+
+ identb = s.getsockopt(zmq.IDENTITY)
+ identu = identb.decode('utf16')
+ identu2 = s.getsockopt_unicode(zmq.IDENTITY, 'utf16')
+ self.assertEqual(identu, identu2)
+ time.sleep(0.1) # wait for connection/subscription
+ p.send_unicode(topic,zmq.SNDMORE)
+ p.send_unicode(topic*2, encoding='latin-1')
+ self.assertEqual(topic, s.recv_unicode())
+ self.assertEqual(topic*2, s.recv_unicode(encoding='latin-1'))
+
+ def test_int_sockopts(self):
+ "test integer sockopts"
+ v = zmq.zmq_version_info()
+ if v < (3,0):
+ default_hwm = 0
+ else:
+ default_hwm = 1000
+ p,s = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ p.setsockopt(zmq.LINGER, 0)
+ self.assertEqual(p.getsockopt(zmq.LINGER), 0)
+ p.setsockopt(zmq.LINGER, -1)
+ self.assertEqual(p.getsockopt(zmq.LINGER), -1)
+ self.assertEqual(p.hwm, default_hwm)
+ p.hwm = 11
+ self.assertEqual(p.hwm, 11)
+ # p.setsockopt(zmq.EVENTS, zmq.POLLIN)
+ self.assertEqual(p.getsockopt(zmq.EVENTS), zmq.POLLOUT)
+ self.assertRaisesErrno(zmq.EINVAL, p.setsockopt,zmq.EVENTS, 2**7-1)
+ self.assertEqual(p.getsockopt(zmq.TYPE), p.socket_type)
+ self.assertEqual(p.getsockopt(zmq.TYPE), zmq.PUB)
+ self.assertEqual(s.getsockopt(zmq.TYPE), s.socket_type)
+ self.assertEqual(s.getsockopt(zmq.TYPE), zmq.SUB)
+
+ # check for overflow / wrong type:
+ errors = []
+ backref = {}
+ constants = zmq.constants
+ for name in constants.__all__:
+ value = getattr(constants, name)
+ if isinstance(value, int):
+ backref[value] = name
+ for opt in zmq.constants.int_sockopts.union(zmq.constants.int64_sockopts):
+ sopt = backref[opt]
+ if sopt.startswith((
+ 'ROUTER', 'XPUB', 'TCP', 'FAIL',
+ 'REQ_', 'CURVE_', 'PROBE_ROUTER',
+ 'IPC_FILTER', 'GSSAPI',
+ )):
+ # some sockopts are write-only
+ continue
+ try:
+ n = p.getsockopt(opt)
+ except zmq.ZMQError as e:
+ errors.append("getsockopt(zmq.%s) raised '%s'."%(sopt, e))
+ else:
+ if n > 2**31:
+ errors.append("getsockopt(zmq.%s) returned a ridiculous value."
+ " It is probably the wrong type."%sopt)
+ if errors:
+ self.fail('\n'.join([''] + errors))
+
+ def test_bad_sockopts(self):
+ """Test that appropriate errors are raised on bad socket options"""
+ s = self.context.socket(zmq.PUB)
+ self.sockets.append(s)
+ s.setsockopt(zmq.LINGER, 0)
+ # unrecognized int sockopts pass through to libzmq, and should raise EINVAL
+ self.assertRaisesErrno(zmq.EINVAL, s.setsockopt, 9999, 5)
+ self.assertRaisesErrno(zmq.EINVAL, s.getsockopt, 9999)
+ # but only int sockopts are allowed through this way, otherwise raise a TypeError
+ self.assertRaises(TypeError, s.setsockopt, 9999, b"5")
+ # some sockopts are valid in general, but not on every socket:
+ self.assertRaisesErrno(zmq.EINVAL, s.setsockopt, zmq.SUBSCRIBE, b'hi')
+
+ def test_sockopt_roundtrip(self):
+ "test set/getsockopt roundtrip."
+ p = self.context.socket(zmq.PUB)
+ self.sockets.append(p)
+ p.setsockopt(zmq.LINGER, 11)
+ self.assertEqual(p.getsockopt(zmq.LINGER), 11)
+
+ def test_send_unicode(self):
+ "test sending unicode objects"
+ a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ self.sockets.extend([a,b])
+ u = "çπ§"
+ if str is not unicode:
+ u = u.decode('utf8')
+ self.assertRaises(TypeError, a.send, u,copy=False)
+ self.assertRaises(TypeError, a.send, u,copy=True)
+ a.send_unicode(u)
+ s = b.recv()
+ self.assertEqual(s,u.encode('utf8'))
+ self.assertEqual(s.decode('utf8'),u)
+ a.send_unicode(u,encoding='utf16')
+ s = b.recv_unicode(encoding='utf16')
+ self.assertEqual(s,u)
+
+ @skip_pypy
+ def test_tracker(self):
+ "test the MessageTracker object for tracking when zmq is done with a buffer"
+ addr = 'tcp://127.0.0.1'
+ a = self.context.socket(zmq.PUB)
+ port = a.bind_to_random_port(addr)
+ a.close()
+ iface = "%s:%i"%(addr,port)
+ a = self.context.socket(zmq.PAIR)
+ # a.setsockopt(zmq.IDENTITY, b"a")
+ b = self.context.socket(zmq.PAIR)
+ self.sockets.extend([a,b])
+ a.connect(iface)
+ time.sleep(0.1)
+ p1 = a.send(b'something', copy=False, track=True)
+ self.assertTrue(isinstance(p1, zmq.MessageTracker))
+ self.assertFalse(p1.done)
+ p2 = a.send_multipart([b'something', b'else'], copy=False, track=True)
+ self.assert_(isinstance(p2, zmq.MessageTracker))
+ self.assertEqual(p2.done, False)
+ self.assertEqual(p1.done, False)
+
+ b.bind(iface)
+ msg = b.recv_multipart()
+ for i in range(10):
+ if p1.done:
+ break
+ time.sleep(0.1)
+ self.assertEqual(p1.done, True)
+ self.assertEqual(msg, [b'something'])
+ msg = b.recv_multipart()
+ for i in range(10):
+ if p2.done:
+ break
+ time.sleep(0.1)
+ self.assertEqual(p2.done, True)
+ self.assertEqual(msg, [b'something', b'else'])
+ m = zmq.Frame(b"again", track=True)
+ self.assertEqual(m.tracker.done, False)
+ p1 = a.send(m, copy=False)
+ p2 = a.send(m, copy=False)
+ self.assertEqual(m.tracker.done, False)
+ self.assertEqual(p1.done, False)
+ self.assertEqual(p2.done, False)
+ msg = b.recv_multipart()
+ self.assertEqual(m.tracker.done, False)
+ self.assertEqual(msg, [b'again'])
+ msg = b.recv_multipart()
+ self.assertEqual(m.tracker.done, False)
+ self.assertEqual(msg, [b'again'])
+ self.assertEqual(p1.done, False)
+ self.assertEqual(p2.done, False)
+ pm = m.tracker
+ del m
+ for i in range(10):
+ if p1.done:
+ break
+ time.sleep(0.1)
+ self.assertEqual(p1.done, True)
+ self.assertEqual(p2.done, True)
+ m = zmq.Frame(b'something', track=False)
+ self.assertRaises(ValueError, a.send, m, copy=False, track=True)
+
+
+ def test_close(self):
+ ctx = self.Context()
+ s = ctx.socket(zmq.PUB)
+ s.close()
+ self.assertRaisesErrno(zmq.ENOTSOCK, s.bind, b'')
+ self.assertRaisesErrno(zmq.ENOTSOCK, s.connect, b'')
+ self.assertRaisesErrno(zmq.ENOTSOCK, s.setsockopt, zmq.SUBSCRIBE, b'')
+ self.assertRaisesErrno(zmq.ENOTSOCK, s.send, b'asdf')
+ self.assertRaisesErrno(zmq.ENOTSOCK, s.recv)
+ del ctx
+
+ def test_attr(self):
+ """set setting/getting sockopts as attributes"""
+ s = self.context.socket(zmq.DEALER)
+ self.sockets.append(s)
+ linger = 10
+ s.linger = linger
+ self.assertEqual(linger, s.linger)
+ self.assertEqual(linger, s.getsockopt(zmq.LINGER))
+ self.assertEqual(s.fd, s.getsockopt(zmq.FD))
+
+ def test_bad_attr(self):
+ s = self.context.socket(zmq.DEALER)
+ self.sockets.append(s)
+ try:
+ s.apple='foo'
+ except AttributeError:
+ pass
+ else:
+ self.fail("bad setattr should have raised AttributeError")
+ try:
+ s.apple
+ except AttributeError:
+ pass
+ else:
+ self.fail("bad getattr should have raised AttributeError")
+
+ def test_subclass(self):
+ """subclasses can assign attributes"""
+ class S(zmq.Socket):
+ a = None
+ def __init__(self, *a, **kw):
+ self.a=-1
+ super(S, self).__init__(*a, **kw)
+
+ s = S(self.context, zmq.REP)
+ self.sockets.append(s)
+ self.assertEqual(s.a, -1)
+ s.a=1
+ self.assertEqual(s.a, 1)
+ a=s.a
+ self.assertEqual(a, 1)
+
+ def test_recv_multipart(self):
+ a,b = self.create_bound_pair()
+ msg = b'hi'
+ for i in range(3):
+ a.send(msg)
+ time.sleep(0.1)
+ for i in range(3):
+ self.assertEqual(b.recv_multipart(), [msg])
+
+ def test_close_after_destroy(self):
+ """s.close() after ctx.destroy() should be fine"""
+ ctx = self.Context()
+ s = ctx.socket(zmq.REP)
+ ctx.destroy()
+ # reaper is not instantaneous
+ time.sleep(1e-2)
+ s.close()
+ self.assertTrue(s.closed)
+
+ def test_poll(self):
+ a,b = self.create_bound_pair()
+ tic = time.time()
+ evt = a.poll(50)
+ self.assertEqual(evt, 0)
+ evt = a.poll(50, zmq.POLLOUT)
+ self.assertEqual(evt, zmq.POLLOUT)
+ msg = b'hi'
+ a.send(msg)
+ evt = b.poll(50)
+ self.assertEqual(evt, zmq.POLLIN)
+ msg2 = self.recv(b)
+ evt = b.poll(50)
+ self.assertEqual(evt, 0)
+ self.assertEqual(msg2, msg)
+
+ def test_ipc_path_max_length(self):
+ """IPC_PATH_MAX_LEN is a sensible value"""
+ if zmq.IPC_PATH_MAX_LEN == 0:
+ raise SkipTest("IPC_PATH_MAX_LEN undefined")
+
+ msg = "Surprising value for IPC_PATH_MAX_LEN: %s" % zmq.IPC_PATH_MAX_LEN
+ self.assertTrue(zmq.IPC_PATH_MAX_LEN > 30, msg)
+ self.assertTrue(zmq.IPC_PATH_MAX_LEN < 1025, msg)
+
+ def test_ipc_path_max_length_msg(self):
+ if zmq.IPC_PATH_MAX_LEN == 0:
+ raise SkipTest("IPC_PATH_MAX_LEN undefined")
+
+ s = self.context.socket(zmq.PUB)
+ self.sockets.append(s)
+ try:
+ s.bind('ipc://{0}'.format('a' * (zmq.IPC_PATH_MAX_LEN + 1)))
+ except zmq.ZMQError as e:
+ self.assertTrue(str(zmq.IPC_PATH_MAX_LEN) in e.strerror)
+
+ def test_hwm(self):
+ zmq3 = zmq.zmq_version_info()[0] >= 3
+ for stype in (zmq.PUB, zmq.ROUTER, zmq.SUB, zmq.REQ, zmq.DEALER):
+ s = self.context.socket(stype)
+ s.hwm = 100
+ self.assertEqual(s.hwm, 100)
+ if zmq3:
+ try:
+ self.assertEqual(s.sndhwm, 100)
+ except AttributeError:
+ pass
+ try:
+ self.assertEqual(s.rcvhwm, 100)
+ except AttributeError:
+ pass
+ s.close()
+
+ def test_shadow(self):
+ p = self.socket(zmq.PUSH)
+ p.bind("tcp://127.0.0.1:5555")
+ p2 = zmq.Socket.shadow(p.underlying)
+ self.assertEqual(p.underlying, p2.underlying)
+ s = self.socket(zmq.PULL)
+ s2 = zmq.Socket.shadow(s.underlying)
+ self.assertNotEqual(s.underlying, p.underlying)
+ self.assertEqual(s.underlying, s2.underlying)
+ s2.connect("tcp://127.0.0.1:5555")
+ sent = b'hi'
+ p2.send(sent)
+ rcvd = self.recv(s2)
+ self.assertEqual(rcvd, sent)
+
+ def test_shadow_pyczmq(self):
+ try:
+ from pyczmq import zctx, zsocket
+ except Exception:
+ raise SkipTest("Requires pyczmq")
+
+ ctx = zctx.new()
+ ca = zsocket.new(ctx, zmq.PUSH)
+ cb = zsocket.new(ctx, zmq.PULL)
+ a = zmq.Socket.shadow(ca)
+ b = zmq.Socket.shadow(cb)
+ a.bind("inproc://a")
+ b.connect("inproc://a")
+ a.send(b'hi')
+ rcvd = self.recv(b)
+ self.assertEqual(rcvd, b'hi')
+
+
+if have_gevent:
+ import gevent
+
+ class TestSocketGreen(GreenTest, TestSocket):
+ test_bad_attr = GreenTest.skip_green
+ test_close_after_destroy = GreenTest.skip_green
+
+ def test_timeout(self):
+ a,b = self.create_bound_pair()
+ g = gevent.spawn_later(0.5, lambda: a.send(b'hi'))
+ timeout = gevent.Timeout(0.1)
+ timeout.start()
+ self.assertRaises(gevent.Timeout, b.recv)
+ g.kill()
+
+ @skip_if(not hasattr(zmq, 'RCVTIMEO'))
+ def test_warn_set_timeo(self):
+ s = self.context.socket(zmq.REQ)
+ with warnings.catch_warnings(record=True) as w:
+ s.rcvtimeo = 5
+ s.close()
+ self.assertEqual(len(w), 1)
+ self.assertEqual(w[0].category, UserWarning)
+
+
+ @skip_if(not hasattr(zmq, 'SNDTIMEO'))
+ def test_warn_get_timeo(self):
+ s = self.context.socket(zmq.REQ)
+ with warnings.catch_warnings(record=True) as w:
+ s.sndtimeo
+ s.close()
+ self.assertEqual(len(w), 1)
+ self.assertEqual(w[0].category, UserWarning)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_stopwatch.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_stopwatch.py
new file mode 100644
index 00000000..49fb79f2
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_stopwatch.py
@@ -0,0 +1,42 @@
+# -*- coding: utf8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import sys
+import time
+
+from unittest import TestCase
+
+from zmq import Stopwatch, ZMQError
+
+if sys.version_info[0] >= 3:
+ long = int
+
+class TestStopWatch(TestCase):
+
+ def test_stop_long(self):
+ """Ensure stop returns a long int."""
+ watch = Stopwatch()
+ watch.start()
+ us = watch.stop()
+ self.assertTrue(isinstance(us, long))
+
+ def test_stop_microseconds(self):
+ """Test that stop/sleep have right units."""
+ watch = Stopwatch()
+ watch.start()
+ tic = time.time()
+ watch.sleep(1)
+ us = watch.stop()
+ toc = time.time()
+ self.assertAlmostEqual(us/1e6,(toc-tic),places=0)
+
+ def test_double_stop(self):
+ """Test error raised on multiple calls to stop."""
+ watch = Stopwatch()
+ watch.start()
+ watch.stop()
+ self.assertRaises(ZMQError, watch.stop)
+ self.assertRaises(ZMQError, watch.stop)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_version.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_version.py
new file mode 100644
index 00000000..6ebebf30
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_version.py
@@ -0,0 +1,44 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from unittest import TestCase
+import zmq
+from zmq.sugar import version
+
+
+class TestVersion(TestCase):
+
+ def test_pyzmq_version(self):
+ vs = zmq.pyzmq_version()
+ vs2 = zmq.__version__
+ self.assertTrue(isinstance(vs, str))
+ if zmq.__revision__:
+ self.assertEqual(vs, '@'.join(vs2, zmq.__revision__))
+ else:
+ self.assertEqual(vs, vs2)
+ if version.VERSION_EXTRA:
+ self.assertTrue(version.VERSION_EXTRA in vs)
+ self.assertTrue(version.VERSION_EXTRA in vs2)
+
+ def test_pyzmq_version_info(self):
+ info = zmq.pyzmq_version_info()
+ self.assertTrue(isinstance(info, tuple))
+ for n in info[:3]:
+ self.assertTrue(isinstance(n, int))
+ if version.VERSION_EXTRA:
+ self.assertEqual(len(info), 4)
+ self.assertEqual(info[-1], float('inf'))
+ else:
+ self.assertEqual(len(info), 3)
+
+ def test_zmq_version_info(self):
+ info = zmq.zmq_version_info()
+ self.assertTrue(isinstance(info, tuple))
+ for n in info[:3]:
+ self.assertTrue(isinstance(n, int))
+
+ def test_zmq_version(self):
+ v = zmq.zmq_version()
+ self.assertTrue(isinstance(v, str))
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_win32_shim.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_win32_shim.py
new file mode 100644
index 00000000..55657bda
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_win32_shim.py
@@ -0,0 +1,56 @@
+from __future__ import print_function
+
+import os
+
+from functools import wraps
+from zmq.tests import BaseZMQTestCase
+from zmq.utils.win32 import allow_interrupt
+
+
+def count_calls(f):
+ @wraps(f)
+ def _(*args, **kwds):
+ try:
+ return f(*args, **kwds)
+ finally:
+ _.__calls__ += 1
+ _.__calls__ = 0
+ return _
+
+
+class TestWindowsConsoleControlHandler(BaseZMQTestCase):
+
+ def test_handler(self):
+ @count_calls
+ def interrupt_polling():
+ print('Caught CTRL-C!')
+
+ if os.name == 'nt':
+ from ctypes import windll
+ from ctypes.wintypes import BOOL, DWORD
+
+ kernel32 = windll.LoadLibrary('kernel32')
+
+ # <http://msdn.microsoft.com/en-us/library/ms683155.aspx>
+ GenerateConsoleCtrlEvent = kernel32.GenerateConsoleCtrlEvent
+ GenerateConsoleCtrlEvent.argtypes = (DWORD, DWORD)
+ GenerateConsoleCtrlEvent.restype = BOOL
+
+ try:
+ # Simulate CTRL-C event while handler is active.
+ with allow_interrupt(interrupt_polling):
+ result = GenerateConsoleCtrlEvent(0, 0)
+ if result == 0:
+ raise WindowsError
+ except KeyboardInterrupt:
+ pass
+ else:
+ self.fail('Expecting `KeyboardInterrupt` exception!')
+
+ # Make sure our handler was called.
+ self.assertEqual(interrupt_polling.__calls__, 1)
+ else:
+ # On non-Windows systems, this utility is just a no-op!
+ with allow_interrupt(interrupt_polling):
+ pass
+ self.assertEqual(interrupt_polling.__calls__, 0)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_z85.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_z85.py
new file mode 100644
index 00000000..8a73cb4d
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_z85.py
@@ -0,0 +1,63 @@
+# -*- coding: utf8 -*-
+"""Test Z85 encoding
+
+confirm values and roundtrip with test values from the reference implementation.
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from unittest import TestCase
+from zmq.utils import z85
+
+
+class TestZ85(TestCase):
+
+ def test_client_public(self):
+ client_public = \
+ b"\xBB\x88\x47\x1D\x65\xE2\x65\x9B" \
+ b"\x30\xC5\x5A\x53\x21\xCE\xBB\x5A" \
+ b"\xAB\x2B\x70\xA3\x98\x64\x5C\x26" \
+ b"\xDC\xA2\xB2\xFC\xB4\x3F\xC5\x18"
+ encoded = z85.encode(client_public)
+
+ self.assertEqual(encoded, b"Yne@$w-vo<fVvi]a<NY6T1ed:M$fCG*[IaLV{hID")
+ decoded = z85.decode(encoded)
+ self.assertEqual(decoded, client_public)
+
+ def test_client_secret(self):
+ client_secret = \
+ b"\x7B\xB8\x64\xB4\x89\xAF\xA3\x67" \
+ b"\x1F\xBE\x69\x10\x1F\x94\xB3\x89" \
+ b"\x72\xF2\x48\x16\xDF\xB0\x1B\x51" \
+ b"\x65\x6B\x3F\xEC\x8D\xFD\x08\x88"
+ encoded = z85.encode(client_secret)
+
+ self.assertEqual(encoded, b"D:)Q[IlAW!ahhC2ac:9*A}h:p?([4%wOTJ%JR%cs")
+ decoded = z85.decode(encoded)
+ self.assertEqual(decoded, client_secret)
+
+ def test_server_public(self):
+ server_public = \
+ b"\x54\xFC\xBA\x24\xE9\x32\x49\x96" \
+ b"\x93\x16\xFB\x61\x7C\x87\x2B\xB0" \
+ b"\xC1\xD1\xFF\x14\x80\x04\x27\xC5" \
+ b"\x94\xCB\xFA\xCF\x1B\xC2\xD6\x52"
+ encoded = z85.encode(server_public)
+
+ self.assertEqual(encoded, b"rq:rM>}U?@Lns47E1%kR.o@n%FcmmsL/@{H8]yf7")
+ decoded = z85.decode(encoded)
+ self.assertEqual(decoded, server_public)
+
+ def test_server_secret(self):
+ server_secret = \
+ b"\x8E\x0B\xDD\x69\x76\x28\xB9\x1D" \
+ b"\x8F\x24\x55\x87\xEE\x95\xC5\xB0" \
+ b"\x4D\x48\x96\x3F\x79\x25\x98\x77" \
+ b"\xB4\x9C\xD9\x06\x3A\xEA\xD3\xB7"
+ encoded = z85.encode(server_secret)
+
+ self.assertEqual(encoded, b"JTKVSB%%)wK0E.X)V>+}o?pNmC{O&4W4b!Ni{Lh6")
+ decoded = z85.decode(encoded)
+ self.assertEqual(decoded, server_secret)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_zmqstream.py
new file mode 100644
index 00000000..cdb3a171
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/tests/test_zmqstream.py
@@ -0,0 +1,34 @@
+# -*- coding: utf8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import sys
+import time
+
+from unittest import TestCase
+
+import zmq
+from zmq.eventloop import ioloop, zmqstream
+
+class TestZMQStream(TestCase):
+
+ def setUp(self):
+ self.context = zmq.Context()
+ self.socket = self.context.socket(zmq.REP)
+ self.loop = ioloop.IOLoop.instance()
+ self.stream = zmqstream.ZMQStream(self.socket)
+
+ def tearDown(self):
+ self.socket.close()
+ self.context.term()
+
+ def test_callable_check(self):
+ """Ensure callable check works (py3k)."""
+
+ self.stream.on_send(lambda *args: None)
+ self.stream.on_recv(lambda *args: None)
+ self.assertRaises(AssertionError, self.stream.on_recv, 1)
+ self.assertRaises(AssertionError, self.stream.on_send, 1)
+ self.assertRaises(AssertionError, self.stream.on_recv, zmq)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/__init__.py
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/buffers.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/buffers.pxd
new file mode 100644
index 00000000..998aa551
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/buffers.pxd
@@ -0,0 +1,313 @@
+"""Python version-independent methods for C/Python buffers.
+
+This file was copied and adapted from mpi4py.
+
+Authors
+-------
+* MinRK
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2010 Lisandro Dalcin
+# All rights reserved.
+# Used under BSD License: http://www.opensource.org/licenses/bsd-license.php
+#
+# Retrieval:
+# Jul 23, 2010 18:00 PST (r539)
+# http://code.google.com/p/mpi4py/source/browse/trunk/src/MPI/asbuffer.pxi
+#
+# Modifications from original:
+# Copyright (c) 2010-2012 Brian Granger, Min Ragan-Kelley
+#
+# Distributed under the terms of the New BSD License. The full license is in
+# the file COPYING.BSD, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+
+#-----------------------------------------------------------------------------
+# Python includes.
+#-----------------------------------------------------------------------------
+
+# get version-independent aliases:
+cdef extern from "pyversion_compat.h":
+ pass
+
+# Python 3 buffer interface (PEP 3118)
+cdef extern from "Python.h":
+ int PY_MAJOR_VERSION
+ int PY_MINOR_VERSION
+ ctypedef int Py_ssize_t
+ ctypedef struct PyMemoryViewObject:
+ pass
+ ctypedef struct Py_buffer:
+ void *buf
+ Py_ssize_t len
+ int readonly
+ char *format
+ int ndim
+ Py_ssize_t *shape
+ Py_ssize_t *strides
+ Py_ssize_t *suboffsets
+ Py_ssize_t itemsize
+ void *internal
+ cdef enum:
+ PyBUF_SIMPLE
+ PyBUF_WRITABLE
+ PyBUF_FORMAT
+ PyBUF_ANY_CONTIGUOUS
+ int PyObject_CheckBuffer(object)
+ int PyObject_GetBuffer(object, Py_buffer *, int) except -1
+ void PyBuffer_Release(Py_buffer *)
+
+ int PyBuffer_FillInfo(Py_buffer *view, object obj, void *buf,
+ Py_ssize_t len, int readonly, int infoflags) except -1
+ object PyMemoryView_FromBuffer(Py_buffer *info)
+
+ object PyMemoryView_FromObject(object)
+
+# Python 2 buffer interface (legacy)
+cdef extern from "Python.h":
+ ctypedef void const_void "const void"
+ Py_ssize_t Py_END_OF_BUFFER
+ int PyObject_CheckReadBuffer(object)
+ int PyObject_AsReadBuffer (object, const_void **, Py_ssize_t *) except -1
+ int PyObject_AsWriteBuffer(object, void **, Py_ssize_t *) except -1
+
+ object PyBuffer_FromMemory(void *ptr, Py_ssize_t s)
+ object PyBuffer_FromReadWriteMemory(void *ptr, Py_ssize_t s)
+
+ object PyBuffer_FromObject(object, Py_ssize_t offset, Py_ssize_t size)
+ object PyBuffer_FromReadWriteObject(object, Py_ssize_t offset, Py_ssize_t size)
+
+
+#-----------------------------------------------------------------------------
+# asbuffer: C buffer from python object
+#-----------------------------------------------------------------------------
+
+
+cdef inline int memoryview_available():
+ return PY_MAJOR_VERSION >= 3 or (PY_MAJOR_VERSION >=2 and PY_MINOR_VERSION >= 7)
+
+cdef inline int oldstyle_available():
+ return PY_MAJOR_VERSION < 3
+
+
+cdef inline int check_buffer(object ob):
+ """Version independent check for whether an object is a buffer.
+
+ Parameters
+ ----------
+ object : object
+ Any Python object
+
+ Returns
+ -------
+ int : 0 if no buffer interface, 3 if newstyle buffer interface, 2 if oldstyle.
+ """
+ if PyObject_CheckBuffer(ob):
+ return 3
+ if oldstyle_available():
+ return PyObject_CheckReadBuffer(ob) and 2
+ return 0
+
+
+cdef inline object asbuffer(object ob, int writable, int format,
+ void **base, Py_ssize_t *size,
+ Py_ssize_t *itemsize):
+ """Turn an object into a C buffer in a Python version-independent way.
+
+ Parameters
+ ----------
+ ob : object
+ The object to be turned into a buffer.
+ Must provide a Python Buffer interface
+ writable : int
+ Whether the resulting buffer should be allowed to write
+ to the object.
+ format : int
+ The format of the buffer. See Python buffer docs.
+ base : void **
+ The pointer that will be used to store the resulting C buffer.
+ size : Py_ssize_t *
+ The size of the buffer(s).
+ itemsize : Py_ssize_t *
+ The size of an item, if the buffer is non-contiguous.
+
+ Returns
+ -------
+ An object describing the buffer format. Generally a str, such as 'B'.
+ """
+
+ cdef void *bptr = NULL
+ cdef Py_ssize_t blen = 0, bitemlen = 0
+ cdef Py_buffer view
+ cdef int flags = PyBUF_SIMPLE
+ cdef int mode = 0
+
+ bfmt = None
+
+ mode = check_buffer(ob)
+ if mode == 0:
+ raise TypeError("%r does not provide a buffer interface."%ob)
+
+ if mode == 3:
+ flags = PyBUF_ANY_CONTIGUOUS
+ if writable:
+ flags |= PyBUF_WRITABLE
+ if format:
+ flags |= PyBUF_FORMAT
+ PyObject_GetBuffer(ob, &view, flags)
+ bptr = view.buf
+ blen = view.len
+ if format:
+ if view.format != NULL:
+ bfmt = view.format
+ bitemlen = view.itemsize
+ PyBuffer_Release(&view)
+ else: # oldstyle
+ if writable:
+ PyObject_AsWriteBuffer(ob, &bptr, &blen)
+ else:
+ PyObject_AsReadBuffer(ob, <const_void **>&bptr, &blen)
+ if format:
+ try: # numpy.ndarray
+ dtype = ob.dtype
+ bfmt = dtype.char
+ bitemlen = dtype.itemsize
+ except AttributeError:
+ try: # array.array
+ bfmt = ob.typecode
+ bitemlen = ob.itemsize
+ except AttributeError:
+ if isinstance(ob, bytes):
+ bfmt = b"B"
+ bitemlen = 1
+ else:
+ # nothing found
+ bfmt = None
+ bitemlen = 0
+ if base: base[0] = <void *>bptr
+ if size: size[0] = <Py_ssize_t>blen
+ if itemsize: itemsize[0] = <Py_ssize_t>bitemlen
+
+ if PY_MAJOR_VERSION >= 3 and bfmt is not None:
+ return bfmt.decode('ascii')
+ return bfmt
+
+
+cdef inline object asbuffer_r(object ob, void **base, Py_ssize_t *size):
+ """Wrapper for standard calls to asbuffer with a readonly buffer."""
+ asbuffer(ob, 0, 0, base, size, NULL)
+ return ob
+
+
+cdef inline object asbuffer_w(object ob, void **base, Py_ssize_t *size):
+ """Wrapper for standard calls to asbuffer with a writable buffer."""
+ asbuffer(ob, 1, 0, base, size, NULL)
+ return ob
+
+#------------------------------------------------------------------------------
+# frombuffer: python buffer/view from C buffer
+#------------------------------------------------------------------------------
+
+
+cdef inline object frombuffer_3(void *ptr, Py_ssize_t s, int readonly):
+ """Python 3 version of frombuffer.
+
+ This is the Python 3 model, but will work on Python >= 2.6. Currently,
+ we use it only on >= 3.0.
+ """
+ cdef Py_buffer pybuf
+ cdef Py_ssize_t *shape = [s]
+ cdef str astr=""
+ PyBuffer_FillInfo(&pybuf, astr, ptr, s, readonly, PyBUF_SIMPLE)
+ pybuf.format = "B"
+ pybuf.shape = shape
+ return PyMemoryView_FromBuffer(&pybuf)
+
+
+cdef inline object frombuffer_2(void *ptr, Py_ssize_t s, int readonly):
+ """Python 2 version of frombuffer.
+
+ This must be used for Python <= 2.6, but we use it for all Python < 3.
+ """
+
+ if oldstyle_available():
+ if readonly:
+ return PyBuffer_FromMemory(ptr, s)
+ else:
+ return PyBuffer_FromReadWriteMemory(ptr, s)
+ else:
+ raise NotImplementedError("Old style buffers not available.")
+
+
+cdef inline object frombuffer(void *ptr, Py_ssize_t s, int readonly):
+ """Create a Python Buffer/View of a C array.
+
+ Parameters
+ ----------
+ ptr : void *
+ Pointer to the array to be copied.
+ s : size_t
+ Length of the buffer.
+ readonly : int
+ whether the resulting object should be allowed to write to the buffer.
+
+ Returns
+ -------
+ Python Buffer/View of the C buffer.
+ """
+ # oldstyle first priority for now
+ if oldstyle_available():
+ return frombuffer_2(ptr, s, readonly)
+ else:
+ return frombuffer_3(ptr, s, readonly)
+
+
+cdef inline object frombuffer_r(void *ptr, Py_ssize_t s):
+ """Wrapper for readonly view frombuffer."""
+ return frombuffer(ptr, s, 1)
+
+
+cdef inline object frombuffer_w(void *ptr, Py_ssize_t s):
+ """Wrapper for writable view frombuffer."""
+ return frombuffer(ptr, s, 0)
+
+#------------------------------------------------------------------------------
+# viewfromobject: python buffer/view from python object, refcounts intact
+# frombuffer(asbuffer(obj)) would lose track of refs
+#------------------------------------------------------------------------------
+
+cdef inline object viewfromobject(object obj, int readonly):
+ """Construct a Python Buffer/View object from another Python object.
+
+ This work in a Python version independent manner.
+
+ Parameters
+ ----------
+ obj : object
+ The input object to be cast as a buffer
+ readonly : int
+ Whether the result should be prevented from overwriting the original.
+
+ Returns
+ -------
+ Buffer/View of the original object.
+ """
+ if not memoryview_available():
+ if readonly:
+ return PyBuffer_FromObject(obj, 0, Py_END_OF_BUFFER)
+ else:
+ return PyBuffer_FromReadWriteObject(obj, 0, Py_END_OF_BUFFER)
+ else:
+ return PyMemoryView_FromObject(obj)
+
+
+cdef inline object viewfromobject_r(object obj):
+ """Wrapper for readonly viewfromobject."""
+ return viewfromobject(obj, 1)
+
+
+cdef inline object viewfromobject_w(object obj):
+ """Wrapper for writable viewfromobject."""
+ return viewfromobject(obj, 0)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/compiler.json b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/compiler.json
new file mode 100644
index 00000000..d95952e3
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/compiler.json
@@ -0,0 +1,19 @@
+{
+ "runtime_library_dirs": [
+ "$ORIGIN/.."
+ ],
+ "library_dirs": [
+ "zmq"
+ ],
+ "include_dirs": [
+ "/tmp/zmq/zmq-bin/include",
+ "zmq/utils",
+ "zmq/backend/cython",
+ "zmq/devices"
+ ],
+ "extra_link_args": [],
+ "libraries": [
+ "zmq"
+ ],
+ "define_macros": []
+} \ No newline at end of file
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/config.json b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/config.json
new file mode 100644
index 00000000..544b389d
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/config.json
@@ -0,0 +1,13 @@
+{
+ "zmq_prefix": "/tmp/zmq/zmq-bin",
+ "build_ext": {
+ "library_dirs": "/tmp/zmq/zmq-bin/lib"
+ },
+ "skip_check_zmq": false,
+ "no_libzmq_extension": true,
+ "have_sys_un_h": false,
+ "bdist_egg": {
+ "plat-name": "linux-i686"
+ },
+ "libzmq_extension": false
+} \ No newline at end of file
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/constant_names.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/constant_names.py
new file mode 100644
index 00000000..47da9dc2
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/constant_names.py
@@ -0,0 +1,365 @@
+"""0MQ Constant names"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+# dictionaries of constants new or removed in particular versions
+
+new_in = {
+ (2,2,0) : [
+ 'RCVTIMEO',
+ 'SNDTIMEO',
+ ],
+ (3,2,2) : [
+ # errnos
+ 'EMSGSIZE',
+ 'EAFNOSUPPORT',
+ 'ENETUNREACH',
+ 'ECONNABORTED',
+ 'ECONNRESET',
+ 'ENOTCONN',
+ 'ETIMEDOUT',
+ 'EHOSTUNREACH',
+ 'ENETRESET',
+
+ # ctx opts
+ 'IO_THREADS',
+ 'MAX_SOCKETS',
+ 'IO_THREADS_DFLT',
+ 'MAX_SOCKETS_DFLT',
+
+ # socket opts
+ 'ROUTER_BEHAVIOR',
+ 'ROUTER_MANDATORY',
+ 'FAIL_UNROUTABLE',
+ 'TCP_KEEPALIVE',
+ 'TCP_KEEPALIVE_CNT',
+ 'TCP_KEEPALIVE_IDLE',
+ 'TCP_KEEPALIVE_INTVL',
+ 'DELAY_ATTACH_ON_CONNECT',
+ 'XPUB_VERBOSE',
+
+ # msg opts
+ 'MORE',
+
+ 'EVENT_CONNECTED',
+ 'EVENT_CONNECT_DELAYED',
+ 'EVENT_CONNECT_RETRIED',
+ 'EVENT_LISTENING',
+ 'EVENT_BIND_FAILED',
+ 'EVENT_ACCEPTED',
+ 'EVENT_ACCEPT_FAILED',
+ 'EVENT_CLOSED',
+ 'EVENT_CLOSE_FAILED',
+ 'EVENT_DISCONNECTED',
+ 'EVENT_ALL',
+ ],
+ (4,0,0) : [
+ # socket types
+ 'STREAM',
+
+ # socket opts
+ 'IMMEDIATE',
+ 'ROUTER_RAW',
+ 'IPV6',
+ 'MECHANISM',
+ 'PLAIN_SERVER',
+ 'PLAIN_USERNAME',
+ 'PLAIN_PASSWORD',
+ 'CURVE_SERVER',
+ 'CURVE_PUBLICKEY',
+ 'CURVE_SECRETKEY',
+ 'CURVE_SERVERKEY',
+ 'PROBE_ROUTER',
+ 'REQ_RELAXED',
+ 'REQ_CORRELATE',
+ 'CONFLATE',
+ 'ZAP_DOMAIN',
+
+ # security
+ 'NULL',
+ 'PLAIN',
+ 'CURVE',
+
+ # events
+ 'EVENT_MONITOR_STOPPED',
+ ],
+ (4,1,0) : [
+ # ctx opts
+ 'SOCKET_LIMIT',
+ 'THREAD_PRIORITY',
+ 'THREAD_PRIORITY_DFLT',
+ 'THREAD_SCHED_POLICY',
+ 'THREAD_SCHED_POLICY_DFLT',
+
+ # socket opts
+ 'ROUTER_HANDOVER',
+ 'TOS',
+ 'IPC_FILTER_PID',
+ 'IPC_FILTER_UID',
+ 'IPC_FILTER_GID',
+ 'CONNECT_RID',
+ 'GSSAPI_SERVER',
+ 'GSSAPI_PRINCIPAL',
+ 'GSSAPI_SERVICE_PRINCIPAL',
+ 'GSSAPI_PLAINTEXT',
+ 'HANDSHAKE_IVL',
+ 'IDENTITY_FD',
+ 'XPUB_NODROP',
+ 'SOCKS_PROXY',
+
+ # msg opts
+ 'SRCFD',
+ 'SHARED',
+
+ # security
+ 'GSSAPI',
+
+ ],
+}
+
+
+removed_in = {
+ (3,2,2) : [
+ 'UPSTREAM',
+ 'DOWNSTREAM',
+
+ 'HWM',
+ 'SWAP',
+ 'MCAST_LOOP',
+ 'RECOVERY_IVL_MSEC',
+ ]
+}
+
+# collections of zmq constant names based on their role
+# base names have no specific use
+# opt names are validated in get/set methods of various objects
+
+base_names = [
+ # base
+ 'VERSION',
+ 'VERSION_MAJOR',
+ 'VERSION_MINOR',
+ 'VERSION_PATCH',
+ 'NOBLOCK',
+ 'DONTWAIT',
+
+ 'POLLIN',
+ 'POLLOUT',
+ 'POLLERR',
+
+ 'SNDMORE',
+
+ 'STREAMER',
+ 'FORWARDER',
+ 'QUEUE',
+
+ 'IO_THREADS_DFLT',
+ 'MAX_SOCKETS_DFLT',
+ 'POLLITEMS_DFLT',
+ 'THREAD_PRIORITY_DFLT',
+ 'THREAD_SCHED_POLICY_DFLT',
+
+ # socktypes
+ 'PAIR',
+ 'PUB',
+ 'SUB',
+ 'REQ',
+ 'REP',
+ 'DEALER',
+ 'ROUTER',
+ 'XREQ',
+ 'XREP',
+ 'PULL',
+ 'PUSH',
+ 'XPUB',
+ 'XSUB',
+ 'UPSTREAM',
+ 'DOWNSTREAM',
+ 'STREAM',
+
+ # events
+ 'EVENT_CONNECTED',
+ 'EVENT_CONNECT_DELAYED',
+ 'EVENT_CONNECT_RETRIED',
+ 'EVENT_LISTENING',
+ 'EVENT_BIND_FAILED',
+ 'EVENT_ACCEPTED',
+ 'EVENT_ACCEPT_FAILED',
+ 'EVENT_CLOSED',
+ 'EVENT_CLOSE_FAILED',
+ 'EVENT_DISCONNECTED',
+ 'EVENT_ALL',
+ 'EVENT_MONITOR_STOPPED',
+
+ # security
+ 'NULL',
+ 'PLAIN',
+ 'CURVE',
+ 'GSSAPI',
+
+ ## ERRNO
+ # Often used (these are alse in errno.)
+ 'EAGAIN',
+ 'EINVAL',
+ 'EFAULT',
+ 'ENOMEM',
+ 'ENODEV',
+ 'EMSGSIZE',
+ 'EAFNOSUPPORT',
+ 'ENETUNREACH',
+ 'ECONNABORTED',
+ 'ECONNRESET',
+ 'ENOTCONN',
+ 'ETIMEDOUT',
+ 'EHOSTUNREACH',
+ 'ENETRESET',
+
+ # For Windows compatability
+ 'HAUSNUMERO',
+ 'ENOTSUP',
+ 'EPROTONOSUPPORT',
+ 'ENOBUFS',
+ 'ENETDOWN',
+ 'EADDRINUSE',
+ 'EADDRNOTAVAIL',
+ 'ECONNREFUSED',
+ 'EINPROGRESS',
+ 'ENOTSOCK',
+
+ # 0MQ Native
+ 'EFSM',
+ 'ENOCOMPATPROTO',
+ 'ETERM',
+ 'EMTHREAD',
+]
+
+int64_sockopt_names = [
+ 'AFFINITY',
+ 'MAXMSGSIZE',
+
+ # sockopts removed in 3.0.0
+ 'HWM',
+ 'SWAP',
+ 'MCAST_LOOP',
+ 'RECOVERY_IVL_MSEC',
+]
+
+bytes_sockopt_names = [
+ 'IDENTITY',
+ 'SUBSCRIBE',
+ 'UNSUBSCRIBE',
+ 'LAST_ENDPOINT',
+ 'TCP_ACCEPT_FILTER',
+
+ 'PLAIN_USERNAME',
+ 'PLAIN_PASSWORD',
+
+ 'CURVE_PUBLICKEY',
+ 'CURVE_SECRETKEY',
+ 'CURVE_SERVERKEY',
+ 'ZAP_DOMAIN',
+ 'CONNECT_RID',
+ 'GSSAPI_PRINCIPAL',
+ 'GSSAPI_SERVICE_PRINCIPAL',
+ 'SOCKS_PROXY',
+]
+
+fd_sockopt_names = [
+ 'FD',
+ 'IDENTITY_FD',
+]
+
+int_sockopt_names = [
+ # sockopts
+ 'RECONNECT_IVL_MAX',
+
+ # sockopts new in 2.2.0
+ 'SNDTIMEO',
+ 'RCVTIMEO',
+
+ # new in 3.x
+ 'SNDHWM',
+ 'RCVHWM',
+ 'MULTICAST_HOPS',
+ 'IPV4ONLY',
+
+ 'ROUTER_BEHAVIOR',
+ 'TCP_KEEPALIVE',
+ 'TCP_KEEPALIVE_CNT',
+ 'TCP_KEEPALIVE_IDLE',
+ 'TCP_KEEPALIVE_INTVL',
+ 'DELAY_ATTACH_ON_CONNECT',
+ 'XPUB_VERBOSE',
+
+ 'EVENTS',
+ 'TYPE',
+ 'LINGER',
+ 'RECONNECT_IVL',
+ 'BACKLOG',
+
+ 'ROUTER_MANDATORY',
+ 'FAIL_UNROUTABLE',
+
+ 'ROUTER_RAW',
+ 'IMMEDIATE',
+ 'IPV6',
+ 'MECHANISM',
+ 'PLAIN_SERVER',
+ 'CURVE_SERVER',
+ 'PROBE_ROUTER',
+ 'REQ_RELAXED',
+ 'REQ_CORRELATE',
+ 'CONFLATE',
+ 'ROUTER_HANDOVER',
+ 'TOS',
+ 'IPC_FILTER_PID',
+ 'IPC_FILTER_UID',
+ 'IPC_FILTER_GID',
+ 'GSSAPI_SERVER',
+ 'GSSAPI_PLAINTEXT',
+ 'HANDSHAKE_IVL',
+ 'XPUB_NODROP',
+]
+
+switched_sockopt_names = [
+ 'RATE',
+ 'RECOVERY_IVL',
+ 'SNDBUF',
+ 'RCVBUF',
+ 'RCVMORE',
+]
+
+ctx_opt_names = [
+ 'IO_THREADS',
+ 'MAX_SOCKETS',
+ 'SOCKET_LIMIT',
+ 'THREAD_PRIORITY',
+ 'THREAD_SCHED_POLICY',
+]
+
+msg_opt_names = [
+ 'MORE',
+ 'SRCFD',
+ 'SHARED',
+]
+
+from itertools import chain
+
+all_names = list(chain(
+ base_names,
+ ctx_opt_names,
+ bytes_sockopt_names,
+ fd_sockopt_names,
+ int_sockopt_names,
+ int64_sockopt_names,
+ switched_sockopt_names,
+ msg_opt_names,
+))
+
+del chain
+
+def no_prefix(name):
+ """does the given constant have a ZMQ_ prefix?"""
+ return name.startswith('E') and not name.startswith('EVENT')
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/garbage.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/garbage.py
new file mode 100644
index 00000000..80a8725a
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/garbage.py
@@ -0,0 +1,180 @@
+"""Garbage collection thread for representing zmq refcount of Python objects
+used in zero-copy sends.
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import atexit
+import struct
+
+from os import getpid
+from collections import namedtuple
+from threading import Thread, Event, Lock
+import warnings
+
+import zmq
+
+
+gcref = namedtuple('gcref', ['obj', 'event'])
+
+class GarbageCollectorThread(Thread):
+ """Thread in which garbage collection actually happens."""
+ def __init__(self, gc):
+ super(GarbageCollectorThread, self).__init__()
+ self.gc = gc
+ self.daemon = True
+ self.pid = getpid()
+ self.ready = Event()
+
+ def run(self):
+ # detect fork at begining of the thread
+ if getpid is None or getpid() != self.pid:
+ self.ready.set()
+ return
+ try:
+ s = self.gc.context.socket(zmq.PULL)
+ s.linger = 0
+ s.bind(self.gc.url)
+ finally:
+ self.ready.set()
+
+ while True:
+ # detect fork
+ if getpid is None or getpid() != self.pid:
+ return
+ msg = s.recv()
+ if msg == b'DIE':
+ break
+ fmt = 'L' if len(msg) == 4 else 'Q'
+ key = struct.unpack(fmt, msg)[0]
+ tup = self.gc.refs.pop(key, None)
+ if tup and tup.event:
+ tup.event.set()
+ del tup
+ s.close()
+
+
+class GarbageCollector(object):
+ """PyZMQ Garbage Collector
+
+ Used for representing the reference held by libzmq during zero-copy sends.
+ This object holds a dictionary, keyed by Python id,
+ of the Python objects whose memory are currently in use by zeromq.
+
+ When zeromq is done with the memory, it sends a message on an inproc PUSH socket
+ containing the packed size_t (32 or 64-bit unsigned int),
+ which is the key in the dict.
+ When the PULL socket in the gc thread receives that message,
+ the reference is popped from the dict,
+ and any tracker events that should be signaled fire.
+ """
+
+ refs = None
+ _context = None
+ _lock = None
+ url = "inproc://pyzmq.gc.01"
+
+ def __init__(self, context=None):
+ super(GarbageCollector, self).__init__()
+ self.refs = {}
+ self.pid = None
+ self.thread = None
+ self._context = context
+ self._lock = Lock()
+ self._stay_down = False
+ atexit.register(self._atexit)
+
+ @property
+ def context(self):
+ if self._context is None:
+ self._context = zmq.Context()
+ return self._context
+
+ @context.setter
+ def context(self, ctx):
+ if self.is_alive():
+ if self.refs:
+ warnings.warn("Replacing gc context while gc is running", RuntimeWarning)
+ self.stop()
+ self._context = ctx
+
+ def _atexit(self):
+ """atexit callback
+
+ sets _stay_down flag so that gc doesn't try to start up again in other atexit handlers
+ """
+ self._stay_down = True
+ self.stop()
+
+ def stop(self):
+ """stop the garbage-collection thread"""
+ if not self.is_alive():
+ return
+ self._stop()
+
+ def _stop(self):
+ push = self.context.socket(zmq.PUSH)
+ push.connect(self.url)
+ push.send(b'DIE')
+ push.close()
+ self.thread.join()
+ self.context.term()
+ self.refs.clear()
+ self.context = None
+
+ def start(self):
+ """Start a new garbage collection thread.
+
+ Creates a new zmq Context used for garbage collection.
+ Under most circumstances, this will only be called once per process.
+ """
+ if self.thread is not None and self.pid != getpid():
+ # It's re-starting, must free earlier thread's context
+ # since a fork probably broke it
+ self._stop()
+ self.pid = getpid()
+ self.refs = {}
+ self.thread = GarbageCollectorThread(self)
+ self.thread.start()
+ self.thread.ready.wait()
+
+ def is_alive(self):
+ """Is the garbage collection thread currently running?
+
+ Includes checks for process shutdown or fork.
+ """
+ if (getpid is None or
+ getpid() != self.pid or
+ self.thread is None or
+ not self.thread.is_alive()
+ ):
+ return False
+ return True
+
+ def store(self, obj, event=None):
+ """store an object and (optionally) event for zero-copy"""
+ if not self.is_alive():
+ if self._stay_down:
+ return 0
+ # safely start the gc thread
+ # use lock and double check,
+ # so we don't start multiple threads
+ with self._lock:
+ if not self.is_alive():
+ self.start()
+ tup = gcref(obj, event)
+ theid = id(tup)
+ self.refs[theid] = tup
+ return theid
+
+ def __del__(self):
+ if not self.is_alive():
+ return
+ try:
+ self.stop()
+ except Exception as e:
+ raise (e)
+
+gc = GarbageCollector()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/getpid_compat.h b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/getpid_compat.h
new file mode 100644
index 00000000..47ce90fa
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/getpid_compat.h
@@ -0,0 +1,6 @@
+#ifdef _WIN32
+ #include <process.h>
+ #define getpid _getpid
+#else
+ #include <unistd.h>
+#endif
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/interop.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/interop.py
new file mode 100644
index 00000000..26c01969
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/interop.py
@@ -0,0 +1,33 @@
+"""Utils for interoperability with other libraries.
+
+Just CFFI pointer casting for now.
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+try:
+ long
+except NameError:
+ long = int # Python 3
+
+
+def cast_int_addr(n):
+ """Cast an address to a Python int
+
+ This could be a Python integer or a CFFI pointer
+ """
+ if isinstance(n, (int, long)):
+ return n
+ try:
+ import cffi
+ except ImportError:
+ pass
+ else:
+ # from pyzmq, this is an FFI void *
+ ffi = cffi.FFI()
+ if isinstance(n, ffi.CData):
+ return int(ffi.cast("size_t", n))
+
+ raise ValueError("Cannot cast %r to int" % n)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/ipcmaxlen.h b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/ipcmaxlen.h
new file mode 100644
index 00000000..7218db78
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/ipcmaxlen.h
@@ -0,0 +1,21 @@
+/*
+
+Platform-independant detection of IPC path max length
+
+Copyright (c) 2012 Godefroid Chapelle
+
+Distributed under the terms of the New BSD License. The full license is in
+the file COPYING.BSD, distributed as part of this software.
+ */
+
+#if defined(HAVE_SYS_UN_H)
+#include "sys/un.h"
+int get_ipc_path_max_len(void) {
+ struct sockaddr_un *dummy;
+ return sizeof(dummy->sun_path) - 1;
+}
+#else
+int get_ipc_path_max_len(void) {
+ return 0;
+}
+#endif
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/jsonapi.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/jsonapi.py
new file mode 100644
index 00000000..865ca6d5
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/jsonapi.py
@@ -0,0 +1,59 @@
+"""Priority based json library imports.
+
+Always serializes to bytes instead of unicode for zeromq compatibility
+on Python 2 and 3.
+
+Use ``jsonapi.loads()`` and ``jsonapi.dumps()`` for guaranteed symmetry.
+
+Priority: ``simplejson`` > ``jsonlib2`` > stdlib ``json``
+
+``jsonapi.loads/dumps`` provide kwarg-compatibility with stdlib json.
+
+``jsonapi.jsonmod`` will be the module of the actual underlying implementation.
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from zmq.utils.strtypes import bytes, unicode
+
+jsonmod = None
+
+priority = ['simplejson', 'jsonlib2', 'json']
+for mod in priority:
+ try:
+ jsonmod = __import__(mod)
+ except ImportError:
+ pass
+ else:
+ break
+
+def dumps(o, **kwargs):
+ """Serialize object to JSON bytes (utf-8).
+
+ See jsonapi.jsonmod.dumps for details on kwargs.
+ """
+
+ if 'separators' not in kwargs:
+ kwargs['separators'] = (',', ':')
+
+ s = jsonmod.dumps(o, **kwargs)
+
+ if isinstance(s, unicode):
+ s = s.encode('utf8')
+
+ return s
+
+def loads(s, **kwargs):
+ """Load object from JSON bytes (utf-8).
+
+ See jsonapi.jsonmod.loads for details on kwargs.
+ """
+
+ if str is unicode and isinstance(s, bytes):
+ s = s.decode('utf8')
+
+ return jsonmod.loads(s, **kwargs)
+
+__all__ = ['jsonmod', 'dumps', 'loads']
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/monitor.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/monitor.py
new file mode 100644
index 00000000..734d54b1
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/monitor.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+"""Module holding utility and convenience functions for zmq event monitoring."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import struct
+import zmq
+from zmq.error import _check_version
+
+def parse_monitor_message(msg):
+ """decode zmq_monitor event messages.
+
+ Parameters
+ ----------
+ msg : list(bytes)
+ zmq multipart message that has arrived on a monitor PAIR socket.
+
+ First frame is::
+
+ 16 bit event id
+ 32 bit event value
+ no padding
+
+ Second frame is the endpoint as a bytestring
+
+ Returns
+ -------
+ event : dict
+ event description as dict with the keys `event`, `value`, and `endpoint`.
+ """
+
+ if len(msg) != 2 or len(msg[0]) != 6:
+ raise RuntimeError("Invalid event message format: %s" % msg)
+ event = {}
+ event['event'], event['value'] = struct.unpack("=hi", msg[0])
+ event['endpoint'] = msg[1]
+ return event
+
+def recv_monitor_message(socket, flags=0):
+ """Receive and decode the given raw message from the monitoring socket and return a dict.
+
+ Requires libzmq ≥ 4.0
+
+ The returned dict will have the following entries:
+ event : int, the event id as described in libzmq.zmq_socket_monitor
+ value : int, the event value associated with the event, see libzmq.zmq_socket_monitor
+ endpoint : string, the affected endpoint
+
+ Parameters
+ ----------
+ socket : zmq PAIR socket
+ The PAIR socket (created by other.get_monitor_socket()) on which to recv the message
+ flags : bitfield (int)
+ standard zmq recv flags
+
+ Returns
+ -------
+ event : dict
+ event description as dict with the keys `event`, `value`, and `endpoint`.
+ """
+ _check_version((4,0), 'libzmq event API')
+ # will always return a list
+ msg = socket.recv_multipart(flags)
+ # 4.0-style event API
+ return parse_monitor_message(msg)
+
+__all__ = ['parse_monitor_message', 'recv_monitor_message']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/pyversion_compat.h b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/pyversion_compat.h
new file mode 100644
index 00000000..fac09046
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/pyversion_compat.h
@@ -0,0 +1,25 @@
+#include "Python.h"
+
+#if PY_VERSION_HEX < 0x02070000
+ #define PyMemoryView_FromBuffer(info) (PyErr_SetString(PyExc_NotImplementedError, \
+ "new buffer interface is not available"), (PyObject *)NULL)
+ #define PyMemoryView_FromObject(object) (PyErr_SetString(PyExc_NotImplementedError, \
+ "new buffer interface is not available"), (PyObject *)NULL)
+#endif
+
+#if PY_VERSION_HEX >= 0x03000000
+ // for buffers
+ #define Py_END_OF_BUFFER ((Py_ssize_t) 0)
+
+ #define PyObject_CheckReadBuffer(object) (0)
+
+ #define PyBuffer_FromMemory(ptr, s) (PyErr_SetString(PyExc_NotImplementedError, \
+ "old buffer interface is not available"), (PyObject *)NULL)
+ #define PyBuffer_FromReadWriteMemory(ptr, s) (PyErr_SetString(PyExc_NotImplementedError, \
+ "old buffer interface is not available"), (PyObject *)NULL)
+ #define PyBuffer_FromObject(object, offset, size) (PyErr_SetString(PyExc_NotImplementedError, \
+ "old buffer interface is not available"), (PyObject *)NULL)
+ #define PyBuffer_FromReadWriteObject(object, offset, size) (PyErr_SetString(PyExc_NotImplementedError, \
+ "old buffer interface is not available"), (PyObject *)NULL)
+
+#endif
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/sixcerpt.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/sixcerpt.py
new file mode 100644
index 00000000..5492fd59
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/sixcerpt.py
@@ -0,0 +1,52 @@
+"""Excerpts of six.py"""
+
+# Copyright (C) 2010-2014 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import sys
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+
+ def reraise(tp, value, tb=None):
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+
+ exec_("""def reraise(tp, value, tb=None):
+ raise tp, value, tb
+""")
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/strtypes.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/strtypes.py
new file mode 100644
index 00000000..548410dc
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/strtypes.py
@@ -0,0 +1,45 @@
+"""Declare basic string types unambiguously for various Python versions.
+
+Authors
+-------
+* MinRK
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+
+if sys.version_info[0] >= 3:
+ bytes = bytes
+ unicode = str
+ basestring = (bytes, unicode)
+else:
+ unicode = unicode
+ bytes = str
+ basestring = basestring
+
+def cast_bytes(s, encoding='utf8', errors='strict'):
+ """cast unicode or bytes to bytes"""
+ if isinstance(s, bytes):
+ return s
+ elif isinstance(s, unicode):
+ return s.encode(encoding, errors)
+ else:
+ raise TypeError("Expected unicode or bytes, got %r" % s)
+
+def cast_unicode(s, encoding='utf8', errors='strict'):
+ """cast bytes or unicode to unicode"""
+ if isinstance(s, bytes):
+ return s.decode(encoding, errors)
+ elif isinstance(s, unicode):
+ return s
+ else:
+ raise TypeError("Expected unicode or bytes, got %r" % s)
+
+# give short 'b' alias for cast_bytes, so that we can use fake b('stuff')
+# to simulate b'stuff'
+b = asbytes = cast_bytes
+u = cast_unicode
+
+__all__ = ['asbytes', 'bytes', 'unicode', 'basestring', 'b', 'u', 'cast_bytes', 'cast_unicode']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/win32.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/win32.py
new file mode 100644
index 00000000..ea758299
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/win32.py
@@ -0,0 +1,132 @@
+"""Win32 compatibility utilities."""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+#-----------------------------------------------------------------------------
+
+import os
+
+# No-op implementation for other platforms.
+class _allow_interrupt(object):
+ """Utility for fixing CTRL-C events on Windows.
+
+ On Windows, the Python interpreter intercepts CTRL-C events in order to
+ translate them into ``KeyboardInterrupt`` exceptions. It (presumably)
+ does this by setting a flag in its "control control handler" and
+ checking it later at a convenient location in the interpreter.
+
+ However, when the Python interpreter is blocked waiting for the ZMQ
+ poll operation to complete, it must wait for ZMQ's ``select()``
+ operation to complete before translating the CTRL-C event into the
+ ``KeyboardInterrupt`` exception.
+
+ The only way to fix this seems to be to add our own "console control
+ handler" and perform some application-defined operation that will
+ unblock the ZMQ polling operation in order to force ZMQ to pass control
+ back to the Python interpreter.
+
+ This context manager performs all that Windows-y stuff, providing you
+ with a hook that is called when a CTRL-C event is intercepted. This
+ hook allows you to unblock your ZMQ poll operation immediately, which
+ will then result in the expected ``KeyboardInterrupt`` exception.
+
+ Without this context manager, your ZMQ-based application will not
+ respond normally to CTRL-C events on Windows. If a CTRL-C event occurs
+ while blocked on ZMQ socket polling, the translation to a
+ ``KeyboardInterrupt`` exception will be delayed until the I/O completes
+ and control returns to the Python interpreter (this may never happen if
+ you use an infinite timeout).
+
+ A no-op implementation is provided on non-Win32 systems to avoid the
+ application from having to conditionally use it.
+
+ Example usage:
+
+ .. sourcecode:: python
+
+ def stop_my_application():
+ # ...
+
+ with allow_interrupt(stop_my_application):
+ # main polling loop.
+
+ In a typical ZMQ application, you would use the "self pipe trick" to
+ send message to a ``PAIR`` socket in order to interrupt your blocking
+ socket polling operation.
+
+ In a Tornado event loop, you can use the ``IOLoop.stop`` method to
+ unblock your I/O loop.
+ """
+
+ def __init__(self, action=None):
+ """Translate ``action`` into a CTRL-C handler.
+
+ ``action`` is a callable that takes no arguments and returns no
+ value (returned value is ignored). It must *NEVER* raise an
+ exception.
+
+ If unspecified, a no-op will be used.
+ """
+ self._init_action(action)
+
+ def _init_action(self, action):
+ pass
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ return
+
+if os.name == 'nt':
+ from ctypes import WINFUNCTYPE, windll
+ from ctypes.wintypes import BOOL, DWORD
+
+ kernel32 = windll.LoadLibrary('kernel32')
+
+ # <http://msdn.microsoft.com/en-us/library/ms686016.aspx>
+ PHANDLER_ROUTINE = WINFUNCTYPE(BOOL, DWORD)
+ SetConsoleCtrlHandler = kernel32.SetConsoleCtrlHandler
+ SetConsoleCtrlHandler.argtypes = (PHANDLER_ROUTINE, BOOL)
+ SetConsoleCtrlHandler.restype = BOOL
+
+ class allow_interrupt(_allow_interrupt):
+ __doc__ = _allow_interrupt.__doc__
+
+ def _init_action(self, action):
+ if action is None:
+ action = lambda: None
+ self.action = action
+ @PHANDLER_ROUTINE
+ def handle(event):
+ if event == 0: # CTRL_C_EVENT
+ action()
+ # Typical C implementations would return 1 to indicate that
+ # the event was processed and other control handlers in the
+ # stack should not be executed. However, that would
+ # prevent the Python interpreter's handler from translating
+ # CTRL-C to a `KeyboardInterrupt` exception, so we pretend
+ # that we didn't handle it.
+ return 0
+ self.handle = handle
+
+ def __enter__(self):
+ """Install the custom CTRL-C handler."""
+ result = SetConsoleCtrlHandler(self.handle, 1)
+ if result == 0:
+ # Have standard library automatically call `GetLastError()` and
+ # `FormatMessage()` into a nice exception object :-)
+ raise WindowsError()
+
+ def __exit__(self, *args):
+ """Remove the custom CTRL-C handler."""
+ result = SetConsoleCtrlHandler(self.handle, 0)
+ if result == 0:
+ # Have standard library automatically call `GetLastError()` and
+ # `FormatMessage()` into a nice exception object :-)
+ raise WindowsError()
+else:
+ class allow_interrupt(_allow_interrupt):
+ __doc__ = _allow_interrupt.__doc__
+ pass
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/z85.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/z85.py
new file mode 100644
index 00000000..1bb1784e
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/z85.py
@@ -0,0 +1,56 @@
+"""Python implementation of Z85 85-bit encoding
+
+Z85 encoding is a plaintext encoding for a bytestring interpreted as 32bit integers.
+Since the chunks are 32bit, a bytestring must be a multiple of 4 bytes.
+See ZMQ RFC 32 for details.
+
+
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+import struct
+
+PY3 = sys.version_info[0] >= 3
+# Z85CHARS is the base 85 symbol table
+Z85CHARS = b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.-:+=^!/*?&<>()[]{}@%$#"
+# Z85MAP maps integers in [0,84] to the appropriate character in Z85CHARS
+Z85MAP = dict([(c, idx) for idx, c in enumerate(Z85CHARS)])
+
+_85s = [ 85**i for i in range(5) ][::-1]
+
+def encode(rawbytes):
+ """encode raw bytes into Z85"""
+ # Accepts only byte arrays bounded to 4 bytes
+ if len(rawbytes) % 4:
+ raise ValueError("length must be multiple of 4, not %i" % len(rawbytes))
+
+ nvalues = len(rawbytes) / 4
+
+ values = struct.unpack('>%dI' % nvalues, rawbytes)
+ encoded = []
+ for v in values:
+ for offset in _85s:
+ encoded.append(Z85CHARS[(v // offset) % 85])
+
+ # In Python 3, encoded is a list of integers (obviously?!)
+ if PY3:
+ return bytes(encoded)
+ else:
+ return b''.join(encoded)
+
+def decode(z85bytes):
+ """decode Z85 bytes to raw bytes"""
+ if len(z85bytes) % 5:
+ raise ValueError("Z85 length must be multiple of 5, not %i" % len(z85bytes))
+
+ nvalues = len(z85bytes) / 5
+ values = []
+ for i in range(0, len(z85bytes), 5):
+ value = 0
+ for j, offset in enumerate(_85s):
+ value += Z85MAP[z85bytes[i+j]] * offset
+ values.append(value)
+ return struct.pack('>%dI' % nvalues, *values)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/zmq_compat.h b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/zmq_compat.h
new file mode 100644
index 00000000..81c57b69
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/zmq_compat.h
@@ -0,0 +1,80 @@
+//-----------------------------------------------------------------------------
+// Copyright (c) 2010 Brian Granger, Min Ragan-Kelley
+//
+// Distributed under the terms of the New BSD License. The full license is in
+// the file COPYING.BSD, distributed as part of this software.
+//-----------------------------------------------------------------------------
+
+#if defined(_MSC_VER)
+#define pyzmq_int64_t __int64
+#else
+#include <stdint.h>
+#define pyzmq_int64_t int64_t
+#endif
+
+
+#include "zmq.h"
+// version compatibility for constants:
+#include "zmq_constants.h"
+
+#define _missing (-1)
+
+
+// define fd type (from libzmq's fd.hpp)
+#ifdef _WIN32
+ #ifdef _MSC_VER && _MSC_VER <= 1400
+ #define ZMQ_FD_T UINT_PTR
+ #else
+ #define ZMQ_FD_T SOCKET
+ #endif
+#else
+ #define ZMQ_FD_T int
+#endif
+
+// use unambiguous aliases for zmq_send/recv functions
+
+#if ZMQ_VERSION_MAJOR >= 4
+// nothing to remove
+#else
+ #define zmq_curve_keypair(z85_public_key, z85_secret_key) _missing
+#endif
+
+#if ZMQ_VERSION_MAJOR >= 4 && ZMQ_VERSION_MINOR >= 1
+// nothing to remove
+#else
+ #define zmq_msg_gets(msg, prop) _missing
+ #define zmq_has(capability) _missing
+#endif
+
+#if ZMQ_VERSION_MAJOR >= 3
+ #define zmq_sendbuf zmq_send
+ #define zmq_recvbuf zmq_recv
+
+ // 3.x deprecations - these symbols haven't been removed,
+ // but let's protect against their planned removal
+ #define zmq_device(device_type, isocket, osocket) _missing
+ #define zmq_init(io_threads) ((void*)NULL)
+ #define zmq_term zmq_ctx_destroy
+#else
+ #define zmq_ctx_set(ctx, opt, val) _missing
+ #define zmq_ctx_get(ctx, opt) _missing
+ #define zmq_ctx_destroy zmq_term
+ #define zmq_ctx_new() ((void*)NULL)
+
+ #define zmq_proxy(a,b,c) _missing
+
+ #define zmq_disconnect(s, addr) _missing
+ #define zmq_unbind(s, addr) _missing
+
+ #define zmq_msg_more(msg) _missing
+ #define zmq_msg_get(msg, opt) _missing
+ #define zmq_msg_set(msg, opt, val) _missing
+ #define zmq_msg_send(msg, s, flags) zmq_send(s, msg, flags)
+ #define zmq_msg_recv(msg, s, flags) zmq_recv(s, msg, flags)
+
+ #define zmq_sendbuf(s, buf, len, flags) _missing
+ #define zmq_recvbuf(s, buf, len, flags) _missing
+
+ #define zmq_socket_monitor(s, addr, flags) _missing
+
+#endif
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/zmq_constants.h b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/zmq_constants.h
new file mode 100644
index 00000000..97683022
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/utils/zmq_constants.h
@@ -0,0 +1,622 @@
+#ifndef _PYZMQ_CONSTANT_DEFS
+#define _PYZMQ_CONSTANT_DEFS
+
+#define _PYZMQ_UNDEFINED (-9999)
+#ifndef ZMQ_VERSION
+ #define ZMQ_VERSION (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_VERSION_MAJOR
+ #define ZMQ_VERSION_MAJOR (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_VERSION_MINOR
+ #define ZMQ_VERSION_MINOR (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_VERSION_PATCH
+ #define ZMQ_VERSION_PATCH (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_NOBLOCK
+ #define ZMQ_NOBLOCK (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_DONTWAIT
+ #define ZMQ_DONTWAIT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_POLLIN
+ #define ZMQ_POLLIN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_POLLOUT
+ #define ZMQ_POLLOUT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_POLLERR
+ #define ZMQ_POLLERR (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SNDMORE
+ #define ZMQ_SNDMORE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_STREAMER
+ #define ZMQ_STREAMER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_FORWARDER
+ #define ZMQ_FORWARDER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_QUEUE
+ #define ZMQ_QUEUE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IO_THREADS_DFLT
+ #define ZMQ_IO_THREADS_DFLT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MAX_SOCKETS_DFLT
+ #define ZMQ_MAX_SOCKETS_DFLT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_POLLITEMS_DFLT
+ #define ZMQ_POLLITEMS_DFLT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_THREAD_PRIORITY_DFLT
+ #define ZMQ_THREAD_PRIORITY_DFLT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_THREAD_SCHED_POLICY_DFLT
+ #define ZMQ_THREAD_SCHED_POLICY_DFLT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PAIR
+ #define ZMQ_PAIR (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PUB
+ #define ZMQ_PUB (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SUB
+ #define ZMQ_SUB (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_REQ
+ #define ZMQ_REQ (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_REP
+ #define ZMQ_REP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_DEALER
+ #define ZMQ_DEALER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ROUTER
+ #define ZMQ_ROUTER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XREQ
+ #define ZMQ_XREQ (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XREP
+ #define ZMQ_XREP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PULL
+ #define ZMQ_PULL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PUSH
+ #define ZMQ_PUSH (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XPUB
+ #define ZMQ_XPUB (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XSUB
+ #define ZMQ_XSUB (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_UPSTREAM
+ #define ZMQ_UPSTREAM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_DOWNSTREAM
+ #define ZMQ_DOWNSTREAM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_STREAM
+ #define ZMQ_STREAM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_CONNECTED
+ #define ZMQ_EVENT_CONNECTED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_CONNECT_DELAYED
+ #define ZMQ_EVENT_CONNECT_DELAYED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_CONNECT_RETRIED
+ #define ZMQ_EVENT_CONNECT_RETRIED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_LISTENING
+ #define ZMQ_EVENT_LISTENING (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_BIND_FAILED
+ #define ZMQ_EVENT_BIND_FAILED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_ACCEPTED
+ #define ZMQ_EVENT_ACCEPTED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_ACCEPT_FAILED
+ #define ZMQ_EVENT_ACCEPT_FAILED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_CLOSED
+ #define ZMQ_EVENT_CLOSED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_CLOSE_FAILED
+ #define ZMQ_EVENT_CLOSE_FAILED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_DISCONNECTED
+ #define ZMQ_EVENT_DISCONNECTED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_ALL
+ #define ZMQ_EVENT_ALL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_MONITOR_STOPPED
+ #define ZMQ_EVENT_MONITOR_STOPPED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_NULL
+ #define ZMQ_NULL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PLAIN
+ #define ZMQ_PLAIN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CURVE
+ #define ZMQ_CURVE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_GSSAPI
+ #define ZMQ_GSSAPI (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EAGAIN
+ #define EAGAIN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EINVAL
+ #define EINVAL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EFAULT
+ #define EFAULT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOMEM
+ #define ENOMEM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENODEV
+ #define ENODEV (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EMSGSIZE
+ #define EMSGSIZE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EAFNOSUPPORT
+ #define EAFNOSUPPORT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENETUNREACH
+ #define ENETUNREACH (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ECONNABORTED
+ #define ECONNABORTED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ECONNRESET
+ #define ECONNRESET (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOTCONN
+ #define ENOTCONN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ETIMEDOUT
+ #define ETIMEDOUT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EHOSTUNREACH
+ #define EHOSTUNREACH (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENETRESET
+ #define ENETRESET (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_HAUSNUMERO
+ #define ZMQ_HAUSNUMERO (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOTSUP
+ #define ENOTSUP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EPROTONOSUPPORT
+ #define EPROTONOSUPPORT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOBUFS
+ #define ENOBUFS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENETDOWN
+ #define ENETDOWN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EADDRINUSE
+ #define EADDRINUSE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EADDRNOTAVAIL
+ #define EADDRNOTAVAIL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ECONNREFUSED
+ #define ECONNREFUSED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EINPROGRESS
+ #define EINPROGRESS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOTSOCK
+ #define ENOTSOCK (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EFSM
+ #define EFSM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOCOMPATPROTO
+ #define ENOCOMPATPROTO (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ETERM
+ #define ETERM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EMTHREAD
+ #define EMTHREAD (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IO_THREADS
+ #define ZMQ_IO_THREADS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MAX_SOCKETS
+ #define ZMQ_MAX_SOCKETS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SOCKET_LIMIT
+ #define ZMQ_SOCKET_LIMIT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_THREAD_PRIORITY
+ #define ZMQ_THREAD_PRIORITY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_THREAD_SCHED_POLICY
+ #define ZMQ_THREAD_SCHED_POLICY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IDENTITY
+ #define ZMQ_IDENTITY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SUBSCRIBE
+ #define ZMQ_SUBSCRIBE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_UNSUBSCRIBE
+ #define ZMQ_UNSUBSCRIBE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_LAST_ENDPOINT
+ #define ZMQ_LAST_ENDPOINT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TCP_ACCEPT_FILTER
+ #define ZMQ_TCP_ACCEPT_FILTER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PLAIN_USERNAME
+ #define ZMQ_PLAIN_USERNAME (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PLAIN_PASSWORD
+ #define ZMQ_PLAIN_PASSWORD (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CURVE_PUBLICKEY
+ #define ZMQ_CURVE_PUBLICKEY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CURVE_SECRETKEY
+ #define ZMQ_CURVE_SECRETKEY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CURVE_SERVERKEY
+ #define ZMQ_CURVE_SERVERKEY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ZAP_DOMAIN
+ #define ZMQ_ZAP_DOMAIN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CONNECT_RID
+ #define ZMQ_CONNECT_RID (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_GSSAPI_PRINCIPAL
+ #define ZMQ_GSSAPI_PRINCIPAL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_GSSAPI_SERVICE_PRINCIPAL
+ #define ZMQ_GSSAPI_SERVICE_PRINCIPAL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SOCKS_PROXY
+ #define ZMQ_SOCKS_PROXY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_FD
+ #define ZMQ_FD (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IDENTITY_FD
+ #define ZMQ_IDENTITY_FD (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RECONNECT_IVL_MAX
+ #define ZMQ_RECONNECT_IVL_MAX (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SNDTIMEO
+ #define ZMQ_SNDTIMEO (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RCVTIMEO
+ #define ZMQ_RCVTIMEO (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SNDHWM
+ #define ZMQ_SNDHWM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RCVHWM
+ #define ZMQ_RCVHWM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MULTICAST_HOPS
+ #define ZMQ_MULTICAST_HOPS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IPV4ONLY
+ #define ZMQ_IPV4ONLY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ROUTER_BEHAVIOR
+ #define ZMQ_ROUTER_BEHAVIOR (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TCP_KEEPALIVE
+ #define ZMQ_TCP_KEEPALIVE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TCP_KEEPALIVE_CNT
+ #define ZMQ_TCP_KEEPALIVE_CNT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TCP_KEEPALIVE_IDLE
+ #define ZMQ_TCP_KEEPALIVE_IDLE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TCP_KEEPALIVE_INTVL
+ #define ZMQ_TCP_KEEPALIVE_INTVL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_DELAY_ATTACH_ON_CONNECT
+ #define ZMQ_DELAY_ATTACH_ON_CONNECT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XPUB_VERBOSE
+ #define ZMQ_XPUB_VERBOSE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENTS
+ #define ZMQ_EVENTS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TYPE
+ #define ZMQ_TYPE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_LINGER
+ #define ZMQ_LINGER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RECONNECT_IVL
+ #define ZMQ_RECONNECT_IVL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_BACKLOG
+ #define ZMQ_BACKLOG (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ROUTER_MANDATORY
+ #define ZMQ_ROUTER_MANDATORY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_FAIL_UNROUTABLE
+ #define ZMQ_FAIL_UNROUTABLE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ROUTER_RAW
+ #define ZMQ_ROUTER_RAW (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IMMEDIATE
+ #define ZMQ_IMMEDIATE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IPV6
+ #define ZMQ_IPV6 (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MECHANISM
+ #define ZMQ_MECHANISM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PLAIN_SERVER
+ #define ZMQ_PLAIN_SERVER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CURVE_SERVER
+ #define ZMQ_CURVE_SERVER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PROBE_ROUTER
+ #define ZMQ_PROBE_ROUTER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_REQ_RELAXED
+ #define ZMQ_REQ_RELAXED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_REQ_CORRELATE
+ #define ZMQ_REQ_CORRELATE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CONFLATE
+ #define ZMQ_CONFLATE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ROUTER_HANDOVER
+ #define ZMQ_ROUTER_HANDOVER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TOS
+ #define ZMQ_TOS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IPC_FILTER_PID
+ #define ZMQ_IPC_FILTER_PID (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IPC_FILTER_UID
+ #define ZMQ_IPC_FILTER_UID (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IPC_FILTER_GID
+ #define ZMQ_IPC_FILTER_GID (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_GSSAPI_SERVER
+ #define ZMQ_GSSAPI_SERVER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_GSSAPI_PLAINTEXT
+ #define ZMQ_GSSAPI_PLAINTEXT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_HANDSHAKE_IVL
+ #define ZMQ_HANDSHAKE_IVL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XPUB_NODROP
+ #define ZMQ_XPUB_NODROP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_AFFINITY
+ #define ZMQ_AFFINITY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MAXMSGSIZE
+ #define ZMQ_MAXMSGSIZE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_HWM
+ #define ZMQ_HWM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SWAP
+ #define ZMQ_SWAP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MCAST_LOOP
+ #define ZMQ_MCAST_LOOP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RECOVERY_IVL_MSEC
+ #define ZMQ_RECOVERY_IVL_MSEC (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RATE
+ #define ZMQ_RATE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RECOVERY_IVL
+ #define ZMQ_RECOVERY_IVL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SNDBUF
+ #define ZMQ_SNDBUF (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RCVBUF
+ #define ZMQ_RCVBUF (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RCVMORE
+ #define ZMQ_RCVMORE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MORE
+ #define ZMQ_MORE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SRCFD
+ #define ZMQ_SRCFD (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SHARED
+ #define ZMQ_SHARED (_PYZMQ_UNDEFINED)
+#endif
+
+
+#endif // ifndef _PYZMQ_CONSTANT_DEFS
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/__init__.py
new file mode 100644
index 00000000..3408b3ba
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/__init__.py
@@ -0,0 +1,64 @@
+"""Python bindings for 0MQ."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import os
+import sys
+import glob
+
+# load bundled libzmq, if there is one:
+
+here = os.path.dirname(__file__)
+
+bundled = []
+bundled_sodium = []
+for ext in ('pyd', 'so', 'dll', 'dylib'):
+ bundled_sodium.extend(glob.glob(os.path.join(here, 'libsodium*.%s*' % ext)))
+ bundled.extend(glob.glob(os.path.join(here, 'libzmq*.%s*' % ext)))
+
+if bundled:
+ import ctypes
+ if bundled_sodium:
+ if bundled[0].endswith('.pyd'):
+ # a Windows Extension
+ _libsodium = ctypes.cdll.LoadLibrary(bundled_sodium[0])
+ else:
+ _libsodium = ctypes.CDLL(bundled_sodium[0], mode=ctypes.RTLD_GLOBAL)
+ if bundled[0].endswith('.pyd'):
+ # a Windows Extension
+ _libzmq = ctypes.cdll.LoadLibrary(bundled[0])
+ else:
+ _libzmq = ctypes.CDLL(bundled[0], mode=ctypes.RTLD_GLOBAL)
+ del ctypes
+else:
+ import zipimport
+ try:
+ if isinstance(__loader__, zipimport.zipimporter):
+ # a zipped pyzmq egg
+ from zmq import libzmq as _libzmq
+ except (NameError, ImportError):
+ pass
+ finally:
+ del zipimport
+
+del os, sys, glob, here, bundled, bundled_sodium, ext
+
+# zmq top-level imports
+
+from zmq import backend
+from zmq.backend import *
+from zmq import sugar
+from zmq.sugar import *
+from zmq import devices
+
+def get_includes():
+ """Return a list of directories to include for linking against pyzmq with cython."""
+ from os.path import join, dirname, abspath, pardir
+ base = dirname(__file__)
+ parent = abspath(join(base, pardir))
+ return [ parent ] + [ join(parent, base, subdir) for subdir in ('utils',) ]
+
+
+__all__ = ['get_includes'] + sugar.__all__ + backend.__all__
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/__init__.py
new file mode 100644
index 00000000..11d3ad6b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/__init__.py
@@ -0,0 +1,10 @@
+"""Utilities for ZAP authentication.
+
+To run authentication in a background thread, see :mod:`zmq.auth.thread`.
+For integration with the tornado eventloop, see :mod:`zmq.auth.ioloop`.
+
+.. versionadded:: 14.1
+"""
+
+from .base import *
+from .certs import *
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/base.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/base.py
new file mode 100644
index 00000000..9b4aaed7
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/base.py
@@ -0,0 +1,272 @@
+"""Base implementation of 0MQ authentication."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import logging
+
+import zmq
+from zmq.utils import z85
+from zmq.utils.strtypes import bytes, unicode, b, u
+from zmq.error import _check_version
+
+from .certs import load_certificates
+
+
+CURVE_ALLOW_ANY = '*'
+VERSION = b'1.0'
+
+class Authenticator(object):
+ """Implementation of ZAP authentication for zmq connections.
+
+ Note:
+ - libzmq provides four levels of security: default NULL (which the Authenticator does
+ not see), and authenticated NULL, PLAIN, and CURVE, which the Authenticator can see.
+ - until you add policies, all incoming NULL connections are allowed
+ (classic ZeroMQ behavior), and all PLAIN and CURVE connections are denied.
+ """
+
+ def __init__(self, context=None, encoding='utf-8', log=None):
+ _check_version((4,0), "security")
+ self.context = context or zmq.Context.instance()
+ self.encoding = encoding
+ self.allow_any = False
+ self.zap_socket = None
+ self.whitelist = set()
+ self.blacklist = set()
+ # passwords is a dict keyed by domain and contains values
+ # of dicts with username:password pairs.
+ self.passwords = {}
+ # certs is dict keyed by domain and contains values
+ # of dicts keyed by the public keys from the specified location.
+ self.certs = {}
+ self.log = log or logging.getLogger('zmq.auth')
+
+ def start(self):
+ """Create and bind the ZAP socket"""
+ self.zap_socket = self.context.socket(zmq.REP)
+ self.zap_socket.linger = 1
+ self.zap_socket.bind("inproc://zeromq.zap.01")
+
+ def stop(self):
+ """Close the ZAP socket"""
+ if self.zap_socket:
+ self.zap_socket.close()
+ self.zap_socket = None
+
+ def allow(self, *addresses):
+ """Allow (whitelist) IP address(es).
+
+ Connections from addresses not in the whitelist will be rejected.
+
+ - For NULL, all clients from this address will be accepted.
+ - For PLAIN and CURVE, they will be allowed to continue with authentication.
+
+ whitelist is mutually exclusive with blacklist.
+ """
+ if self.blacklist:
+ raise ValueError("Only use a whitelist or a blacklist, not both")
+ self.whitelist.update(addresses)
+
+ def deny(self, *addresses):
+ """Deny (blacklist) IP address(es).
+
+ Addresses not in the blacklist will be allowed to continue with authentication.
+
+ Blacklist is mutually exclusive with whitelist.
+ """
+ if self.whitelist:
+ raise ValueError("Only use a whitelist or a blacklist, not both")
+ self.blacklist.update(addresses)
+
+ def configure_plain(self, domain='*', passwords=None):
+ """Configure PLAIN authentication for a given domain.
+
+ PLAIN authentication uses a plain-text password file.
+ To cover all domains, use "*".
+ You can modify the password file at any time; it is reloaded automatically.
+ """
+ if passwords:
+ self.passwords[domain] = passwords
+
+ def configure_curve(self, domain='*', location=None):
+ """Configure CURVE authentication for a given domain.
+
+ CURVE authentication uses a directory that holds all public client certificates,
+ i.e. their public keys.
+
+ To cover all domains, use "*".
+
+ You can add and remove certificates in that directory at any time.
+
+ To allow all client keys without checking, specify CURVE_ALLOW_ANY for the location.
+ """
+ # If location is CURVE_ALLOW_ANY then allow all clients. Otherwise
+ # treat location as a directory that holds the certificates.
+ if location == CURVE_ALLOW_ANY:
+ self.allow_any = True
+ else:
+ self.allow_any = False
+ try:
+ self.certs[domain] = load_certificates(location)
+ except Exception as e:
+ self.log.error("Failed to load CURVE certs from %s: %s", location, e)
+
+ def handle_zap_message(self, msg):
+ """Perform ZAP authentication"""
+ if len(msg) < 6:
+ self.log.error("Invalid ZAP message, not enough frames: %r", msg)
+ if len(msg) < 2:
+ self.log.error("Not enough information to reply")
+ else:
+ self._send_zap_reply(msg[1], b"400", b"Not enough frames")
+ return
+
+ version, request_id, domain, address, identity, mechanism = msg[:6]
+ credentials = msg[6:]
+
+ domain = u(domain, self.encoding, 'replace')
+ address = u(address, self.encoding, 'replace')
+
+ if (version != VERSION):
+ self.log.error("Invalid ZAP version: %r", msg)
+ self._send_zap_reply(request_id, b"400", b"Invalid version")
+ return
+
+ self.log.debug("version: %r, request_id: %r, domain: %r,"
+ " address: %r, identity: %r, mechanism: %r",
+ version, request_id, domain,
+ address, identity, mechanism,
+ )
+
+
+        # Is the address explicitly whitelisted or blacklisted?
+ allowed = False
+ denied = False
+ reason = b"NO ACCESS"
+
+ if self.whitelist:
+ if address in self.whitelist:
+ allowed = True
+ self.log.debug("PASSED (whitelist) address=%s", address)
+ else:
+ denied = True
+ reason = b"Address not in whitelist"
+ self.log.debug("DENIED (not in whitelist) address=%s", address)
+
+ elif self.blacklist:
+ if address in self.blacklist:
+ denied = True
+ reason = b"Address is blacklisted"
+ self.log.debug("DENIED (blacklist) address=%s", address)
+ else:
+ allowed = True
+ self.log.debug("PASSED (not in blacklist) address=%s", address)
+
+ # Perform authentication mechanism-specific checks if necessary
+ username = u("user")
+ if not denied:
+
+ if mechanism == b'NULL' and not allowed:
+ # For NULL, we allow if the address wasn't blacklisted
+ self.log.debug("ALLOWED (NULL)")
+ allowed = True
+
+ elif mechanism == b'PLAIN':
+ # For PLAIN, even a whitelisted address must authenticate
+ if len(credentials) != 2:
+ self.log.error("Invalid PLAIN credentials: %r", credentials)
+ self._send_zap_reply(request_id, b"400", b"Invalid credentials")
+ return
+ username, password = [ u(c, self.encoding, 'replace') for c in credentials ]
+ allowed, reason = self._authenticate_plain(domain, username, password)
+
+ elif mechanism == b'CURVE':
+ # For CURVE, even a whitelisted address must authenticate
+ if len(credentials) != 1:
+ self.log.error("Invalid CURVE credentials: %r", credentials)
+ self._send_zap_reply(request_id, b"400", b"Invalid credentials")
+ return
+ key = credentials[0]
+ allowed, reason = self._authenticate_curve(domain, key)
+
+ if allowed:
+ self._send_zap_reply(request_id, b"200", b"OK", username)
+ else:
+ self._send_zap_reply(request_id, b"400", reason)
+
+ def _authenticate_plain(self, domain, username, password):
+ """PLAIN ZAP authentication"""
+ allowed = False
+ reason = b""
+ if self.passwords:
+            # If no domain is specified then use the default domain
+ if not domain:
+ domain = '*'
+
+ if domain in self.passwords:
+ if username in self.passwords[domain]:
+ if password == self.passwords[domain][username]:
+ allowed = True
+ else:
+ reason = b"Invalid password"
+ else:
+ reason = b"Invalid username"
+ else:
+ reason = b"Invalid domain"
+
+ if allowed:
+ self.log.debug("ALLOWED (PLAIN) domain=%s username=%s password=%s",
+ domain, username, password,
+ )
+ else:
+ self.log.debug("DENIED %s", reason)
+
+ else:
+ reason = b"No passwords defined"
+ self.log.debug("DENIED (PLAIN) %s", reason)
+
+ return allowed, reason
+
+ def _authenticate_curve(self, domain, client_key):
+ """CURVE ZAP authentication"""
+ allowed = False
+ reason = b""
+ if self.allow_any:
+ allowed = True
+ reason = b"OK"
+ self.log.debug("ALLOWED (CURVE allow any client)")
+ else:
+ # If no explicit domain is specified then use the default domain
+ if not domain:
+ domain = '*'
+
+ if domain in self.certs:
+ # The certs dict stores keys in z85 format, convert binary key to z85 bytes
+ z85_client_key = z85.encode(client_key)
+ if z85_client_key in self.certs[domain] or self.certs[domain] == b'OK':
+ allowed = True
+ reason = b"OK"
+ else:
+ reason = b"Unknown key"
+
+ status = "ALLOWED" if allowed else "DENIED"
+ self.log.debug("%s (CURVE) domain=%s client_key=%s",
+ status, domain, z85_client_key,
+ )
+ else:
+ reason = b"Unknown domain"
+
+ return allowed, reason
+
+ def _send_zap_reply(self, request_id, status_code, status_text, user_id='user'):
+ """Send a ZAP reply to finish the authentication."""
+ user_id = user_id if status_code == b'200' else b''
+ if isinstance(user_id, unicode):
+ user_id = user_id.encode(self.encoding, 'replace')
+ metadata = b'' # not currently used
+ self.log.debug("ZAP reply code=%s text=%s", status_code, status_text)
+ reply = [VERSION, request_id, status_code, status_text, user_id, metadata]
+ self.zap_socket.send_multipart(reply)
+
+__all__ = ['Authenticator', 'CURVE_ALLOW_ANY']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/certs.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/certs.py
new file mode 100644
index 00000000..4d26ad7b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/certs.py
@@ -0,0 +1,119 @@
+"""0MQ authentication related functions and classes."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import datetime
+import glob
+import io
+import os
+import zmq
+from zmq.utils.strtypes import bytes, unicode, b, u
+
+
+_cert_secret_banner = u("""# **** Generated on {0} by pyzmq ****
+# ZeroMQ CURVE **Secret** Certificate
+# DO NOT PROVIDE THIS FILE TO OTHER USERS nor change its permissions.
+
+""")
+
+_cert_public_banner = u("""# **** Generated on {0} by pyzmq ****
+# ZeroMQ CURVE Public Certificate
+# Exchange securely, or use a secure mechanism to verify the contents
+# of this file after exchange. Store public certificates in your home
+# directory, in the .curve subdirectory.
+
+""")
+
+def _write_key_file(key_filename, banner, public_key, secret_key=None, metadata=None, encoding='utf-8'):
+ """Create a certificate file"""
+ if isinstance(public_key, bytes):
+ public_key = public_key.decode(encoding)
+ if isinstance(secret_key, bytes):
+ secret_key = secret_key.decode(encoding)
+ with io.open(key_filename, 'w', encoding='utf8') as f:
+ f.write(banner.format(datetime.datetime.now()))
+
+ f.write(u('metadata\n'))
+ if metadata:
+ for k, v in metadata.items():
+ if isinstance(v, bytes):
+ v = v.decode(encoding)
+ f.write(u(" {0} = {1}\n").format(k, v))
+
+ f.write(u('curve\n'))
+ f.write(u(" public-key = \"{0}\"\n").format(public_key))
+
+ if secret_key:
+ f.write(u(" secret-key = \"{0}\"\n").format(secret_key))
+
+
+def create_certificates(key_dir, name, metadata=None):
+ """Create zmq certificates.
+
+ Returns the file paths to the public and secret certificate files.
+ """
+ public_key, secret_key = zmq.curve_keypair()
+ base_filename = os.path.join(key_dir, name)
+ secret_key_file = "{0}.key_secret".format(base_filename)
+ public_key_file = "{0}.key".format(base_filename)
+ now = datetime.datetime.now()
+
+ _write_key_file(public_key_file,
+ _cert_public_banner.format(now),
+ public_key)
+
+ _write_key_file(secret_key_file,
+ _cert_secret_banner.format(now),
+ public_key,
+ secret_key=secret_key,
+ metadata=metadata)
+
+ return public_key_file, secret_key_file
+
+
+def load_certificate(filename):
+ """Load public and secret key from a zmq certificate.
+
+ Returns (public_key, secret_key)
+
+ If the certificate file only contains the public key,
+ secret_key will be None.
+ """
+ public_key = None
+ secret_key = None
+ if not os.path.exists(filename):
+ raise IOError("Invalid certificate file: {0}".format(filename))
+
+ with open(filename, 'rb') as f:
+ for line in f:
+ line = line.strip()
+ if line.startswith(b'#'):
+ continue
+ if line.startswith(b'public-key'):
+ public_key = line.split(b"=", 1)[1].strip(b' \t\'"')
+ if line.startswith(b'secret-key'):
+ secret_key = line.split(b"=", 1)[1].strip(b' \t\'"')
+ if public_key and secret_key:
+ break
+
+ return public_key, secret_key
+
+
+def load_certificates(directory='.'):
+ """Load public keys from all certificates in a directory"""
+ certs = {}
+ if not os.path.isdir(directory):
+ raise IOError("Invalid certificate directory: {0}".format(directory))
+ # Follow czmq pattern of public keys stored in *.key files.
+ glob_string = os.path.join(directory, "*.key")
+
+ cert_files = glob.glob(glob_string)
+ for cert_file in cert_files:
+ public_key, _ = load_certificate(cert_file)
+ if public_key:
+ certs[public_key] = 'OK'
+ return certs
+
+__all__ = ['create_certificates', 'load_certificate', 'load_certificates']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/ioloop.py
new file mode 100644
index 00000000..1f448b47
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/ioloop.py
@@ -0,0 +1,34 @@
+"""ZAP Authenticator integrated with the tornado IOLoop.
+
+.. versionadded:: 14.1
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from zmq.eventloop import ioloop, zmqstream
+from .base import Authenticator
+
+
+class IOLoopAuthenticator(Authenticator):
+ """ZAP authentication for use in the tornado IOLoop"""
+
+ def __init__(self, context=None, encoding='utf-8', log=None, io_loop=None):
+        super(IOLoopAuthenticator, self).__init__(context, encoding, log)
+ self.zap_stream = None
+ self.io_loop = io_loop or ioloop.IOLoop.instance()
+
+ def start(self):
+ """Start ZAP authentication"""
+ super(IOLoopAuthenticator, self).start()
+ self.zap_stream = zmqstream.ZMQStream(self.zap_socket, self.io_loop)
+ self.zap_stream.on_recv(self.handle_zap_message)
+
+ def stop(self):
+ """Stop ZAP authentication"""
+ if self.zap_stream:
+ self.zap_stream.close()
+ self.zap_stream = None
+ super(IOLoopAuthenticator, self).stop()
+
+__all__ = ['IOLoopAuthenticator']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/thread.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/thread.py
new file mode 100644
index 00000000..8c3355a9
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/auth/thread.py
@@ -0,0 +1,184 @@
+"""ZAP Authenticator in a Python Thread.
+
+.. versionadded:: 14.1
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import logging
+from threading import Thread
+
+import zmq
+from zmq.utils import jsonapi
+from zmq.utils.strtypes import bytes, unicode, b, u
+
+from .base import Authenticator
+
+class AuthenticationThread(Thread):
+ """A Thread for running a zmq Authenticator
+
+ This is run in the background by ThreadedAuthenticator
+ """
+
+ def __init__(self, context, endpoint, encoding='utf-8', log=None):
+ super(AuthenticationThread, self).__init__()
+ self.context = context or zmq.Context.instance()
+ self.encoding = encoding
+ self.log = log = log or logging.getLogger('zmq.auth')
+ self.authenticator = Authenticator(context, encoding=encoding, log=log)
+
+ # create a socket to communicate back to main thread.
+ self.pipe = context.socket(zmq.PAIR)
+ self.pipe.linger = 1
+ self.pipe.connect(endpoint)
+
+ def run(self):
+ """ Start the Authentication Agent thread task """
+ self.authenticator.start()
+ zap = self.authenticator.zap_socket
+ poller = zmq.Poller()
+ poller.register(self.pipe, zmq.POLLIN)
+ poller.register(zap, zmq.POLLIN)
+ while True:
+ try:
+ socks = dict(poller.poll())
+ except zmq.ZMQError:
+ break # interrupted
+
+ if self.pipe in socks and socks[self.pipe] == zmq.POLLIN:
+ terminate = self._handle_pipe()
+ if terminate:
+ break
+
+ if zap in socks and socks[zap] == zmq.POLLIN:
+ self._handle_zap()
+
+ self.pipe.close()
+ self.authenticator.stop()
+
+ def _handle_zap(self):
+ """
+ Handle a message from the ZAP socket.
+ """
+ msg = self.authenticator.zap_socket.recv_multipart()
+ if not msg: return
+ self.authenticator.handle_zap_message(msg)
+
+ def _handle_pipe(self):
+ """
+ Handle a message from front-end API.
+ """
+ terminate = False
+
+ # Get the whole message off the pipe in one go
+ msg = self.pipe.recv_multipart()
+
+ if msg is None:
+ terminate = True
+ return terminate
+
+ command = msg[0]
+ self.log.debug("auth received API command %r", command)
+
+ if command == b'ALLOW':
+ addresses = [u(m, self.encoding) for m in msg[1:]]
+ try:
+ self.authenticator.allow(*addresses)
+ except Exception as e:
+ self.log.exception("Failed to allow %s", addresses)
+
+ elif command == b'DENY':
+ addresses = [u(m, self.encoding) for m in msg[1:]]
+ try:
+ self.authenticator.deny(*addresses)
+ except Exception as e:
+ self.log.exception("Failed to deny %s", addresses)
+
+ elif command == b'PLAIN':
+ domain = u(msg[1], self.encoding)
+ json_passwords = msg[2]
+ self.authenticator.configure_plain(domain, jsonapi.loads(json_passwords))
+
+ elif command == b'CURVE':
+ # For now we don't do anything with domains
+ domain = u(msg[1], self.encoding)
+
+ # If location is CURVE_ALLOW_ANY, allow all clients. Otherwise
+ # treat location as a directory that holds the certificates.
+ location = u(msg[2], self.encoding)
+ self.authenticator.configure_curve(domain, location)
+
+ elif command == b'TERMINATE':
+ terminate = True
+
+ else:
+ self.log.error("Invalid auth command from API: %r", command)
+
+ return terminate
+
+def _inherit_docstrings(cls):
+ """inherit docstrings from Authenticator, so we don't duplicate them"""
+ for name, method in cls.__dict__.items():
+ if name.startswith('_'):
+ continue
+ upstream_method = getattr(Authenticator, name, None)
+ if not method.__doc__:
+ method.__doc__ = upstream_method.__doc__
+ return cls
+
+@_inherit_docstrings
+class ThreadAuthenticator(object):
+ """Run ZAP authentication in a background thread"""
+
+ def __init__(self, context=None, encoding='utf-8', log=None):
+ self.context = context or zmq.Context.instance()
+ self.log = log
+ self.encoding = encoding
+ self.pipe = None
+ self.pipe_endpoint = "inproc://{0}.inproc".format(id(self))
+ self.thread = None
+
+ def allow(self, *addresses):
+ self.pipe.send_multipart([b'ALLOW'] + [b(a, self.encoding) for a in addresses])
+
+ def deny(self, *addresses):
+ self.pipe.send_multipart([b'DENY'] + [b(a, self.encoding) for a in addresses])
+
+ def configure_plain(self, domain='*', passwords=None):
+ self.pipe.send_multipart([b'PLAIN', b(domain, self.encoding), jsonapi.dumps(passwords or {})])
+
+ def configure_curve(self, domain='*', location=''):
+ domain = b(domain, self.encoding)
+ location = b(location, self.encoding)
+ self.pipe.send_multipart([b'CURVE', domain, location])
+
+ def start(self):
+ """Start the authentication thread"""
+ # create a socket to communicate with auth thread.
+ self.pipe = self.context.socket(zmq.PAIR)
+ self.pipe.linger = 1
+ self.pipe.bind(self.pipe_endpoint)
+ self.thread = AuthenticationThread(self.context, self.pipe_endpoint, encoding=self.encoding, log=self.log)
+ self.thread.start()
+
+ def stop(self):
+ """Stop the authentication thread"""
+ if self.pipe:
+ self.pipe.send(b'TERMINATE')
+ if self.is_alive():
+ self.thread.join()
+ self.thread = None
+ self.pipe.close()
+ self.pipe = None
+
+ def is_alive(self):
+ """Is the ZAP thread currently running?"""
+ if self.thread and self.thread.is_alive():
+ return True
+ return False
+
+ def __del__(self):
+ self.stop()
+
+__all__ = ['ThreadAuthenticator']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/__init__.py
new file mode 100644
index 00000000..7cac725c
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/__init__.py
@@ -0,0 +1,45 @@
+"""Import basic exposure of libzmq C API as a backend"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import os
+import platform
+import sys
+
+from zmq.utils.sixcerpt import reraise
+
+from .select import public_api, select_backend
+
+if 'PYZMQ_BACKEND' in os.environ:
+ backend = os.environ['PYZMQ_BACKEND']
+ if backend in ('cython', 'cffi'):
+ backend = 'zmq.backend.%s' % backend
+ _ns = select_backend(backend)
+else:
+ # default to cython, fallback to cffi
+ # (reverse on PyPy)
+ if platform.python_implementation() == 'PyPy':
+ first, second = ('zmq.backend.cffi', 'zmq.backend.cython')
+ else:
+ first, second = ('zmq.backend.cython', 'zmq.backend.cffi')
+
+ try:
+ _ns = select_backend(first)
+ except Exception:
+ exc_info = sys.exc_info()
+ exc = exc_info[1]
+ try:
+ _ns = select_backend(second)
+ except ImportError:
+ # prevent 'During handling of the above exception...' on py3
+ # can't use `raise ... from` on Python 2
+ if hasattr(exc, '__cause__'):
+ exc.__cause__ = None
+ # raise the *first* error, not the fallback
+ reraise(*exc_info)
+
+globals().update(_ns)
+
+__all__ = public_api
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/__init__.py
new file mode 100644
index 00000000..ca3164d3
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/__init__.py
@@ -0,0 +1,22 @@
+"""CFFI backend (for PyPY)"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from zmq.backend.cffi import (constants, error, message, context, socket,
+ _poll, devices, utils)
+
+__all__ = []
+for submod in (constants, error, message, context, socket,
+ _poll, devices, utils):
+ __all__.extend(submod.__all__)
+
+from .constants import *
+from .error import *
+from .message import *
+from .context import *
+from .socket import *
+from .devices import *
+from ._poll import *
+from ._cffi import zmq_version_info, ffi
+from .utils import *
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/_cdefs.h b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/_cdefs.h
new file mode 100644
index 00000000..d3300575
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/_cdefs.h
@@ -0,0 +1,68 @@
+void zmq_version(int *major, int *minor, int *patch);
+
+void* zmq_socket(void *context, int type);
+int zmq_close(void *socket);
+
+int zmq_bind(void *socket, const char *endpoint);
+int zmq_connect(void *socket, const char *endpoint);
+
+int zmq_errno(void);
+const char * zmq_strerror(int errnum);
+
+void* zmq_stopwatch_start(void);
+unsigned long zmq_stopwatch_stop(void *watch);
+void zmq_sleep(int seconds_);
+int zmq_device(int device, void *frontend, void *backend);
+
+int zmq_unbind(void *socket, const char *endpoint);
+int zmq_disconnect(void *socket, const char *endpoint);
+void* zmq_ctx_new();
+int zmq_ctx_destroy(void *context);
+int zmq_ctx_get(void *context, int opt);
+int zmq_ctx_set(void *context, int opt, int optval);
+int zmq_proxy(void *frontend, void *backend, void *capture);
+int zmq_socket_monitor(void *socket, const char *addr, int events);
+
+int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key);
+int zmq_has (const char *capability);
+
+typedef struct { ...; } zmq_msg_t;
+typedef ... zmq_free_fn;
+
+int zmq_msg_init(zmq_msg_t *msg);
+int zmq_msg_init_size(zmq_msg_t *msg, size_t size);
+int zmq_msg_init_data(zmq_msg_t *msg,
+ void *data,
+ size_t size,
+ zmq_free_fn *ffn,
+ void *hint);
+
+size_t zmq_msg_size(zmq_msg_t *msg);
+void *zmq_msg_data(zmq_msg_t *msg);
+int zmq_msg_close(zmq_msg_t *msg);
+
+int zmq_msg_send(zmq_msg_t *msg, void *socket, int flags);
+int zmq_msg_recv(zmq_msg_t *msg, void *socket, int flags);
+
+int zmq_getsockopt(void *socket,
+ int option_name,
+ void *option_value,
+ size_t *option_len);
+
+int zmq_setsockopt(void *socket,
+ int option_name,
+ const void *option_value,
+ size_t option_len);
+typedef struct
+{
+ void *socket;
+ int fd;
+ short events;
+ short revents;
+} zmq_pollitem_t;
+
+int zmq_poll(zmq_pollitem_t *items, int nitems, long timeout);
+
+// miscellany
+void * memcpy(void *restrict s1, const void *restrict s2, size_t n);
+int get_ipc_path_max_len(void);
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/_cffi.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/_cffi.py
new file mode 100644
index 00000000..c73ebf83
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/_cffi.py
@@ -0,0 +1,127 @@
+# coding: utf-8
+"""The main CFFI wrapping of libzmq"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import json
+import os
+from os.path import dirname, join
+from cffi import FFI
+
+from zmq.utils.constant_names import all_names, no_prefix
+
+
+base_zmq_version = (3,2,2)
+
+def load_compiler_config():
+ """load pyzmq compiler arguments"""
+ import zmq
+ zmq_dir = dirname(zmq.__file__)
+ zmq_parent = dirname(zmq_dir)
+
+ fname = join(zmq_dir, 'utils', 'compiler.json')
+ if os.path.exists(fname):
+ with open(fname) as f:
+ cfg = json.load(f)
+ else:
+ cfg = {}
+
+ cfg.setdefault("include_dirs", [])
+ cfg.setdefault("library_dirs", [])
+ cfg.setdefault("runtime_library_dirs", [])
+ cfg.setdefault("libraries", ["zmq"])
+
+ # cast to str, because cffi can't handle unicode paths (?!)
+ cfg['libraries'] = [str(lib) for lib in cfg['libraries']]
+ for key in ("include_dirs", "library_dirs", "runtime_library_dirs"):
+ # interpret paths relative to parent of zmq (like source tree)
+ abs_paths = []
+ for p in cfg[key]:
+ if p.startswith('zmq'):
+ p = join(zmq_parent, p)
+ abs_paths.append(str(p))
+ cfg[key] = abs_paths
+ return cfg
+
+
+def zmq_version_info():
+ """Get libzmq version as tuple of ints"""
+ major = ffi.new('int*')
+ minor = ffi.new('int*')
+ patch = ffi.new('int*')
+
+ C.zmq_version(major, minor, patch)
+
+ return (int(major[0]), int(minor[0]), int(patch[0]))
+
+
+cfg = load_compiler_config()
+ffi = FFI()
+
+def _make_defines(names):
+ _names = []
+ for name in names:
+ define_line = "#define %s ..." % (name)
+ _names.append(define_line)
+
+ return "\n".join(_names)
+
+c_constant_names = []
+for name in all_names:
+ if no_prefix(name):
+ c_constant_names.append(name)
+ else:
+ c_constant_names.append("ZMQ_" + name)
+
+# load ffi definitions
+here = os.path.dirname(__file__)
+with open(os.path.join(here, '_cdefs.h')) as f:
+ _cdefs = f.read()
+
+with open(os.path.join(here, '_verify.c')) as f:
+ _verify = f.read()
+
+ffi.cdef(_cdefs)
+ffi.cdef(_make_defines(c_constant_names))
+
+try:
+ C = ffi.verify(_verify,
+ modulename='_cffi_ext',
+ libraries=cfg['libraries'],
+ include_dirs=cfg['include_dirs'],
+ library_dirs=cfg['library_dirs'],
+ runtime_library_dirs=cfg['runtime_library_dirs'],
+ )
+ _version_info = zmq_version_info()
+except Exception as e:
+ raise ImportError("PyZMQ CFFI backend couldn't find zeromq: %s\n"
+ "Please check that you have zeromq headers and libraries." % e)
+
+if _version_info < (3,2,2):
+ raise ImportError("PyZMQ CFFI backend requires zeromq >= 3.2.2,"
+ " but found %i.%i.%i" % _version_info
+ )
+
+nsp = new_sizet_pointer = lambda length: ffi.new('size_t*', length)
+
+new_uint64_pointer = lambda: (ffi.new('uint64_t*'),
+ nsp(ffi.sizeof('uint64_t')))
+new_int64_pointer = lambda: (ffi.new('int64_t*'),
+ nsp(ffi.sizeof('int64_t')))
+new_int_pointer = lambda: (ffi.new('int*'),
+ nsp(ffi.sizeof('int')))
+new_binary_data = lambda length: (ffi.new('char[%d]' % (length)),
+ nsp(ffi.sizeof('char') * length))
+
+value_uint64_pointer = lambda val : (ffi.new('uint64_t*', val),
+ ffi.sizeof('uint64_t'))
+value_int64_pointer = lambda val: (ffi.new('int64_t*', val),
+ ffi.sizeof('int64_t'))
+value_int_pointer = lambda val: (ffi.new('int*', val),
+ ffi.sizeof('int'))
+value_binary_data = lambda val, length: (ffi.new('char[%d]' % (length + 1), val),
+ ffi.sizeof('char') * length)
+
+IPC_PATH_MAX_LEN = C.get_ipc_path_max_len()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/_poll.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/_poll.py
new file mode 100644
index 00000000..9bca34ca
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/_poll.py
@@ -0,0 +1,56 @@
+# coding: utf-8
+"""zmq poll function"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from ._cffi import C, ffi, zmq_version_info
+
+from .constants import *
+
+from zmq.error import _check_rc
+
+
+def _make_zmq_pollitem(socket, flags):
+ zmq_socket = socket._zmq_socket
+ zmq_pollitem = ffi.new('zmq_pollitem_t*')
+ zmq_pollitem.socket = zmq_socket
+ zmq_pollitem.fd = 0
+ zmq_pollitem.events = flags
+ zmq_pollitem.revents = 0
+ return zmq_pollitem[0]
+
+def _make_zmq_pollitem_fromfd(socket_fd, flags):
+ zmq_pollitem = ffi.new('zmq_pollitem_t*')
+ zmq_pollitem.socket = ffi.NULL
+ zmq_pollitem.fd = socket_fd
+ zmq_pollitem.events = flags
+ zmq_pollitem.revents = 0
+ return zmq_pollitem[0]
+
+def zmq_poll(sockets, timeout):
+ cffi_pollitem_list = []
+ low_level_to_socket_obj = {}
+ for item in sockets:
+ if isinstance(item[0], int):
+ low_level_to_socket_obj[item[0]] = item
+ cffi_pollitem_list.append(_make_zmq_pollitem_fromfd(item[0], item[1]))
+ else:
+ low_level_to_socket_obj[item[0]._zmq_socket] = item
+ cffi_pollitem_list.append(_make_zmq_pollitem(item[0], item[1]))
+ items = ffi.new('zmq_pollitem_t[]', cffi_pollitem_list)
+ list_length = ffi.cast('int', len(cffi_pollitem_list))
+ c_timeout = ffi.cast('long', timeout)
+ rc = C.zmq_poll(items, list_length, c_timeout)
+ _check_rc(rc)
+ result = []
+ for index in range(len(items)):
+ if not items[index].socket == ffi.NULL:
+ if items[index].revents > 0:
+ result.append((low_level_to_socket_obj[items[index].socket][0],
+ items[index].revents))
+ else:
+ result.append((items[index].fd, items[index].revents))
+ return result
+
+__all__ = ['zmq_poll']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/_verify.c b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/_verify.c
new file mode 100644
index 00000000..547840eb
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/_verify.c
@@ -0,0 +1,12 @@
+#include <stdio.h>
+#include <sys/un.h>
+#include <string.h>
+
+#include <zmq.h>
+#include <zmq_utils.h>
+#include "zmq_compat.h"
+
+int get_ipc_path_max_len(void) {
+ struct sockaddr_un *dummy;
+ return sizeof(dummy->sun_path) - 1;
+}
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/constants.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/constants.py
new file mode 100644
index 00000000..ee293e74
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/constants.py
@@ -0,0 +1,15 @@
+# coding: utf-8
+"""zmq constants"""
+
+from ._cffi import C, c_constant_names
+from zmq.utils.constant_names import all_names
+
+g = globals()
+for cname in c_constant_names:
+ if cname.startswith("ZMQ_"):
+ name = cname[4:]
+ else:
+ name = cname
+ g[name] = getattr(C, cname)
+
+__all__ = all_names
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/context.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/context.py
new file mode 100644
index 00000000..16a7b257
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/context.py
@@ -0,0 +1,100 @@
+# coding: utf-8
+"""zmq Context class"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import weakref
+
+from ._cffi import C, ffi
+
+from .socket import *
+from .constants import *
+
+from zmq.error import ZMQError, _check_rc
+
+class Context(object):
+ _zmq_ctx = None
+ _iothreads = None
+ _closed = None
+ _sockets = None
+ _shadow = False
+
+ def __init__(self, io_threads=1, shadow=None):
+
+ if shadow:
+ self._zmq_ctx = ffi.cast("void *", shadow)
+ self._shadow = True
+ else:
+ self._shadow = False
+ if not io_threads >= 0:
+ raise ZMQError(EINVAL)
+
+ self._zmq_ctx = C.zmq_ctx_new()
+ if self._zmq_ctx == ffi.NULL:
+ raise ZMQError(C.zmq_errno())
+ if not shadow:
+ C.zmq_ctx_set(self._zmq_ctx, IO_THREADS, io_threads)
+ self._closed = False
+ self._sockets = set()
+
+ @property
+ def underlying(self):
+ """The address of the underlying libzmq context"""
+ return int(ffi.cast('size_t', self._zmq_ctx))
+
+ @property
+ def closed(self):
+ return self._closed
+
+ def _add_socket(self, socket):
+ ref = weakref.ref(socket)
+ self._sockets.add(ref)
+ return ref
+
+ def _rm_socket(self, ref):
+ if ref in self._sockets:
+ self._sockets.remove(ref)
+
+ def set(self, option, value):
+ """set a context option
+
+ see zmq_ctx_set
+ """
+ rc = C.zmq_ctx_set(self._zmq_ctx, option, value)
+ _check_rc(rc)
+
+ def get(self, option):
+ """get context option
+
+ see zmq_ctx_get
+ """
+ rc = C.zmq_ctx_get(self._zmq_ctx, option)
+ _check_rc(rc)
+ return rc
+
+ def term(self):
+ if self.closed:
+ return
+
+ C.zmq_ctx_destroy(self._zmq_ctx)
+
+ self._zmq_ctx = None
+ self._closed = True
+
+ def destroy(self, linger=None):
+ if self.closed:
+ return
+
+ sockets = self._sockets
+ self._sockets = set()
+ for s in sockets:
+ s = s()
+ if s and not s.closed:
+ if linger:
+ s.setsockopt(LINGER, linger)
+ s.close()
+
+ self.term()
+
+__all__ = ['Context']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/devices.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/devices.py
new file mode 100644
index 00000000..c7a514a8
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/devices.py
@@ -0,0 +1,24 @@
+# coding: utf-8
+"""zmq device functions"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from ._cffi import C, ffi, zmq_version_info
+from .socket import Socket
+from zmq.error import ZMQError, _check_rc
+
+def device(device_type, frontend, backend):
+ rc = C.zmq_proxy(frontend._zmq_socket, backend._zmq_socket, ffi.NULL)
+ _check_rc(rc)
+
+def proxy(frontend, backend, capture=None):
+ if isinstance(capture, Socket):
+ capture = capture._zmq_socket
+ else:
+ capture = ffi.NULL
+
+ rc = C.zmq_proxy(frontend._zmq_socket, backend._zmq_socket, capture)
+ _check_rc(rc)
+
+__all__ = ['device', 'proxy']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/error.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/error.py
new file mode 100644
index 00000000..3bb64de0
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/error.py
@@ -0,0 +1,13 @@
+"""zmq error functions"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from ._cffi import C, ffi
+
+def strerror(errno):
+ return ffi.string(C.zmq_strerror(errno))
+
+zmq_errno = C.zmq_errno
+
+__all__ = ['strerror', 'zmq_errno']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/message.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/message.py
new file mode 100644
index 00000000..c35decb6
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/message.py
@@ -0,0 +1,69 @@
+"""Dummy Frame object"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from ._cffi import ffi, C
+
+import zmq
+from zmq.utils.strtypes import unicode
+
+try:
+ view = memoryview
+except NameError:
+ view = buffer
+
+_content = lambda x: x.tobytes() if type(x) == memoryview else x
+
+class Frame(object):
+ _data = None
+ tracker = None
+ closed = False
+ more = False
+ buffer = None
+
+
+ def __init__(self, data, track=False):
+ try:
+ view(data)
+ except TypeError:
+ raise
+
+ self._data = data
+
+ if isinstance(data, unicode):
+ raise TypeError("Unicode objects not allowed. Only: str/bytes, " +
+ "buffer interfaces.")
+
+ self.more = False
+ self.tracker = None
+ self.closed = False
+ if track:
+ self.tracker = zmq.MessageTracker()
+
+ self.buffer = view(self.bytes)
+
+ @property
+ def bytes(self):
+ data = _content(self._data)
+ return data
+
+ def __len__(self):
+ return len(self.bytes)
+
+ def __eq__(self, other):
+ return self.bytes == _content(other)
+
+ def __str__(self):
+ if str is unicode:
+ return self.bytes.decode()
+ else:
+ return self.bytes
+
+ @property
+ def done(self):
+ return True
+
+Message = Frame
+
+__all__ = ['Frame', 'Message']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/socket.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/socket.py
new file mode 100644
index 00000000..3c427739
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/socket.py
@@ -0,0 +1,244 @@
+# coding: utf-8
+"""zmq Socket class"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import random
+import codecs
+
+import errno as errno_mod
+
+from ._cffi import (C, ffi, new_uint64_pointer, new_int64_pointer,
+ new_int_pointer, new_binary_data, value_uint64_pointer,
+ value_int64_pointer, value_int_pointer, value_binary_data,
+ IPC_PATH_MAX_LEN)
+
+from .message import Frame
+from .constants import *
+
+import zmq
+from zmq.error import ZMQError, _check_rc, _check_version
+from zmq.utils.strtypes import unicode
+
+
+def new_pointer_from_opt(option, length=0):
+ from zmq.sugar.constants import (
+ int64_sockopts, bytes_sockopts,
+ )
+ if option in int64_sockopts:
+ return new_int64_pointer()
+ elif option in bytes_sockopts:
+ return new_binary_data(length)
+ else:
+ # default
+ return new_int_pointer()
+
+def value_from_opt_pointer(option, opt_pointer, length=0):
+ from zmq.sugar.constants import (
+ int64_sockopts, bytes_sockopts,
+ )
+ if option in int64_sockopts:
+ return int(opt_pointer[0])
+ elif option in bytes_sockopts:
+ return ffi.buffer(opt_pointer, length)[:]
+ else:
+ return int(opt_pointer[0])
+
+def initialize_opt_pointer(option, value, length=0):
+ from zmq.sugar.constants import (
+ int64_sockopts, bytes_sockopts,
+ )
+ if option in int64_sockopts:
+ return value_int64_pointer(value)
+ elif option in bytes_sockopts:
+ return value_binary_data(value, length)
+ else:
+ return value_int_pointer(value)
+
+
+class Socket(object):
+ context = None
+ socket_type = None
+ _zmq_socket = None
+ _closed = None
+ _ref = None
+ _shadow = False
+
+ def __init__(self, context=None, socket_type=None, shadow=None):
+ self.context = context
+ if shadow is not None:
+ self._zmq_socket = ffi.cast("void *", shadow)
+ self._shadow = True
+ else:
+ self._shadow = False
+ self._zmq_socket = C.zmq_socket(context._zmq_ctx, socket_type)
+ if self._zmq_socket == ffi.NULL:
+ raise ZMQError()
+ self._closed = False
+ if context:
+ self._ref = context._add_socket(self)
+
+ @property
+ def underlying(self):
+ """The address of the underlying libzmq socket"""
+ return int(ffi.cast('size_t', self._zmq_socket))
+
+ @property
+ def closed(self):
+ return self._closed
+
+ def close(self, linger=None):
+ rc = 0
+ if not self._closed and hasattr(self, '_zmq_socket'):
+ if self._zmq_socket is not None:
+ rc = C.zmq_close(self._zmq_socket)
+ self._closed = True
+ if self.context:
+ self.context._rm_socket(self._ref)
+ return rc
+
+ def bind(self, address):
+ if isinstance(address, unicode):
+ address = address.encode('utf8')
+ rc = C.zmq_bind(self._zmq_socket, address)
+ if rc < 0:
+ if IPC_PATH_MAX_LEN and C.zmq_errno() == errno_mod.ENAMETOOLONG:
+ # py3compat: address is bytes, but msg wants str
+ if str is unicode:
+ address = address.decode('utf-8', 'replace')
+ path = address.split('://', 1)[-1]
+ msg = ('ipc path "{0}" is longer than {1} '
+ 'characters (sizeof(sockaddr_un.sun_path)).'
+ .format(path, IPC_PATH_MAX_LEN))
+ raise ZMQError(C.zmq_errno(), msg=msg)
+ else:
+ _check_rc(rc)
+
+ def unbind(self, address):
+ _check_version((3,2), "unbind")
+ if isinstance(address, unicode):
+ address = address.encode('utf8')
+ rc = C.zmq_unbind(self._zmq_socket, address)
+ _check_rc(rc)
+
+ def connect(self, address):
+ if isinstance(address, unicode):
+ address = address.encode('utf8')
+ rc = C.zmq_connect(self._zmq_socket, address)
+ _check_rc(rc)
+
+ def disconnect(self, address):
+ _check_version((3,2), "disconnect")
+ if isinstance(address, unicode):
+ address = address.encode('utf8')
+ rc = C.zmq_disconnect(self._zmq_socket, address)
+ _check_rc(rc)
+
+ def set(self, option, value):
+ length = None
+ if isinstance(value, unicode):
+ raise TypeError("unicode not allowed, use bytes")
+
+ if isinstance(value, bytes):
+ if option not in zmq.constants.bytes_sockopts:
+ raise TypeError("not a bytes sockopt: %s" % option)
+ length = len(value)
+
+ c_data = initialize_opt_pointer(option, value, length)
+
+ c_value_pointer = c_data[0]
+ c_sizet = c_data[1]
+
+ rc = C.zmq_setsockopt(self._zmq_socket,
+ option,
+ ffi.cast('void*', c_value_pointer),
+ c_sizet)
+ _check_rc(rc)
+
+ def get(self, option):
+ c_data = new_pointer_from_opt(option, length=255)
+
+ c_value_pointer = c_data[0]
+ c_sizet_pointer = c_data[1]
+
+ rc = C.zmq_getsockopt(self._zmq_socket,
+ option,
+ c_value_pointer,
+ c_sizet_pointer)
+ _check_rc(rc)
+
+ sz = c_sizet_pointer[0]
+ v = value_from_opt_pointer(option, c_value_pointer, sz)
+ if option != zmq.IDENTITY and option in zmq.constants.bytes_sockopts and v.endswith(b'\0'):
+ v = v[:-1]
+ return v
+
+ def send(self, message, flags=0, copy=False, track=False):
+ if isinstance(message, unicode):
+ raise TypeError("Message must be in bytes, not an unicode Object")
+
+ if isinstance(message, Frame):
+ message = message.bytes
+
+ zmq_msg = ffi.new('zmq_msg_t*')
+ c_message = ffi.new('char[]', message)
+ rc = C.zmq_msg_init_size(zmq_msg, len(message))
+ C.memcpy(C.zmq_msg_data(zmq_msg), c_message, len(message))
+
+ rc = C.zmq_msg_send(zmq_msg, self._zmq_socket, flags)
+ C.zmq_msg_close(zmq_msg)
+ _check_rc(rc)
+
+ if track:
+ return zmq.MessageTracker()
+
+ def recv(self, flags=0, copy=True, track=False):
+ zmq_msg = ffi.new('zmq_msg_t*')
+ C.zmq_msg_init(zmq_msg)
+
+ rc = C.zmq_msg_recv(zmq_msg, self._zmq_socket, flags)
+
+ if rc < 0:
+ C.zmq_msg_close(zmq_msg)
+ _check_rc(rc)
+
+ _buffer = ffi.buffer(C.zmq_msg_data(zmq_msg), C.zmq_msg_size(zmq_msg))
+ value = _buffer[:]
+ C.zmq_msg_close(zmq_msg)
+
+ frame = Frame(value, track=track)
+ frame.more = self.getsockopt(RCVMORE)
+
+ if copy:
+ return frame.bytes
+ else:
+ return frame
+
+ def monitor(self, addr, events=-1):
+ """s.monitor(addr, flags)
+
+ Start publishing socket events on inproc.
+ See libzmq docs for zmq_monitor for details.
+
+ Note: requires libzmq >= 3.2
+
+ Parameters
+ ----------
+ addr : str
+ The inproc url used for monitoring. Passing None as
+ the addr will cause an existing socket monitor to be
+ deregistered.
+ events : int [default: zmq.EVENT_ALL]
+ The zmq event bitmask for which events will be sent to the monitor.
+ """
+
+ _check_version((3,2), "monitor")
+ if events < 0:
+ events = zmq.EVENT_ALL
+ if addr is None:
+ addr = ffi.NULL
+ rc = C.zmq_socket_monitor(self._zmq_socket, addr, events)
+
+
+__all__ = ['Socket', 'IPC_PATH_MAX_LEN']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/utils.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/utils.py
new file mode 100644
index 00000000..fde7827b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cffi/utils.py
@@ -0,0 +1,62 @@
+# coding: utf-8
+"""miscellaneous zmq_utils wrapping"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from ._cffi import ffi, C
+
+from zmq.error import ZMQError, _check_rc, _check_version
+from zmq.utils.strtypes import unicode
+
+def has(capability):
+ """Check for zmq capability by name (e.g. 'ipc', 'curve')
+
+ .. versionadded:: libzmq-4.1
+ .. versionadded:: 14.1
+ """
+ _check_version((4,1), 'zmq.has')
+ if isinstance(capability, unicode):
+ capability = capability.encode('utf8')
+ return bool(C.zmq_has(capability))
+
+def curve_keypair():
+ """generate a Z85 keypair for use with zmq.CURVE security
+
+ Requires libzmq (≥ 4.0) to have been linked with libsodium.
+
+ Returns
+ -------
+ (public, secret) : two bytestrings
+ The public and private keypair as 40 byte z85-encoded bytestrings.
+ """
+ _check_version((3,2), "monitor")
+ public = ffi.new('char[64]')
+ private = ffi.new('char[64]')
+ rc = C.zmq_curve_keypair(public, private)
+ _check_rc(rc)
+ return ffi.buffer(public)[:40], ffi.buffer(private)[:40]
+
+
+class Stopwatch(object):
+ def __init__(self):
+ self.watch = ffi.NULL
+
+ def start(self):
+ if self.watch == ffi.NULL:
+ self.watch = C.zmq_stopwatch_start()
+ else:
+ raise ZMQError('Stopwatch is already runing.')
+
+ def stop(self):
+ if self.watch == ffi.NULL:
+ raise ZMQError('Must start the Stopwatch before calling stop.')
+ else:
+ time = C.zmq_stopwatch_stop(self.watch)
+ self.watch = ffi.NULL
+ return time
+
+ def sleep(self, seconds):
+ C.zmq_sleep(seconds)
+
+__all__ = ['has', 'curve_keypair', 'Stopwatch']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/__init__.py
new file mode 100644
index 00000000..e5358185
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/__init__.py
@@ -0,0 +1,23 @@
+"""Python bindings for core 0MQ objects."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Lesser GNU Public License (LGPL).
+
+from . import (constants, error, message, context,
+ socket, utils, _poll, _version, _device )
+
+__all__ = []
+for submod in (constants, error, message, context,
+ socket, utils, _poll, _version, _device):
+ __all__.extend(submod.__all__)
+
+from .constants import *
+from .error import *
+from .message import *
+from .context import *
+from .socket import *
+from ._poll import *
+from .utils import *
+from ._device import *
+from ._version import *
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/_device.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/_device.cpython-34m.so
new file mode 100644
index 00000000..5382bf29
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/_device.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/_poll.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/_poll.cpython-34m.so
new file mode 100644
index 00000000..d905a938
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/_poll.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/_version.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/_version.cpython-34m.so
new file mode 100644
index 00000000..c7f7a206
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/_version.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/checkrc.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/checkrc.pxd
new file mode 100644
index 00000000..3bf69fc3
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/checkrc.pxd
@@ -0,0 +1,23 @@
+from libc.errno cimport EINTR, EAGAIN
+from cpython cimport PyErr_CheckSignals
+from libzmq cimport zmq_errno, ZMQ_ETERM
+
+cdef inline int _check_rc(int rc) except -1:
+ """internal utility for checking zmq return condition
+
+ and raising the appropriate Exception class
+ """
+ cdef int errno = zmq_errno()
+ PyErr_CheckSignals()
+ if rc < 0:
+ if errno == EAGAIN:
+ from zmq.error import Again
+ raise Again(errno)
+ elif errno == ZMQ_ETERM:
+ from zmq.error import ContextTerminated
+ raise ContextTerminated(errno)
+ else:
+ from zmq.error import ZMQError
+ raise ZMQError(errno)
+ # return -1
+ return 0
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/constants.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/constants.cpython-34m.so
new file mode 100644
index 00000000..11c91d01
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/constants.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/context.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/context.cpython-34m.so
new file mode 100644
index 00000000..8aa28b8d
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/context.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/context.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/context.pxd
new file mode 100644
index 00000000..9c9267a5
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/context.pxd
@@ -0,0 +1,41 @@
+"""0MQ Context class declaration."""
+
+#
+# Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley
+#
+# This file is part of pyzmq.
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+cdef class Context:
+
+ cdef object __weakref__ # enable weakref
+ cdef void *handle # The C handle for the underlying zmq object.
+ cdef bint _shadow # whether the Context is a shadow wrapper of another
+ cdef void **_sockets # A C-array containg socket handles
+ cdef size_t _n_sockets # the number of sockets
+ cdef size_t _max_sockets # the size of the _sockets array
+ cdef int _pid # the pid of the process which created me (for fork safety)
+
+ cdef public bint closed # bool property for a closed context.
+ cdef inline int _term(self)
+ # helpers for events on _sockets in Socket.__cinit__()/close()
+ cdef inline void _add_socket(self, void* handle)
+ cdef inline void _remove_socket(self, void* handle)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/error.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/error.cpython-34m.so
new file mode 100644
index 00000000..b646a114
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/error.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/libzmq.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/libzmq.pxd
new file mode 100644
index 00000000..e42f6d6b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/libzmq.pxd
@@ -0,0 +1,110 @@
+"""All the C imports for 0MQ"""
+
+#
+# Copyright (c) 2010 Brian E. Granger & Min Ragan-Kelley
+#
+# This file is part of pyzmq.
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Import the C header files
+#-----------------------------------------------------------------------------
+
+cdef extern from *:
+ ctypedef void* const_void_ptr "const void *"
+ ctypedef char* const_char_ptr "const char *"
+
+cdef extern from "zmq_compat.h":
+ ctypedef signed long long int64_t "pyzmq_int64_t"
+
+include "constant_enums.pxi"
+
+cdef extern from "zmq.h" nogil:
+
+ void _zmq_version "zmq_version"(int *major, int *minor, int *patch)
+
+ ctypedef int fd_t "ZMQ_FD_T"
+
+ enum: errno
+ char *zmq_strerror (int errnum)
+ int zmq_errno()
+
+ void *zmq_ctx_new ()
+ int zmq_ctx_destroy (void *context)
+ int zmq_ctx_set (void *context, int option, int optval)
+ int zmq_ctx_get (void *context, int option)
+ void *zmq_init (int io_threads)
+ int zmq_term (void *context)
+
+ # blackbox def for zmq_msg_t
+ ctypedef void * zmq_msg_t "zmq_msg_t"
+
+ ctypedef void zmq_free_fn(void *data, void *hint)
+
+ int zmq_msg_init (zmq_msg_t *msg)
+ int zmq_msg_init_size (zmq_msg_t *msg, size_t size)
+ int zmq_msg_init_data (zmq_msg_t *msg, void *data,
+ size_t size, zmq_free_fn *ffn, void *hint)
+ int zmq_msg_send (zmq_msg_t *msg, void *s, int flags)
+ int zmq_msg_recv (zmq_msg_t *msg, void *s, int flags)
+ int zmq_msg_close (zmq_msg_t *msg)
+ int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src)
+ int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src)
+ void *zmq_msg_data (zmq_msg_t *msg)
+ size_t zmq_msg_size (zmq_msg_t *msg)
+ int zmq_msg_more (zmq_msg_t *msg)
+ int zmq_msg_get (zmq_msg_t *msg, int option)
+ int zmq_msg_set (zmq_msg_t *msg, int option, int optval)
+ const_char_ptr zmq_msg_gets (zmq_msg_t *msg, const_char_ptr property)
+ int zmq_has (const_char_ptr capability)
+
+ void *zmq_socket (void *context, int type)
+ int zmq_close (void *s)
+ int zmq_setsockopt (void *s, int option, void *optval, size_t optvallen)
+ int zmq_getsockopt (void *s, int option, void *optval, size_t *optvallen)
+ int zmq_bind (void *s, char *addr)
+ int zmq_connect (void *s, char *addr)
+ int zmq_unbind (void *s, char *addr)
+ int zmq_disconnect (void *s, char *addr)
+
+ int zmq_socket_monitor (void *s, char *addr, int flags)
+
+ # send/recv
+ int zmq_sendbuf (void *s, const_void_ptr buf, size_t n, int flags)
+ int zmq_recvbuf (void *s, void *buf, size_t n, int flags)
+
+ ctypedef struct zmq_pollitem_t:
+ void *socket
+ int fd
+ short events
+ short revents
+
+ int zmq_poll (zmq_pollitem_t *items, int nitems, long timeout)
+
+ int zmq_device (int device_, void *insocket_, void *outsocket_)
+ int zmq_proxy (void *frontend, void *backend, void *capture)
+
+cdef extern from "zmq_utils.h" nogil:
+
+ void *zmq_stopwatch_start ()
+ unsigned long zmq_stopwatch_stop (void *watch_)
+ void zmq_sleep (int seconds_)
+ int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/message.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/message.cpython-34m.so
new file mode 100644
index 00000000..764a54fc
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/message.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/message.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/message.pxd
new file mode 100644
index 00000000..4781195f
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/message.pxd
@@ -0,0 +1,63 @@
+"""0MQ Message related class declarations."""
+
+#
+# Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley
+#
+# This file is part of pyzmq.
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from cpython cimport PyBytes_FromStringAndSize
+
+from libzmq cimport zmq_msg_t, zmq_msg_data, zmq_msg_size
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+cdef class MessageTracker(object):
+
+ cdef set events # Message Event objects to track.
+ cdef set peers # Other Message or MessageTracker objects.
+
+
+cdef class Frame:
+
+ cdef zmq_msg_t zmq_msg
+ cdef object _data # The actual message data as a Python object.
+ cdef object _buffer # A Python Buffer/View of the message contents
+ cdef object _bytes # A bytes/str copy of the message.
+ cdef bint _failed_init # Flag to handle failed zmq_msg_init
+ cdef public object tracker_event # Event for use with zmq_free_fn.
+ cdef public object tracker # MessageTracker object.
+ cdef public bint more # whether RCVMORE was set
+
+ cdef Frame fast_copy(self) # Create shallow copy of Message object.
+ cdef object _getbuffer(self) # Construct self._buffer.
+
+
+cdef inline object copy_zmq_msg_bytes(zmq_msg_t *zmq_msg):
+ """ Copy the data from a zmq_msg_t """
+ cdef char *data_c = NULL
+ cdef Py_ssize_t data_len_c
+ data_c = <char *>zmq_msg_data(zmq_msg)
+ data_len_c = zmq_msg_size(zmq_msg)
+ return PyBytes_FromStringAndSize(data_c, data_len_c)
+
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/socket.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/socket.cpython-34m.so
new file mode 100644
index 00000000..2d2e5508
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/socket.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/socket.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/socket.pxd
new file mode 100644
index 00000000..b8a331e2
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/socket.pxd
@@ -0,0 +1,47 @@
+"""0MQ Socket class declaration."""
+
+#
+# Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley
+#
+# This file is part of pyzmq.
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from context cimport Context
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+
+cdef class Socket:
+
+ cdef object __weakref__ # enable weakref
+ cdef void *handle # The C handle for the underlying zmq object.
+ cdef bint _shadow # whether the Socket is a shadow wrapper of another
+ # Hold on to a reference to the context to make sure it is not garbage
+    # collected until the socket is done with it.
+ cdef public Context context # The zmq Context object that owns this.
+ cdef public bint _closed # bool property for a closed socket.
+ cdef int _pid # the pid of the process which created me (for fork safety)
+
+ # cpdef methods for direct-cython access:
+ cpdef object send(self, object data, int flags=*, copy=*, track=*)
+ cpdef object recv(self, int flags=*, copy=*, track=*)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/utils.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/utils.cpython-34m.so
new file mode 100644
index 00000000..ffb7d75c
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/utils.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/utils.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/utils.pxd
new file mode 100644
index 00000000..1d7117f1
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/cython/utils.pxd
@@ -0,0 +1,29 @@
+"""Wrap zmq_utils.h"""
+
+#
+# Copyright (c) 2010 Brian E. Granger & Min Ragan-Kelley
+#
+# This file is part of pyzmq.
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+
+cdef class Stopwatch:
+ cdef void *watch # The C handle for the underlying zmq object
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/select.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/select.py
new file mode 100644
index 00000000..0a2e09a2
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/backend/select.py
@@ -0,0 +1,39 @@
+"""Import basic exposure of libzmq C API as a backend"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+public_api = [
+ 'Context',
+ 'Socket',
+ 'Frame',
+ 'Message',
+ 'Stopwatch',
+ 'device',
+ 'proxy',
+ 'zmq_poll',
+ 'strerror',
+ 'zmq_errno',
+ 'has',
+ 'curve_keypair',
+ 'constants',
+ 'zmq_version_info',
+ 'IPC_PATH_MAX_LEN',
+]
+
+def select_backend(name):
+ """Select the pyzmq backend"""
+ try:
+ mod = __import__(name, fromlist=public_api)
+ except ImportError:
+ raise
+ except Exception as e:
+ import sys
+ from zmq.utils.sixcerpt import reraise
+ exc_info = sys.exc_info()
+ reraise(ImportError, ImportError("Importing %s failed with %s" % (name, e)), exc_info[2])
+
+ ns = {}
+ for key in public_api:
+ ns[key] = getattr(mod, key)
+ return ns
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/__init__.py
new file mode 100644
index 00000000..23715963
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/__init__.py
@@ -0,0 +1,16 @@
+"""0MQ Device classes for running in background threads or processes."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from zmq import device
+from zmq.devices import basedevice, proxydevice, monitoredqueue, monitoredqueuedevice
+
+from zmq.devices.basedevice import *
+from zmq.devices.proxydevice import *
+from zmq.devices.monitoredqueue import *
+from zmq.devices.monitoredqueuedevice import *
+
+__all__ = ['device']
+for submod in (basedevice, proxydevice, monitoredqueue, monitoredqueuedevice):
+ __all__.extend(submod.__all__)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/basedevice.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/basedevice.py
new file mode 100644
index 00000000..7ba1b7ac
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/basedevice.py
@@ -0,0 +1,229 @@
+"""Classes for running 0MQ Devices in the background."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import time
+from threading import Thread
+from multiprocessing import Process
+
+from zmq import device, QUEUE, Context, ETERM, ZMQError
+
+
+class Device:
+ """A 0MQ Device to be run in the background.
+
+ You do not pass Socket instances to this, but rather Socket types::
+
+ Device(device_type, in_socket_type, out_socket_type)
+
+ For instance::
+
+ dev = Device(zmq.QUEUE, zmq.DEALER, zmq.ROUTER)
+
+ Similar to zmq.device, but socket types instead of sockets themselves are
+ passed, and the sockets are created in the work thread, to avoid issues
+ with thread safety. As a result, additional bind_{in|out} and
+ connect_{in|out} methods and setsockopt_{in|out} allow users to specify
+ connections for the sockets.
+
+ Parameters
+ ----------
+ device_type : int
+ The 0MQ Device type
+ {in|out}_type : int
+ zmq socket types, to be passed later to context.socket(). e.g.
+ zmq.PUB, zmq.SUB, zmq.REQ. If out_type is < 0, then in_socket is used
+ for both in_socket and out_socket.
+
+ Methods
+ -------
+ bind_{in_out}(iface)
+ passthrough for ``{in|out}_socket.bind(iface)``, to be called in the thread
+ connect_{in_out}(iface)
+ passthrough for ``{in|out}_socket.connect(iface)``, to be called in the
+ thread
+ setsockopt_{in_out}(opt,value)
+ passthrough for ``{in|out}_socket.setsockopt(opt, value)``, to be called in
+ the thread
+
+ Attributes
+ ----------
+ daemon : int
+ sets whether the thread should be run as a daemon
+ Default is true, because if it is false, the thread will not
+ exit unless it is killed
+ context_factory : callable (class attribute)
+ Function for creating the Context. This will be Context.instance
+ in ThreadDevices, and Context in ProcessDevices. The only reason
+ it is not instance() in ProcessDevices is that there may be a stale
+ Context instance already initialized, and the forked environment
+ should *never* try to use it.
+ """
+
+ context_factory = Context.instance
+ """Callable that returns a context. Typically either Context.instance or Context,
+ depending on whether the device should share the global instance or not.
+ """
+
+ def __init__(self, device_type=QUEUE, in_type=None, out_type=None):
+ self.device_type = device_type
+ if in_type is None:
+ raise TypeError("in_type must be specified")
+ if out_type is None:
+ raise TypeError("out_type must be specified")
+ self.in_type = in_type
+ self.out_type = out_type
+ self._in_binds = []
+ self._in_connects = []
+ self._in_sockopts = []
+ self._out_binds = []
+ self._out_connects = []
+ self._out_sockopts = []
+ self.daemon = True
+ self.done = False
+
+ def bind_in(self, addr):
+ """Enqueue ZMQ address for binding on in_socket.
+
+ See zmq.Socket.bind for details.
+ """
+ self._in_binds.append(addr)
+
+ def connect_in(self, addr):
+ """Enqueue ZMQ address for connecting on in_socket.
+
+ See zmq.Socket.connect for details.
+ """
+ self._in_connects.append(addr)
+
+ def setsockopt_in(self, opt, value):
+ """Enqueue setsockopt(opt, value) for in_socket
+
+ See zmq.Socket.setsockopt for details.
+ """
+ self._in_sockopts.append((opt, value))
+
+ def bind_out(self, addr):
+ """Enqueue ZMQ address for binding on out_socket.
+
+ See zmq.Socket.bind for details.
+ """
+ self._out_binds.append(addr)
+
+ def connect_out(self, addr):
+ """Enqueue ZMQ address for connecting on out_socket.
+
+ See zmq.Socket.connect for details.
+ """
+ self._out_connects.append(addr)
+
+ def setsockopt_out(self, opt, value):
+ """Enqueue setsockopt(opt, value) for out_socket
+
+ See zmq.Socket.setsockopt for details.
+ """
+ self._out_sockopts.append((opt, value))
+
+ def _setup_sockets(self):
+ ctx = self.context_factory()
+
+ self._context = ctx
+
+ # create the sockets
+ ins = ctx.socket(self.in_type)
+ if self.out_type < 0:
+ outs = ins
+ else:
+ outs = ctx.socket(self.out_type)
+
+ # set sockopts (must be done first, in case of zmq.IDENTITY)
+ for opt,value in self._in_sockopts:
+ ins.setsockopt(opt, value)
+ for opt,value in self._out_sockopts:
+ outs.setsockopt(opt, value)
+
+ for iface in self._in_binds:
+ ins.bind(iface)
+ for iface in self._out_binds:
+ outs.bind(iface)
+
+ for iface in self._in_connects:
+ ins.connect(iface)
+ for iface in self._out_connects:
+ outs.connect(iface)
+
+ return ins,outs
+
+ def run_device(self):
+ """The runner method.
+
+ Do not call me directly, instead call ``self.start()``, just like a Thread.
+ """
+ ins,outs = self._setup_sockets()
+ device(self.device_type, ins, outs)
+
+ def run(self):
+ """wrap run_device in try/catch ETERM"""
+ try:
+ self.run_device()
+ except ZMQError as e:
+ if e.errno == ETERM:
+ # silence TERM errors, because this should be a clean shutdown
+ pass
+ else:
+ raise
+ finally:
+ self.done = True
+
+ def start(self):
+ """Start the device. Override me in subclass for other launchers."""
+ return self.run()
+
+ def join(self,timeout=None):
+ """wait for me to finish, like Thread.join.
+
+ Reimplemented appropriately by subclasses."""
+ tic = time.time()
+ toc = tic
+ while not self.done and not (timeout is not None and toc-tic > timeout):
+ time.sleep(.001)
+ toc = time.time()
+
+
+class BackgroundDevice(Device):
+ """Base class for launching Devices in background processes and threads."""
+
+ launcher=None
+ _launch_class=None
+
+ def start(self):
+ self.launcher = self._launch_class(target=self.run)
+ self.launcher.daemon = self.daemon
+ return self.launcher.start()
+
+ def join(self, timeout=None):
+ return self.launcher.join(timeout=timeout)
+
+
+class ThreadDevice(BackgroundDevice):
+ """A Device that will be run in a background Thread.
+
+ See Device for details.
+ """
+ _launch_class=Thread
+
+class ProcessDevice(BackgroundDevice):
+ """A Device that will be run in a background Process.
+
+ See Device for details.
+ """
+ _launch_class=Process
+ context_factory = Context
+ """Callable that returns a context. Typically either Context.instance or Context,
+ depending on whether the device should share the global instance or not.
+ """
+
+
+__all__ = ['Device', 'ThreadDevice', 'ProcessDevice']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/monitoredqueue.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/monitoredqueue.cpython-34m.so
new file mode 100644
index 00000000..85f1c507
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/monitoredqueue.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/monitoredqueue.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/monitoredqueue.pxd
new file mode 100644
index 00000000..1e26ed86
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/monitoredqueue.pxd
@@ -0,0 +1,177 @@
+"""MonitoredQueue class declarations.
+
+Authors
+-------
+* MinRK
+* Brian Granger
+"""
+
+#
+# Copyright (c) 2010 Min Ragan-Kelley, Brian Granger
+#
+# This file is part of pyzmq, but is derived and adapted from zmq_queue.cpp
+# originally from libzmq-2.1.6, used under LGPLv3
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from libzmq cimport *
+
+#-----------------------------------------------------------------------------
+# MonitoredQueue C functions
+#-----------------------------------------------------------------------------
+
+cdef inline int _relay(void *insocket_, void *outsocket_, void *sidesocket_,
+ zmq_msg_t msg, zmq_msg_t side_msg, zmq_msg_t id_msg,
+ bint swap_ids) nogil:
+ cdef int rc
+ cdef int64_t flag_2
+ cdef int flag_3
+ cdef int flags
+ cdef bint more
+ cdef size_t flagsz
+ cdef void * flag_ptr
+
+ if ZMQ_VERSION_MAJOR < 3:
+ flagsz = sizeof (int64_t)
+ flag_ptr = &flag_2
+ else:
+ flagsz = sizeof (int)
+ flag_ptr = &flag_3
+
+ if swap_ids:# both router, must send second identity first
+ # recv two ids into msg, id_msg
+ rc = zmq_msg_recv(&msg, insocket_, 0)
+ if rc < 0: return rc
+
+ rc = zmq_msg_recv(&id_msg, insocket_, 0)
+ if rc < 0: return rc
+
+ # send second id (id_msg) first
+ #!!!! always send a copy before the original !!!!
+ rc = zmq_msg_copy(&side_msg, &id_msg)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&side_msg, outsocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&id_msg, sidesocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ # send first id (msg) second
+ rc = zmq_msg_copy(&side_msg, &msg)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&side_msg, outsocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&msg, sidesocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ while (True):
+ rc = zmq_msg_recv(&msg, insocket_, 0)
+ if rc < 0: return rc
+ # assert (rc == 0)
+ rc = zmq_getsockopt (insocket_, ZMQ_RCVMORE, flag_ptr, &flagsz)
+ if rc < 0: return rc
+ flags = 0
+ if ZMQ_VERSION_MAJOR < 3:
+ if flag_2:
+ flags |= ZMQ_SNDMORE
+ else:
+ if flag_3:
+ flags |= ZMQ_SNDMORE
+ # LABEL has been removed:
+ # rc = zmq_getsockopt (insocket_, ZMQ_RCVLABEL, flag_ptr, &flagsz)
+ # if flag_3:
+ # flags |= ZMQ_SNDLABEL
+ # assert (rc == 0)
+
+ rc = zmq_msg_copy(&side_msg, &msg)
+ if rc < 0: return rc
+ if flags:
+ rc = zmq_msg_send(&side_msg, outsocket_, flags)
+ if rc < 0: return rc
+ # only SNDMORE for side-socket
+ rc = zmq_msg_send(&msg, sidesocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ else:
+ rc = zmq_msg_send(&side_msg, outsocket_, 0)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&msg, sidesocket_, 0)
+ if rc < 0: return rc
+ break
+ return rc
+
+# the MonitoredQueue C function, adapted from zmq::queue.cpp :
+cdef inline int c_monitored_queue (void *insocket_, void *outsocket_,
+ void *sidesocket_, zmq_msg_t *in_msg_ptr,
+ zmq_msg_t *out_msg_ptr, int swap_ids) nogil:
+ """The actual C function for a monitored queue device.
+
+ See ``monitored_queue()`` for details.
+ """
+
+ cdef zmq_msg_t msg
+ cdef int rc = zmq_msg_init (&msg)
+ cdef zmq_msg_t id_msg
+ rc = zmq_msg_init (&id_msg)
+ if rc < 0: return rc
+ cdef zmq_msg_t side_msg
+ rc = zmq_msg_init (&side_msg)
+ if rc < 0: return rc
+
+ cdef zmq_pollitem_t items [2]
+ items [0].socket = insocket_
+ items [0].fd = 0
+ items [0].events = ZMQ_POLLIN
+ items [0].revents = 0
+ items [1].socket = outsocket_
+ items [1].fd = 0
+ items [1].events = ZMQ_POLLIN
+ items [1].revents = 0
+ # I don't think sidesocket should be polled?
+ # items [2].socket = sidesocket_
+ # items [2].fd = 0
+ # items [2].events = ZMQ_POLLIN
+ # items [2].revents = 0
+
+ while (True):
+
+ # // Wait while there are either requests or replies to process.
+ rc = zmq_poll (&items [0], 2, -1)
+ if rc < 0: return rc
+        # // The algorithm below assumes ratio of requests and replies processed
+ # // under full load to be 1:1. Although processing requests replies
+        # // first is tempting it is susceptible to DoS attacks (overloading
+ # // the system with unsolicited replies).
+ #
+ # // Process a request.
+ if (items [0].revents & ZMQ_POLLIN):
+ # send in_prefix to side socket
+ rc = zmq_msg_copy(&side_msg, in_msg_ptr)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&side_msg, sidesocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ # relay the rest of the message
+ rc = _relay(insocket_, outsocket_, sidesocket_, msg, side_msg, id_msg, swap_ids)
+ if rc < 0: return rc
+ if (items [1].revents & ZMQ_POLLIN):
+ # send out_prefix to side socket
+ rc = zmq_msg_copy(&side_msg, out_msg_ptr)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&side_msg, sidesocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ # relay the rest of the message
+ rc = _relay(outsocket_, insocket_, sidesocket_, msg, side_msg, id_msg, swap_ids)
+ if rc < 0: return rc
+ return rc
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/monitoredqueue.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/monitoredqueue.py
new file mode 100644
index 00000000..c6d91429
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/monitoredqueue.py
@@ -0,0 +1,37 @@
+"""pure Python monitored_queue function
+
+For use when Cython extension is unavailable (PyPy).
+
+Authors
+-------
+* MinRK
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import zmq
+
+def _relay(ins, outs, sides, prefix, swap_ids):
+ msg = ins.recv_multipart()
+ if swap_ids:
+ msg[:2] = msg[:2][::-1]
+ outs.send_multipart(msg)
+ sides.send_multipart([prefix] + msg)
+
+def monitored_queue(in_socket, out_socket, mon_socket,
+ in_prefix=b'in', out_prefix=b'out'):
+
+ swap_ids = in_socket.type == zmq.ROUTER and out_socket.type == zmq.ROUTER
+
+ poller = zmq.Poller()
+ poller.register(in_socket, zmq.POLLIN)
+ poller.register(out_socket, zmq.POLLIN)
+ while True:
+ events = dict(poller.poll())
+ if in_socket in events:
+ _relay(in_socket, out_socket, mon_socket, in_prefix, swap_ids)
+ if out_socket in events:
+ _relay(out_socket, in_socket, mon_socket, out_prefix, swap_ids)
+
+__all__ = ['monitored_queue']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/monitoredqueuedevice.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/monitoredqueuedevice.py
new file mode 100644
index 00000000..9723f866
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/monitoredqueuedevice.py
@@ -0,0 +1,66 @@
+"""MonitoredQueue classes and functions."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from zmq import ZMQError, PUB
+from zmq.devices.proxydevice import ProxyBase, Proxy, ThreadProxy, ProcessProxy
+from zmq.devices.monitoredqueue import monitored_queue
+
+
+class MonitoredQueueBase(ProxyBase):
+ """Base class for overriding methods."""
+
+ _in_prefix = b''
+ _out_prefix = b''
+
+ def __init__(self, in_type, out_type, mon_type=PUB, in_prefix=b'in', out_prefix=b'out'):
+
+ ProxyBase.__init__(self, in_type=in_type, out_type=out_type, mon_type=mon_type)
+
+ self._in_prefix = in_prefix
+ self._out_prefix = out_prefix
+
+ def run_device(self):
+ ins,outs,mons = self._setup_sockets()
+ monitored_queue(ins, outs, mons, self._in_prefix, self._out_prefix)
+
+
+class MonitoredQueue(MonitoredQueueBase, Proxy):
+ """Class for running monitored_queue in the background.
+
+ See zmq.devices.Device for most of the spec. MonitoredQueue differs from Proxy,
+ only in that it adds a ``prefix`` to messages sent on the monitor socket,
+ with a different prefix for each direction.
+
+ MQ also supports ROUTER on both sides, which zmq.proxy does not.
+
+ If a message arrives on `in_sock`, it will be prefixed with `in_prefix` on the monitor socket.
+ If it arrives on out_sock, it will be prefixed with `out_prefix`.
+
+ A PUB socket is the most logical choice for the mon_socket, but it is not required.
+ """
+ pass
+
+
+class ThreadMonitoredQueue(MonitoredQueueBase, ThreadProxy):
+ """Run zmq.monitored_queue in a background thread.
+
+ See MonitoredQueue and Proxy for details.
+ """
+ pass
+
+
+class ProcessMonitoredQueue(MonitoredQueueBase, ProcessProxy):
+    """Run zmq.monitored_queue in a background process.
+
+ See MonitoredQueue and Proxy for details.
+ """
+
+
+__all__ = [
+ 'MonitoredQueue',
+ 'ThreadMonitoredQueue',
+ 'ProcessMonitoredQueue'
+]
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/proxydevice.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/proxydevice.py
new file mode 100644
index 00000000..68be3f15
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/devices/proxydevice.py
@@ -0,0 +1,90 @@
+"""Proxy classes and functions."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import zmq
+from zmq.devices.basedevice import Device, ThreadDevice, ProcessDevice
+
+
+class ProxyBase(object):
+ """Base class for overriding methods."""
+
+ def __init__(self, in_type, out_type, mon_type=zmq.PUB):
+
+ Device.__init__(self, in_type=in_type, out_type=out_type)
+ self.mon_type = mon_type
+ self._mon_binds = []
+ self._mon_connects = []
+ self._mon_sockopts = []
+
+ def bind_mon(self, addr):
+ """Enqueue ZMQ address for binding on mon_socket.
+
+ See zmq.Socket.bind for details.
+ """
+ self._mon_binds.append(addr)
+
+ def connect_mon(self, addr):
+ """Enqueue ZMQ address for connecting on mon_socket.
+
+        See zmq.Socket.connect for details.
+ """
+ self._mon_connects.append(addr)
+
+ def setsockopt_mon(self, opt, value):
+ """Enqueue setsockopt(opt, value) for mon_socket
+
+ See zmq.Socket.setsockopt for details.
+ """
+ self._mon_sockopts.append((opt, value))
+
+ def _setup_sockets(self):
+ ins,outs = Device._setup_sockets(self)
+ ctx = self._context
+ mons = ctx.socket(self.mon_type)
+
+ # set sockopts (must be done first, in case of zmq.IDENTITY)
+ for opt,value in self._mon_sockopts:
+ mons.setsockopt(opt, value)
+
+ for iface in self._mon_binds:
+ mons.bind(iface)
+
+ for iface in self._mon_connects:
+ mons.connect(iface)
+
+ return ins,outs,mons
+
+ def run_device(self):
+ ins,outs,mons = self._setup_sockets()
+ zmq.proxy(ins, outs, mons)
+
+class Proxy(ProxyBase, Device):
+ """Threadsafe Proxy object.
+
+ See zmq.devices.Device for most of the spec. This subclass adds a
+ <method>_mon version of each <method>_{in|out} method, for configuring the
+ monitor socket.
+
+ A Proxy is a 3-socket ZMQ Device that functions just like a
+ QUEUE, except each message is also sent out on the monitor socket.
+
+ A PUB socket is the most logical choice for the mon_socket, but it is not required.
+ """
+ pass
+
+class ThreadProxy(ProxyBase, ThreadDevice):
+ """Proxy in a Thread. See Proxy for more."""
+ pass
+
+class ProcessProxy(ProxyBase, ProcessDevice):
+ """Proxy in a Process. See Proxy for more."""
+ pass
+
+
+__all__ = [
+ 'Proxy',
+ 'ThreadProxy',
+ 'ProcessProxy',
+]
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/error.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/error.py
new file mode 100644
index 00000000..48cdaafa
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/error.py
@@ -0,0 +1,164 @@
+"""0MQ Error classes and functions."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+class ZMQBaseError(Exception):
+ """Base exception class for 0MQ errors in Python."""
+ pass
+
+class ZMQError(ZMQBaseError):
+ """Wrap an errno style error.
+
+ Parameters
+ ----------
+ errno : int
+ The ZMQ errno or None. If None, then ``zmq_errno()`` is called and
+ used.
+ msg : string
+ Description of the error or None.
+ """
+ errno = None
+
+ def __init__(self, errno=None, msg=None):
+ """Wrap an errno style error.
+
+ Parameters
+ ----------
+ errno : int
+ The ZMQ errno or None. If None, then ``zmq_errno()`` is called and
+ used.
+ msg : string
+ Description of the error or None.
+ """
+ from zmq.backend import strerror, zmq_errno
+ if errno is None:
+ errno = zmq_errno()
+ if isinstance(errno, int):
+ self.errno = errno
+ if msg is None:
+ self.strerror = strerror(errno)
+ else:
+ self.strerror = msg
+ else:
+ if msg is None:
+ self.strerror = str(errno)
+ else:
+ self.strerror = msg
+ # flush signals, because there could be a SIGINT
+ # waiting to pounce, resulting in uncaught exceptions.
+ # Doing this here means getting SIGINT during a blocking
+ # libzmq call will raise a *catchable* KeyboardInterrupt
+ # PyErr_CheckSignals()
+
+ def __str__(self):
+ return self.strerror
+
+ def __repr__(self):
+ return "ZMQError('%s')"%self.strerror
+
+
+class ZMQBindError(ZMQBaseError):
+ """An error for ``Socket.bind_to_random_port()``.
+
+ See Also
+ --------
+ .Socket.bind_to_random_port
+ """
+ pass
+
+
+class NotDone(ZMQBaseError):
+ """Raised when timeout is reached while waiting for 0MQ to finish with a Message
+
+ See Also
+ --------
+ .MessageTracker.wait : object for tracking when ZeroMQ is done
+ """
+ pass
+
+
+class ContextTerminated(ZMQError):
+ """Wrapper for zmq.ETERM
+
+ .. versionadded:: 13.0
+ """
+ pass
+
+
+class Again(ZMQError):
+ """Wrapper for zmq.EAGAIN
+
+ .. versionadded:: 13.0
+ """
+ pass
+
+
+def _check_rc(rc, errno=None):
+ """internal utility for checking zmq return condition
+
+ and raising the appropriate Exception class
+ """
+ if rc < 0:
+ from zmq.backend import zmq_errno
+ if errno is None:
+ errno = zmq_errno()
+ from zmq import EAGAIN, ETERM
+ if errno == EAGAIN:
+ raise Again(errno)
+ elif errno == ETERM:
+ raise ContextTerminated(errno)
+ else:
+ raise ZMQError(errno)
+
+_zmq_version_info = None
+_zmq_version = None
+
+class ZMQVersionError(NotImplementedError):
+ """Raised when a feature is not provided by the linked version of libzmq.
+
+ .. versionadded:: 14.2
+ """
+ min_version = None
+ def __init__(self, min_version, msg='Feature'):
+ global _zmq_version
+ if _zmq_version is None:
+ from zmq import zmq_version
+ _zmq_version = zmq_version()
+ self.msg = msg
+ self.min_version = min_version
+ self.version = _zmq_version
+
+ def __repr__(self):
+ return "ZMQVersionError('%s')" % str(self)
+
+ def __str__(self):
+ return "%s requires libzmq >= %s, have %s" % (self.msg, self.min_version, self.version)
+
+
+def _check_version(min_version_info, msg='Feature'):
+ """Check for libzmq
+
+ raises ZMQVersionError if current zmq version is not at least min_version
+
+ min_version_info is a tuple of integers, and will be compared against zmq.zmq_version_info().
+ """
+ global _zmq_version_info
+ if _zmq_version_info is None:
+ from zmq import zmq_version_info
+ _zmq_version_info = zmq_version_info()
+ if _zmq_version_info < min_version_info:
+ min_version = '.'.join(str(v) for v in min_version_info)
+ raise ZMQVersionError(min_version, msg)
+
+
+__all__ = [
+ 'ZMQBaseError',
+ 'ZMQBindError',
+ 'ZMQError',
+ 'NotDone',
+ 'ContextTerminated',
+ 'Again',
+ 'ZMQVersionError',
+]
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/__init__.py
new file mode 100644
index 00000000..568e8e8d
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/__init__.py
@@ -0,0 +1,5 @@
+"""A Tornado based event loop for PyZMQ."""
+
+from zmq.eventloop.ioloop import IOLoop
+
+__all__ = ['IOLoop'] \ No newline at end of file
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/ioloop.py
new file mode 100644
index 00000000..35f4c418
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/ioloop.py
@@ -0,0 +1,193 @@
+# coding: utf-8
+"""tornado IOLoop API with zmq compatibility
+
+If you have tornado ≥ 3.0, this is a subclass of tornado's IOLoop,
+otherwise we ship a minimal subset of tornado in zmq.eventloop.minitornado.
+
+The minimal shipped version of tornado's IOLoop does not include
+support for concurrent futures - this will only be available if you
+have tornado ≥ 3.0.
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import absolute_import, division, with_statement
+
+import os
+import time
+import warnings
+
+from zmq import (
+ Poller,
+ POLLIN, POLLOUT, POLLERR,
+ ZMQError, ETERM,
+)
+
+try:
+ import tornado
+ tornado_version = tornado.version_info
+except (ImportError, AttributeError):
+ tornado_version = ()
+
+try:
+ # tornado ≥ 3
+ from tornado.ioloop import PollIOLoop, PeriodicCallback
+ from tornado.log import gen_log
+except ImportError:
+ from .minitornado.ioloop import PollIOLoop, PeriodicCallback
+ from .minitornado.log import gen_log
+
+
+class DelayedCallback(PeriodicCallback):
+ """Schedules the given callback to be called once.
+
+ The callback is called once, after callback_time milliseconds.
+
+ `start` must be called after the DelayedCallback is created.
+
+ The timeout is calculated from when `start` is called.
+ """
+ def __init__(self, callback, callback_time, io_loop=None):
+ # PeriodicCallback requires callback_time to be positive
+ warnings.warn("""DelayedCallback is deprecated.
+ Use loop.add_timeout instead.""", DeprecationWarning)
+ callback_time = max(callback_time, 1e-3)
+ super(DelayedCallback, self).__init__(callback, callback_time, io_loop)
+
+ def start(self):
+ """Starts the timer."""
+ self._running = True
+ self._firstrun = True
+ self._next_timeout = time.time() + self.callback_time / 1000.0
+ self.io_loop.add_timeout(self._next_timeout, self._run)
+
+ def _run(self):
+ if not self._running: return
+ self._running = False
+ try:
+ self.callback()
+ except Exception:
+ gen_log.error("Error in delayed callback", exc_info=True)
+
+
+class ZMQPoller(object):
+ """A poller that can be used in the tornado IOLoop.
+
+ This simply wraps a regular zmq.Poller, scaling the timeout
+ by 1000, so that it is in seconds rather than milliseconds.
+ """
+
+ def __init__(self):
+ self._poller = Poller()
+
+ @staticmethod
+ def _map_events(events):
+ """translate IOLoop.READ/WRITE/ERROR event masks into zmq.POLLIN/OUT/ERR"""
+ z_events = 0
+ if events & IOLoop.READ:
+ z_events |= POLLIN
+ if events & IOLoop.WRITE:
+ z_events |= POLLOUT
+ if events & IOLoop.ERROR:
+ z_events |= POLLERR
+ return z_events
+
+ @staticmethod
+ def _remap_events(z_events):
+ """translate zmq.POLLIN/OUT/ERR event masks into IOLoop.READ/WRITE/ERROR"""
+ events = 0
+ if z_events & POLLIN:
+ events |= IOLoop.READ
+ if z_events & POLLOUT:
+ events |= IOLoop.WRITE
+ if z_events & POLLERR:
+ events |= IOLoop.ERROR
+ return events
+
+ def register(self, fd, events):
+ return self._poller.register(fd, self._map_events(events))
+
+ def modify(self, fd, events):
+ return self._poller.modify(fd, self._map_events(events))
+
+ def unregister(self, fd):
+ return self._poller.unregister(fd)
+
+ def poll(self, timeout):
+ """poll in seconds rather than milliseconds.
+
+ Event masks will be IOLoop.READ/WRITE/ERROR
+ """
+ z_events = self._poller.poll(1000*timeout)
+ return [ (fd,self._remap_events(evt)) for (fd,evt) in z_events ]
+
+ def close(self):
+ pass
+
+
+class ZMQIOLoop(PollIOLoop):
+ """ZMQ subclass of tornado's IOLoop"""
+ def initialize(self, impl=None, **kwargs):
+ impl = ZMQPoller() if impl is None else impl
+ super(ZMQIOLoop, self).initialize(impl=impl, **kwargs)
+
+ @staticmethod
+ def instance():
+ """Returns a global `IOLoop` instance.
+
+ Most applications have a single, global `IOLoop` running on the
+ main thread. Use this method to get this instance from
+ another thread. To get the current thread's `IOLoop`, use `current()`.
+ """
+ # install ZMQIOLoop as the active IOLoop implementation
+ # when using tornado 3
+ if tornado_version >= (3,):
+ PollIOLoop.configure(ZMQIOLoop)
+ return PollIOLoop.instance()
+
+ def start(self):
+ try:
+ super(ZMQIOLoop, self).start()
+ except ZMQError as e:
+ if e.errno == ETERM:
+ # quietly return on ETERM
+ pass
+ else:
+ raise e
+
+
+if tornado_version >= (3,0) and tornado_version < (3,1):
+ def backport_close(self, all_fds=False):
+ """backport IOLoop.close to 3.0 from 3.1 (supports fd.close() method)"""
+ from zmq.eventloop.minitornado.ioloop import PollIOLoop as mini_loop
+ return mini_loop.close.__get__(self)(all_fds)
+ ZMQIOLoop.close = backport_close
+
+
+# public API name
+IOLoop = ZMQIOLoop
+
+
+def install():
+ """set the tornado IOLoop instance with the pyzmq IOLoop.
+
+ After calling this function, tornado's IOLoop.instance() and pyzmq's
+ IOLoop.instance() will return the same object.
+
+ An assertion error will be raised if tornado's IOLoop has been initialized
+ prior to calling this function.
+ """
+ from tornado import ioloop
+ # check if tornado's IOLoop is already initialized to something other
+ # than the pyzmq IOLoop instance:
+ assert (not ioloop.IOLoop.initialized()) or \
+ ioloop.IOLoop.instance() is IOLoop.instance(), "tornado IOLoop already initialized"
+
+ if tornado_version >= (3,):
+ # tornado 3 has an official API for registering new defaults, yay!
+ ioloop.IOLoop.configure(ZMQIOLoop)
+ else:
+ # we have to set the global instance explicitly
+ ioloop.IOLoop._instance = IOLoop.instance()
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/__init__.py
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/concurrent.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/concurrent.py
new file mode 100644
index 00000000..519b23d5
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/concurrent.py
@@ -0,0 +1,11 @@
+"""pyzmq does not ship tornado's futures,
+this just raises informative NotImplementedErrors to avoid having to change too much code.
+"""
+
+class NotImplementedFuture(object):
+ def __init__(self, *args, **kwargs):
+ raise NotImplementedError("pyzmq does not ship tornado's Futures, "
+ "install tornado >= 3.0 for future support."
+ )
+
+Future = TracebackFuture = NotImplementedFuture
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/ioloop.py
new file mode 100644
index 00000000..710a3ecb
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/ioloop.py
@@ -0,0 +1,829 @@
+#!/usr/bin/env python
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""An I/O event loop for non-blocking sockets.
+
+Typical applications will use a single `IOLoop` object, in the
+`IOLoop.instance` singleton. The `IOLoop.start` method should usually
+be called at the end of the ``main()`` function. Atypical applications may
+use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
+case.
+
+In addition to I/O events, the `IOLoop` can also schedule time-based events.
+`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
+"""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+import datetime
+import errno
+import functools
+import heapq
+import logging
+import numbers
+import os
+import select
+import sys
+import threading
+import time
+import traceback
+
+from .concurrent import Future, TracebackFuture
+from .log import app_log, gen_log
+from . import stack_context
+from .util import Configurable
+
+try:
+ import signal
+except ImportError:
+ signal = None
+
+try:
+ import thread # py2
+except ImportError:
+ import _thread as thread # py3
+
+from .platform.auto import set_close_exec, Waker
+
+
+class TimeoutError(Exception):
+ pass
+
+
+class IOLoop(Configurable):
+ """A level-triggered I/O loop.
+
+ We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
+ are available, or else we fall back on select(). If you are
+ implementing a system that needs to handle thousands of
+ simultaneous connections, you should use a system that supports
+ either ``epoll`` or ``kqueue``.
+
+ Example usage for a simple TCP server::
+
+ import errno
+ import functools
+ import ioloop
+ import socket
+
+ def connection_ready(sock, fd, events):
+ while True:
+ try:
+ connection, address = sock.accept()
+ except socket.error, e:
+ if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
+ raise
+ return
+ connection.setblocking(0)
+ handle_connection(connection, address)
+
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.setblocking(0)
+ sock.bind(("", port))
+ sock.listen(128)
+
+ io_loop = ioloop.IOLoop.instance()
+ callback = functools.partial(connection_ready, sock)
+ io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
+ io_loop.start()
+
+ """
+ # Constants from the epoll module
+ _EPOLLIN = 0x001
+ _EPOLLPRI = 0x002
+ _EPOLLOUT = 0x004
+ _EPOLLERR = 0x008
+ _EPOLLHUP = 0x010
+ _EPOLLRDHUP = 0x2000
+ _EPOLLONESHOT = (1 << 30)
+ _EPOLLET = (1 << 31)
+
+ # Our events map exactly to the epoll events
+ NONE = 0
+ READ = _EPOLLIN
+ WRITE = _EPOLLOUT
+ ERROR = _EPOLLERR | _EPOLLHUP
+
+ # Global lock for creating global IOLoop instance
+ _instance_lock = threading.Lock()
+
+ _current = threading.local()
+
+ @staticmethod
+ def instance():
+ """Returns a global `IOLoop` instance.
+
+ Most applications have a single, global `IOLoop` running on the
+ main thread. Use this method to get this instance from
+ another thread. To get the current thread's `IOLoop`, use `current()`.
+ """
+ if not hasattr(IOLoop, "_instance"):
+ with IOLoop._instance_lock:
+ if not hasattr(IOLoop, "_instance"):
+ # New instance after double check
+ IOLoop._instance = IOLoop()
+ return IOLoop._instance
+
+ @staticmethod
+ def initialized():
+ """Returns true if the singleton instance has been created."""
+ return hasattr(IOLoop, "_instance")
+
+ def install(self):
+ """Installs this `IOLoop` object as the singleton instance.
+
+ This is normally not necessary as `instance()` will create
+ an `IOLoop` on demand, but you may want to call `install` to use
+ a custom subclass of `IOLoop`.
+ """
+ assert not IOLoop.initialized()
+ IOLoop._instance = self
+
+ @staticmethod
+ def current():
+ """Returns the current thread's `IOLoop`.
+
+ If an `IOLoop` is currently running or has been marked as current
+ by `make_current`, returns that instance. Otherwise returns
+ `IOLoop.instance()`, i.e. the main thread's `IOLoop`.
+
+ A common pattern for classes that depend on ``IOLoops`` is to use
+ a default argument to enable programs with multiple ``IOLoops``
+ but not require the argument for simpler applications::
+
+ class MyClass(object):
+ def __init__(self, io_loop=None):
+ self.io_loop = io_loop or IOLoop.current()
+
+ In general you should use `IOLoop.current` as the default when
+ constructing an asynchronous object, and use `IOLoop.instance`
+ when you mean to communicate to the main thread from a different
+ one.
+ """
+ current = getattr(IOLoop._current, "instance", None)
+ if current is None:
+ return IOLoop.instance()
+ return current
+
+ def make_current(self):
+ """Makes this the `IOLoop` for the current thread.
+
+ An `IOLoop` automatically becomes current for its thread
+ when it is started, but it is sometimes useful to call
+ `make_current` explicitly before starting the `IOLoop`,
+ so that code run at startup time can find the right
+ instance.
+ """
+ IOLoop._current.instance = self
+
+ @staticmethod
+ def clear_current():
+ IOLoop._current.instance = None
+
+ @classmethod
+ def configurable_base(cls):
+ return IOLoop
+
+ @classmethod
+ def configurable_default(cls):
+ # this is the only patch to IOLoop:
+ from zmq.eventloop.ioloop import ZMQIOLoop
+ return ZMQIOLoop
+ # the remainder of this method is unused,
+ # but left for preservation reasons
+ if hasattr(select, "epoll"):
+ from tornado.platform.epoll import EPollIOLoop
+ return EPollIOLoop
+ if hasattr(select, "kqueue"):
+ # Python 2.6+ on BSD or Mac
+ from tornado.platform.kqueue import KQueueIOLoop
+ return KQueueIOLoop
+ from tornado.platform.select import SelectIOLoop
+ return SelectIOLoop
+
+ def initialize(self):
+ pass
+
+ def close(self, all_fds=False):
+ """Closes the `IOLoop`, freeing any resources used.
+
+ If ``all_fds`` is true, all file descriptors registered on the
+ IOLoop will be closed (not just the ones created by the
+ `IOLoop` itself).
+
+ Many applications will only use a single `IOLoop` that runs for the
+ entire lifetime of the process. In that case closing the `IOLoop`
+ is not necessary since everything will be cleaned up when the
+ process exits. `IOLoop.close` is provided mainly for scenarios
+ such as unit tests, which create and destroy a large number of
+ ``IOLoops``.
+
+ An `IOLoop` must be completely stopped before it can be closed. This
+ means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
+ be allowed to return before attempting to call `IOLoop.close()`.
+ Therefore the call to `close` will usually appear just after
+ the call to `start` rather than near the call to `stop`.
+
+ .. versionchanged:: 3.1
+ If the `IOLoop` implementation supports non-integer objects
+ for "file descriptors", those objects will have their
+ ``close`` method called when ``all_fds`` is true.
+ """
+ raise NotImplementedError()
+
+ def add_handler(self, fd, handler, events):
+ """Registers the given handler to receive the given events for fd.
+
+ The ``events`` argument is a bitwise or of the constants
+ ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
+
+ When an event occurs, ``handler(fd, events)`` will be run.
+ """
+ raise NotImplementedError()
+
+ def update_handler(self, fd, events):
+ """Changes the events we listen for on fd."""
+ raise NotImplementedError()
+
+ def remove_handler(self, fd):
+ """Stop listening for events on fd."""
+ raise NotImplementedError()
+
+ def set_blocking_signal_threshold(self, seconds, action):
+ """Sends a signal if the `IOLoop` is blocked for more than
+ ``seconds`` seconds.
+
+ Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
+ platform.
+
+ The action parameter is a Python signal handler. Read the
+ documentation for the `signal` module for more information.
+ If ``action`` is None, the process will be killed if it is
+ blocked for too long.
+ """
+ raise NotImplementedError()
+
+ def set_blocking_log_threshold(self, seconds):
+ """Logs a stack trace if the `IOLoop` is blocked for more than
+ ``s`` seconds.
+
+ Equivalent to ``set_blocking_signal_threshold(seconds,
+ self.log_stack)``
+ """
+ self.set_blocking_signal_threshold(seconds, self.log_stack)
+
+ def log_stack(self, signal, frame):
+ """Signal handler to log the stack trace of the current thread.
+
+ For use with `set_blocking_signal_threshold`.
+ """
+ gen_log.warning('IOLoop blocked for %f seconds in\n%s',
+ self._blocking_signal_threshold,
+ ''.join(traceback.format_stack(frame)))
+
+ def start(self):
+ """Starts the I/O loop.
+
+ The loop will run until one of the callbacks calls `stop()`, which
+ will make the loop stop after the current event iteration completes.
+ """
+ raise NotImplementedError()
+
+ def stop(self):
+ """Stop the I/O loop.
+
+ If the event loop is not currently running, the next call to `start()`
+ will return immediately.
+
+ To use asynchronous methods from otherwise-synchronous code (such as
+ unit tests), you can start and stop the event loop like this::
+
+ ioloop = IOLoop()
+ async_method(ioloop=ioloop, callback=ioloop.stop)
+ ioloop.start()
+
+ ``ioloop.start()`` will return after ``async_method`` has run
+ its callback, whether that callback was invoked before or
+ after ``ioloop.start``.
+
+ Note that even after `stop` has been called, the `IOLoop` is not
+ completely stopped until `IOLoop.start` has also returned.
+ Some work that was scheduled before the call to `stop` may still
+ be run before the `IOLoop` shuts down.
+ """
+ raise NotImplementedError()
+
+ def run_sync(self, func, timeout=None):
+ """Starts the `IOLoop`, runs the given function, and stops the loop.
+
+ If the function returns a `.Future`, the `IOLoop` will run
+ until the future is resolved. If it raises an exception, the
+ `IOLoop` will stop and the exception will be re-raised to the
+ caller.
+
+ The keyword-only argument ``timeout`` may be used to set
+ a maximum duration for the function. If the timeout expires,
+ a `TimeoutError` is raised.
+
+ This method is useful in conjunction with `tornado.gen.coroutine`
+ to allow asynchronous calls in a ``main()`` function::
+
+ @gen.coroutine
+ def main():
+ # do stuff...
+
+ if __name__ == '__main__':
+ IOLoop.instance().run_sync(main)
+ """
+ future_cell = [None]
+
+ def run():
+ try:
+ result = func()
+ except Exception:
+ future_cell[0] = TracebackFuture()
+ future_cell[0].set_exc_info(sys.exc_info())
+ else:
+ if isinstance(result, Future):
+ future_cell[0] = result
+ else:
+ future_cell[0] = Future()
+ future_cell[0].set_result(result)
+ self.add_future(future_cell[0], lambda future: self.stop())
+ self.add_callback(run)
+ if timeout is not None:
+ timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
+ self.start()
+ if timeout is not None:
+ self.remove_timeout(timeout_handle)
+ if not future_cell[0].done():
+ raise TimeoutError('Operation timed out after %s seconds' % timeout)
+ return future_cell[0].result()
+
+ def time(self):
+ """Returns the current time according to the `IOLoop`'s clock.
+
+ The return value is a floating-point number relative to an
+ unspecified time in the past.
+
+ By default, the `IOLoop`'s time function is `time.time`. However,
+ it may be configured to use e.g. `time.monotonic` instead.
+ Calls to `add_timeout` that pass a number instead of a
+ `datetime.timedelta` should use this function to compute the
+ appropriate time, so they can work no matter what time function
+ is chosen.
+ """
+ return time.time()
+
+ def add_timeout(self, deadline, callback):
+ """Runs the ``callback`` at the time ``deadline`` from the I/O loop.
+
+ Returns an opaque handle that may be passed to
+ `remove_timeout` to cancel.
+
+ ``deadline`` may be a number denoting a time (on the same
+ scale as `IOLoop.time`, normally `time.time`), or a
+ `datetime.timedelta` object for a deadline relative to the
+ current time.
+
+ Note that it is not safe to call `add_timeout` from other threads.
+ Instead, you must use `add_callback` to transfer control to the
+ `IOLoop`'s thread, and then call `add_timeout` from there.
+ """
+ raise NotImplementedError()
+
+ def remove_timeout(self, timeout):
+ """Cancels a pending timeout.
+
+ The argument is a handle as returned by `add_timeout`. It is
+ safe to call `remove_timeout` even if the callback has already
+ been run.
+ """
+ raise NotImplementedError()
+
+ def add_callback(self, callback, *args, **kwargs):
+ """Calls the given callback on the next I/O loop iteration.
+
+ It is safe to call this method from any thread at any time,
+ except from a signal handler. Note that this is the **only**
+ method in `IOLoop` that makes this thread-safety guarantee; all
+ other interaction with the `IOLoop` must be done from that
+ `IOLoop`'s thread. `add_callback()` may be used to transfer
+ control from other threads to the `IOLoop`'s thread.
+
+ To add a callback from a signal handler, see
+ `add_callback_from_signal`.
+ """
+ raise NotImplementedError()
+
+ def add_callback_from_signal(self, callback, *args, **kwargs):
+ """Calls the given callback on the next I/O loop iteration.
+
+ Safe for use from a Python signal handler; should not be used
+ otherwise.
+
+ Callbacks added with this method will be run without any
+ `.stack_context`, to avoid picking up the context of the function
+ that was interrupted by the signal.
+ """
+ raise NotImplementedError()
+
+ def add_future(self, future, callback):
+ """Schedules a callback on the ``IOLoop`` when the given
+ `.Future` is finished.
+
+ The callback is invoked with one argument, the
+ `.Future`.
+ """
+ assert isinstance(future, Future)
+ callback = stack_context.wrap(callback)
+ future.add_done_callback(
+ lambda future: self.add_callback(callback, future))
+
+ def _run_callback(self, callback):
+ """Runs a callback with error handling.
+
+ For use in subclasses.
+ """
+ try:
+ callback()
+ except Exception:
+ self.handle_callback_exception(callback)
+
+ def handle_callback_exception(self, callback):
+ """This method is called whenever a callback run by the `IOLoop`
+ throws an exception.
+
+ By default simply logs the exception as an error. Subclasses
+ may override this method to customize reporting of exceptions.
+
+ The exception itself is not passed explicitly, but is available
+ in `sys.exc_info`.
+ """
+ app_log.error("Exception in callback %r", callback, exc_info=True)
+
+
+class PollIOLoop(IOLoop):
+ """Base class for IOLoops built around a select-like function.
+
+ For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
+ (Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
+ `tornado.platform.select.SelectIOLoop` (all platforms).
+ """
+ def initialize(self, impl, time_func=None):
+ super(PollIOLoop, self).initialize()
+ self._impl = impl
+ if hasattr(self._impl, 'fileno'):
+ set_close_exec(self._impl.fileno())
+ self.time_func = time_func or time.time
+ self._handlers = {}
+ self._events = {}
+ self._callbacks = []
+ self._callback_lock = threading.Lock()
+ self._timeouts = []
+ self._cancellations = 0
+ self._running = False
+ self._stopped = False
+ self._closing = False
+ self._thread_ident = None
+ self._blocking_signal_threshold = None
+
+ # Create a pipe that we send bogus data to when we want to wake
+ # the I/O loop when it is idle
+ self._waker = Waker()
+ self.add_handler(self._waker.fileno(),
+ lambda fd, events: self._waker.consume(),
+ self.READ)
+
+ def close(self, all_fds=False):
+ with self._callback_lock:
+ self._closing = True
+ self.remove_handler(self._waker.fileno())
+ if all_fds:
+ for fd in self._handlers.keys():
+ try:
+ close_method = getattr(fd, 'close', None)
+ if close_method is not None:
+ close_method()
+ else:
+ os.close(fd)
+ except Exception:
+ gen_log.debug("error closing fd %s", fd, exc_info=True)
+ self._waker.close()
+ self._impl.close()
+
+ def add_handler(self, fd, handler, events):
+ self._handlers[fd] = stack_context.wrap(handler)
+ self._impl.register(fd, events | self.ERROR)
+
+ def update_handler(self, fd, events):
+ self._impl.modify(fd, events | self.ERROR)
+
+ def remove_handler(self, fd):
+ self._handlers.pop(fd, None)
+ self._events.pop(fd, None)
+ try:
+ self._impl.unregister(fd)
+ except Exception:
+ gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
+
+ def set_blocking_signal_threshold(self, seconds, action):
+ if not hasattr(signal, "setitimer"):
+ gen_log.error("set_blocking_signal_threshold requires a signal module "
+ "with the setitimer method")
+ return
+ self._blocking_signal_threshold = seconds
+ if seconds is not None:
+ signal.signal(signal.SIGALRM,
+ action if action is not None else signal.SIG_DFL)
+
+ def start(self):
+ if not logging.getLogger().handlers:
+ # The IOLoop catches and logs exceptions, so it's
+ # important that log output be visible. However, python's
+ # default behavior for non-root loggers (prior to python
+ # 3.2) is to print an unhelpful "no handlers could be
+ # found" message rather than the actual log entry, so we
+ # must explicitly configure logging if we've made it this
+ # far without anything.
+ logging.basicConfig()
+ if self._stopped:
+ self._stopped = False
+ return
+ old_current = getattr(IOLoop._current, "instance", None)
+ IOLoop._current.instance = self
+ self._thread_ident = thread.get_ident()
+ self._running = True
+
+ # signal.set_wakeup_fd closes a race condition in event loops:
+ # a signal may arrive at the beginning of select/poll/etc
+ # before it goes into its interruptible sleep, so the signal
+ # will be consumed without waking the select. The solution is
+ # for the (C, synchronous) signal handler to write to a pipe,
+ # which will then be seen by select.
+ #
+ # In python's signal handling semantics, this only matters on the
+ # main thread (fortunately, set_wakeup_fd only works on the main
+ # thread and will raise a ValueError otherwise).
+ #
+ # If someone has already set a wakeup fd, we don't want to
+ # disturb it. This is an issue for twisted, which does its
+ # SIGCHILD processing in response to its own wakeup fd being
+ # written to. As long as the wakeup fd is registered on the IOLoop,
+ # the loop will still wake up and everything should work.
+ old_wakeup_fd = None
+ if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
+ # requires python 2.6+, unix. set_wakeup_fd exists but crashes
+ # the python process on windows.
+ try:
+ old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
+ if old_wakeup_fd != -1:
+ # Already set, restore previous value. This is a little racy,
+ # but there's no clean get_wakeup_fd and in real use the
+ # IOLoop is just started once at the beginning.
+ signal.set_wakeup_fd(old_wakeup_fd)
+ old_wakeup_fd = None
+ except ValueError: # non-main thread
+ pass
+
+ while True:
+ poll_timeout = 3600.0
+
+ # Prevent IO event starvation by delaying new callbacks
+ # to the next iteration of the event loop.
+ with self._callback_lock:
+ callbacks = self._callbacks
+ self._callbacks = []
+ for callback in callbacks:
+ self._run_callback(callback)
+
+ if self._timeouts:
+ now = self.time()
+ while self._timeouts:
+ if self._timeouts[0].callback is None:
+ # the timeout was cancelled
+ heapq.heappop(self._timeouts)
+ self._cancellations -= 1
+ elif self._timeouts[0].deadline <= now:
+ timeout = heapq.heappop(self._timeouts)
+ self._run_callback(timeout.callback)
+ else:
+ seconds = self._timeouts[0].deadline - now
+ poll_timeout = min(seconds, poll_timeout)
+ break
+ if (self._cancellations > 512
+ and self._cancellations > (len(self._timeouts) >> 1)):
+ # Clean up the timeout queue when it gets large and it's
+ # more than half cancellations.
+ self._cancellations = 0
+ self._timeouts = [x for x in self._timeouts
+ if x.callback is not None]
+ heapq.heapify(self._timeouts)
+
+ if self._callbacks:
+ # If any callbacks or timeouts called add_callback,
+ # we don't want to wait in poll() before we run them.
+ poll_timeout = 0.0
+
+ if not self._running:
+ break
+
+ if self._blocking_signal_threshold is not None:
+ # clear alarm so it doesn't fire while poll is waiting for
+ # events.
+ signal.setitimer(signal.ITIMER_REAL, 0, 0)
+
+ try:
+ event_pairs = self._impl.poll(poll_timeout)
+ except Exception as e:
+ # Depending on python version and IOLoop implementation,
+ # different exception types may be thrown and there are
+ # two ways EINTR might be signaled:
+ # * e.errno == errno.EINTR
+ # * e.args is like (errno.EINTR, 'Interrupted system call')
+ if (getattr(e, 'errno', None) == errno.EINTR or
+ (isinstance(getattr(e, 'args', None), tuple) and
+ len(e.args) == 2 and e.args[0] == errno.EINTR)):
+ continue
+ else:
+ raise
+
+ if self._blocking_signal_threshold is not None:
+ signal.setitimer(signal.ITIMER_REAL,
+ self._blocking_signal_threshold, 0)
+
+ # Pop one fd at a time from the set of pending fds and run
+ # its handler. Since that handler may perform actions on
+ # other file descriptors, there may be reentrant calls to
+ # this IOLoop that update self._events
+ self._events.update(event_pairs)
+ while self._events:
+ fd, events = self._events.popitem()
+ try:
+ self._handlers[fd](fd, events)
+ except (OSError, IOError) as e:
+ if e.args[0] == errno.EPIPE:
+ # Happens when the client closes the connection
+ pass
+ else:
+ app_log.error("Exception in I/O handler for fd %s",
+ fd, exc_info=True)
+ except Exception:
+ app_log.error("Exception in I/O handler for fd %s",
+ fd, exc_info=True)
+ # reset the stopped flag so another start/stop pair can be issued
+ self._stopped = False
+ if self._blocking_signal_threshold is not None:
+ signal.setitimer(signal.ITIMER_REAL, 0, 0)
+ IOLoop._current.instance = old_current
+ if old_wakeup_fd is not None:
+ signal.set_wakeup_fd(old_wakeup_fd)
+
+ def stop(self):
+ self._running = False
+ self._stopped = True
+ self._waker.wake()
+
+ def time(self):
+ return self.time_func()
+
+ def add_timeout(self, deadline, callback):
+ timeout = _Timeout(deadline, stack_context.wrap(callback), self)
+ heapq.heappush(self._timeouts, timeout)
+ return timeout
+
+ def remove_timeout(self, timeout):
+ # Removing from a heap is complicated, so just leave the defunct
+ # timeout object in the queue (see discussion in
+ # http://docs.python.org/library/heapq.html).
+ # If this turns out to be a problem, we could add a garbage
+ # collection pass whenever there are too many dead timeouts.
+ timeout.callback = None
+ self._cancellations += 1
+
+ def add_callback(self, callback, *args, **kwargs):
+ with self._callback_lock:
+ if self._closing:
+ raise RuntimeError("IOLoop is closing")
+ list_empty = not self._callbacks
+ self._callbacks.append(functools.partial(
+ stack_context.wrap(callback), *args, **kwargs))
+ if list_empty and thread.get_ident() != self._thread_ident:
+ # If we're in the IOLoop's thread, we know it's not currently
+ # polling. If we're not, and we added the first callback to an
+ # empty list, we may need to wake it up (it may wake up on its
+ # own, but an occasional extra wake is harmless). Waking
+ # up a polling IOLoop is relatively expensive, so we try to
+ # avoid it when we can.
+ self._waker.wake()
+
+ def add_callback_from_signal(self, callback, *args, **kwargs):
+ with stack_context.NullContext():
+ if thread.get_ident() != self._thread_ident:
+ # if the signal is handled on another thread, we can add
+ # it normally (modulo the NullContext)
+ self.add_callback(callback, *args, **kwargs)
+ else:
+ # If we're on the IOLoop's thread, we cannot use
+ # the regular add_callback because it may deadlock on
+ # _callback_lock. Blindly insert into self._callbacks.
+ # This is safe because the GIL makes list.append atomic.
+ # One subtlety is that if the signal interrupted the
+ # _callback_lock block in IOLoop.start, we may modify
+ # either the old or new version of self._callbacks,
+ # but either way will work.
+ self._callbacks.append(functools.partial(
+ stack_context.wrap(callback), *args, **kwargs))
+
+
+class _Timeout(object):
+ """An IOLoop timeout, a UNIX timestamp and a callback"""
+
+ # Reduce memory overhead when there are lots of pending callbacks
+ __slots__ = ['deadline', 'callback']
+
+ def __init__(self, deadline, callback, io_loop):
+ if isinstance(deadline, numbers.Real):
+ self.deadline = deadline
+ elif isinstance(deadline, datetime.timedelta):
+ self.deadline = io_loop.time() + _Timeout.timedelta_to_seconds(deadline)
+ else:
+ raise TypeError("Unsupported deadline %r" % deadline)
+ self.callback = callback
+
+ @staticmethod
+ def timedelta_to_seconds(td):
+ """Equivalent to td.total_seconds() (introduced in python 2.7)."""
+ return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
+
+ # Comparison methods to sort by deadline, with object id as a tiebreaker
+ # to guarantee a consistent ordering. The heapq module uses __le__
+ # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
+ # use __lt__).
+ def __lt__(self, other):
+ return ((self.deadline, id(self)) <
+ (other.deadline, id(other)))
+
+ def __le__(self, other):
+ return ((self.deadline, id(self)) <=
+ (other.deadline, id(other)))
+
+
+class PeriodicCallback(object):
+ """Schedules the given callback to be called periodically.
+
+ The callback is called every ``callback_time`` milliseconds.
+
+ `start` must be called after the `PeriodicCallback` is created.
+ """
+ def __init__(self, callback, callback_time, io_loop=None):
+ self.callback = callback
+ if callback_time <= 0:
+ raise ValueError("Periodic callback must have a positive callback_time")
+ self.callback_time = callback_time
+ self.io_loop = io_loop or IOLoop.current()
+ self._running = False
+ self._timeout = None
+
+ def start(self):
+ """Starts the timer."""
+ self._running = True
+ self._next_timeout = self.io_loop.time()
+ self._schedule_next()
+
+ def stop(self):
+ """Stops the timer."""
+ self._running = False
+ if self._timeout is not None:
+ self.io_loop.remove_timeout(self._timeout)
+ self._timeout = None
+
+ def _run(self):
+ if not self._running:
+ return
+ try:
+ self.callback()
+ except Exception:
+ app_log.error("Error in periodic callback", exc_info=True)
+ self._schedule_next()
+
+ def _schedule_next(self):
+ if self._running:
+ current_time = self.io_loop.time()
+ while self._next_timeout <= current_time:
+ self._next_timeout += self.callback_time / 1000.0
+ self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/log.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/log.py
new file mode 100644
index 00000000..49051e89
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/log.py
@@ -0,0 +1,6 @@
+"""minimal subset of tornado.log for zmq.eventloop.minitornado"""
+
+import logging
+
+app_log = logging.getLogger("tornado.application")
+gen_log = logging.getLogger("tornado.general")
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/__init__.py
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/auto.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/auto.py
new file mode 100644
index 00000000..b40ccd94
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/auto.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Implementation of platform-specific functionality.
+
+For each function or class described in `tornado.platform.interface`,
+the appropriate platform-specific implementation exists in this module.
+Most code that needs access to this functionality should do e.g.::
+
+ from tornado.platform.auto import set_close_exec
+"""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+import os
+
+if os.name == 'nt':
+ from .common import Waker
+ from .windows import set_close_exec
+else:
+ from .posix import set_close_exec, Waker
+
+try:
+ # monotime monkey-patches the time module to have a monotonic function
+ # in versions of python before 3.3.
+ import monotime
+except ImportError:
+ pass
+try:
+ from time import monotonic as monotonic_time
+except ImportError:
+ monotonic_time = None
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/common.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/common.py
new file mode 100644
index 00000000..2d75dc1e
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/common.py
@@ -0,0 +1,91 @@
+"""Lowest-common-denominator implementations of platform functionality."""
+from __future__ import absolute_import, division, print_function, with_statement
+
+import errno
+import socket
+
+from . import interface
+
+
+class Waker(interface.Waker):
+ """Create an OS independent asynchronous pipe.
+
+ For use on platforms that don't have os.pipe() (or where pipes cannot
+ be passed to select()), but do have sockets. This includes Windows
+ and Jython.
+ """
+ def __init__(self):
+ # Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py
+
+ self.writer = socket.socket()
+ # Disable buffering -- pulling the trigger sends 1 byte,
+ # and we want that sent immediately, to wake up ASAP.
+ self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+
+ count = 0
+ while 1:
+ count += 1
+ # Bind to a local port; for efficiency, let the OS pick
+ # a free port for us.
+ # Unfortunately, stress tests showed that we may not
+ # be able to connect to that port ("Address already in
+ # use") despite that the OS picked it. This appears
+ # to be a race bug in the Windows socket implementation.
+ # So we loop until a connect() succeeds (almost always
+ # on the first try). See the long thread at
+ # http://mail.zope.org/pipermail/zope/2005-July/160433.html
+ # for hideous details.
+ a = socket.socket()
+ a.bind(("127.0.0.1", 0))
+ a.listen(1)
+ connect_address = a.getsockname() # assigned (host, port) pair
+ try:
+ self.writer.connect(connect_address)
+ break # success
+ except socket.error as detail:
+ if (not hasattr(errno, 'WSAEADDRINUSE') or
+ detail[0] != errno.WSAEADDRINUSE):
+ # "Address already in use" is the only error
+ # I've seen on two WinXP Pro SP2 boxes, under
+ # Pythons 2.3.5 and 2.4.1.
+ raise
+ # (10048, 'Address already in use')
+ # assert count <= 2 # never triggered in Tim's tests
+ if count >= 10: # I've never seen it go above 2
+ a.close()
+ self.writer.close()
+ raise socket.error("Cannot bind trigger!")
+ # Close `a` and try again. Note: I originally put a short
+ # sleep() here, but it didn't appear to help or hurt.
+ a.close()
+
+ self.reader, addr = a.accept()
+ self.reader.setblocking(0)
+ self.writer.setblocking(0)
+ a.close()
+ self.reader_fd = self.reader.fileno()
+
+ def fileno(self):
+ return self.reader.fileno()
+
+ def write_fileno(self):
+ return self.writer.fileno()
+
+ def wake(self):
+ try:
+ self.writer.send(b"x")
+ except (IOError, socket.error):
+ pass
+
+ def consume(self):
+ try:
+ while True:
+ result = self.reader.recv(1024)
+ if not result:
+ break
+ except (IOError, socket.error):
+ pass
+
+ def close(self):
+ self.reader.close()
+ self.writer.close()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/interface.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/interface.py
new file mode 100644
index 00000000..07da6bab
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/interface.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Interfaces for platform-specific functionality.
+
+This module exists primarily for documentation purposes and as base classes
+for other tornado.platform modules. Most code should import the appropriate
+implementation from `tornado.platform.auto`.
+"""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+
+def set_close_exec(fd):
+    """Sets the close-on-exec bit (``FD_CLOEXEC``) for a file descriptor."""
+ raise NotImplementedError()
+
+
+class Waker(object):
+ """A socket-like object that can wake another thread from ``select()``.
+
+ The `~tornado.ioloop.IOLoop` will add the Waker's `fileno()` to
+ its ``select`` (or ``epoll`` or ``kqueue``) calls. When another
+ thread wants to wake up the loop, it calls `wake`. Once it has woken
+ up, it will call `consume` to do any necessary per-wake cleanup. When
+ the ``IOLoop`` is closed, it closes its waker too.
+ """
+ def fileno(self):
+ """Returns the read file descriptor for this waker.
+
+ Must be suitable for use with ``select()`` or equivalent on the
+ local platform.
+ """
+ raise NotImplementedError()
+
+ def write_fileno(self):
+ """Returns the write file descriptor for this waker."""
+ raise NotImplementedError()
+
+ def wake(self):
+ """Triggers activity on the waker's file descriptor."""
+ raise NotImplementedError()
+
+ def consume(self):
+ """Called after the listen has woken up to do any necessary cleanup."""
+ raise NotImplementedError()
+
+ def close(self):
+ """Closes the waker's file descriptor(s)."""
+ raise NotImplementedError()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/posix.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/posix.py
new file mode 100644
index 00000000..ccffbb66
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/posix.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Posix implementations of platform-specific functionality."""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+import fcntl
+import os
+
+from . import interface
+
+
+def set_close_exec(fd):
+ flags = fcntl.fcntl(fd, fcntl.F_GETFD)
+ fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
+
+
+def _set_nonblocking(fd):
+ flags = fcntl.fcntl(fd, fcntl.F_GETFL)
+ fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
+
+
+class Waker(interface.Waker):
+ def __init__(self):
+ r, w = os.pipe()
+ _set_nonblocking(r)
+ _set_nonblocking(w)
+ set_close_exec(r)
+ set_close_exec(w)
+ self.reader = os.fdopen(r, "rb", 0)
+ self.writer = os.fdopen(w, "wb", 0)
+
+ def fileno(self):
+ return self.reader.fileno()
+
+ def write_fileno(self):
+ return self.writer.fileno()
+
+ def wake(self):
+ try:
+ self.writer.write(b"x")
+ except IOError:
+ pass
+
+ def consume(self):
+ try:
+ while True:
+ result = self.reader.read()
+ if not result:
+ break
+ except IOError:
+ pass
+
+ def close(self):
+ self.reader.close()
+ self.writer.close()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/windows.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/windows.py
new file mode 100644
index 00000000..817bdca1
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/platform/windows.py
@@ -0,0 +1,20 @@
+# NOTE: win32 support is currently experimental, and not recommended
+# for production use.
+
+
+from __future__ import absolute_import, division, print_function, with_statement
+import ctypes
+import ctypes.wintypes
+
+# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx
+SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
+SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)
+SetHandleInformation.restype = ctypes.wintypes.BOOL
+
+HANDLE_FLAG_INHERIT = 0x00000001
+
+
+def set_close_exec(fd):
+ success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 0)
+ if not success:
+ raise ctypes.GetLastError()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/stack_context.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/stack_context.py
new file mode 100644
index 00000000..226d8042
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/stack_context.py
@@ -0,0 +1,376 @@
+#!/usr/bin/env python
+#
+# Copyright 2010 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""`StackContext` allows applications to maintain threadlocal-like state
+that follows execution as it moves to other execution contexts.
+
+The motivating examples are to eliminate the need for explicit
+``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to
+allow some additional context to be kept for logging.
+
+This is slightly magic, but it's an extension of the idea that an
+exception handler is a kind of stack-local state and when that stack
+is suspended and resumed in a new context that state needs to be
+preserved. `StackContext` shifts the burden of restoring that state
+from each call site (e.g. wrapping each `.AsyncHTTPClient` callback
+in ``async_callback``) to the mechanisms that transfer control from
+one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`,
+thread pools, etc).
+
+Example usage::
+
+ @contextlib.contextmanager
+ def die_on_error():
+ try:
+ yield
+ except Exception:
+ logging.error("exception in asynchronous operation",exc_info=True)
+ sys.exit(1)
+
+ with StackContext(die_on_error):
+        # Any exception thrown here *or in callback and its descendants*
+ # will cause the process to exit instead of spinning endlessly
+ # in the ioloop.
+ http_client.fetch(url, callback)
+ ioloop.start()
+
+Most applications shouldn't have to work with `StackContext` directly.
+Here are a few rules of thumb for when it's necessary:
+
+* If you're writing an asynchronous library that doesn't rely on a
+ stack_context-aware library like `tornado.ioloop` or `tornado.iostream`
+ (for example, if you're writing a thread pool), use
+ `.stack_context.wrap()` before any asynchronous operations to capture the
+ stack context from where the operation was started.
+
+* If you're writing an asynchronous library that has some shared
+ resources (such as a connection pool), create those shared resources
+ within a ``with stack_context.NullContext():`` block. This will prevent
+ ``StackContexts`` from leaking from one request to another.
+
+* If you want to write something like an exception handler that will
+ persist across asynchronous calls, create a new `StackContext` (or
+ `ExceptionStackContext`), and make your asynchronous calls in a ``with``
+ block that references your `StackContext`.
+"""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+import sys
+import threading
+
+from .util import raise_exc_info
+
+
+class StackContextInconsistentError(Exception):
+ pass
+
+
+class _State(threading.local):
+ def __init__(self):
+ self.contexts = (tuple(), None)
+_state = _State()
+
+
+class StackContext(object):
+ """Establishes the given context as a StackContext that will be transferred.
+
+ Note that the parameter is a callable that returns a context
+ manager, not the context itself. That is, where for a
+ non-transferable context manager you would say::
+
+ with my_context():
+
+ StackContext takes the function itself rather than its result::
+
+ with StackContext(my_context):
+
+ The result of ``with StackContext() as cb:`` is a deactivation
+ callback. Run this callback when the StackContext is no longer
+ needed to ensure that it is not propagated any further (note that
+ deactivating a context does not affect any instances of that
+ context that are currently pending). This is an advanced feature
+ and not necessary in most applications.
+ """
+ def __init__(self, context_factory):
+ self.context_factory = context_factory
+ self.contexts = []
+ self.active = True
+
+ def _deactivate(self):
+ self.active = False
+
+ # StackContext protocol
+ def enter(self):
+ context = self.context_factory()
+ self.contexts.append(context)
+ context.__enter__()
+
+ def exit(self, type, value, traceback):
+ context = self.contexts.pop()
+ context.__exit__(type, value, traceback)
+
+ # Note that some of this code is duplicated in ExceptionStackContext
+ # below. ExceptionStackContext is more common and doesn't need
+ # the full generality of this class.
+ def __enter__(self):
+ self.old_contexts = _state.contexts
+ self.new_contexts = (self.old_contexts[0] + (self,), self)
+ _state.contexts = self.new_contexts
+
+ try:
+ self.enter()
+ except:
+ _state.contexts = self.old_contexts
+ raise
+
+ return self._deactivate
+
+ def __exit__(self, type, value, traceback):
+ try:
+ self.exit(type, value, traceback)
+ finally:
+ final_contexts = _state.contexts
+ _state.contexts = self.old_contexts
+
+ # Generator coroutines and with-statements with non-local
+ # effects interact badly. Check here for signs of
+ # the stack getting out of sync.
+ # Note that this check comes after restoring _state.context
+ # so that if it fails things are left in a (relatively)
+ # consistent state.
+ if final_contexts is not self.new_contexts:
+ raise StackContextInconsistentError(
+ 'stack_context inconsistency (may be caused by yield '
+ 'within a "with StackContext" block)')
+
+ # Break up a reference to itself to allow for faster GC on CPython.
+ self.new_contexts = None
+
+
+class ExceptionStackContext(object):
+ """Specialization of StackContext for exception handling.
+
+ The supplied ``exception_handler`` function will be called in the
+ event of an uncaught exception in this context. The semantics are
+ similar to a try/finally clause, and intended use cases are to log
+ an error, close a socket, or similar cleanup actions. The
+ ``exc_info`` triple ``(type, value, traceback)`` will be passed to the
+ exception_handler function.
+
+ If the exception handler returns true, the exception will be
+ consumed and will not be propagated to other exception handlers.
+ """
+ def __init__(self, exception_handler):
+ self.exception_handler = exception_handler
+ self.active = True
+
+ def _deactivate(self):
+ self.active = False
+
+ def exit(self, type, value, traceback):
+ if type is not None:
+ return self.exception_handler(type, value, traceback)
+
+ def __enter__(self):
+ self.old_contexts = _state.contexts
+ self.new_contexts = (self.old_contexts[0], self)
+ _state.contexts = self.new_contexts
+
+ return self._deactivate
+
+ def __exit__(self, type, value, traceback):
+ try:
+ if type is not None:
+ return self.exception_handler(type, value, traceback)
+ finally:
+ final_contexts = _state.contexts
+ _state.contexts = self.old_contexts
+
+ if final_contexts is not self.new_contexts:
+ raise StackContextInconsistentError(
+ 'stack_context inconsistency (may be caused by yield '
+ 'within a "with StackContext" block)')
+
+ # Break up a reference to itself to allow for faster GC on CPython.
+ self.new_contexts = None
+
+
+class NullContext(object):
+ """Resets the `StackContext`.
+
+ Useful when creating a shared resource on demand (e.g. an
+    `.AsyncHTTPClient`) where the stack that caused the creation is
+ not relevant to future operations.
+ """
+ def __enter__(self):
+ self.old_contexts = _state.contexts
+ _state.contexts = (tuple(), None)
+
+ def __exit__(self, type, value, traceback):
+ _state.contexts = self.old_contexts
+
+
+def _remove_deactivated(contexts):
+ """Remove deactivated handlers from the chain"""
+ # Clean ctx handlers
+ stack_contexts = tuple([h for h in contexts[0] if h.active])
+
+ # Find new head
+ head = contexts[1]
+ while head is not None and not head.active:
+ head = head.old_contexts[1]
+
+ # Process chain
+ ctx = head
+ while ctx is not None:
+ parent = ctx.old_contexts[1]
+
+ while parent is not None:
+ if parent.active:
+ break
+ ctx.old_contexts = parent.old_contexts
+ parent = parent.old_contexts[1]
+
+ ctx = parent
+
+ return (stack_contexts, head)
+
+
+def wrap(fn):
+ """Returns a callable object that will restore the current `StackContext`
+ when executed.
+
+ Use this whenever saving a callback to be executed later in a
+ different execution context (either in a different thread or
+ asynchronously in the same thread).
+ """
+ # Check if function is already wrapped
+ if fn is None or hasattr(fn, '_wrapped'):
+ return fn
+
+ # Capture current stack head
+ # TODO: Any other better way to store contexts and update them in wrapped function?
+ cap_contexts = [_state.contexts]
+
+ def wrapped(*args, **kwargs):
+ ret = None
+ try:
+ # Capture old state
+ current_state = _state.contexts
+
+ # Remove deactivated items
+ cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0])
+
+ # Force new state
+ _state.contexts = contexts
+
+ # Current exception
+ exc = (None, None, None)
+ top = None
+
+ # Apply stack contexts
+ last_ctx = 0
+ stack = contexts[0]
+
+ # Apply state
+ for n in stack:
+ try:
+ n.enter()
+ last_ctx += 1
+ except:
+ # Exception happened. Record exception info and store top-most handler
+ exc = sys.exc_info()
+ top = n.old_contexts[1]
+
+ # Execute callback if no exception happened while restoring state
+ if top is None:
+ try:
+ ret = fn(*args, **kwargs)
+ except:
+ exc = sys.exc_info()
+ top = contexts[1]
+
+ # If there was exception, try to handle it by going through the exception chain
+ if top is not None:
+ exc = _handle_exception(top, exc)
+ else:
+ # Otherwise take shorter path and run stack contexts in reverse order
+ while last_ctx > 0:
+ last_ctx -= 1
+ c = stack[last_ctx]
+
+ try:
+ c.exit(*exc)
+ except:
+ exc = sys.exc_info()
+ top = c.old_contexts[1]
+ break
+ else:
+ top = None
+
+            # If an exception happened while unrolling, take longer exception handler path
+ if top is not None:
+ exc = _handle_exception(top, exc)
+
+ # If exception was not handled, raise it
+ if exc != (None, None, None):
+ raise_exc_info(exc)
+ finally:
+ _state.contexts = current_state
+ return ret
+
+ wrapped._wrapped = True
+ return wrapped
+
+
+def _handle_exception(tail, exc):
+ while tail is not None:
+ try:
+ if tail.exit(*exc):
+ exc = (None, None, None)
+ except:
+ exc = sys.exc_info()
+
+ tail = tail.old_contexts[1]
+
+ return exc
+
+
+def run_with_stack_context(context, func):
+ """Run a coroutine ``func`` in the given `StackContext`.
+
+ It is not safe to have a ``yield`` statement within a ``with StackContext``
+ block, so it is difficult to use stack context with `.gen.coroutine`.
+ This helper function runs the function in the correct context while
+ keeping the ``yield`` and ``with`` statements syntactically separate.
+
+ Example::
+
+ @gen.coroutine
+ def incorrect():
+ with StackContext(ctx):
+ # ERROR: this will raise StackContextInconsistentError
+ yield other_coroutine()
+
+ @gen.coroutine
+ def correct():
+ yield run_with_stack_context(StackContext(ctx), other_coroutine)
+
+ .. versionadded:: 3.1
+ """
+ with context:
+ return func()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/util.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/util.py
new file mode 100644
index 00000000..c1e2eb95
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/minitornado/util.py
@@ -0,0 +1,184 @@
+"""Miscellaneous utility functions and classes.
+
+This module is used internally by Tornado. It is not necessarily expected
+that the functions and classes defined here will be useful to other
+applications, but they are documented here in case they are.
+
+The one public-facing part of this module is the `Configurable` class
+and its `~Configurable.configure` method, which becomes a part of the
+interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
+and `.Resolver`.
+"""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+import sys
+
+
+def import_object(name):
+ """Imports an object by name.
+
+ import_object('x') is equivalent to 'import x'.
+ import_object('x.y.z') is equivalent to 'from x.y import z'.
+
+ >>> import tornado.escape
+ >>> import_object('tornado.escape') is tornado.escape
+ True
+ >>> import_object('tornado.escape.utf8') is tornado.escape.utf8
+ True
+ >>> import_object('tornado') is tornado
+ True
+ >>> import_object('tornado.missing_module')
+ Traceback (most recent call last):
+ ...
+ ImportError: No module named missing_module
+ """
+ if name.count('.') == 0:
+ return __import__(name, None, None)
+
+ parts = name.split('.')
+ obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
+ try:
+ return getattr(obj, parts[-1])
+ except AttributeError:
+ raise ImportError("No module named %s" % parts[-1])
+
+
+# Fake unicode literal support: Python 3.2 doesn't have the u'' marker for
+# literal strings, and alternative solutions like "from __future__ import
+# unicode_literals" have other problems (see PEP 414). u() can be applied
+# to ascii strings that include \u escapes (but they must not contain
+# literal non-ascii characters).
+if type('') is not type(b''):
+ def u(s):
+ return s
+ bytes_type = bytes
+ unicode_type = str
+ basestring_type = str
+else:
+ def u(s):
+ return s.decode('unicode_escape')
+ bytes_type = str
+ unicode_type = unicode
+ basestring_type = basestring
+
+
+if sys.version_info > (3,):
+ exec("""
+def raise_exc_info(exc_info):
+ raise exc_info[1].with_traceback(exc_info[2])
+
+def exec_in(code, glob, loc=None):
+ if isinstance(code, str):
+ code = compile(code, '<string>', 'exec', dont_inherit=True)
+ exec(code, glob, loc)
+""")
+else:
+ exec("""
+def raise_exc_info(exc_info):
+ raise exc_info[0], exc_info[1], exc_info[2]
+
+def exec_in(code, glob, loc=None):
+ if isinstance(code, basestring):
+ # exec(string) inherits the caller's future imports; compile
+ # the string first to prevent that.
+ code = compile(code, '<string>', 'exec', dont_inherit=True)
+ exec code in glob, loc
+""")
+
+
+class Configurable(object):
+ """Base class for configurable interfaces.
+
+ A configurable interface is an (abstract) class whose constructor
+ acts as a factory function for one of its implementation subclasses.
+ The implementation subclass as well as optional keyword arguments to
+ its initializer can be set globally at runtime with `configure`.
+
+ By using the constructor as the factory method, the interface
+ looks like a normal class, `isinstance` works as usual, etc. This
+ pattern is most useful when the choice of implementation is likely
+ to be a global decision (e.g. when `~select.epoll` is available,
+ always use it instead of `~select.select`), or when a
+ previously-monolithic class has been split into specialized
+ subclasses.
+
+ Configurable subclasses must define the class methods
+ `configurable_base` and `configurable_default`, and use the instance
+ method `initialize` instead of ``__init__``.
+ """
+ __impl_class = None
+ __impl_kwargs = None
+
+ def __new__(cls, **kwargs):
+ base = cls.configurable_base()
+ args = {}
+ if cls is base:
+ impl = cls.configured_class()
+ if base.__impl_kwargs:
+ args.update(base.__impl_kwargs)
+ else:
+ impl = cls
+ args.update(kwargs)
+ instance = super(Configurable, cls).__new__(impl)
+ # initialize vs __init__ chosen for compatibility with AsyncHTTPClient
+ # singleton magic. If we get rid of that we can switch to __init__
+ # here too.
+ instance.initialize(**args)
+ return instance
+
+ @classmethod
+ def configurable_base(cls):
+ """Returns the base class of a configurable hierarchy.
+
+ This will normally return the class in which it is defined.
+ (which is *not* necessarily the same as the cls classmethod parameter).
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def configurable_default(cls):
+ """Returns the implementation class to be used if none is configured."""
+ raise NotImplementedError()
+
+ def initialize(self):
+ """Initialize a `Configurable` subclass instance.
+
+ Configurable classes should use `initialize` instead of ``__init__``.
+ """
+
+ @classmethod
+ def configure(cls, impl, **kwargs):
+ """Sets the class to use when the base class is instantiated.
+
+ Keyword arguments will be saved and added to the arguments passed
+ to the constructor. This can be used to set global defaults for
+ some parameters.
+ """
+ base = cls.configurable_base()
+ if isinstance(impl, (unicode_type, bytes_type)):
+ impl = import_object(impl)
+ if impl is not None and not issubclass(impl, cls):
+ raise ValueError("Invalid subclass of %s" % cls)
+ base.__impl_class = impl
+ base.__impl_kwargs = kwargs
+
+ @classmethod
+ def configured_class(cls):
+ """Returns the currently configured class."""
+ base = cls.configurable_base()
+ if cls.__impl_class is None:
+ base.__impl_class = cls.configurable_default()
+ return base.__impl_class
+
+ @classmethod
+ def _save_configuration(cls):
+ base = cls.configurable_base()
+ return (base.__impl_class, base.__impl_kwargs)
+
+ @classmethod
+ def _restore_configuration(cls, saved):
+ base = cls.configurable_base()
+ base.__impl_class = saved[0]
+ base.__impl_kwargs = saved[1]
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/zmqstream.py
new file mode 100644
index 00000000..86a97e44
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/eventloop/zmqstream.py
@@ -0,0 +1,529 @@
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A utility class to send to and recv from a non-blocking socket."""
+
+from __future__ import with_statement
+
+import sys
+
+import zmq
+from zmq.utils import jsonapi
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+from .ioloop import IOLoop
+
+try:
+ # gen_log will only import from >= 3.0
+ from tornado.log import gen_log
+ from tornado import stack_context
+except ImportError:
+ from .minitornado.log import gen_log
+ from .minitornado import stack_context
+
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+
+from zmq.utils.strtypes import bytes, unicode, basestring
+
+try:
+ callable
+except NameError:
+ callable = lambda obj: hasattr(obj, '__call__')
+
+
+class ZMQStream(object):
+ """A utility class to register callbacks when a zmq socket sends and receives
+
+ For use with zmq.eventloop.ioloop
+
+ There are three main methods
+
+ Methods:
+
+ * **on_recv(callback, copy=True):**
+ register a callback to be run every time the socket has something to receive
+ * **on_send(callback):**
+ register a callback to be run every time you call send
+ * **send(self, msg, flags=0, copy=False, callback=None):**
+ perform a send that will trigger the callback
+ if callback is passed, on_send is also called.
+
+ There are also send_multipart(), send_json(), send_pyobj()
+
+ Two other methods for deactivating the callbacks:
+
+ * **stop_on_recv():**
+ turn off the recv callback
+ * **stop_on_send():**
+ turn off the send callback
+
+ which simply call ``on_<evt>(None)``.
+
+ The entire socket interface, excluding direct recv methods, is also
+ provided, primarily through direct-linking the methods.
+ e.g.
+
+ >>> stream.bind is stream.socket.bind
+ True
+
+ """
+
+ socket = None
+ io_loop = None
+ poller = None
+
+ def __init__(self, socket, io_loop=None):
+ self.socket = socket
+ self.io_loop = io_loop or IOLoop.instance()
+ self.poller = zmq.Poller()
+
+ self._send_queue = Queue()
+ self._recv_callback = None
+ self._send_callback = None
+ self._close_callback = None
+ self._recv_copy = False
+ self._flushed = False
+
+ self._state = self.io_loop.ERROR
+ self._init_io_state()
+
+ # shortcircuit some socket methods
+ self.bind = self.socket.bind
+ self.bind_to_random_port = self.socket.bind_to_random_port
+ self.connect = self.socket.connect
+ self.setsockopt = self.socket.setsockopt
+ self.getsockopt = self.socket.getsockopt
+ self.setsockopt_string = self.socket.setsockopt_string
+ self.getsockopt_string = self.socket.getsockopt_string
+ self.setsockopt_unicode = self.socket.setsockopt_unicode
+ self.getsockopt_unicode = self.socket.getsockopt_unicode
+
+
+ def stop_on_recv(self):
+ """Disable callback and automatic receiving."""
+ return self.on_recv(None)
+
+ def stop_on_send(self):
+ """Disable callback on sending."""
+ return self.on_send(None)
+
+ def stop_on_err(self):
+ """DEPRECATED, does nothing"""
+ gen_log.warn("on_err does nothing, and will be removed")
+
+ def on_err(self, callback):
+ """DEPRECATED, does nothing"""
+ gen_log.warn("on_err does nothing, and will be removed")
+
+ def on_recv(self, callback, copy=True):
+ """Register a callback for when a message is ready to recv.
+
+ There can be only one callback registered at a time, so each
+ call to `on_recv` replaces previously registered callbacks.
+
+ on_recv(None) disables recv event polling.
+
+ Use on_recv_stream(callback) instead, to register a callback that will receive
+ both this ZMQStream and the message, instead of just the message.
+
+ Parameters
+ ----------
+
+ callback : callable
+ callback must take exactly one argument, which will be a
+ list, as returned by socket.recv_multipart()
+ if callback is None, recv callbacks are disabled.
+ copy : bool
+ copy is passed directly to recv, so if copy is False,
+ callback will receive Message objects. If copy is True,
+ then callback will receive bytes/str objects.
+
+ Returns : None
+ """
+
+ self._check_closed()
+ assert callback is None or callable(callback)
+ self._recv_callback = stack_context.wrap(callback)
+ self._recv_copy = copy
+ if callback is None:
+ self._drop_io_state(self.io_loop.READ)
+ else:
+ self._add_io_state(self.io_loop.READ)
+
+ def on_recv_stream(self, callback, copy=True):
+ """Same as on_recv, but callback will get this stream as first argument
+
+ callback must take exactly two arguments, as it will be called as::
+
+ callback(stream, msg)
+
+ Useful when a single callback should be used with multiple streams.
+ """
+ if callback is None:
+ self.stop_on_recv()
+ else:
+ self.on_recv(lambda msg: callback(self, msg), copy=copy)
+
+ def on_send(self, callback):
+ """Register a callback to be called on each send
+
+ There will be two arguments::
+
+ callback(msg, status)
+
+ * `msg` will be the list of sendable objects that was just sent
+ * `status` will be the return result of socket.send_multipart(msg) -
+ MessageTracker or None.
+
+ Non-copying sends return a MessageTracker object whose
+ `done` attribute will be True when the send is complete.
+ This allows users to track when an object is safe to write to
+ again.
+
+ The second argument will always be None if copy=True
+ on the send.
+
+ Use on_send_stream(callback) to register a callback that will be passed
+ this ZMQStream as the first argument, in addition to the other two.
+
+ on_send(None) disables send event polling.
+
+ Parameters
+ ----------
+
+ callback : callable
+ callback must take exactly two arguments, which will be
+ the message being sent (always a list),
+ and the return result of socket.send_multipart(msg) -
+ MessageTracker or None.
+
+ if callback is None, send callbacks are disabled.
+ """
+
+ self._check_closed()
+ assert callback is None or callable(callback)
+ self._send_callback = stack_context.wrap(callback)
+
+
+ def on_send_stream(self, callback):
+ """Same as on_send, but callback will get this stream as first argument
+
+ Callback will be passed three arguments::
+
+ callback(stream, msg, status)
+
+ Useful when a single callback should be used with multiple streams.
+ """
+ if callback is None:
+ self.stop_on_send()
+ else:
+ self.on_send(lambda msg, status: callback(self, msg, status))
+
+
+ def send(self, msg, flags=0, copy=True, track=False, callback=None):
+ """Send a message, optionally also register a new callback for sends.
+ See zmq.socket.send for details.
+ """
+ return self.send_multipart([msg], flags=flags, copy=copy, track=track, callback=callback)
+
+ def send_multipart(self, msg, flags=0, copy=True, track=False, callback=None):
+ """Send a multipart message, optionally also register a new callback for sends.
+ See zmq.socket.send_multipart for details.
+ """
+ kwargs = dict(flags=flags, copy=copy, track=track)
+ self._send_queue.put((msg, kwargs))
+ callback = callback or self._send_callback
+ if callback is not None:
+ self.on_send(callback)
+ else:
+ # noop callback
+ self.on_send(lambda *args: None)
+ self._add_io_state(self.io_loop.WRITE)
+
+ def send_string(self, u, flags=0, encoding='utf-8', callback=None):
+ """Send a unicode message with an encoding.
+ See zmq.socket.send_unicode for details.
+ """
+ if not isinstance(u, basestring):
+ raise TypeError("unicode/str objects only")
+ return self.send(u.encode(encoding), flags=flags, callback=callback)
+
+ send_unicode = send_string
+
+ def send_json(self, obj, flags=0, callback=None):
+ """Send json-serialized version of an object.
+ See zmq.socket.send_json for details.
+ """
+ if jsonapi is None:
+ raise ImportError('jsonlib{1,2}, json or simplejson library is required.')
+ else:
+ msg = jsonapi.dumps(obj)
+ return self.send(msg, flags=flags, callback=callback)
+
+ def send_pyobj(self, obj, flags=0, protocol=-1, callback=None):
+ """Send a Python object as a message using pickle to serialize.
+
+ See zmq.socket.send_json for details.
+ """
+ msg = pickle.dumps(obj, protocol)
+ return self.send(msg, flags, callback=callback)
+
+ def _finish_flush(self):
+ """callback for unsetting _flushed flag."""
+ self._flushed = False
+
+ def flush(self, flag=zmq.POLLIN|zmq.POLLOUT, limit=None):
+ """Flush pending messages.
+
+ This method safely handles all pending incoming and/or outgoing messages,
+ bypassing the inner loop, passing them to the registered callbacks.
+
+ A limit can be specified, to prevent blocking under high load.
+
+ flush will return the first time ANY of these conditions are met:
+ * No more events matching the flag are pending.
+ * the total number of events handled reaches the limit.
+
+ Note that if ``flag|POLLIN != 0``, recv events will be flushed even if no callback
+ is registered, unlike normal IOLoop operation. This allows flush to be
+ used to remove *and ignore* incoming messages.
+
+ Parameters
+ ----------
+ flag : int, default=POLLIN|POLLOUT
+ 0MQ poll flags.
+ If flag|POLLIN, recv events will be flushed.
+ If flag|POLLOUT, send events will be flushed.
+ Both flags can be set at once, which is the default.
+ limit : None or int, optional
+ The maximum number of messages to send or receive.
+ Both send and recv count against this limit.
+
+ Returns
+ -------
+ int : count of events handled (both send and recv)
+ """
+ self._check_closed()
+ # unset self._flushed, so callbacks will execute, in case flush has
+ # already been called this iteration
+ already_flushed = self._flushed
+ self._flushed = False
+ # initialize counters
+ count = 0
+ def update_flag():
+ """Update the poll flag, to prevent registering POLLOUT events
+ if we don't have pending sends."""
+ return flag & zmq.POLLIN | (self.sending() and flag & zmq.POLLOUT)
+ flag = update_flag()
+ if not flag:
+ # nothing to do
+ return 0
+ self.poller.register(self.socket, flag)
+ events = self.poller.poll(0)
+ while events and (not limit or count < limit):
+ s,event = events[0]
+ if event & zmq.POLLIN: # receiving
+ self._handle_recv()
+ count += 1
+ if self.socket is None:
+ # break if socket was closed during callback
+ break
+ if event & zmq.POLLOUT and self.sending():
+ self._handle_send()
+ count += 1
+ if self.socket is None:
+ # break if socket was closed during callback
+ break
+
+ flag = update_flag()
+ if flag:
+ self.poller.register(self.socket, flag)
+ events = self.poller.poll(0)
+ else:
+ events = []
+ if count: # only bypass loop if we actually flushed something
+ # skip send/recv callbacks this iteration
+ self._flushed = True
+ # reregister them at the end of the loop
+ if not already_flushed: # don't need to do it again
+ self.io_loop.add_callback(self._finish_flush)
+ elif already_flushed:
+ self._flushed = True
+
+ # update ioloop poll state, which may have changed
+ self._rebuild_io_state()
+ return count
+
+ def set_close_callback(self, callback):
+ """Call the given callback when the stream is closed."""
+ self._close_callback = stack_context.wrap(callback)
+
+ def close(self, linger=None):
+ """Close this stream."""
+ if self.socket is not None:
+ self.io_loop.remove_handler(self.socket)
+ self.socket.close(linger)
+ self.socket = None
+ if self._close_callback:
+ self._run_callback(self._close_callback)
+
+ def receiving(self):
+ """Returns True if we are currently receiving from the stream."""
+ return self._recv_callback is not None
+
+ def sending(self):
+ """Returns True if we are currently sending to the stream."""
+ return not self._send_queue.empty()
+
+ def closed(self):
+ return self.socket is None
+
+ def _run_callback(self, callback, *args, **kwargs):
+ """Wrap running callbacks in try/except to allow us to
+ close our socket."""
+ try:
+ # Use a NullContext to ensure that all StackContexts are run
+ # inside our blanket exception handler rather than outside.
+ with stack_context.NullContext():
+ callback(*args, **kwargs)
+ except:
+ gen_log.error("Uncaught exception, closing connection.",
+ exc_info=True)
+ # Close the socket on an uncaught exception from a user callback
+ # (It would eventually get closed when the socket object is
+ # gc'd, but we don't want to rely on gc happening before we
+ # run out of file descriptors)
+ self.close()
+ # Re-raise the exception so that IOLoop.handle_callback_exception
+ # can see it and log the error
+ raise
+
+ def _handle_events(self, fd, events):
+ """This method is the actual handler for IOLoop, that gets called whenever
+ an event on my socket is posted. It dispatches to _handle_recv, etc."""
+ # print "handling events"
+ if not self.socket:
+ gen_log.warning("Got events for closed stream %s", fd)
+ return
+ try:
+ # dispatch events:
+ if events & IOLoop.ERROR:
+ gen_log.error("got POLLERR event on ZMQStream, which doesn't make sense")
+ return
+ if events & IOLoop.READ:
+ self._handle_recv()
+ if not self.socket:
+ return
+ if events & IOLoop.WRITE:
+ self._handle_send()
+ if not self.socket:
+ return
+
+ # rebuild the poll state
+ self._rebuild_io_state()
+ except:
+ gen_log.error("Uncaught exception, closing connection.",
+ exc_info=True)
+ self.close()
+ raise
+
+ def _handle_recv(self):
+ """Handle a recv event."""
+ if self._flushed:
+ return
+ try:
+ msg = self.socket.recv_multipart(zmq.NOBLOCK, copy=self._recv_copy)
+ except zmq.ZMQError as e:
+ if e.errno == zmq.EAGAIN:
+ # state changed since poll event
+ pass
+ else:
+ gen_log.error("RECV Error: %s"%zmq.strerror(e.errno))
+ else:
+ if self._recv_callback:
+ callback = self._recv_callback
+ # self._recv_callback = None
+ self._run_callback(callback, msg)
+
+ # self.update_state()
+
+
+ def _handle_send(self):
+ """Handle a send event."""
+ if self._flushed:
+ return
+ if not self.sending():
+ gen_log.error("Shouldn't have handled a send event")
+ return
+
+ msg, kwargs = self._send_queue.get()
+ try:
+ status = self.socket.send_multipart(msg, **kwargs)
+ except zmq.ZMQError as e:
+ gen_log.error("SEND Error: %s", e)
+ status = e
+ if self._send_callback:
+ callback = self._send_callback
+ self._run_callback(callback, msg, status)
+
+ # self.update_state()
+
+ def _check_closed(self):
+ if not self.socket:
+ raise IOError("Stream is closed")
+
+ def _rebuild_io_state(self):
+ """rebuild io state based on self.sending() and receiving()"""
+ if self.socket is None:
+ return
+ state = self.io_loop.ERROR
+ if self.receiving():
+ state |= self.io_loop.READ
+ if self.sending():
+ state |= self.io_loop.WRITE
+ if state != self._state:
+ self._state = state
+ self._update_handler(state)
+
+ def _add_io_state(self, state):
+ """Add io_state to poller."""
+ if not self._state & state:
+ self._state = self._state | state
+ self._update_handler(self._state)
+
+ def _drop_io_state(self, state):
+ """Stop poller from watching an io_state."""
+ if self._state & state:
+ self._state = self._state & (~state)
+ self._update_handler(self._state)
+
+ def _update_handler(self, state):
+ """Update IOLoop handler with state."""
+ if self.socket is None:
+ return
+ self.io_loop.update_handler(self.socket, state)
+
+ def _init_io_state(self):
+ """initialize the ioloop event handler"""
+ with stack_context.NullContext():
+ self.io_loop.add_handler(self.socket, self._handle_events, self._state)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/__init__.py
new file mode 100644
index 00000000..ff7e5965
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/__init__.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+#-----------------------------------------------------------------------------
+# Copyright (C) 2011-2012 Travis Cline
+#
+# This file is part of pyzmq
+# It is adapted from upstream project zeromq_gevent under the New BSD License
+#
+# Distributed under the terms of the New BSD License. The full license is in
+# the file COPYING.BSD, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+"""zmq.green - gevent compatibility with zeromq.
+
+Usage
+-----
+
+Instead of importing zmq directly, do so in the following manner:
+
+..
+
+ import zmq.green as zmq
+
+
+Any calls that would have blocked the current thread will now only block the
+current green thread.
+
+This compatibility is accomplished by ensuring the nonblocking flag is set
+before any blocking operation and the ØMQ file descriptor is polled internally
+to trigger needed events.
+"""
+
+from zmq import *
+from zmq.green.core import _Context, _Socket
+from zmq.green.poll import _Poller
+Context = _Context
+Socket = _Socket
+Poller = _Poller
+
+from zmq.green.device import device
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/core.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/core.py
new file mode 100644
index 00000000..9fc73e32
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/core.py
@@ -0,0 +1,287 @@
+#-----------------------------------------------------------------------------
+# Copyright (C) 2011-2012 Travis Cline
+#
+# This file is part of pyzmq
+# It is adapted from upstream project zeromq_gevent under the New BSD License
+#
+# Distributed under the terms of the New BSD License. The full license is in
+# the file COPYING.BSD, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+"""This module wraps the :class:`Socket` and :class:`Context` found in :mod:`pyzmq <zmq>` to be non blocking
+"""
+
+from __future__ import print_function
+
+import sys
+import time
+import warnings
+
+import zmq
+
+from zmq import Context as _original_Context
+from zmq import Socket as _original_Socket
+from .poll import _Poller
+
+import gevent
+from gevent.event import AsyncResult
+from gevent.hub import get_hub
+
+if hasattr(zmq, 'RCVTIMEO'):
+ TIMEOS = (zmq.RCVTIMEO, zmq.SNDTIMEO)
+else:
+ TIMEOS = ()
+
+def _stop(evt):
+ """simple wrapper for stopping an Event, allowing for method rename in gevent 1.0"""
+ try:
+ evt.stop()
+ except AttributeError as e:
+ # gevent<1.0 compat
+ evt.cancel()
+
+class _Socket(_original_Socket):
+ """Green version of :class:`zmq.Socket`
+
+ The following methods are overridden:
+
+ * send
+ * recv
+
+ To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
+ is deferred to the hub if a ``zmq.EAGAIN`` (retry) error is raised.
+
+ The `__state_changed` method is triggered when the zmq.FD for the socket is
+ marked as readable and triggers the necessary read and write events (which
+ are waited for in the recv and send methods).
+
+ Some double underscore prefixes are used to minimize pollution of
+ :class:`zmq.Socket`'s namespace.
+ """
+ __in_send_multipart = False
+ __in_recv_multipart = False
+ __writable = None
+ __readable = None
+ _state_event = None
+ _gevent_bug_timeout = 11.6 # timeout for not trusting gevent
+ _debug_gevent = False # turn on if you think gevent is missing events
+ _poller_class = _Poller
+
+ def __init__(self, context, socket_type):
+ _original_Socket.__init__(self, context, socket_type)
+ self.__in_send_multipart = False
+ self.__in_recv_multipart = False
+ self.__setup_events()
+
+
+ def __del__(self):
+ self.close()
+
+ def close(self, linger=None):
+ super(_Socket, self).close(linger)
+ self.__cleanup_events()
+
+ def __cleanup_events(self):
+ # close the _state_event event, keeps the number of active file descriptors down
+ if getattr(self, '_state_event', None):
+ _stop(self._state_event)
+ self._state_event = None
+ # if the socket has entered a close state resume any waiting greenlets
+ self.__writable.set()
+ self.__readable.set()
+
+ def __setup_events(self):
+ self.__readable = AsyncResult()
+ self.__writable = AsyncResult()
+ self.__readable.set()
+ self.__writable.set()
+
+ try:
+ self._state_event = get_hub().loop.io(self.getsockopt(zmq.FD), 1) # read state watcher
+ self._state_event.start(self.__state_changed)
+ except AttributeError:
+ # for gevent<1.0 compatibility
+ from gevent.core import read_event
+ self._state_event = read_event(self.getsockopt(zmq.FD), self.__state_changed, persist=True)
+
+ def __state_changed(self, event=None, _evtype=None):
+ if self.closed:
+ self.__cleanup_events()
+ return
+ try:
+ # avoid triggering __state_changed from inside __state_changed
+ events = super(_Socket, self).getsockopt(zmq.EVENTS)
+ except zmq.ZMQError as exc:
+ self.__writable.set_exception(exc)
+ self.__readable.set_exception(exc)
+ else:
+ if events & zmq.POLLOUT:
+ self.__writable.set()
+ if events & zmq.POLLIN:
+ self.__readable.set()
+
+ def _wait_write(self):
+ assert self.__writable.ready(), "Only one greenlet can be waiting on this event"
+ self.__writable = AsyncResult()
+ # timeout is because libzmq cannot be trusted to properly signal a new send event:
+ # this is effectively a maximum poll interval of 1s
+ tic = time.time()
+ dt = self._gevent_bug_timeout
+ if dt:
+ timeout = gevent.Timeout(seconds=dt)
+ else:
+ timeout = None
+ try:
+ if timeout:
+ timeout.start()
+ self.__writable.get(block=True)
+ except gevent.Timeout as t:
+ if t is not timeout:
+ raise
+ toc = time.time()
+ # gevent bug: get can raise timeout even on clean return
+ # don't display zmq bug warning for gevent bug (this is getting ridiculous)
+ if self._debug_gevent and timeout and toc-tic > dt and \
+ self.getsockopt(zmq.EVENTS) & zmq.POLLOUT:
+ print("BUG: gevent may have missed a libzmq send event on %i!" % self.FD, file=sys.stderr)
+ finally:
+ if timeout:
+ timeout.cancel()
+ self.__writable.set()
+
+ def _wait_read(self):
+ assert self.__readable.ready(), "Only one greenlet can be waiting on this event"
+ self.__readable = AsyncResult()
+ # timeout is because libzmq cannot always be trusted to play nice with libevent.
+ # I can only confirm that this actually happens for send, but lets be symmetrical
+ # with our dirty hacks.
+ # this is effectively a maximum poll interval of 1s
+ tic = time.time()
+ dt = self._gevent_bug_timeout
+ if dt:
+ timeout = gevent.Timeout(seconds=dt)
+ else:
+ timeout = None
+ try:
+ if timeout:
+ timeout.start()
+ self.__readable.get(block=True)
+ except gevent.Timeout as t:
+ if t is not timeout:
+ raise
+ toc = time.time()
+ # gevent bug: get can raise timeout even on clean return
+ # don't display zmq bug warning for gevent bug (this is getting ridiculous)
+ if self._debug_gevent and timeout and toc-tic > dt and \
+ self.getsockopt(zmq.EVENTS) & zmq.POLLIN:
+ print("BUG: gevent may have missed a libzmq recv event on %i!" % self.FD, file=sys.stderr)
+ finally:
+ if timeout:
+ timeout.cancel()
+ self.__readable.set()
+
+ def send(self, data, flags=0, copy=True, track=False):
+ """send, which will only block current greenlet
+
+ state_changed always fires exactly once (success or fail) at the
+ end of this method.
+ """
+
+ # if we're given the NOBLOCK flag act as normal and let the EAGAIN get raised
+ if flags & zmq.NOBLOCK:
+ try:
+ msg = super(_Socket, self).send(data, flags, copy, track)
+ finally:
+ if not self.__in_send_multipart:
+ self.__state_changed()
+ return msg
+ # ensure the zmq.NOBLOCK flag is part of flags
+ flags |= zmq.NOBLOCK
+ while True: # Attempt to complete this operation indefinitely, blocking the current greenlet
+ try:
+ # attempt the actual call
+ msg = super(_Socket, self).send(data, flags, copy, track)
+ except zmq.ZMQError as e:
+ # if the raised ZMQError is not EAGAIN, reraise
+ if e.errno != zmq.EAGAIN:
+ if not self.__in_send_multipart:
+ self.__state_changed()
+ raise
+ else:
+ if not self.__in_send_multipart:
+ self.__state_changed()
+ return msg
+ # defer to the event loop until we're notified the socket is writable
+ self._wait_write()
+
+ def recv(self, flags=0, copy=True, track=False):
+ """recv, which will only block current greenlet
+
+ state_changed always fires exactly once (success or fail) at the
+ end of this method.
+ """
+ if flags & zmq.NOBLOCK:
+ try:
+ msg = super(_Socket, self).recv(flags, copy, track)
+ finally:
+ if not self.__in_recv_multipart:
+ self.__state_changed()
+ return msg
+
+ flags |= zmq.NOBLOCK
+ while True:
+ try:
+ msg = super(_Socket, self).recv(flags, copy, track)
+ except zmq.ZMQError as e:
+ if e.errno != zmq.EAGAIN:
+ if not self.__in_recv_multipart:
+ self.__state_changed()
+ raise
+ else:
+ if not self.__in_recv_multipart:
+ self.__state_changed()
+ return msg
+ self._wait_read()
+
+ def send_multipart(self, *args, **kwargs):
+ """wrap send_multipart to prevent state_changed on each partial send"""
+ self.__in_send_multipart = True
+ try:
+ msg = super(_Socket, self).send_multipart(*args, **kwargs)
+ finally:
+ self.__in_send_multipart = False
+ self.__state_changed()
+ return msg
+
+ def recv_multipart(self, *args, **kwargs):
+ """wrap recv_multipart to prevent state_changed on each partial recv"""
+ self.__in_recv_multipart = True
+ try:
+ msg = super(_Socket, self).recv_multipart(*args, **kwargs)
+ finally:
+ self.__in_recv_multipart = False
+ self.__state_changed()
+ return msg
+
+ def get(self, opt):
+ """trigger state_changed on getsockopt(EVENTS)"""
+ if opt in TIMEOS:
+ warnings.warn("TIMEO socket options have no effect in zmq.green", UserWarning)
+ optval = super(_Socket, self).get(opt)
+ if opt == zmq.EVENTS:
+ self.__state_changed()
+ return optval
+
+ def set(self, opt, val):
+ """set socket option"""
+ if opt in TIMEOS:
+ warnings.warn("TIMEO socket options have no effect in zmq.green", UserWarning)
+ return super(_Socket, self).set(opt, val)
+
+
+class _Context(_original_Context):
+ """Replacement for :class:`zmq.Context`
+
+ Ensures that the greened Socket above is used in calls to `socket`.
+ """
+ _socket_class = _Socket
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/device.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/device.py
new file mode 100644
index 00000000..4b070237
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/device.py
@@ -0,0 +1,32 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import zmq
+from zmq.green import Poller
+
+def device(device_type, isocket, osocket):
+ """Start a zeromq device (gevent-compatible).
+
+ Unlike the true zmq.device, this does not release the GIL.
+
+ Parameters
+ ----------
+ device_type : (QUEUE, FORWARDER, STREAMER)
+ The type of device to start (ignored).
+ isocket : Socket
+ The Socket instance for the incoming traffic.
+ osocket : Socket
+ The Socket instance for the outbound traffic.
+ """
+ p = Poller()
+ if osocket == -1:
+ osocket = isocket
+ p.register(isocket, zmq.POLLIN)
+ p.register(osocket, zmq.POLLIN)
+
+ while True:
+ events = dict(p.poll())
+ if isocket in events:
+ osocket.send_multipart(isocket.recv_multipart())
+ if osocket in events:
+ isocket.send_multipart(osocket.recv_multipart())
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/eventloop/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/eventloop/__init__.py
new file mode 100644
index 00000000..c5150efe
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/eventloop/__init__.py
@@ -0,0 +1,3 @@
+from zmq.green.eventloop.ioloop import IOLoop
+
+__all__ = ['IOLoop'] \ No newline at end of file
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/eventloop/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/eventloop/ioloop.py
new file mode 100644
index 00000000..e12fd5e9
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/eventloop/ioloop.py
@@ -0,0 +1,33 @@
+from zmq.eventloop.ioloop import *
+from zmq.green import Poller
+
+RealIOLoop = IOLoop
+RealZMQPoller = ZMQPoller
+
+class IOLoop(RealIOLoop):
+
+ def initialize(self, impl=None):
+ impl = _poll() if impl is None else impl
+ super(IOLoop, self).initialize(impl)
+
+ @staticmethod
+ def instance():
+ """Returns a global `IOLoop` instance.
+
+ Most applications have a single, global `IOLoop` running on the
+ main thread. Use this method to get this instance from
+ another thread. To get the current thread's `IOLoop`, use `current()`.
+ """
+ # install this class as the active IOLoop implementation
+ # when using tornado 3
+ if tornado_version >= (3,):
+ PollIOLoop.configure(IOLoop)
+ return PollIOLoop.instance()
+
+
+class ZMQPoller(RealZMQPoller):
+ """gevent-compatible version of ioloop.ZMQPoller"""
+ def __init__(self):
+ self._poller = Poller()
+
+_poll = ZMQPoller
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/eventloop/zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/eventloop/zmqstream.py
new file mode 100644
index 00000000..90fbd1f5
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/eventloop/zmqstream.py
@@ -0,0 +1,11 @@
+from zmq.eventloop.zmqstream import *
+
+from zmq.green.eventloop.ioloop import IOLoop
+
+RealZMQStream = ZMQStream
+
+class ZMQStream(RealZMQStream):
+
+ def __init__(self, socket, io_loop=None):
+ io_loop = io_loop or IOLoop.instance()
+ super(ZMQStream, self).__init__(socket, io_loop=io_loop)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/poll.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/poll.py
new file mode 100644
index 00000000..8f016129
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/green/poll.py
@@ -0,0 +1,95 @@
+import zmq
+import gevent
+from gevent import select
+
+from zmq import Poller as _original_Poller
+
+
+class _Poller(_original_Poller):
+ """Replacement for :class:`zmq.Poller`
+
+ Ensures that the greened Poller below is used in calls to
+ :meth:`zmq.Poller.poll`.
+ """
+ _gevent_bug_timeout = 1.33 # minimum poll interval, for working around gevent bug
+
+ def _get_descriptors(self):
+ """Returns three elements tuple with socket descriptors ready
+ for gevent.select.select
+ """
+ rlist = []
+ wlist = []
+ xlist = []
+
+ for socket, flags in self.sockets:
+ if isinstance(socket, zmq.Socket):
+ rlist.append(socket.getsockopt(zmq.FD))
+ continue
+ elif isinstance(socket, int):
+ fd = socket
+ elif hasattr(socket, 'fileno'):
+ try:
+ fd = int(socket.fileno())
+ except:
+ raise ValueError('fileno() must return an valid integer fd')
+ else:
+ raise TypeError('Socket must be a 0MQ socket, an integer fd '
+ 'or have a fileno() method: %r' % socket)
+
+ if flags & zmq.POLLIN:
+ rlist.append(fd)
+ if flags & zmq.POLLOUT:
+ wlist.append(fd)
+ if flags & zmq.POLLERR:
+ xlist.append(fd)
+
+ return (rlist, wlist, xlist)
+
+ def poll(self, timeout=-1):
+ """Overridden method to ensure that the green version of
+ Poller is used.
+
+ Behaves the same as :meth:`zmq.core.Poller.poll`
+ """
+
+ if timeout is None:
+ timeout = -1
+
+ if timeout < 0:
+ timeout = -1
+
+ rlist = None
+ wlist = None
+ xlist = None
+
+ if timeout > 0:
+ tout = gevent.Timeout.start_new(timeout/1000.0)
+
+ try:
+ # Loop until timeout or events available
+ rlist, wlist, xlist = self._get_descriptors()
+ while True:
+ events = super(_Poller, self).poll(0)
+ if events or timeout == 0:
+ return events
+
+ # wait for activity on sockets in a green way
+ # set a minimum poll frequency,
+ # because gevent < 1.0 cannot be trusted to catch edge-triggered FD events
+ _bug_timeout = gevent.Timeout.start_new(self._gevent_bug_timeout)
+ try:
+ select.select(rlist, wlist, xlist)
+ except gevent.Timeout as t:
+ if t is not _bug_timeout:
+ raise
+ finally:
+ _bug_timeout.cancel()
+
+ except gevent.Timeout as t:
+ if t is not tout:
+ raise
+ return []
+ finally:
+ if timeout > 0:
+ tout.cancel()
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/libzmq.so.3 b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/libzmq.so.3
new file mode 100644
index 00000000..b5e3bab2
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/libzmq.so.3
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/log/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/log/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/log/__init__.py
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/log/handlers.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/log/handlers.py
new file mode 100644
index 00000000..5ff21bf3
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/log/handlers.py
@@ -0,0 +1,146 @@
+"""pyzmq logging handlers.
+
+This mainly defines the PUBHandler object for publishing logging messages over
+a zmq.PUB socket.
+
+The PUBHandler can be used with the regular logging module, as in::
+
+ >>> import logging
+ >>> handler = PUBHandler('tcp://127.0.0.1:12345')
+ >>> handler.root_topic = 'foo'
+ >>> logger = logging.getLogger('foobar')
+ >>> logger.setLevel(logging.DEBUG)
+ >>> logger.addHandler(handler)
+
+After this point, all messages logged by ``logger`` will be published on the
+PUB socket.
+
+Code adapted from StarCluster:
+
+ http://github.com/jtriley/StarCluster/blob/master/starcluster/logger.py
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import logging
+from logging import INFO, DEBUG, WARN, ERROR, FATAL
+
+import zmq
+from zmq.utils.strtypes import bytes, unicode, cast_bytes
+
+
+TOPIC_DELIM="::" # delimiter for splitting topics on the receiving end.
+
+
+class PUBHandler(logging.Handler):
+ """A basic logging handler that emits log messages through a PUB socket.
+
+ Takes a PUB socket already bound to interfaces or an interface to bind to.
+
+ Example::
+
+ sock = context.socket(zmq.PUB)
+ sock.bind('inproc://log')
+ handler = PUBHandler(sock)
+
+ Or::
+
+ handler = PUBHandler('inproc://loc')
+
+ These are equivalent.
+
+ Log messages handled by this handler are broadcast with ZMQ topics
+ ``this.root_topic`` comes first, followed by the log level
+ (DEBUG,INFO,etc.), followed by any additional subtopics specified in the
+ message by: log.debug("subtopic.subsub::the real message")
+ """
+ root_topic=""
+ socket = None
+
+ formatters = {
+ logging.DEBUG: logging.Formatter(
+ "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"),
+ logging.INFO: logging.Formatter("%(message)s\n"),
+ logging.WARN: logging.Formatter(
+ "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"),
+ logging.ERROR: logging.Formatter(
+ "%(levelname)s %(filename)s:%(lineno)d - %(message)s - %(exc_info)s\n"),
+ logging.CRITICAL: logging.Formatter(
+ "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n")}
+
+ def __init__(self, interface_or_socket, context=None):
+ logging.Handler.__init__(self)
+ if isinstance(interface_or_socket, zmq.Socket):
+ self.socket = interface_or_socket
+ self.ctx = self.socket.context
+ else:
+ self.ctx = context or zmq.Context()
+ self.socket = self.ctx.socket(zmq.PUB)
+ self.socket.bind(interface_or_socket)
+
+ def format(self,record):
+ """Format a record."""
+ return self.formatters[record.levelno].format(record)
+
+ def emit(self, record):
+ """Emit a log message on my socket."""
+ try:
+ topic, record.msg = record.msg.split(TOPIC_DELIM,1)
+ except Exception:
+ topic = ""
+ try:
+ bmsg = cast_bytes(self.format(record))
+ except Exception:
+ self.handleError(record)
+ return
+
+ topic_list = []
+
+ if self.root_topic:
+ topic_list.append(self.root_topic)
+
+ topic_list.append(record.levelname)
+
+ if topic:
+ topic_list.append(topic)
+
+ btopic = b'.'.join(cast_bytes(t) for t in topic_list)
+
+ self.socket.send_multipart([btopic, bmsg])
+
+
+class TopicLogger(logging.Logger):
+ """A simple wrapper that takes an additional argument to log methods.
+
+ All the regular methods exist, but instead of one msg argument, two
+ arguments: topic, msg are passed.
+
+ That is::
+
+ logger.debug('msg')
+
+ Would become::
+
+ logger.debug('topic.sub', 'msg')
+ """
+ def log(self, level, topic, msg, *args, **kwargs):
+ """Log 'msg % args' with level and topic.
+
+ To pass exception information, use the keyword argument exc_info
+ with a True value::
+
+ logger.log(level, "zmq.fun", "We have a %s",
+ "mysterious problem", exc_info=1)
+ """
+ logging.Logger.log(self, level, '%s::%s'%(topic,msg), *args, **kwargs)
+
+# Generate the methods of TopicLogger, since they are just adding a
+# topic prefix to a message.
+for name in "debug warn warning error critical fatal".split():
+ meth = getattr(logging.Logger,name)
+ setattr(TopicLogger, name,
+ lambda self, level, topic, msg, *args, **kwargs:
+ meth(self, level, topic+TOPIC_DELIM+msg,*args, **kwargs))
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/ssh/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/ssh/__init__.py
new file mode 100644
index 00000000..57f09568
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/ssh/__init__.py
@@ -0,0 +1 @@
+from zmq.ssh.tunnel import *
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/ssh/forward.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/ssh/forward.py
new file mode 100644
index 00000000..2d619462
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/ssh/forward.py
@@ -0,0 +1,91 @@
+#
+# This file is adapted from a paramiko demo, and thus licensed under LGPL 2.1.
+# Original Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+# Edits Copyright (C) 2010 The IPython Team
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA.
+
+"""
+Sample script showing how to do local port forwarding over paramiko.
+
+This script connects to the requested SSH server and sets up local port
+forwarding (the openssh -L option) from a local port through a tunneled
+connection to a destination reachable from the SSH server machine.
+"""
+
+from __future__ import print_function
+
+import logging
+import select
+try: # Python 3
+ import socketserver
+except ImportError: # Python 2
+ import SocketServer as socketserver
+
+logger = logging.getLogger('ssh')
+
+class ForwardServer (socketserver.ThreadingTCPServer):
+ daemon_threads = True
+ allow_reuse_address = True
+
+
+class Handler (socketserver.BaseRequestHandler):
+
+ def handle(self):
+ try:
+ chan = self.ssh_transport.open_channel('direct-tcpip',
+ (self.chain_host, self.chain_port),
+ self.request.getpeername())
+ except Exception as e:
+ logger.debug('Incoming request to %s:%d failed: %s' % (self.chain_host,
+ self.chain_port,
+ repr(e)))
+ return
+ if chan is None:
+ logger.debug('Incoming request to %s:%d was rejected by the SSH server.' %
+ (self.chain_host, self.chain_port))
+ return
+
+ logger.debug('Connected! Tunnel open %r -> %r -> %r' % (self.request.getpeername(),
+ chan.getpeername(), (self.chain_host, self.chain_port)))
+ while True:
+ r, w, x = select.select([self.request, chan], [], [])
+ if self.request in r:
+ data = self.request.recv(1024)
+ if len(data) == 0:
+ break
+ chan.send(data)
+ if chan in r:
+ data = chan.recv(1024)
+ if len(data) == 0:
+ break
+ self.request.send(data)
+ chan.close()
+ self.request.close()
+ logger.debug('Tunnel closed ')
+
+
+def forward_tunnel(local_port, remote_host, remote_port, transport):
+ # this is a little convoluted, but lets me configure things for the Handler
+ # object. (SocketServer doesn't give Handlers any way to access the outer
+ # server normally.)
+ class SubHander (Handler):
+ chain_host = remote_host
+ chain_port = remote_port
+ ssh_transport = transport
+ ForwardServer(('127.0.0.1', local_port), SubHander).serve_forever()
+
+
+__all__ = ['forward_tunnel']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/ssh/tunnel.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/ssh/tunnel.py
new file mode 100644
index 00000000..5a0c5433
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/ssh/tunnel.py
@@ -0,0 +1,376 @@
+"""Basic ssh tunnel utilities, and convenience functions for tunneling
+zeromq connections.
+"""
+
+# Copyright (C) 2010-2011 IPython Development Team
+# Copyright (C) 2011- PyZMQ Developers
+#
+# Redistributed from IPython under the terms of the BSD License.
+
+
+from __future__ import print_function
+
+import atexit
+import os
+import signal
+import socket
+import sys
+import warnings
+from getpass import getpass, getuser
+from multiprocessing import Process
+
+try:
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore', DeprecationWarning)
+ import paramiko
+ SSHException = paramiko.ssh_exception.SSHException
+except ImportError:
+ paramiko = None
+ class SSHException(Exception):
+ pass
+else:
+ from .forward import forward_tunnel
+
+try:
+ import pexpect
+except ImportError:
+ pexpect = None
+
+
+_random_ports = set()
+
+def select_random_ports(n):
+    """Select and return n random ports that are available."""
+ ports = []
+ for i in range(n):
+ sock = socket.socket()
+ sock.bind(('', 0))
+ while sock.getsockname()[1] in _random_ports:
+ sock.close()
+ sock = socket.socket()
+ sock.bind(('', 0))
+ ports.append(sock)
+ for i, sock in enumerate(ports):
+ port = sock.getsockname()[1]
+ sock.close()
+ ports[i] = port
+ _random_ports.add(port)
+ return ports
+
+
+#-----------------------------------------------------------------------------
+# Check for passwordless login
+#-----------------------------------------------------------------------------
+
+def try_passwordless_ssh(server, keyfile, paramiko=None):
+ """Attempt to make an ssh connection without a password.
+ This is mainly used for requiring password input only once
+ when many tunnels may be connected to the same server.
+
+ If paramiko is None, the default for the platform is chosen.
+ """
+ if paramiko is None:
+ paramiko = sys.platform == 'win32'
+ if not paramiko:
+ f = _try_passwordless_openssh
+ else:
+ f = _try_passwordless_paramiko
+ return f(server, keyfile)
+
+def _try_passwordless_openssh(server, keyfile):
+ """Try passwordless login with shell ssh command."""
+ if pexpect is None:
+ raise ImportError("pexpect unavailable, use paramiko")
+ cmd = 'ssh -f '+ server
+ if keyfile:
+ cmd += ' -i ' + keyfile
+ cmd += ' exit'
+
+ # pop SSH_ASKPASS from env
+ env = os.environ.copy()
+ env.pop('SSH_ASKPASS', None)
+
+ ssh_newkey = 'Are you sure you want to continue connecting'
+ p = pexpect.spawn(cmd, env=env)
+ while True:
+ try:
+ i = p.expect([ssh_newkey, '[Pp]assword:'], timeout=.1)
+ if i==0:
+ raise SSHException('The authenticity of the host can\'t be established.')
+ except pexpect.TIMEOUT:
+ continue
+ except pexpect.EOF:
+ return True
+ else:
+ return False
+
+def _try_passwordless_paramiko(server, keyfile):
+ """Try passwordless login with paramiko."""
+ if paramiko is None:
+ msg = "Paramiko unavaliable, "
+ if sys.platform == 'win32':
+ msg += "Paramiko is required for ssh tunneled connections on Windows."
+ else:
+ msg += "use OpenSSH."
+ raise ImportError(msg)
+ username, server, port = _split_server(server)
+ client = paramiko.SSHClient()
+ client.load_system_host_keys()
+ client.set_missing_host_key_policy(paramiko.WarningPolicy())
+ try:
+ client.connect(server, port, username=username, key_filename=keyfile,
+ look_for_keys=True)
+ except paramiko.AuthenticationException:
+ return False
+ else:
+ client.close()
+ return True
+
+
+def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
+ """Connect a socket to an address via an ssh tunnel.
+
+ This is a wrapper for socket.connect(addr), when addr is not accessible
+ from the local machine. It simply creates an ssh tunnel using the remaining args,
+ and calls socket.connect('tcp://localhost:lport') where lport is the randomly
+ selected local port of the tunnel.
+
+ """
+ new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout)
+ socket.connect(new_url)
+ return tunnel
+
+
+def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
+ """Open a tunneled connection from a 0MQ url.
+
+ For use inside tunnel_connection.
+
+ Returns
+ -------
+
+ (url, tunnel) : (str, object)
+ The 0MQ url that has been forwarded, and the tunnel object
+ """
+
+ lport = select_random_ports(1)[0]
+ transport, addr = addr.split('://')
+ ip,rport = addr.split(':')
+ rport = int(rport)
+ if paramiko is None:
+ paramiko = sys.platform == 'win32'
+ if paramiko:
+ tunnelf = paramiko_tunnel
+ else:
+ tunnelf = openssh_tunnel
+
+ tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout)
+ return 'tcp://127.0.0.1:%i'%lport, tunnel
+
+def openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
+ """Create an ssh tunnel using command-line ssh that connects port lport
+ on this machine to localhost:rport on server. The tunnel
+ will automatically close when not in use, remaining open
+ for a minimum of timeout seconds for an initial connection.
+
+ This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
+ as seen from `server`.
+
+ keyfile and password may be specified, but ssh config is checked for defaults.
+
+ Parameters
+ ----------
+
+ lport : int
+ local port for connecting to the tunnel from this machine.
+ rport : int
+ port on the remote machine to connect to.
+ server : str
+ The ssh server to connect to. The full ssh server string will be parsed.
+ user@server:port
+ remoteip : str [Default: 127.0.0.1]
+ The remote ip, specifying the destination of the tunnel.
+ Default is localhost, which means that the tunnel would redirect
+ localhost:lport on this machine to localhost:rport on the *server*.
+
+ keyfile : str; path to public key file
+ This specifies a key to be used in ssh login, default None.
+ Regular default ssh keys will be used without specifying this argument.
+ password : str;
+ Your ssh password to the ssh server. Note that if this is left None,
+ you will be prompted for it if passwordless key based login is unavailable.
+ timeout : int [default: 60]
+ The time (in seconds) after which no activity will result in the tunnel
+ closing. This prevents orphaned tunnels from running forever.
+ """
+ if pexpect is None:
+ raise ImportError("pexpect unavailable, use paramiko_tunnel")
+ ssh="ssh "
+ if keyfile:
+ ssh += "-i " + keyfile
+
+ if ':' in server:
+ server, port = server.split(':')
+ ssh += " -p %s" % port
+
+ cmd = "%s -O check %s" % (ssh, server)
+ (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
+ if not exitstatus:
+ pid = int(output[output.find("(pid=")+5:output.find(")")])
+ cmd = "%s -O forward -L 127.0.0.1:%i:%s:%i %s" % (
+ ssh, lport, remoteip, rport, server)
+ (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
+ if not exitstatus:
+ atexit.register(_stop_tunnel, cmd.replace("-O forward", "-O cancel", 1))
+ return pid
+ cmd = "%s -f -S none -L 127.0.0.1:%i:%s:%i %s sleep %i" % (
+ ssh, lport, remoteip, rport, server, timeout)
+
+ # pop SSH_ASKPASS from env
+ env = os.environ.copy()
+ env.pop('SSH_ASKPASS', None)
+
+ ssh_newkey = 'Are you sure you want to continue connecting'
+ tunnel = pexpect.spawn(cmd, env=env)
+ failed = False
+ while True:
+ try:
+ i = tunnel.expect([ssh_newkey, '[Pp]assword:'], timeout=.1)
+ if i==0:
+ raise SSHException('The authenticity of the host can\'t be established.')
+ except pexpect.TIMEOUT:
+ continue
+ except pexpect.EOF:
+ if tunnel.exitstatus:
+ print(tunnel.exitstatus)
+ print(tunnel.before)
+ print(tunnel.after)
+ raise RuntimeError("tunnel '%s' failed to start"%(cmd))
+ else:
+ return tunnel.pid
+ else:
+ if failed:
+ print("Password rejected, try again")
+ password=None
+ if password is None:
+ password = getpass("%s's password: "%(server))
+ tunnel.sendline(password)
+ failed = True
+
+def _stop_tunnel(cmd):
+ pexpect.run(cmd)
+
+def _split_server(server):
+ if '@' in server:
+ username,server = server.split('@', 1)
+ else:
+ username = getuser()
+ if ':' in server:
+ server, port = server.split(':')
+ port = int(port)
+ else:
+ port = 22
+ return username, server, port
+
+def paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
+    """launch a tunnel with paramiko in a subprocess. This should only be used
+ when shell ssh is unavailable (e.g. Windows).
+
+ This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
+ as seen from `server`.
+
+ If you are familiar with ssh tunnels, this creates the tunnel:
+
+ ssh server -L localhost:lport:remoteip:rport
+
+ keyfile and password may be specified, but ssh config is checked for defaults.
+
+
+ Parameters
+ ----------
+
+ lport : int
+ local port for connecting to the tunnel from this machine.
+ rport : int
+ port on the remote machine to connect to.
+ server : str
+ The ssh server to connect to. The full ssh server string will be parsed.
+ user@server:port
+ remoteip : str [Default: 127.0.0.1]
+ The remote ip, specifying the destination of the tunnel.
+ Default is localhost, which means that the tunnel would redirect
+ localhost:lport on this machine to localhost:rport on the *server*.
+
+ keyfile : str; path to public key file
+ This specifies a key to be used in ssh login, default None.
+ Regular default ssh keys will be used without specifying this argument.
+ password : str;
+ Your ssh password to the ssh server. Note that if this is left None,
+ you will be prompted for it if passwordless key based login is unavailable.
+ timeout : int [default: 60]
+ The time (in seconds) after which no activity will result in the tunnel
+ closing. This prevents orphaned tunnels from running forever.
+
+ """
+ if paramiko is None:
+ raise ImportError("Paramiko not available")
+
+ if password is None:
+ if not _try_passwordless_paramiko(server, keyfile):
+ password = getpass("%s's password: "%(server))
+
+ p = Process(target=_paramiko_tunnel,
+ args=(lport, rport, server, remoteip),
+ kwargs=dict(keyfile=keyfile, password=password))
+ p.daemon=False
+ p.start()
+ atexit.register(_shutdown_process, p)
+ return p
+
+def _shutdown_process(p):
+ if p.is_alive():
+ p.terminate()
+
+def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):
+ """Function for actually starting a paramiko tunnel, to be passed
+ to multiprocessing.Process(target=this), and not called directly.
+ """
+ username, server, port = _split_server(server)
+ client = paramiko.SSHClient()
+ client.load_system_host_keys()
+ client.set_missing_host_key_policy(paramiko.WarningPolicy())
+
+ try:
+ client.connect(server, port, username=username, key_filename=keyfile,
+ look_for_keys=True, password=password)
+# except paramiko.AuthenticationException:
+# if password is None:
+# password = getpass("%s@%s's password: "%(username, server))
+# client.connect(server, port, username=username, password=password)
+# else:
+# raise
+ except Exception as e:
+ print('*** Failed to connect to %s:%d: %r' % (server, port, e))
+ sys.exit(1)
+
+ # Don't let SIGINT kill the tunnel subprocess
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+ try:
+ forward_tunnel(lport, remoteip, rport, client.get_transport())
+ except KeyboardInterrupt:
+ print('SIGINT: Port forwarding stopped cleanly')
+ sys.exit(0)
+ except Exception as e:
+ print("Port forwarding stopped uncleanly: %s"%e)
+ sys.exit(255)
+
+if sys.platform == 'win32':
+ ssh_tunnel = paramiko_tunnel
+else:
+ ssh_tunnel = openssh_tunnel
+
+
+__all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh']
+
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/__init__.py
new file mode 100644
index 00000000..d0510a44
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/__init__.py
@@ -0,0 +1,27 @@
+"""pure-Python sugar wrappers for core 0MQ objects."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from zmq.sugar import (
+ constants, context, frame, poll, socket, tracker, version
+)
+from zmq import error
+
+__all__ = ['constants']
+for submod in (
+ constants, context, error, frame, poll, socket, tracker, version
+):
+ __all__.extend(submod.__all__)
+
+from zmq.error import *
+from zmq.sugar.context import *
+from zmq.sugar.tracker import *
+from zmq.sugar.socket import *
+from zmq.sugar.constants import *
+from zmq.sugar.frame import *
+from zmq.sugar.poll import *
+# from zmq.sugar.stopwatch import *
+# from zmq.sugar._device import *
+from zmq.sugar.version import *
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/attrsettr.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/attrsettr.py
new file mode 100644
index 00000000..4bbd36d6
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/attrsettr.py
@@ -0,0 +1,52 @@
+# coding: utf-8
+"""Mixin for mapping set/getattr to self.set/get"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from . import constants
+
+class AttributeSetter(object):
+
+ def __setattr__(self, key, value):
+ """set zmq options by attribute"""
+
+ # regular setattr only allowed for class-defined attributes
+ for obj in [self] + self.__class__.mro():
+ if key in obj.__dict__:
+ object.__setattr__(self, key, value)
+ return
+
+ upper_key = key.upper()
+ try:
+ opt = getattr(constants, upper_key)
+ except AttributeError:
+ raise AttributeError("%s has no such option: %s" % (
+ self.__class__.__name__, upper_key)
+ )
+ else:
+ self._set_attr_opt(upper_key, opt, value)
+
+ def _set_attr_opt(self, name, opt, value):
+ """override if setattr should do something other than call self.set"""
+ self.set(opt, value)
+
+ def __getattr__(self, key):
+ """get zmq options by attribute"""
+ upper_key = key.upper()
+ try:
+ opt = getattr(constants, upper_key)
+ except AttributeError:
+ raise AttributeError("%s has no such option: %s" % (
+ self.__class__.__name__, upper_key)
+ )
+ else:
+ return self._get_attr_opt(upper_key, opt)
+
+ def _get_attr_opt(self, name, opt):
+ """override if getattr should do something other than call self.get"""
+ return self.get(opt)
+
+
+__all__ = ['AttributeSetter']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/constants.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/constants.py
new file mode 100644
index 00000000..88281176
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/constants.py
@@ -0,0 +1,98 @@
+"""0MQ Constants."""
+
+# Copyright (c) PyZMQ Developers.
+# Distributed under the terms of the Modified BSD License.
+
+from zmq.backend import constants
+from zmq.utils.constant_names import (
+ base_names,
+ switched_sockopt_names,
+ int_sockopt_names,
+ int64_sockopt_names,
+ bytes_sockopt_names,
+ fd_sockopt_names,
+ ctx_opt_names,
+ msg_opt_names,
+)
+
+#-----------------------------------------------------------------------------
+# Python module level constants
+#-----------------------------------------------------------------------------
+
+__all__ = [
+ 'int_sockopts',
+ 'int64_sockopts',
+ 'bytes_sockopts',
+ 'ctx_opts',
+ 'ctx_opt_names',
+ ]
+
+int_sockopts = set()
+int64_sockopts = set()
+bytes_sockopts = set()
+fd_sockopts = set()
+ctx_opts = set()
+msg_opts = set()
+
+
+if constants.VERSION < 30000:
+ int64_sockopt_names.extend(switched_sockopt_names)
+else:
+ int_sockopt_names.extend(switched_sockopt_names)
+
+_UNDEFINED = -9999
+
+def _add_constant(name, container=None):
+ """add a constant to be defined
+
+ optionally add it to one of the sets for use in get/setopt checkers
+ """
+ c = getattr(constants, name, _UNDEFINED)
+ if c == _UNDEFINED:
+ return
+ globals()[name] = c
+ __all__.append(name)
+ if container is not None:
+ container.add(c)
+ return c
+
+for name in base_names:
+ _add_constant(name)
+
+for name in int_sockopt_names:
+ _add_constant(name, int_sockopts)
+
+for name in int64_sockopt_names:
+ _add_constant(name, int64_sockopts)
+
+for name in bytes_sockopt_names:
+ _add_constant(name, bytes_sockopts)
+
+for name in fd_sockopt_names:
+ _add_constant(name, fd_sockopts)
+
+for name in ctx_opt_names:
+ _add_constant(name, ctx_opts)
+
+for name in msg_opt_names:
+ _add_constant(name, msg_opts)
+
+# ensure some aliases are always defined
+aliases = [
+ ('DONTWAIT', 'NOBLOCK'),
+ ('XREQ', 'DEALER'),
+ ('XREP', 'ROUTER'),
+]
+for group in aliases:
+ undefined = set()
+ found = None
+ for name in group:
+ value = getattr(constants, name, -1)
+ if value != -1:
+ found = value
+ else:
+ undefined.add(name)
+ if found is not None:
+ for name in undefined:
+ globals()[name] = found
+ __all__.append(name)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/context.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/context.py
new file mode 100644
index 00000000..86a9c5dc
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/context.py
@@ -0,0 +1,192 @@
+# coding: utf-8
+"""Python bindings for 0MQ."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import atexit
+import weakref
+
+from zmq.backend import Context as ContextBase
+from . import constants
+from .attrsettr import AttributeSetter
+from .constants import ENOTSUP, ctx_opt_names
+from .socket import Socket
+from zmq.error import ZMQError
+
+from zmq.utils.interop import cast_int_addr
+
+
class Context(ContextBase, AttributeSetter):
    """Create a zmq Context

    A zmq Context creates sockets via its ``ctx.socket`` method.
    """
    sockopts = None      # default socket options applied to new sockets
    _instance = None     # cached global instance for Context.instance()
    _shadow = False      # True when wrapping a foreign libzmq context
    _exiting = False     # set at interpreter exit; __del__ then skips term()

    def __init__(self, io_threads=1, **kwargs):
        super(Context, self).__init__(io_threads=io_threads, **kwargs)
        self._shadow = bool(kwargs.get('shadow', False))
        self.sockopts = {}

        self._exiting = False
        if not self._shadow:
            # Use a weakref so the atexit hook does not keep the context
            # alive; it only flags that interpreter shutdown has begun,
            # because term() is not safe to call at that point.
            ctx_ref = weakref.ref(self)

            def _mark_exiting():
                ctx = ctx_ref()
                if ctx is not None:
                    ctx._exiting = True

            atexit.register(_mark_exiting)

    def __del__(self):
        """deleting a Context should terminate it, without trying non-threadsafe destroy"""
        # Never terminate a shadow context (we don't own it) nor during
        # interpreter exit.
        if not self._shadow and not self._exiting:
            self.term()

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.term()

    @classmethod
    def shadow(cls, address):
        """Shadow an existing libzmq context

        address is the integer address of the libzmq context
        or an FFI pointer to it.

        .. versionadded:: 14.1
        """
        return cls(shadow=cast_int_addr(address))

    @classmethod
    def shadow_pyczmq(cls, ctx):
        """Shadow an existing pyczmq context

        ctx is the FFI `zctx_t *` pointer

        .. versionadded:: 14.1
        """
        from pyczmq import zctx

        return cls(shadow=cast_int_addr(zctx.underlying(ctx)))

    # static method copied from tornado IOLoop.instance
    @classmethod
    def instance(cls, io_threads=1):
        """Returns a global Context instance.

        Most single-threaded applications have a single, global Context.
        Use this method instead of passing around Context instances
        throughout your code.

        A common pattern for classes that depend on Contexts is to use
        a default argument to enable programs with multiple Contexts
        but not require the argument for simpler applications::

            class MyClass(object):
                def __init__(self, context=None):
                    self.context = context or Context.instance()
        """
        if cls._instance is None or cls._instance.closed:
            cls._instance = cls(io_threads=io_threads)
        return cls._instance

    #-------------------------------------------------------------------------
    # Hooks for ctxopt completion
    #-------------------------------------------------------------------------

    def __dir__(self):
        # expose context option names for tab completion
        keys = dir(self.__class__)
        keys.extend(ctx_opt_names)
        return keys

    #-------------------------------------------------------------------------
    # Creating Sockets
    #-------------------------------------------------------------------------

    @property
    def _socket_class(self):
        return Socket

    def socket(self, socket_type):
        """Create a Socket associated with this Context.

        Parameters
        ----------
        socket_type : int
            The socket type, which can be any of the 0MQ socket types:
            REQ, REP, PUB, SUB, PAIR, DEALER, ROUTER, PULL, PUSH, etc.
        """
        if self.closed:
            raise ZMQError(ENOTSUP)
        sock = self._socket_class(self, socket_type)
        for opt, value in self.sockopts.items():
            try:
                sock.setsockopt(opt, value)
            except ZMQError:
                # ignore ZMQErrors, which are likely for socket options
                # that do not apply to a particular socket type, e.g.
                # SUBSCRIBE for non-SUB sockets.
                pass
        return sock

    def setsockopt(self, opt, value):
        """set default socket options for new sockets created by this Context

        .. versionadded:: 13.0
        """
        self.sockopts[opt] = value

    def getsockopt(self, opt):
        """get default socket options for new sockets created by this Context

        .. versionadded:: 13.0
        """
        return self.sockopts[opt]

    def _set_attr_opt(self, name, opt, value):
        """set default sockopts as attributes"""
        if name in constants.ctx_opt_names:
            return self.set(opt, value)
        self.sockopts[opt] = value

    def _get_attr_opt(self, name, opt):
        """get default sockopts as attributes"""
        if name in constants.ctx_opt_names:
            return self.get(opt)
        if opt not in self.sockopts:
            raise AttributeError(name)
        return self.sockopts[opt]

    def __delattr__(self, key):
        """delete default sockopts as attributes"""
        key = key.upper()
        try:
            opt = getattr(constants, key)
        except AttributeError:
            raise AttributeError("no such socket option: %s" % key)
        if opt not in self.sockopts:
            raise AttributeError(key)
        del self.sockopts[opt]
+
+__all__ = ['Context']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/frame.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/frame.py
new file mode 100644
index 00000000..9f556c86
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/frame.py
@@ -0,0 +1,19 @@
+# coding: utf-8
+"""0MQ Frame pure Python methods."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from .attrsettr import AttributeSetter
+from zmq.backend import Frame as FrameBase
+
+
class Frame(FrameBase, AttributeSetter):
    """Pure-Python additions to the backend Frame type."""

    def __getitem__(self, key):
        # Frame['User-Id'] delegates to Frame.get('User-Id')
        return self.get(key)


# Deprecated alias kept for backwards compatibility.
Message = Frame
+__all__ = ['Frame', 'Message'] \ No newline at end of file
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/poll.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/poll.py
new file mode 100644
index 00000000..c7b1d1bb
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/poll.py
@@ -0,0 +1,161 @@
+"""0MQ polling related functions and classes."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import zmq
+from zmq.backend import zmq_poll
+from .constants import POLLIN, POLLOUT, POLLERR
+
+#-----------------------------------------------------------------------------
+# Polling related methods
+#-----------------------------------------------------------------------------
+
+
class Poller(object):
    """A stateful poll interface that mirrors Python's built-in poll."""
    sockets = None   # list of (socket, flags) pairs, in registration order
    _map = {}        # socket -> index into self.sockets

    def __init__(self):
        self.sockets = []
        self._map = {}

    def __contains__(self, socket):
        return socket in self._map

    def register(self, socket, flags=POLLIN|POLLOUT):
        """p.register(socket, flags=POLLIN|POLLOUT)

        Register a 0MQ socket or native fd for I/O monitoring.

        register(s,0) is equivalent to unregister(s).

        Parameters
        ----------
        socket : zmq.Socket or native socket
            A zmq.Socket or any Python object having a ``fileno()``
            method that returns a valid file descriptor.
        flags : int
            The events to watch for. Can be POLLIN, POLLOUT or POLLIN|POLLOUT.
            If `flags=0`, socket will be unregistered.
        """
        if not flags:
            # zero flags: unregister a known socket, ignore a new one
            if socket in self._map:
                self.unregister(socket)
            return
        if socket in self._map:
            # already registered: just update its flags in place
            self.sockets[self._map[socket]] = (socket, flags)
        else:
            self._map[socket] = len(self.sockets)
            self.sockets.append((socket, flags))

    def modify(self, socket, flags=POLLIN|POLLOUT):
        """Modify the flags for an already registered 0MQ socket or native fd."""
        self.register(socket, flags)

    def unregister(self, socket):
        """Remove a 0MQ socket or native fd for I/O monitoring.

        Parameters
        ----------
        socket : Socket
            The socket instance to stop polling.
        """
        idx = self._map.pop(socket)
        self.sockets.pop(idx)
        # every entry after the removed one shifts down by one index
        for sock, _flags in self.sockets[idx:]:
            self._map[sock] -= 1

    def poll(self, timeout=None):
        """Poll the registered 0MQ or native fds for I/O.

        Parameters
        ----------
        timeout : float, int
            The timeout in milliseconds. If None, no `timeout` (infinite).
            This is in milliseconds to be compatible with ``select.poll()``.

        Returns
        -------
        events : list of tuples
            The list of events that are ready to be processed.
            This is a list of tuples of the form ``(socket, event)``, where
            the 0MQ Socket or integer fd is the first element, and the poll
            event mask (POLLIN, POLLOUT) is the second.  It is common to call
            ``events = dict(poller.poll())``, which turns the list of tuples
            into a mapping of ``socket : event``.
        """
        if timeout is None or timeout < 0:
            timeout = -1
        elif isinstance(timeout, float):
            timeout = int(timeout)
        return zmq_poll(self.sockets, timeout=timeout)
+
+
def select(rlist, wlist, xlist, timeout=None):
    """select(rlist, wlist, xlist, timeout=None) -> (rlist, wlist, xlist)

    Return the result of poll as lists of sockets ready for r/w/exception.

    This has the same interface as Python's built-in ``select.select()``.

    Parameters
    ----------
    rlist : list of sockets/FDs
        sockets/FDs to be polled for read events
    wlist : list of sockets/FDs
        sockets/FDs to be polled for write events
    xlist : list of sockets/FDs
        sockets/FDs to be polled for error events
    timeout : float, int, optional
        The timeout in seconds (to match ``select.select()``); None means
        wait forever.

    Returns
    -------
    (rlist, wlist, xlist) : tuple of lists of sockets (length 3)
        Lists correspond to sockets available for read/write/error events
        respectively.
    """
    if timeout is None:
        timeout = -1
    # Convert seconds -> milliseconds; zmq_poll takes a 3.x-style ms timeout.
    timeout = int(timeout * 1000.0)
    if timeout < 0:
        timeout = -1
    watched = []
    for s in set(rlist + wlist + xlist):
        mask = 0
        if s in rlist:
            mask |= POLLIN
        if s in wlist:
            mask |= POLLOUT
        if s in xlist:
            mask |= POLLERR
        watched.append((s, mask))
    ready_r, ready_w, ready_x = [], [], []
    for s, mask in zmq_poll(watched, timeout):
        if mask & POLLIN:
            ready_r.append(s)
        if mask & POLLOUT:
            ready_w.append(s)
        if mask & POLLERR:
            ready_x.append(s)
    return ready_r, ready_w, ready_x
+
+#-----------------------------------------------------------------------------
+# Symbols to export
+#-----------------------------------------------------------------------------
+
+__all__ = [ 'Poller', 'select' ]
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/socket.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/socket.py
new file mode 100644
index 00000000..c91589d7
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/socket.py
@@ -0,0 +1,495 @@
+# coding: utf-8
+"""0MQ Socket pure Python methods."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import codecs
+import random
+import warnings
+
+import zmq
+from zmq.backend import Socket as SocketBase
+from .poll import Poller
+from . import constants
+from .attrsettr import AttributeSetter
+from zmq.error import ZMQError, ZMQBindError
+from zmq.utils import jsonapi
+from zmq.utils.strtypes import bytes,unicode,basestring
+from zmq.utils.interop import cast_int_addr
+
+from .constants import (
+ SNDMORE, ENOTSUP, POLLIN,
+ int64_sockopt_names,
+ int_sockopt_names,
+ bytes_sockopt_names,
+ fd_sockopt_names,
+)
try:
    # Python 2: prefer the C implementation.
    import cPickle
    pickle = cPickle
except ImportError:
    # Python 3: cPickle is gone; the C accelerator is merged into pickle.
    # (Was a bare `except:`, which would also have swallowed
    # KeyboardInterrupt/SystemExit.)
    cPickle = None
    import pickle

try:
    # pickle.DEFAULT_PROTOCOL is new in Python 3.4
    DEFAULT_PROTOCOL = pickle.DEFAULT_PROTOCOL
except AttributeError:
    DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL
+
+
class Socket(SocketBase, AttributeSetter):
    """The ZMQ socket object

    To create a Socket, first create a Context::

        ctx = zmq.Context.instance()

    then call ``ctx.socket(socket_type)``::

        s = ctx.socket(zmq.ROUTER)

    """
    _shadow = False  # True when wrapping a libzmq socket owned elsewhere

    def __del__(self):
        # only close sockets we actually own
        if not self._shadow:
            self.close()

    # socket as context manager:
    def __enter__(self):
        """Sockets are context managers

        .. versionadded:: 14.4
        """
        return self

    def __exit__(self, *args, **kwargs):
        self.close()

    #-------------------------------------------------------------------------
    # Socket creation
    #-------------------------------------------------------------------------

    @classmethod
    def shadow(cls, address):
        """Shadow an existing libzmq socket

        address is the integer address of the libzmq socket
        or an FFI pointer to it.

        .. versionadded:: 14.1
        """
        address = cast_int_addr(address)
        return cls(shadow=address)

    #-------------------------------------------------------------------------
    # Deprecated aliases
    #-------------------------------------------------------------------------

    @property
    def socket_type(self):
        warnings.warn("Socket.socket_type is deprecated, use Socket.type",
            DeprecationWarning
        )
        return self.type

    #-------------------------------------------------------------------------
    # Hooks for sockopt completion
    #-------------------------------------------------------------------------

    def __dir__(self):
        # expose socket option names for tab completion
        keys = dir(self.__class__)
        for collection in (
            bytes_sockopt_names,
            int_sockopt_names,
            int64_sockopt_names,
            fd_sockopt_names,
        ):
            keys.extend(collection)
        return keys

    #-------------------------------------------------------------------------
    # Getting/Setting options
    #-------------------------------------------------------------------------
    setsockopt = SocketBase.set
    getsockopt = SocketBase.get

    def set_string(self, option, optval, encoding='utf-8'):
        """set socket options with a unicode object

        This is simply a wrapper for setsockopt to protect from encoding
        ambiguity.

        See the 0MQ documentation for details on specific options.

        Parameters
        ----------
        option : int
            The name of the option to set. Can be any of: SUBSCRIBE,
            UNSUBSCRIBE, IDENTITY
        optval : unicode string (unicode on py2, str on py3)
            The value of the option to set.
        encoding : str
            The encoding to be used, default is utf8
        """
        if not isinstance(optval, unicode):
            raise TypeError("unicode strings only")
        return self.set(option, optval.encode(encoding))

    setsockopt_unicode = setsockopt_string = set_string

    def get_string(self, option, encoding='utf-8'):
        """get the value of a socket option

        See the 0MQ documentation for details on specific options.

        Parameters
        ----------
        option : int
            The option to retrieve.

        Returns
        -------
        optval : unicode string (unicode on py2, str on py3)
            The value of the option as a unicode string.
        """
        if option not in constants.bytes_sockopts:
            raise TypeError("option %i will not return a string to be decoded"%option)
        return self.getsockopt(option).decode(encoding)

    getsockopt_unicode = getsockopt_string = get_string

    def bind_to_random_port(self, addr, min_port=49152, max_port=65536, max_tries=100):
        """bind this socket to a random port in a range

        Parameters
        ----------
        addr : str
            The address string without the port to pass to ``Socket.bind()``.
        min_port : int, optional
            The minimum port in the range of ports to try (inclusive).
        max_port : int, optional
            The maximum port in the range of ports to try (exclusive).
        max_tries : int, optional
            The maximum number of bind attempts to make.

        Returns
        -------
        port : int
            The port the socket was bound to.

        Raises
        ------
        ZMQBindError
            if `max_tries` reached before successful bind
        """
        for i in range(max_tries):
            try:
                port = random.randrange(min_port, max_port)
                self.bind('%s:%s' % (addr, port))
            except ZMQError as exception:
                # EADDRINUSE just means "try another port"
                if not exception.errno == zmq.EADDRINUSE:
                    raise
            else:
                return port
        raise ZMQBindError("Could not bind socket to random port.")

    def get_hwm(self):
        """get the High Water Mark

        On libzmq >= 3, this gets SNDHWM if available, otherwise RCVHWM
        """
        major = zmq.zmq_version_info()[0]
        if major >= 3:
            # return sndhwm, fallback on rcvhwm
            try:
                return self.getsockopt(zmq.SNDHWM)
            except zmq.ZMQError:
                pass
            return self.getsockopt(zmq.RCVHWM)
        else:
            return self.getsockopt(zmq.HWM)

    def set_hwm(self, value):
        """set the High Water Mark

        On libzmq >= 3, this sets both SNDHWM and RCVHWM
        """
        major = zmq.zmq_version_info()[0]
        if major >= 3:
            raised = None
            try:
                self.sndhwm = value
            except Exception as e:
                raised = e
            try:
                self.rcvhwm = value
            except Exception as e:
                # BUGFIX: bind the exception in this clause too.  The
                # original read a stale (or unbound) `e` from the first
                # handler, masking a RCVHWM failure with a NameError.
                raised = e

            if raised:
                raise raised
        else:
            return self.setsockopt(zmq.HWM, value)

    hwm = property(get_hwm, set_hwm,
        """property for High Water Mark

        Setting hwm sets both SNDHWM and RCVHWM as appropriate.
        It gets SNDHWM if available, otherwise RCVHWM.
        """
    )

    #-------------------------------------------------------------------------
    # Sending and receiving messages
    #-------------------------------------------------------------------------

    def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
        """send a sequence of buffers as a multipart message

        The zmq.SNDMORE flag is added to all msg parts before the last.

        Parameters
        ----------
        msg_parts : iterable
            A sequence of objects to send as a multipart message. Each element
            can be any sendable object (Frame, bytes, buffer-providers)
        flags : int, optional
            SNDMORE is handled automatically for frames before the last.
        copy : bool, optional
            Should the frame(s) be sent in a copying or non-copying manner.
        track : bool, optional
            Should the frame(s) be tracked for notification that ZMQ has
            finished with it (ignored if copy=True).

        Returns
        -------
        None : if copy or not track
        MessageTracker : if track and not copy
            a MessageTracker object, whose `pending` property will
            be True until the last send is completed.
        """
        for msg in msg_parts[:-1]:
            self.send(msg, SNDMORE | flags, copy=copy, track=track)
        # Send the last part without the extra SNDMORE flag.
        return self.send(msg_parts[-1], flags, copy=copy, track=track)

    def recv_multipart(self, flags=0, copy=True, track=False):
        """receive a multipart message as a list of bytes or Frame objects

        Parameters
        ----------
        flags : int, optional
            Any supported flag: NOBLOCK. If NOBLOCK is set, this method
            will raise a ZMQError with EAGAIN if a message is not ready.
            If NOBLOCK is not set, then this method will block until a
            message arrives.
        copy : bool, optional
            Should the message frame(s) be received in a copying or
            non-copying manner? If False a Frame object is returned for each
            part, if True a copy of the bytes is made for each frame.
        track : bool, optional
            Should the message frame(s) be tracked for notification that ZMQ
            has finished with it? (ignored if copy=True)

        Returns
        -------
        msg_parts : list
            A list of frames in the multipart message; either Frames or bytes,
            depending on `copy`.
        """
        parts = [self.recv(flags, copy=copy, track=track)]
        # have first part already, only loop while more to receive
        while self.getsockopt(zmq.RCVMORE):
            part = self.recv(flags, copy=copy, track=track)
            parts.append(part)

        return parts

    def send_string(self, u, flags=0, copy=True, encoding='utf-8'):
        """send a Python unicode string as a message with an encoding

        0MQ communicates with raw bytes, so you must encode/decode
        text (unicode on py2, str on py3) around 0MQ.

        Parameters
        ----------
        u : Python unicode string (unicode on py2, str on py3)
            The unicode string to send.
        flags : int, optional
            Any valid send flag.
        encoding : str [default: 'utf-8']
            The encoding to be used
        """
        if not isinstance(u, basestring):
            raise TypeError("unicode/str objects only")
        return self.send(u.encode(encoding), flags=flags, copy=copy)

    send_unicode = send_string

    def recv_string(self, flags=0, encoding='utf-8'):
        """receive a unicode string, as sent by send_string

        Parameters
        ----------
        flags : int
            Any valid recv flag.
        encoding : str [default: 'utf-8']
            The encoding to be used

        Returns
        -------
        s : unicode string (unicode on py2, str on py3)
            The Python unicode string that arrives as encoded bytes.
        """
        b = self.recv(flags=flags)
        return b.decode(encoding)

    recv_unicode = recv_string

    def send_pyobj(self, obj, flags=0, protocol=DEFAULT_PROTOCOL):
        """send a Python object as a message using pickle to serialize

        Parameters
        ----------
        obj : Python object
            The Python object to send.
        flags : int
            Any valid send flag.
        protocol : int
            The pickle protocol number to use. The default is
            pickle.DEFAULT_PROTOCOL where defined, and
            pickle.HIGHEST_PROTOCOL elsewhere.
        """
        msg = pickle.dumps(obj, protocol)
        return self.send(msg, flags)

    def recv_pyobj(self, flags=0):
        """receive a Python object as a message using pickle to serialize

        Parameters
        ----------
        flags : int
            Any valid recv flag.

        Returns
        -------
        obj : Python object
            The Python object that arrives as a message.
        """
        s = self.recv(flags)
        return pickle.loads(s)

    def send_json(self, obj, flags=0, **kwargs):
        """send a Python object as a message using json to serialize

        Keyword arguments are passed on to json.dumps

        Parameters
        ----------
        obj : Python object
            The Python object to send
        flags : int
            Any valid send flag
        """
        msg = jsonapi.dumps(obj, **kwargs)
        return self.send(msg, flags)

    def recv_json(self, flags=0, **kwargs):
        """receive a Python object as a message using json to serialize

        Keyword arguments are passed on to json.loads

        Parameters
        ----------
        flags : int
            Any valid recv flag.

        Returns
        -------
        obj : Python object
            The Python object that arrives as a message.
        """
        msg = self.recv(flags)
        return jsonapi.loads(msg, **kwargs)

    _poller_class = Poller

    def poll(self, timeout=None, flags=POLLIN):
        """poll the socket for events

        The default is to poll forever for incoming
        events. Timeout is in milliseconds, if specified.

        Parameters
        ----------
        timeout : int [default: None]
            The timeout (in milliseconds) to wait for an event. If
            unspecified (or specified None), will wait forever for an event.
        flags : bitfield (int) [default: POLLIN]
            The event flags to poll for (any combination of POLLIN|POLLOUT).
            The default is to check for incoming events (POLLIN).

        Returns
        -------
        events : bitfield (int)
            The events that are ready and waiting. Will be 0 if no events
            were ready by the time timeout was reached.
        """

        if self.closed:
            raise ZMQError(ENOTSUP)

        p = self._poller_class()
        p.register(self, flags)
        evts = dict(p.poll(timeout))
        # return 0 if no events, otherwise return event bitfield
        return evts.get(self, 0)

    def get_monitor_socket(self, events=None, addr=None):
        """Return a connected PAIR socket ready to receive the event
        notifications.

        .. versionadded:: libzmq-4.0
        .. versionadded:: 14.0

        Parameters
        ----------
        events : bitfield (int) [default: ZMQ_EVENTS_ALL]
            The bitmask defining which events are wanted.
        addr : string [default: None]
            The optional endpoint for the monitoring sockets.

        Returns
        -------
        socket : (PAIR)
            The socket is already connected and ready to receive messages.
        """
        # safe-guard, method only available on libzmq >= 4
        if zmq.zmq_version_info() < (4,):
            raise NotImplementedError("get_monitor_socket requires libzmq >= 4, have %s" % zmq.zmq_version())
        if addr is None:
            # create endpoint name from internal fd
            addr = "inproc://monitor.s-%d" % self.FD
        if events is None:
            # use all events
            events = zmq.EVENT_ALL
        # attach monitoring socket
        self.monitor(addr, events)
        # create new PAIR socket and connect it
        ret = self.context.socket(zmq.PAIR)
        ret.connect(addr)
        return ret

    def disable_monitor(self):
        """Shutdown the PAIR socket (created using get_monitor_socket)
        that is serving socket events.

        .. versionadded:: 14.4
        """
        self.monitor(None, 0)
+
+
+__all__ = ['Socket']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/tracker.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/tracker.py
new file mode 100644
index 00000000..fb8c007f
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/tracker.py
@@ -0,0 +1,120 @@
+"""Tracker for zero-copy messages with 0MQ."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import time
+
+try:
+ # below 3.3
+ from threading import _Event as Event
+except (ImportError, AttributeError):
+ # python throws ImportError, cython throws AttributeError
+ from threading import Event
+
+from zmq.error import NotDone
+from zmq.backend import Frame
+
class MessageTracker(object):
    """MessageTracker(*towatch)

    A class for tracking if 0MQ is done using one or more messages.

    When you send a 0MQ message, it is not sent immediately. The 0MQ IO
    thread sends the message at some later time. Often you want to know
    when 0MQ has actually sent the message though. This is complicated by
    the fact that a single 0MQ message can be sent multiple times using
    different sockets. This class allows you to track all of the 0MQ
    usages of a message.

    Parameters
    ----------
    *towatch : tuple of Event, MessageTracker, Message instances.
        This list of objects to track. This class can track the low-level
        Events used by the Message class, other MessageTrackers or
        actual Messages.
    """
    events = None   # low-level Events from tracked Frames
    peers = None    # other MessageTrackers folded into this one

    def __init__(self, *towatch):
        """MessageTracker(*towatch)

        Create a message tracker to track a set of messages.

        Parameters
        ----------
        *towatch : tuple of Event, MessageTracker, Message instances.
            This list of objects to track. This class can track the
            low-level Events used by the Message class, other
            MessageTrackers or actual Messages.
        """
        self.events = set()
        self.peers = set()
        for obj in towatch:
            if isinstance(obj, Event):
                self.events.add(obj)
            elif isinstance(obj, MessageTracker):
                self.peers.add(obj)
            elif isinstance(obj, Frame):
                if not obj.tracker:
                    raise ValueError("Not a tracked message")
                self.peers.add(obj.tracker)
            else:
                raise TypeError("Require Events or Message Frames, not %s" % type(obj))

    @property
    def done(self):
        """Is 0MQ completely done with the message(s) being tracked?"""
        if any(not evt.is_set() for evt in self.events):
            return False
        if any(not peer.done for peer in self.peers):
            return False
        return True

    def wait(self, timeout=-1):
        """mt.wait(timeout=-1)

        Wait for 0MQ to be done with the message or until `timeout`.

        Parameters
        ----------
        timeout : float [default: -1, wait forever]
            Maximum time in (s) to wait before raising NotDone.

        Returns
        -------
        None
            if done before `timeout`

        Raises
        ------
        NotDone
            if `timeout` reached before I am done.
        """
        tic = time.time()
        # negative/False timeout means "effectively forever" (one week)
        if timeout is False or timeout < 0:
            remaining = 3600 * 24 * 7
        else:
            remaining = timeout

        for evt in self.events:
            if remaining < 0:
                raise NotDone
            evt.wait(timeout=remaining)
            if not evt.is_set():
                raise NotDone
            toc = time.time()
            remaining -= (toc - tic)
            tic = toc

        for peer in self.peers:
            if remaining < 0:
                raise NotDone
            peer.wait(timeout=remaining)
            toc = time.time()
            remaining -= (toc - tic)
            tic = toc
+
+__all__ = ['MessageTracker'] \ No newline at end of file
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/version.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/version.py
new file mode 100644
index 00000000..ea8fbbc4
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/version.py
@@ -0,0 +1,48 @@
+"""PyZMQ and 0MQ version functions."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from zmq.backend import zmq_version_info
+
+
# pyzmq release version components
VERSION_MAJOR = 14
VERSION_MINOR = 5
VERSION_PATCH = 0
VERSION_EXTRA = ""

__version__ = '%i.%i.%i' % (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
if VERSION_EXTRA:
    __version__ = "%s-%s" % (__version__, VERSION_EXTRA)
    # a development version sorts after any patch release
    version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH, float('inf'))
else:
    version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)

__revision__ = ''


def pyzmq_version():
    """return the version of pyzmq as a string"""
    if __revision__:
        return '@'.join([__version__, __revision__[:6]])
    return __version__


def pyzmq_version_info():
    """return the pyzmq version as a tuple of at least three numbers

    If pyzmq is a development version, `inf` will be appended after the
    third integer.
    """
    return version_info


def zmq_version():
    """return the version of libzmq as a string"""
    return "%i.%i.%i" % zmq_version_info()
+
+
+__all__ = ['zmq_version', 'zmq_version_info',
+ 'pyzmq_version','pyzmq_version_info',
+ '__version__', '__revision__'
+]
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/__init__.py
new file mode 100644
index 00000000..325a3f19
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/__init__.py
@@ -0,0 +1,211 @@
+# Copyright (c) PyZMQ Developers.
+# Distributed under the terms of the Modified BSD License.
+
+import functools
+import sys
+import time
+from threading import Thread
+
+from unittest import TestCase
+
+import zmq
+from zmq.utils import jsonapi
+
+try:
+ import gevent
+ from zmq import green as gzmq
+ have_gevent = True
+except ImportError:
+ have_gevent = False
+
+try:
+ from unittest import SkipTest
+except ImportError:
+ try:
+ from nose import SkipTest
+ except ImportError:
+ class SkipTest(Exception):
+ pass
+
+PYPY = 'PyPy' in sys.version
+
+#-----------------------------------------------------------------------------
+# skip decorators (directly from unittest)
+#-----------------------------------------------------------------------------
+
+_id = lambda x: x
+
+def skip(reason):
+ """
+ Unconditionally skip a test.
+ """
+ def decorator(test_item):
+ if not (isinstance(test_item, type) and issubclass(test_item, TestCase)):
+ @functools.wraps(test_item)
+ def skip_wrapper(*args, **kwargs):
+ raise SkipTest(reason)
+ test_item = skip_wrapper
+
+ test_item.__unittest_skip__ = True
+ test_item.__unittest_skip_why__ = reason
+ return test_item
+ return decorator
+
+def skip_if(condition, reason="Skipped"):
+ """
+ Skip a test if the condition is true.
+ """
+ if condition:
+ return skip(reason)
+ return _id
+
+skip_pypy = skip_if(PYPY, "Doesn't work on PyPy")
+
+#-----------------------------------------------------------------------------
+# Base test class
+#-----------------------------------------------------------------------------
+
+class BaseZMQTestCase(TestCase):
+ green = False
+
+ @property
+ def Context(self):
+ if self.green:
+ return gzmq.Context
+ else:
+ return zmq.Context
+
+ def socket(self, socket_type):
+ s = self.context.socket(socket_type)
+ self.sockets.append(s)
+ return s
+
+ def setUp(self):
+ if self.green and not have_gevent:
+ raise SkipTest("requires gevent")
+ self.context = self.Context.instance()
+ self.sockets = []
+
+ def tearDown(self):
+ contexts = set([self.context])
+ while self.sockets:
+ sock = self.sockets.pop()
+ contexts.add(sock.context) # in case additional contexts are created
+ sock.close(0)
+ for ctx in contexts:
+ t = Thread(target=ctx.term)
+ t.daemon = True
+ t.start()
+ t.join(timeout=2)
+ if t.is_alive():
+ # reset Context.instance, so the failure to term doesn't corrupt subsequent tests
+ zmq.sugar.context.Context._instance = None
+ raise RuntimeError("context could not terminate, open sockets likely remain in test")
+
+ def create_bound_pair(self, type1=zmq.PAIR, type2=zmq.PAIR, interface='tcp://127.0.0.1'):
+ """Create a bound socket pair using a random port."""
+ s1 = self.context.socket(type1)
+ s1.setsockopt(zmq.LINGER, 0)
+ port = s1.bind_to_random_port(interface)
+ s2 = self.context.socket(type2)
+ s2.setsockopt(zmq.LINGER, 0)
+ s2.connect('%s:%s' % (interface, port))
+ self.sockets.extend([s1,s2])
+ return s1, s2
+
+ def ping_pong(self, s1, s2, msg):
+ s1.send(msg)
+ msg2 = s2.recv()
+ s2.send(msg2)
+ msg3 = s1.recv()
+ return msg3
+
+ def ping_pong_json(self, s1, s2, o):
+ if jsonapi.jsonmod is None:
+ raise SkipTest("No json library")
+ s1.send_json(o)
+ o2 = s2.recv_json()
+ s2.send_json(o2)
+ o3 = s1.recv_json()
+ return o3
+
+ def ping_pong_pyobj(self, s1, s2, o):
+ s1.send_pyobj(o)
+ o2 = s2.recv_pyobj()
+ s2.send_pyobj(o2)
+ o3 = s1.recv_pyobj()
+ return o3
+
+ def assertRaisesErrno(self, errno, func, *args, **kwargs):
+ try:
+ func(*args, **kwargs)
+ except zmq.ZMQError as e:
+ self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \
+got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno)))
+ else:
+ self.fail("Function did not raise any error")
+
+ def _select_recv(self, multipart, socket, **kwargs):
+ """call recv[_multipart] in a way that raises if there is nothing to receive"""
+ if zmq.zmq_version_info() >= (3,1,0):
+ # zmq 3.1 has a bug, where poll can return false positives,
+ # so we wait a little bit just in case
+ # See LIBZMQ-280 on JIRA
+ time.sleep(0.1)
+
+ r,w,x = zmq.select([socket], [], [], timeout=5)
+ assert len(r) > 0, "Should have received a message"
+ kwargs['flags'] = zmq.DONTWAIT | kwargs.get('flags', 0)
+
+ recv = socket.recv_multipart if multipart else socket.recv
+ return recv(**kwargs)
+
+ def recv(self, socket, **kwargs):
+ """call recv in a way that raises if there is nothing to receive"""
+ return self._select_recv(False, socket, **kwargs)
+
+ def recv_multipart(self, socket, **kwargs):
+ """call recv_multipart in a way that raises if there is nothing to receive"""
+ return self._select_recv(True, socket, **kwargs)
+
+
+class PollZMQTestCase(BaseZMQTestCase):
+ pass
+
+class GreenTest:
+ """Mixin for making green versions of test classes"""
+ green = True
+
+ def assertRaisesErrno(self, errno, func, *args, **kwargs):
+ if errno == zmq.EAGAIN:
+ raise SkipTest("Skipping because we're green.")
+ try:
+ func(*args, **kwargs)
+ except zmq.ZMQError:
+ e = sys.exc_info()[1]
+ self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \
+got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno)))
+ else:
+ self.fail("Function did not raise any error")
+
+ def tearDown(self):
+ contexts = set([self.context])
+ while self.sockets:
+ sock = self.sockets.pop()
+ contexts.add(sock.context) # in case additional contexts are created
+ sock.close()
+ try:
+ gevent.joinall([gevent.spawn(ctx.term) for ctx in contexts], timeout=2, raise_error=True)
+ except gevent.Timeout:
+ raise RuntimeError("context could not terminate, open sockets likely remain in test")
+
+ def skip_green(self):
+ raise SkipTest("Skipping because we are green")
+
+def skip_green(f):
+ def skipping_test(self, *args, **kwargs):
+ if self.green:
+ raise SkipTest("Skipping because we are green")
+ else:
+ return f(self, *args, **kwargs)
+ return skipping_test
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_auth.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_auth.py
new file mode 100644
index 00000000..d350f61f
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_auth.py
@@ -0,0 +1,431 @@
+# -*- coding: utf8 -*-
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import logging
+import os
+import shutil
+import sys
+import tempfile
+
+import zmq.auth
+from zmq.auth.ioloop import IOLoopAuthenticator
+from zmq.auth.thread import ThreadAuthenticator
+
+from zmq.eventloop import ioloop, zmqstream
+from zmq.tests import (BaseZMQTestCase, SkipTest)
+
+class BaseAuthTestCase(BaseZMQTestCase):
+ def setUp(self):
+ if zmq.zmq_version_info() < (4,0):
+ raise SkipTest("security is new in libzmq 4.0")
+ try:
+ zmq.curve_keypair()
+ except zmq.ZMQError:
+ raise SkipTest("security requires libzmq to be linked against libsodium")
+ super(BaseAuthTestCase, self).setUp()
+ # enable debug logging while we run tests
+ logging.getLogger('zmq.auth').setLevel(logging.DEBUG)
+ self.auth = self.make_auth()
+ self.auth.start()
+ self.base_dir, self.public_keys_dir, self.secret_keys_dir = self.create_certs()
+
+ def make_auth(self):
+ raise NotImplementedError()
+
+ def tearDown(self):
+ if self.auth:
+ self.auth.stop()
+ self.auth = None
+ self.remove_certs(self.base_dir)
+ super(BaseAuthTestCase, self).tearDown()
+
+ def create_certs(self):
+ """Create CURVE certificates for a test"""
+
+ # Create temporary CURVE keypairs for this test run. We create all keys in a
+ # temp directory and then move them into the appropriate private or public
+ # directory.
+
+ base_dir = tempfile.mkdtemp()
+ keys_dir = os.path.join(base_dir, 'certificates')
+ public_keys_dir = os.path.join(base_dir, 'public_keys')
+ secret_keys_dir = os.path.join(base_dir, 'private_keys')
+
+ os.mkdir(keys_dir)
+ os.mkdir(public_keys_dir)
+ os.mkdir(secret_keys_dir)
+
+ server_public_file, server_secret_file = zmq.auth.create_certificates(keys_dir, "server")
+ client_public_file, client_secret_file = zmq.auth.create_certificates(keys_dir, "client")
+
+ for key_file in os.listdir(keys_dir):
+ if key_file.endswith(".key"):
+ shutil.move(os.path.join(keys_dir, key_file),
+ os.path.join(public_keys_dir, '.'))
+
+ for key_file in os.listdir(keys_dir):
+ if key_file.endswith(".key_secret"):
+ shutil.move(os.path.join(keys_dir, key_file),
+ os.path.join(secret_keys_dir, '.'))
+
+ return (base_dir, public_keys_dir, secret_keys_dir)
+
+ def remove_certs(self, base_dir):
+ """Remove certificates for a test"""
+ shutil.rmtree(base_dir)
+
+ def load_certs(self, secret_keys_dir):
+ """Return server and client certificate keys"""
+ server_secret_file = os.path.join(secret_keys_dir, "server.key_secret")
+ client_secret_file = os.path.join(secret_keys_dir, "client.key_secret")
+
+ server_public, server_secret = zmq.auth.load_certificate(server_secret_file)
+ client_public, client_secret = zmq.auth.load_certificate(client_secret_file)
+
+ return server_public, server_secret, client_public, client_secret
+
+
+class TestThreadAuthentication(BaseAuthTestCase):
+ """Test authentication running in a thread"""
+
+ def make_auth(self):
+ return ThreadAuthenticator(self.context)
+
+ def can_connect(self, server, client):
+ """Check if client can connect to server using tcp transport"""
+ result = False
+ iface = 'tcp://127.0.0.1'
+ port = server.bind_to_random_port(iface)
+ client.connect("%s:%i" % (iface, port))
+ msg = [b"Hello World"]
+ server.send_multipart(msg)
+ if client.poll(1000):
+ rcvd_msg = client.recv_multipart()
+ self.assertEqual(rcvd_msg, msg)
+ result = True
+ return result
+
+ def test_null(self):
+ """threaded auth - NULL"""
+ # A default NULL connection should always succeed, and not
+ # go through our authentication infrastructure at all.
+ self.auth.stop()
+ self.auth = None
+
+ server = self.socket(zmq.PUSH)
+ client = self.socket(zmq.PULL)
+ self.assertTrue(self.can_connect(server, client))
+
+ # By setting a domain we switch on authentication for NULL sockets,
+ # though no policies are configured yet. The client connection
+ # should still be allowed.
+ server = self.socket(zmq.PUSH)
+ server.zap_domain = b'global'
+ client = self.socket(zmq.PULL)
+ self.assertTrue(self.can_connect(server, client))
+
+ def test_blacklist(self):
+ """threaded auth - Blacklist"""
+ # Blacklist 127.0.0.1, connection should fail
+ self.auth.deny('127.0.0.1')
+ server = self.socket(zmq.PUSH)
+ # By setting a domain we switch on authentication for NULL sockets,
+ # though no policies are configured yet.
+ server.zap_domain = b'global'
+ client = self.socket(zmq.PULL)
+ self.assertFalse(self.can_connect(server, client))
+
+ def test_whitelist(self):
+ """threaded auth - Whitelist"""
+ # Whitelist 127.0.0.1, connection should pass"
+ self.auth.allow('127.0.0.1')
+ server = self.socket(zmq.PUSH)
+ # By setting a domain we switch on authentication for NULL sockets,
+ # though no policies are configured yet.
+ server.zap_domain = b'global'
+ client = self.socket(zmq.PULL)
+ self.assertTrue(self.can_connect(server, client))
+
+ def test_plain(self):
+ """threaded auth - PLAIN"""
+
+ # Try PLAIN authentication - without configuring server, connection should fail
+ server = self.socket(zmq.PUSH)
+ server.plain_server = True
+ client = self.socket(zmq.PULL)
+ client.plain_username = b'admin'
+ client.plain_password = b'Password'
+ self.assertFalse(self.can_connect(server, client))
+
+ # Try PLAIN authentication - with server configured, connection should pass
+ server = self.socket(zmq.PUSH)
+ server.plain_server = True
+ client = self.socket(zmq.PULL)
+ client.plain_username = b'admin'
+ client.plain_password = b'Password'
+ self.auth.configure_plain(domain='*', passwords={'admin': 'Password'})
+ self.assertTrue(self.can_connect(server, client))
+
+ # Try PLAIN authentication - with bogus credentials, connection should fail
+ server = self.socket(zmq.PUSH)
+ server.plain_server = True
+ client = self.socket(zmq.PULL)
+ client.plain_username = b'admin'
+ client.plain_password = b'Bogus'
+ self.assertFalse(self.can_connect(server, client))
+
+ # Remove authenticator and check that a normal connection works
+ self.auth.stop()
+ self.auth = None
+
+ server = self.socket(zmq.PUSH)
+ client = self.socket(zmq.PULL)
+ self.assertTrue(self.can_connect(server, client))
+ client.close()
+ server.close()
+
+ def test_curve(self):
+ """threaded auth - CURVE"""
+ self.auth.allow('127.0.0.1')
+ certs = self.load_certs(self.secret_keys_dir)
+ server_public, server_secret, client_public, client_secret = certs
+
+ #Try CURVE authentication - without configuring server, connection should fail
+ server = self.socket(zmq.PUSH)
+ server.curve_publickey = server_public
+ server.curve_secretkey = server_secret
+ server.curve_server = True
+ client = self.socket(zmq.PULL)
+ client.curve_publickey = client_public
+ client.curve_secretkey = client_secret
+ client.curve_serverkey = server_public
+ self.assertFalse(self.can_connect(server, client))
+
+ #Try CURVE authentication - with server configured to CURVE_ALLOW_ANY, connection should pass
+ self.auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY)
+ server = self.socket(zmq.PUSH)
+ server.curve_publickey = server_public
+ server.curve_secretkey = server_secret
+ server.curve_server = True
+ client = self.socket(zmq.PULL)
+ client.curve_publickey = client_public
+ client.curve_secretkey = client_secret
+ client.curve_serverkey = server_public
+ self.assertTrue(self.can_connect(server, client))
+
+ # Try CURVE authentication - with server configured, connection should pass
+ self.auth.configure_curve(domain='*', location=self.public_keys_dir)
+ server = self.socket(zmq.PUSH)
+ server.curve_publickey = server_public
+ server.curve_secretkey = server_secret
+ server.curve_server = True
+ client = self.socket(zmq.PULL)
+ client.curve_publickey = client_public
+ client.curve_secretkey = client_secret
+ client.curve_serverkey = server_public
+ self.assertTrue(self.can_connect(server, client))
+
+ # Remove authenticator and check that a normal connection works
+ self.auth.stop()
+ self.auth = None
+
+ # Try connecting using NULL and no authentication enabled, connection should pass
+ server = self.socket(zmq.PUSH)
+ client = self.socket(zmq.PULL)
+ self.assertTrue(self.can_connect(server, client))
+
+
+def with_ioloop(method, expect_success=True):
+ """decorator for running tests with an IOLoop"""
+ def test_method(self):
+ r = method(self)
+
+ loop = self.io_loop
+ if expect_success:
+ self.pullstream.on_recv(self.on_message_succeed)
+ else:
+ self.pullstream.on_recv(self.on_message_fail)
+
+ t = loop.time()
+ loop.add_callback(self.attempt_connection)
+ loop.add_callback(self.send_msg)
+ if expect_success:
+ loop.add_timeout(t + 1, self.on_test_timeout_fail)
+ else:
+ loop.add_timeout(t + 1, self.on_test_timeout_succeed)
+
+ loop.start()
+ if self.fail_msg:
+ self.fail(self.fail_msg)
+
+ return r
+ return test_method
+
+def should_auth(method):
+ return with_ioloop(method, True)
+
+def should_not_auth(method):
+ return with_ioloop(method, False)
+
+class TestIOLoopAuthentication(BaseAuthTestCase):
+ """Test authentication running in ioloop"""
+
+ def setUp(self):
+ self.fail_msg = None
+ self.io_loop = ioloop.IOLoop()
+ super(TestIOLoopAuthentication, self).setUp()
+ self.server = self.socket(zmq.PUSH)
+ self.client = self.socket(zmq.PULL)
+ self.pushstream = zmqstream.ZMQStream(self.server, self.io_loop)
+ self.pullstream = zmqstream.ZMQStream(self.client, self.io_loop)
+
+ def make_auth(self):
+ return IOLoopAuthenticator(self.context, io_loop=self.io_loop)
+
+ def tearDown(self):
+ if self.auth:
+ self.auth.stop()
+ self.auth = None
+ self.io_loop.close(all_fds=True)
+ super(TestIOLoopAuthentication, self).tearDown()
+
+ def attempt_connection(self):
+ """Check if client can connect to server using tcp transport"""
+ iface = 'tcp://127.0.0.1'
+ port = self.server.bind_to_random_port(iface)
+ self.client.connect("%s:%i" % (iface, port))
+
+ def send_msg(self):
+ """Send a message from server to a client"""
+ msg = [b"Hello World"]
+ self.pushstream.send_multipart(msg)
+
+ def on_message_succeed(self, frames):
+ """A message was received, as expected."""
+ if frames != [b"Hello World"]:
+ self.fail_msg = "Unexpected message received"
+ self.io_loop.stop()
+
+ def on_message_fail(self, frames):
+ """A message was received, unexpectedly."""
+ self.fail_msg = 'Received messaged unexpectedly, security failed'
+ self.io_loop.stop()
+
+ def on_test_timeout_succeed(self):
+ """Test timer expired, indicates test success"""
+ self.io_loop.stop()
+
+ def on_test_timeout_fail(self):
+ """Test timer expired, indicates test failure"""
+ self.fail_msg = 'Test timed out'
+ self.io_loop.stop()
+
+ @should_auth
+ def test_none(self):
+ """ioloop auth - NONE"""
+ # A default NULL connection should always succeed, and not
+ # go through our authentication infrastructure at all.
+ # no auth should be running
+ self.auth.stop()
+ self.auth = None
+
+ @should_auth
+ def test_null(self):
+ """ioloop auth - NULL"""
+ # By setting a domain we switch on authentication for NULL sockets,
+ # though no policies are configured yet. The client connection
+ # should still be allowed.
+ self.server.zap_domain = b'global'
+
+ @should_not_auth
+ def test_blacklist(self):
+ """ioloop auth - Blacklist"""
+ # Blacklist 127.0.0.1, connection should fail
+ self.auth.deny('127.0.0.1')
+ self.server.zap_domain = b'global'
+
+ @should_auth
+ def test_whitelist(self):
+ """ioloop auth - Whitelist"""
+ # Whitelist 127.0.0.1, which overrides the blacklist, connection should pass"
+ self.auth.allow('127.0.0.1')
+
+ self.server.setsockopt(zmq.ZAP_DOMAIN, b'global')
+
+ @should_not_auth
+ def test_plain_unconfigured_server(self):
+ """ioloop auth - PLAIN, unconfigured server"""
+ self.client.plain_username = b'admin'
+ self.client.plain_password = b'Password'
+ # Try PLAIN authentication - without configuring server, connection should fail
+ self.server.plain_server = True
+
+ @should_auth
+ def test_plain_configured_server(self):
+ """ioloop auth - PLAIN, configured server"""
+ self.client.plain_username = b'admin'
+ self.client.plain_password = b'Password'
+ # Try PLAIN authentication - with server configured, connection should pass
+ self.server.plain_server = True
+ self.auth.configure_plain(domain='*', passwords={'admin': 'Password'})
+
+ @should_not_auth
+ def test_plain_bogus_credentials(self):
+ """ioloop auth - PLAIN, bogus credentials"""
+ self.client.plain_username = b'admin'
+ self.client.plain_password = b'Bogus'
+ self.server.plain_server = True
+
+ self.auth.configure_plain(domain='*', passwords={'admin': 'Password'})
+
+ @should_not_auth
+ def test_curve_unconfigured_server(self):
+ """ioloop auth - CURVE, unconfigured server"""
+ certs = self.load_certs(self.secret_keys_dir)
+ server_public, server_secret, client_public, client_secret = certs
+
+ self.auth.allow('127.0.0.1')
+
+ self.server.curve_publickey = server_public
+ self.server.curve_secretkey = server_secret
+ self.server.curve_server = True
+
+ self.client.curve_publickey = client_public
+ self.client.curve_secretkey = client_secret
+ self.client.curve_serverkey = server_public
+
+ @should_auth
+ def test_curve_allow_any(self):
+ """ioloop auth - CURVE, CURVE_ALLOW_ANY"""
+ certs = self.load_certs(self.secret_keys_dir)
+ server_public, server_secret, client_public, client_secret = certs
+
+ self.auth.allow('127.0.0.1')
+ self.auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY)
+
+ self.server.curve_publickey = server_public
+ self.server.curve_secretkey = server_secret
+ self.server.curve_server = True
+
+ self.client.curve_publickey = client_public
+ self.client.curve_secretkey = client_secret
+ self.client.curve_serverkey = server_public
+
+ @should_auth
+ def test_curve_configured_server(self):
+ """ioloop auth - CURVE, configured server"""
+ self.auth.allow('127.0.0.1')
+ certs = self.load_certs(self.secret_keys_dir)
+ server_public, server_secret, client_public, client_secret = certs
+
+ self.auth.configure_curve(domain='*', location=self.public_keys_dir)
+
+ self.server.curve_publickey = server_public
+ self.server.curve_secretkey = server_secret
+ self.server.curve_server = True
+
+ self.client.curve_publickey = client_public
+ self.client.curve_secretkey = client_secret
+ self.client.curve_serverkey = server_public
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_cffi_backend.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_cffi_backend.py
new file mode 100644
index 00000000..1f85eebf
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_cffi_backend.py
@@ -0,0 +1,310 @@
+# -*- coding: utf8 -*-
+
+import sys
+import time
+
+from unittest import TestCase
+
+from zmq.tests import BaseZMQTestCase, SkipTest
+
+try:
+ from zmq.backend.cffi import (
+ zmq_version_info,
+ PUSH, PULL, IDENTITY,
+ REQ, REP, POLLIN, POLLOUT,
+ )
+ from zmq.backend.cffi._cffi import ffi, C
+ have_ffi_backend = True
+except ImportError:
+ have_ffi_backend = False
+
+
+class TestCFFIBackend(TestCase):
+
+ def setUp(self):
+ if not have_ffi_backend or not 'PyPy' in sys.version:
+ raise SkipTest('PyPy Tests Only')
+
+ def test_zmq_version_info(self):
+ version = zmq_version_info()
+
+ assert version[0] in range(2,11)
+
+ def test_zmq_ctx_new_destroy(self):
+ ctx = C.zmq_ctx_new()
+
+ assert ctx != ffi.NULL
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_socket_open_close(self):
+ ctx = C.zmq_ctx_new()
+ socket = C.zmq_socket(ctx, PUSH)
+
+ assert ctx != ffi.NULL
+ assert ffi.NULL != socket
+ assert 0 == C.zmq_close(socket)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_setsockopt(self):
+ ctx = C.zmq_ctx_new()
+ socket = C.zmq_socket(ctx, PUSH)
+
+ identity = ffi.new('char[3]', 'zmq')
+ ret = C.zmq_setsockopt(socket, IDENTITY, ffi.cast('void*', identity), 3)
+
+ assert ret == 0
+ assert ctx != ffi.NULL
+ assert ffi.NULL != socket
+ assert 0 == C.zmq_close(socket)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_getsockopt(self):
+ ctx = C.zmq_ctx_new()
+ socket = C.zmq_socket(ctx, PUSH)
+
+ identity = ffi.new('char[]', 'zmq')
+ ret = C.zmq_setsockopt(socket, IDENTITY, ffi.cast('void*', identity), 3)
+ assert ret == 0
+
+ option_len = ffi.new('size_t*', 3)
+ option = ffi.new('char*')
+ ret = C.zmq_getsockopt(socket,
+ IDENTITY,
+ ffi.cast('void*', option),
+ option_len)
+
+ assert ret == 0
+ assert ffi.string(ffi.cast('char*', option))[0] == "z"
+ assert ffi.string(ffi.cast('char*', option))[1] == "m"
+ assert ffi.string(ffi.cast('char*', option))[2] == "q"
+ assert ctx != ffi.NULL
+ assert ffi.NULL != socket
+ assert 0 == C.zmq_close(socket)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_bind(self):
+ ctx = C.zmq_ctx_new()
+ socket = C.zmq_socket(ctx, 8)
+
+ assert 0 == C.zmq_bind(socket, 'tcp://*:4444')
+ assert ctx != ffi.NULL
+ assert ffi.NULL != socket
+ assert 0 == C.zmq_close(socket)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_bind_connect(self):
+ ctx = C.zmq_ctx_new()
+
+ socket1 = C.zmq_socket(ctx, PUSH)
+ socket2 = C.zmq_socket(ctx, PULL)
+
+ assert 0 == C.zmq_bind(socket1, 'tcp://*:4444')
+ assert 0 == C.zmq_connect(socket2, 'tcp://127.0.0.1:4444')
+ assert ctx != ffi.NULL
+ assert ffi.NULL != socket1
+ assert ffi.NULL != socket2
+ assert 0 == C.zmq_close(socket1)
+ assert 0 == C.zmq_close(socket2)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_msg_init_close(self):
+ zmq_msg = ffi.new('zmq_msg_t*')
+
+ assert ffi.NULL != zmq_msg
+ assert 0 == C.zmq_msg_init(zmq_msg)
+ assert 0 == C.zmq_msg_close(zmq_msg)
+
+ def test_zmq_msg_init_size(self):
+ zmq_msg = ffi.new('zmq_msg_t*')
+
+ assert ffi.NULL != zmq_msg
+ assert 0 == C.zmq_msg_init_size(zmq_msg, 10)
+ assert 0 == C.zmq_msg_close(zmq_msg)
+
+ def test_zmq_msg_init_data(self):
+ zmq_msg = ffi.new('zmq_msg_t*')
+ message = ffi.new('char[5]', 'Hello')
+
+ assert 0 == C.zmq_msg_init_data(zmq_msg,
+ ffi.cast('void*', message),
+ 5,
+ ffi.NULL,
+ ffi.NULL)
+
+ assert ffi.NULL != zmq_msg
+ assert 0 == C.zmq_msg_close(zmq_msg)
+
+ def test_zmq_msg_data(self):
+ zmq_msg = ffi.new('zmq_msg_t*')
+ message = ffi.new('char[]', 'Hello')
+ assert 0 == C.zmq_msg_init_data(zmq_msg,
+ ffi.cast('void*', message),
+ 5,
+ ffi.NULL,
+ ffi.NULL)
+
+ data = C.zmq_msg_data(zmq_msg)
+
+ assert ffi.NULL != zmq_msg
+ assert ffi.string(ffi.cast("char*", data)) == 'Hello'
+ assert 0 == C.zmq_msg_close(zmq_msg)
+
+
+ def test_zmq_send(self):
+ ctx = C.zmq_ctx_new()
+
+ sender = C.zmq_socket(ctx, REQ)
+ receiver = C.zmq_socket(ctx, REP)
+
+ assert 0 == C.zmq_bind(receiver, 'tcp://*:7777')
+ assert 0 == C.zmq_connect(sender, 'tcp://127.0.0.1:7777')
+
+ time.sleep(0.1)
+
+ zmq_msg = ffi.new('zmq_msg_t*')
+ message = ffi.new('char[5]', 'Hello')
+
+ C.zmq_msg_init_data(zmq_msg,
+ ffi.cast('void*', message),
+ ffi.cast('size_t', 5),
+ ffi.NULL,
+ ffi.NULL)
+
+ assert 5 == C.zmq_msg_send(zmq_msg, sender, 0)
+ assert 0 == C.zmq_msg_close(zmq_msg)
+ assert C.zmq_close(sender) == 0
+ assert C.zmq_close(receiver) == 0
+ assert C.zmq_ctx_destroy(ctx) == 0
+
+ def test_zmq_recv(self):
+ ctx = C.zmq_ctx_new()
+
+ sender = C.zmq_socket(ctx, REQ)
+ receiver = C.zmq_socket(ctx, REP)
+
+ assert 0 == C.zmq_bind(receiver, 'tcp://*:2222')
+ assert 0 == C.zmq_connect(sender, 'tcp://127.0.0.1:2222')
+
+ time.sleep(0.1)
+
+ zmq_msg = ffi.new('zmq_msg_t*')
+ message = ffi.new('char[5]', 'Hello')
+
+ C.zmq_msg_init_data(zmq_msg,
+ ffi.cast('void*', message),
+ ffi.cast('size_t', 5),
+ ffi.NULL,
+ ffi.NULL)
+
+ zmq_msg2 = ffi.new('zmq_msg_t*')
+ C.zmq_msg_init(zmq_msg2)
+
+ assert 5 == C.zmq_msg_send(zmq_msg, sender, 0)
+ assert 5 == C.zmq_msg_recv(zmq_msg2, receiver, 0)
+ assert 5 == C.zmq_msg_size(zmq_msg2)
+ assert b"Hello" == ffi.buffer(C.zmq_msg_data(zmq_msg2),
+ C.zmq_msg_size(zmq_msg2))[:]
+ assert C.zmq_close(sender) == 0
+ assert C.zmq_close(receiver) == 0
+ assert C.zmq_ctx_destroy(ctx) == 0
+
+ def test_zmq_poll(self):
+ ctx = C.zmq_ctx_new()
+
+ sender = C.zmq_socket(ctx, REQ)
+ receiver = C.zmq_socket(ctx, REP)
+
+ r1 = C.zmq_bind(receiver, 'tcp://*:3333')
+ r2 = C.zmq_connect(sender, 'tcp://127.0.0.1:3333')
+
+ zmq_msg = ffi.new('zmq_msg_t*')
+ message = ffi.new('char[5]', 'Hello')
+
+ C.zmq_msg_init_data(zmq_msg,
+ ffi.cast('void*', message),
+ ffi.cast('size_t', 5),
+ ffi.NULL,
+ ffi.NULL)
+
+ receiver_pollitem = ffi.new('zmq_pollitem_t*')
+ receiver_pollitem.socket = receiver
+ receiver_pollitem.fd = 0
+ receiver_pollitem.events = POLLIN | POLLOUT
+ receiver_pollitem.revents = 0
+
+ ret = C.zmq_poll(ffi.NULL, 0, 0)
+ assert ret == 0
+
+ ret = C.zmq_poll(receiver_pollitem, 1, 0)
+ assert ret == 0
+
+ ret = C.zmq_msg_send(zmq_msg, sender, 0)
+ print(ffi.string(C.zmq_strerror(C.zmq_errno())))
+ assert ret == 5
+
+ time.sleep(0.2)
+
+ ret = C.zmq_poll(receiver_pollitem, 1, 0)
+ assert ret == 1
+
+ assert int(receiver_pollitem.revents) & POLLIN
+ assert not int(receiver_pollitem.revents) & POLLOUT
+
+ zmq_msg2 = ffi.new('zmq_msg_t*')
+ C.zmq_msg_init(zmq_msg2)
+
+ ret_recv = C.zmq_msg_recv(zmq_msg2, receiver, 0)
+ assert ret_recv == 5
+
+ assert 5 == C.zmq_msg_size(zmq_msg2)
+ assert b"Hello" == ffi.buffer(C.zmq_msg_data(zmq_msg2),
+ C.zmq_msg_size(zmq_msg2))[:]
+
+ sender_pollitem = ffi.new('zmq_pollitem_t*')
+ sender_pollitem.socket = sender
+ sender_pollitem.fd = 0
+ sender_pollitem.events = POLLIN | POLLOUT
+ sender_pollitem.revents = 0
+
+ ret = C.zmq_poll(sender_pollitem, 1, 0)
+ assert ret == 0
+
+ zmq_msg_again = ffi.new('zmq_msg_t*')
+ message_again = ffi.new('char[11]', 'Hello Again')
+
+ C.zmq_msg_init_data(zmq_msg_again,
+ ffi.cast('void*', message_again),
+ ffi.cast('size_t', 11),
+ ffi.NULL,
+ ffi.NULL)
+
+ assert 11 == C.zmq_msg_send(zmq_msg_again, receiver, 0)
+
+ time.sleep(0.2)
+
+ assert 0 <= C.zmq_poll(sender_pollitem, 1, 0)
+ assert int(sender_pollitem.revents) & POLLIN
+ assert 11 == C.zmq_msg_recv(zmq_msg2, sender, 0)
+ assert 11 == C.zmq_msg_size(zmq_msg2)
+ assert b"Hello Again" == ffi.buffer(C.zmq_msg_data(zmq_msg2),
+ int(C.zmq_msg_size(zmq_msg2)))[:]
+ assert 0 == C.zmq_close(sender)
+ assert 0 == C.zmq_close(receiver)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+ assert 0 == C.zmq_msg_close(zmq_msg)
+ assert 0 == C.zmq_msg_close(zmq_msg2)
+ assert 0 == C.zmq_msg_close(zmq_msg_again)
+
+ def test_zmq_stopwatch_functions(self):
+ stopwatch = C.zmq_stopwatch_start()
+ ret = C.zmq_stopwatch_stop(stopwatch)
+
+ assert ffi.NULL != stopwatch
+ assert 0 < int(ret)
+
+ def test_zmq_sleep(self):
+ try:
+ C.zmq_sleep(1)
+ except Exception as e:
+ raise AssertionError("Error executing zmq_sleep(int)")
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_constants.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_constants.py
new file mode 100644
index 00000000..d32b2b48
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_constants.py
@@ -0,0 +1,104 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import json
+from unittest import TestCase
+
+import zmq
+
+from zmq.utils import constant_names
+from zmq.sugar import constants as sugar_constants
+from zmq.backend import constants as backend_constants
+
+all_set = set(constant_names.all_names)
+
+class TestConstants(TestCase):
+
+ def _duplicate_test(self, namelist, listname):
+ """test that a given list has no duplicates"""
+ dupes = {}
+ for name in set(namelist):
+ cnt = namelist.count(name)
+ if cnt > 1:
+ dupes[name] = cnt
+ if dupes:
+ self.fail("The following names occur more than once in %s: %s" % (listname, json.dumps(dupes, indent=2)))
+
+ def test_duplicate_all(self):
+ return self._duplicate_test(constant_names.all_names, "all_names")
+
+ def _change_key(self, change, version):
+ """return changed-in key"""
+ return "%s-in %d.%d.%d" % tuple([change] + list(version))
+
+ def test_duplicate_changed(self):
+ all_changed = []
+ for change in ("new", "removed"):
+ d = getattr(constant_names, change + "_in")
+ for version, namelist in d.items():
+ all_changed.extend(namelist)
+ self._duplicate_test(namelist, self._change_key(change, version))
+
+ self._duplicate_test(all_changed, "all-changed")
+
+ def test_changed_in_all(self):
+ missing = {}
+ for change in ("new", "removed"):
+ d = getattr(constant_names, change + "_in")
+ for version, namelist in d.items():
+ key = self._change_key(change, version)
+ for name in namelist:
+ if name not in all_set:
+ if key not in missing:
+ missing[key] = []
+ missing[key].append(name)
+
+ if missing:
+ self.fail(
+ "The following names are missing in `all_names`: %s" % json.dumps(missing, indent=2)
+ )
+
+ def test_no_negative_constants(self):
+ for name in sugar_constants.__all__:
+ self.assertNotEqual(getattr(zmq, name), sugar_constants._UNDEFINED)
+
+ def test_undefined_constants(self):
+ all_aliases = []
+ for alias_group in sugar_constants.aliases:
+ all_aliases.extend(alias_group)
+
+ for name in all_set.difference(all_aliases):
+ raw = getattr(backend_constants, name)
+ if raw == sugar_constants._UNDEFINED:
+ self.assertRaises(AttributeError, getattr, zmq, name)
+ else:
+ self.assertEqual(getattr(zmq, name), raw)
+
+ def test_new(self):
+ zmq_version = zmq.zmq_version_info()
+ for version, new_names in constant_names.new_in.items():
+ should_have = zmq_version >= version
+ for name in new_names:
+ try:
+ value = getattr(zmq, name)
+ except AttributeError:
+ if should_have:
+ self.fail("AttributeError: zmq.%s" % name)
+ else:
+ if not should_have:
+ self.fail("Shouldn't have: zmq.%s=%s" % (name, value))
+
+ def test_removed(self):
+ zmq_version = zmq.zmq_version_info()
+ for version, new_names in constant_names.removed_in.items():
+ should_have = zmq_version < version
+ for name in new_names:
+ try:
+ value = getattr(zmq, name)
+ except AttributeError:
+ if should_have:
+ self.fail("AttributeError: zmq.%s" % name)
+ else:
+ if not should_have:
+ self.fail("Shouldn't have: zmq.%s=%s" % (name, value))
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_context.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_context.py
new file mode 100644
index 00000000..e3280778
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_context.py
@@ -0,0 +1,257 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import gc
+import sys
+import time
+from threading import Thread, Event
+
+import zmq
+from zmq.tests import (
+ BaseZMQTestCase, have_gevent, GreenTest, skip_green, PYPY, SkipTest,
+)
+
+
+class TestContext(BaseZMQTestCase):
+
+ def test_init(self):
+ c1 = self.Context()
+ self.assert_(isinstance(c1, self.Context))
+ del c1
+ c2 = self.Context()
+ self.assert_(isinstance(c2, self.Context))
+ del c2
+ c3 = self.Context()
+ self.assert_(isinstance(c3, self.Context))
+ del c3
+
+ def test_dir(self):
+ ctx = self.Context()
+ self.assertTrue('socket' in dir(ctx))
+ if zmq.zmq_version_info() > (3,):
+ self.assertTrue('IO_THREADS' in dir(ctx))
+ ctx.term()
+
+ def test_term(self):
+ c = self.Context()
+ c.term()
+ self.assert_(c.closed)
+
+ def test_context_manager(self):
+ with self.Context() as c:
+ pass
+ self.assert_(c.closed)
+
+ def test_fail_init(self):
+ self.assertRaisesErrno(zmq.EINVAL, self.Context, -1)
+
+ def test_term_hang(self):
+ rep,req = self.create_bound_pair(zmq.ROUTER, zmq.DEALER)
+ req.setsockopt(zmq.LINGER, 0)
+ req.send(b'hello', copy=False)
+ req.close()
+ rep.close()
+ self.context.term()
+
+ def test_instance(self):
+ ctx = self.Context.instance()
+ c2 = self.Context.instance(io_threads=2)
+ self.assertTrue(c2 is ctx)
+ c2.term()
+ c3 = self.Context.instance()
+ c4 = self.Context.instance()
+ self.assertFalse(c3 is c2)
+ self.assertFalse(c3.closed)
+ self.assertTrue(c3 is c4)
+
+ def test_many_sockets(self):
+ """opening and closing many sockets shouldn't cause problems"""
+ ctx = self.Context()
+ for i in range(16):
+ sockets = [ ctx.socket(zmq.REP) for i in range(65) ]
+ [ s.close() for s in sockets ]
+ # give the reaper a chance
+ time.sleep(1e-2)
+ ctx.term()
+
+ def test_sockopts(self):
+ """setting socket options with ctx attributes"""
+ ctx = self.Context()
+ ctx.linger = 5
+ self.assertEqual(ctx.linger, 5)
+ s = ctx.socket(zmq.REQ)
+ self.assertEqual(s.linger, 5)
+ self.assertEqual(s.getsockopt(zmq.LINGER), 5)
+ s.close()
+ # check that subscribe doesn't get set on sockets that don't subscribe:
+ ctx.subscribe = b''
+ s = ctx.socket(zmq.REQ)
+ s.close()
+
+ ctx.term()
+
+
+ def test_destroy(self):
+ """Context.destroy should close sockets"""
+ ctx = self.Context()
+ sockets = [ ctx.socket(zmq.REP) for i in range(65) ]
+
+ # close half of the sockets
+ [ s.close() for s in sockets[::2] ]
+
+ ctx.destroy()
+ # reaper is not instantaneous
+ time.sleep(1e-2)
+ for s in sockets:
+ self.assertTrue(s.closed)
+
+ def test_destroy_linger(self):
+ """Context.destroy should set linger on closing sockets"""
+ req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)
+ req.send(b'hi')
+ time.sleep(1e-2)
+ self.context.destroy(linger=0)
+ # reaper is not instantaneous
+ time.sleep(1e-2)
+ for s in (req,rep):
+ self.assertTrue(s.closed)
+
+ def test_term_noclose(self):
+ """Context.term won't close sockets"""
+ ctx = self.Context()
+ s = ctx.socket(zmq.REQ)
+ self.assertFalse(s.closed)
+ t = Thread(target=ctx.term)
+ t.start()
+ t.join(timeout=0.1)
+ self.assertTrue(t.is_alive(), "Context should be waiting")
+ s.close()
+ t.join(timeout=0.1)
+ self.assertFalse(t.is_alive(), "Context should have closed")
+
+ def test_gc(self):
+ """test close&term by garbage collection alone"""
+ if PYPY:
+ raise SkipTest("GC doesn't work ")
+
+ # test credit @dln (GH #137):
+ def gcf():
+ def inner():
+ ctx = self.Context()
+ s = ctx.socket(zmq.PUSH)
+ inner()
+ gc.collect()
+ t = Thread(target=gcf)
+ t.start()
+ t.join(timeout=1)
+ self.assertFalse(t.is_alive(), "Garbage collection should have cleaned up context")
+
+ def test_cyclic_destroy(self):
+ """ctx.destroy should succeed when cyclic ref prevents gc"""
+ # test credit @dln (GH #137):
+ class CyclicReference(object):
+ def __init__(self, parent=None):
+ self.parent = parent
+
+ def crash(self, sock):
+ self.sock = sock
+ self.child = CyclicReference(self)
+
+ def crash_zmq():
+ ctx = self.Context()
+ sock = ctx.socket(zmq.PULL)
+ c = CyclicReference()
+ c.crash(sock)
+ ctx.destroy()
+
+ crash_zmq()
+
+ def test_term_thread(self):
+ """ctx.term should not crash active threads (#139)"""
+ ctx = self.Context()
+ evt = Event()
+ evt.clear()
+
+ def block():
+ s = ctx.socket(zmq.REP)
+ s.bind_to_random_port('tcp://127.0.0.1')
+ evt.set()
+ try:
+ s.recv()
+ except zmq.ZMQError as e:
+ self.assertEqual(e.errno, zmq.ETERM)
+ return
+ finally:
+ s.close()
+ self.fail("recv should have been interrupted with ETERM")
+ t = Thread(target=block)
+ t.start()
+
+ evt.wait(1)
+ self.assertTrue(evt.is_set(), "sync event never fired")
+ time.sleep(0.01)
+ ctx.term()
+ t.join(timeout=1)
+ self.assertFalse(t.is_alive(), "term should have interrupted s.recv()")
+
+ def test_destroy_no_sockets(self):
+ ctx = self.Context()
+ s = ctx.socket(zmq.PUB)
+ s.bind_to_random_port('tcp://127.0.0.1')
+ s.close()
+ ctx.destroy()
+ assert s.closed
+ assert ctx.closed
+
+ def test_ctx_opts(self):
+ if zmq.zmq_version_info() < (3,):
+ raise SkipTest("context options require libzmq 3")
+ ctx = self.Context()
+ ctx.set(zmq.MAX_SOCKETS, 2)
+ self.assertEqual(ctx.get(zmq.MAX_SOCKETS), 2)
+ ctx.max_sockets = 100
+ self.assertEqual(ctx.max_sockets, 100)
+ self.assertEqual(ctx.get(zmq.MAX_SOCKETS), 100)
+
+ def test_shadow(self):
+ ctx = self.Context()
+ ctx2 = self.Context.shadow(ctx.underlying)
+ self.assertEqual(ctx.underlying, ctx2.underlying)
+ s = ctx.socket(zmq.PUB)
+ s.close()
+ del ctx2
+ self.assertFalse(ctx.closed)
+ s = ctx.socket(zmq.PUB)
+ ctx2 = self.Context.shadow(ctx.underlying)
+ s2 = ctx2.socket(zmq.PUB)
+ s.close()
+ s2.close()
+ ctx.term()
+ self.assertRaisesErrno(zmq.EFAULT, ctx2.socket, zmq.PUB)
+ del ctx2
+
+ def test_shadow_pyczmq(self):
+ try:
+ from pyczmq import zctx, zsocket, zstr
+ except Exception:
+ raise SkipTest("Requires pyczmq")
+
+ ctx = zctx.new()
+ a = zsocket.new(ctx, zmq.PUSH)
+ zsocket.bind(a, "inproc://a")
+ ctx2 = self.Context.shadow_pyczmq(ctx)
+ b = ctx2.socket(zmq.PULL)
+ b.connect("inproc://a")
+ zstr.send(a, b'hi')
+ rcvd = self.recv(b)
+ self.assertEqual(rcvd, b'hi')
+ b.close()
+
+
+if False: # disable green context tests
+ class TestContextGreen(GreenTest, TestContext):
+ """gevent subclass of context tests"""
+ # skip tests that use real threads:
+ test_gc = GreenTest.skip_green
+ test_term_thread = GreenTest.skip_green
+ test_destroy_linger = GreenTest.skip_green
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_device.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_device.py
new file mode 100644
index 00000000..f8305074
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_device.py
@@ -0,0 +1,146 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import time
+
+import zmq
+from zmq import devices
+from zmq.tests import BaseZMQTestCase, SkipTest, have_gevent, GreenTest, PYPY
+from zmq.utils.strtypes import (bytes,unicode,basestring)
+
+if PYPY:
+ # cleanup of shared Context doesn't work on PyPy
+ devices.Device.context_factory = zmq.Context
+
+class TestDevice(BaseZMQTestCase):
+
+ def test_device_types(self):
+ for devtype in (zmq.STREAMER, zmq.FORWARDER, zmq.QUEUE):
+ dev = devices.Device(devtype, zmq.PAIR, zmq.PAIR)
+ self.assertEqual(dev.device_type, devtype)
+ del dev
+
+ def test_device_attributes(self):
+ dev = devices.Device(zmq.QUEUE, zmq.SUB, zmq.PUB)
+ self.assertEqual(dev.in_type, zmq.SUB)
+ self.assertEqual(dev.out_type, zmq.PUB)
+ self.assertEqual(dev.device_type, zmq.QUEUE)
+ self.assertEqual(dev.daemon, True)
+ del dev
+
+ def test_tsdevice_attributes(self):
+ dev = devices.Device(zmq.QUEUE, zmq.SUB, zmq.PUB)
+ self.assertEqual(dev.in_type, zmq.SUB)
+ self.assertEqual(dev.out_type, zmq.PUB)
+ self.assertEqual(dev.device_type, zmq.QUEUE)
+ self.assertEqual(dev.daemon, True)
+ del dev
+
+
+ def test_single_socket_forwarder_connect(self):
+ dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1)
+ req = self.context.socket(zmq.REQ)
+ port = req.bind_to_random_port('tcp://127.0.0.1')
+ dev.connect_in('tcp://127.0.0.1:%i'%port)
+ dev.start()
+ time.sleep(.25)
+ msg = b'hello'
+ req.send(msg)
+ self.assertEqual(msg, self.recv(req))
+ del dev
+ req.close()
+ dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1)
+ req = self.context.socket(zmq.REQ)
+ port = req.bind_to_random_port('tcp://127.0.0.1')
+ dev.connect_out('tcp://127.0.0.1:%i'%port)
+ dev.start()
+ time.sleep(.25)
+ msg = b'hello again'
+ req.send(msg)
+ self.assertEqual(msg, self.recv(req))
+ del dev
+ req.close()
+
+ def test_single_socket_forwarder_bind(self):
+ dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1)
+ # select random port:
+ binder = self.context.socket(zmq.REQ)
+ port = binder.bind_to_random_port('tcp://127.0.0.1')
+ binder.close()
+ time.sleep(0.1)
+ req = self.context.socket(zmq.REQ)
+ req.connect('tcp://127.0.0.1:%i'%port)
+ dev.bind_in('tcp://127.0.0.1:%i'%port)
+ dev.start()
+ time.sleep(.25)
+ msg = b'hello'
+ req.send(msg)
+ self.assertEqual(msg, self.recv(req))
+ del dev
+ req.close()
+ dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1)
+ # select random port:
+ binder = self.context.socket(zmq.REQ)
+ port = binder.bind_to_random_port('tcp://127.0.0.1')
+ binder.close()
+ time.sleep(0.1)
+ req = self.context.socket(zmq.REQ)
+ req.connect('tcp://127.0.0.1:%i'%port)
+ dev.bind_in('tcp://127.0.0.1:%i'%port)
+ dev.start()
+ time.sleep(.25)
+ msg = b'hello again'
+ req.send(msg)
+ self.assertEqual(msg, self.recv(req))
+ del dev
+ req.close()
+
+ def test_proxy(self):
+ if zmq.zmq_version_info() < (3,2):
+ raise SkipTest("Proxies only in libzmq >= 3")
+ dev = devices.ThreadProxy(zmq.PULL, zmq.PUSH, zmq.PUSH)
+ binder = self.context.socket(zmq.REQ)
+ iface = 'tcp://127.0.0.1'
+ port = binder.bind_to_random_port(iface)
+ port2 = binder.bind_to_random_port(iface)
+ port3 = binder.bind_to_random_port(iface)
+ binder.close()
+ time.sleep(0.1)
+ dev.bind_in("%s:%i" % (iface, port))
+ dev.bind_out("%s:%i" % (iface, port2))
+ dev.bind_mon("%s:%i" % (iface, port3))
+ dev.start()
+ time.sleep(0.25)
+ msg = b'hello'
+ push = self.context.socket(zmq.PUSH)
+ push.connect("%s:%i" % (iface, port))
+ pull = self.context.socket(zmq.PULL)
+ pull.connect("%s:%i" % (iface, port2))
+ mon = self.context.socket(zmq.PULL)
+ mon.connect("%s:%i" % (iface, port3))
+ push.send(msg)
+ self.sockets.extend([push, pull, mon])
+ self.assertEqual(msg, self.recv(pull))
+ self.assertEqual(msg, self.recv(mon))
+
+if have_gevent:
+ import gevent
+ import zmq.green
+
+ class TestDeviceGreen(GreenTest, BaseZMQTestCase):
+
+ def test_green_device(self):
+ rep = self.context.socket(zmq.REP)
+ req = self.context.socket(zmq.REQ)
+ self.sockets.extend([req, rep])
+ port = rep.bind_to_random_port('tcp://127.0.0.1')
+ g = gevent.spawn(zmq.green.device, zmq.QUEUE, rep, rep)
+ req.connect('tcp://127.0.0.1:%i' % port)
+ req.send(b'hi')
+ timeout = gevent.Timeout(3)
+ timeout.start()
+ receiver = gevent.spawn(req.recv)
+ self.assertEqual(receiver.get(2), b'hi')
+ timeout.cancel()
+ g.kill(block=True)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_error.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_error.py
new file mode 100644
index 00000000..a2eee14a
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_error.py
@@ -0,0 +1,43 @@
+# -*- coding: utf8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+import time
+
+import zmq
+from zmq import ZMQError, strerror, Again, ContextTerminated
+from zmq.tests import BaseZMQTestCase
+
+if sys.version_info[0] >= 3:
+ long = int
+
+class TestZMQError(BaseZMQTestCase):
+
+ def test_strerror(self):
+ """test that strerror gets the right type."""
+ for i in range(10):
+ e = strerror(i)
+ self.assertTrue(isinstance(e, str))
+
+ def test_zmqerror(self):
+ for errno in range(10):
+ e = ZMQError(errno)
+ self.assertEqual(e.errno, errno)
+ self.assertEqual(str(e), strerror(errno))
+
+ def test_again(self):
+ s = self.context.socket(zmq.REP)
+ self.assertRaises(Again, s.recv, zmq.NOBLOCK)
+ self.assertRaisesErrno(zmq.EAGAIN, s.recv, zmq.NOBLOCK)
+ s.close()
+
+ def atest_ctxterm(self):
+ s = self.context.socket(zmq.REP)
+ t = Thread(target=self.context.term)
+ t.start()
+ self.assertRaises(ContextTerminated, s.recv, zmq.NOBLOCK)
+ self.assertRaisesErrno(zmq.TERM, s.recv, zmq.NOBLOCK)
+ s.close()
+ t.join()
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_etc.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_etc.py
new file mode 100644
index 00000000..ad224064
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_etc.py
@@ -0,0 +1,15 @@
+# Copyright (c) PyZMQ Developers.
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+
+import zmq
+
+from . import skip_if
+
+@skip_if(zmq.zmq_version_info() < (4,1), "libzmq < 4.1")
+def test_has():
+ assert not zmq.has('something weird')
+ has_ipc = zmq.has('ipc')
+ not_windows = not sys.platform.startswith('win')
+ assert has_ipc == not_windows
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_imports.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_imports.py
new file mode 100644
index 00000000..c0ddfaac
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_imports.py
@@ -0,0 +1,62 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+from unittest import TestCase
+
+class TestImports(TestCase):
+ """Test Imports - the quickest test to ensure that we haven't
+ introduced version-incompatible syntax errors."""
+
+ def test_toplevel(self):
+ """test toplevel import"""
+ import zmq
+
+ def test_core(self):
+ """test core imports"""
+ from zmq import Context
+ from zmq import Socket
+ from zmq import Poller
+ from zmq import Frame
+ from zmq import constants
+ from zmq import device, proxy
+ from zmq import Stopwatch
+ from zmq import (
+ zmq_version,
+ zmq_version_info,
+ pyzmq_version,
+ pyzmq_version_info,
+ )
+
+ def test_devices(self):
+ """test device imports"""
+ import zmq.devices
+ from zmq.devices import basedevice
+ from zmq.devices import monitoredqueue
+ from zmq.devices import monitoredqueuedevice
+
+ def test_log(self):
+ """test log imports"""
+ import zmq.log
+ from zmq.log import handlers
+
+ def test_eventloop(self):
+ """test eventloop imports"""
+ import zmq.eventloop
+ from zmq.eventloop import ioloop
+ from zmq.eventloop import zmqstream
+ from zmq.eventloop.minitornado.platform import auto
+ from zmq.eventloop.minitornado import ioloop
+
+ def test_utils(self):
+ """test util imports"""
+ import zmq.utils
+ from zmq.utils import strtypes
+ from zmq.utils import jsonapi
+
+ def test_ssh(self):
+ """test ssh imports"""
+ from zmq.ssh import tunnel
+
+
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_ioloop.py
new file mode 100644
index 00000000..2a8b1153
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_ioloop.py
@@ -0,0 +1,113 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import time
+import os
+import threading
+
+import zmq
+from zmq.tests import BaseZMQTestCase
+from zmq.eventloop import ioloop
+from zmq.eventloop.minitornado.ioloop import _Timeout
+try:
+ from tornado.ioloop import PollIOLoop, IOLoop as BaseIOLoop
+except ImportError:
+ from zmq.eventloop.minitornado.ioloop import IOLoop as BaseIOLoop
+
+
+def printer():
+ os.system("say hello")
+ raise Exception
+ print (time.time())
+
+
+class Delay(threading.Thread):
+ def __init__(self, f, delay=1):
+ self.f=f
+ self.delay=delay
+ self.aborted=False
+ self.cond=threading.Condition()
+ super(Delay, self).__init__()
+
+ def run(self):
+ self.cond.acquire()
+ self.cond.wait(self.delay)
+ self.cond.release()
+ if not self.aborted:
+ self.f()
+
+ def abort(self):
+ self.aborted=True
+ self.cond.acquire()
+ self.cond.notify()
+ self.cond.release()
+
+
+class TestIOLoop(BaseZMQTestCase):
+
+ def test_simple(self):
+ """simple IOLoop creation test"""
+ loop = ioloop.IOLoop()
+ dc = ioloop.PeriodicCallback(loop.stop, 200, loop)
+ pc = ioloop.PeriodicCallback(lambda : None, 10, loop)
+ pc.start()
+ dc.start()
+ t = Delay(loop.stop,1)
+ t.start()
+ loop.start()
+ if t.isAlive():
+ t.abort()
+ else:
+ self.fail("IOLoop failed to exit")
+
+ def test_timeout_compare(self):
+ """test timeout comparisons"""
+ loop = ioloop.IOLoop()
+ t = _Timeout(1, 2, loop)
+ t2 = _Timeout(1, 3, loop)
+ self.assertEqual(t < t2, id(t) < id(t2))
+ t2 = _Timeout(2,1, loop)
+ self.assertTrue(t < t2)
+
+ def test_poller_events(self):
+ """Tornado poller implementation maps events correctly"""
+ req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)
+ poller = ioloop.ZMQPoller()
+ poller.register(req, ioloop.IOLoop.READ)
+ poller.register(rep, ioloop.IOLoop.READ)
+ events = dict(poller.poll(0))
+ self.assertEqual(events.get(rep), None)
+ self.assertEqual(events.get(req), None)
+
+ poller.register(req, ioloop.IOLoop.WRITE)
+ poller.register(rep, ioloop.IOLoop.WRITE)
+ events = dict(poller.poll(1))
+ self.assertEqual(events.get(req), ioloop.IOLoop.WRITE)
+ self.assertEqual(events.get(rep), None)
+
+ poller.register(rep, ioloop.IOLoop.READ)
+ req.send(b'hi')
+ events = dict(poller.poll(1))
+ self.assertEqual(events.get(rep), ioloop.IOLoop.READ)
+ self.assertEqual(events.get(req), None)
+
+ def test_instance(self):
+ """Test IOLoop.instance returns the right object"""
+ loop = ioloop.IOLoop.instance()
+ self.assertEqual(loop.__class__, ioloop.IOLoop)
+ loop = BaseIOLoop.instance()
+ self.assertEqual(loop.__class__, ioloop.IOLoop)
+
+ def test_close_all(self):
+ """Test close(all_fds=True)"""
+ loop = ioloop.IOLoop.instance()
+ req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)
+ loop.add_handler(req, lambda msg: msg, ioloop.IOLoop.READ)
+ loop.add_handler(rep, lambda msg: msg, ioloop.IOLoop.READ)
+ self.assertEqual(req.closed, False)
+ self.assertEqual(rep.closed, False)
+ loop.close(all_fds=True)
+ self.assertEqual(req.closed, True)
+ self.assertEqual(rep.closed, True)
+
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_log.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_log.py
new file mode 100644
index 00000000..9206f095
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_log.py
@@ -0,0 +1,116 @@
+# encoding: utf-8
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import logging
+import time
+from unittest import TestCase
+
+import zmq
+from zmq.log import handlers
+from zmq.utils.strtypes import b, u
+from zmq.tests import BaseZMQTestCase
+
+
+class TestPubLog(BaseZMQTestCase):
+
+ iface = 'inproc://zmqlog'
+ topic= 'zmq'
+
+ @property
+ def logger(self):
+ # print dir(self)
+ logger = logging.getLogger('zmqtest')
+ logger.setLevel(logging.DEBUG)
+ return logger
+
+ def connect_handler(self, topic=None):
+ topic = self.topic if topic is None else topic
+ logger = self.logger
+ pub,sub = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ handler = handlers.PUBHandler(pub)
+ handler.setLevel(logging.DEBUG)
+ handler.root_topic = topic
+ logger.addHandler(handler)
+ sub.setsockopt(zmq.SUBSCRIBE, b(topic))
+ time.sleep(0.1)
+ return logger, handler, sub
+
+ def test_init_iface(self):
+ logger = self.logger
+ ctx = self.context
+ handler = handlers.PUBHandler(self.iface)
+ self.assertFalse(handler.ctx is ctx)
+ self.sockets.append(handler.socket)
+ # handler.ctx.term()
+ handler = handlers.PUBHandler(self.iface, self.context)
+ self.sockets.append(handler.socket)
+ self.assertTrue(handler.ctx is ctx)
+ handler.setLevel(logging.DEBUG)
+ handler.root_topic = self.topic
+ logger.addHandler(handler)
+ sub = ctx.socket(zmq.SUB)
+ self.sockets.append(sub)
+ sub.setsockopt(zmq.SUBSCRIBE, b(self.topic))
+ sub.connect(self.iface)
+ import time; time.sleep(0.25)
+ msg1 = 'message'
+ logger.info(msg1)
+
+ (topic, msg2) = sub.recv_multipart()
+ self.assertEqual(topic, b'zmq.INFO')
+ self.assertEqual(msg2, b(msg1)+b'\n')
+ logger.removeHandler(handler)
+
+ def test_init_socket(self):
+ pub,sub = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ logger = self.logger
+ handler = handlers.PUBHandler(pub)
+ handler.setLevel(logging.DEBUG)
+ handler.root_topic = self.topic
+ logger.addHandler(handler)
+
+ self.assertTrue(handler.socket is pub)
+ self.assertTrue(handler.ctx is pub.context)
+ self.assertTrue(handler.ctx is self.context)
+ sub.setsockopt(zmq.SUBSCRIBE, b(self.topic))
+ import time; time.sleep(0.1)
+ msg1 = 'message'
+ logger.info(msg1)
+
+ (topic, msg2) = sub.recv_multipart()
+ self.assertEqual(topic, b'zmq.INFO')
+ self.assertEqual(msg2, b(msg1)+b'\n')
+ logger.removeHandler(handler)
+
+ def test_root_topic(self):
+ logger, handler, sub = self.connect_handler()
+ handler.socket.bind(self.iface)
+ sub2 = sub.context.socket(zmq.SUB)
+ self.sockets.append(sub2)
+ sub2.connect(self.iface)
+ sub2.setsockopt(zmq.SUBSCRIBE, b'')
+ handler.root_topic = b'twoonly'
+ msg1 = 'ignored'
+ logger.info(msg1)
+ self.assertRaisesErrno(zmq.EAGAIN, sub.recv, zmq.NOBLOCK)
+ topic,msg2 = sub2.recv_multipart()
+ self.assertEqual(topic, b'twoonly.INFO')
+ self.assertEqual(msg2, b(msg1)+b'\n')
+
+ logger.removeHandler(handler)
+
+ def test_unicode_message(self):
+ logger, handler, sub = self.connect_handler()
+ base_topic = b(self.topic + '.INFO')
+ for msg, expected in [
+ (u('hello'), [base_topic, b('hello\n')]),
+ (u('héllo'), [base_topic, b('héllo\n')]),
+ (u('tøpic::héllo'), [base_topic + b('.tøpic'), b('héllo\n')]),
+ ]:
+ logger.info(msg)
+ received = sub.recv_multipart()
+ self.assertEqual(received, expected)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_message.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_message.py
new file mode 100644
index 00000000..d8770bdf
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_message.py
@@ -0,0 +1,362 @@
+# -*- coding: utf8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import copy
+import sys
+try:
+ from sys import getrefcount as grc
+except ImportError:
+ grc = None
+
+import time
+from pprint import pprint
+from unittest import TestCase
+
+import zmq
+from zmq.tests import BaseZMQTestCase, SkipTest, skip_pypy, PYPY
+from zmq.utils.strtypes import unicode, bytes, b, u
+
+
+# some useful constants:
+
+x = b'x'
+
+try:
+ view = memoryview
+except NameError:
+ view = buffer
+
+if grc:
+ rc0 = grc(x)
+ v = view(x)
+ view_rc = grc(x) - rc0
+
+def await_gc(obj, rc):
+ """wait for refcount on an object to drop to an expected value
+
+ Necessary because of the zero-copy gc thread,
+ which can take some time to receive its DECREF message.
+ """
+ for i in range(50):
+ # rc + 2 because of the refs in this function
+ if grc(obj) <= rc + 2:
+ return
+ time.sleep(0.05)
+
+class TestFrame(BaseZMQTestCase):
+
+ @skip_pypy
+ def test_above_30(self):
+ """Message above 30 bytes are never copied by 0MQ."""
+ for i in range(5, 16): # 32, 64,..., 65536
+ s = (2**i)*x
+ self.assertEqual(grc(s), 2)
+ m = zmq.Frame(s)
+ self.assertEqual(grc(s), 4)
+ del m
+ await_gc(s, 2)
+ self.assertEqual(grc(s), 2)
+ del s
+
+ def test_str(self):
+ """Test the str representations of the Frames."""
+ for i in range(16):
+ s = (2**i)*x
+ m = zmq.Frame(s)
+ m_str = str(m)
+ m_str_b = b(m_str) # py3compat
+ self.assertEqual(s, m_str_b)
+
+ def test_bytes(self):
+ """Test the Frame.bytes property."""
+ for i in range(1,16):
+ s = (2**i)*x
+ m = zmq.Frame(s)
+ b = m.bytes
+ self.assertEqual(s, m.bytes)
+ if not PYPY:
+ # check that it copies
+ self.assert_(b is not s)
+ # check that it copies only once
+ self.assert_(b is m.bytes)
+
+ def test_unicode(self):
+ """Test the unicode representations of the Frames."""
+ s = u('asdf')
+ self.assertRaises(TypeError, zmq.Frame, s)
+ for i in range(16):
+ s = (2**i)*u('§')
+ m = zmq.Frame(s.encode('utf8'))
+ self.assertEqual(s, unicode(m.bytes,'utf8'))
+
+ def test_len(self):
+ """Test the len of the Frames."""
+ for i in range(16):
+ s = (2**i)*x
+ m = zmq.Frame(s)
+ self.assertEqual(len(s), len(m))
+
+ @skip_pypy
+ def test_lifecycle1(self):
+ """Run through a ref counting cycle with a copy."""
+ for i in range(5, 16): # 32, 64,..., 65536
+ s = (2**i)*x
+ rc = 2
+ self.assertEqual(grc(s), rc)
+ m = zmq.Frame(s)
+ rc += 2
+ self.assertEqual(grc(s), rc)
+ m2 = copy.copy(m)
+ rc += 1
+ self.assertEqual(grc(s), rc)
+ buf = m2.buffer
+
+ rc += view_rc
+ self.assertEqual(grc(s), rc)
+
+ self.assertEqual(s, b(str(m)))
+ self.assertEqual(s, bytes(m2))
+ self.assertEqual(s, m.bytes)
+ # self.assert_(s is str(m))
+ # self.assert_(s is str(m2))
+ del m2
+ rc -= 1
+ self.assertEqual(grc(s), rc)
+ rc -= view_rc
+ del buf
+ self.assertEqual(grc(s), rc)
+ del m
+ rc -= 2
+ await_gc(s, rc)
+ self.assertEqual(grc(s), rc)
+ self.assertEqual(rc, 2)
+ del s
+
+ @skip_pypy
+ def test_lifecycle2(self):
+ """Run through a different ref counting cycle with a copy."""
+ for i in range(5, 16): # 32, 64,..., 65536
+ s = (2**i)*x
+ rc = 2
+ self.assertEqual(grc(s), rc)
+ m = zmq.Frame(s)
+ rc += 2
+ self.assertEqual(grc(s), rc)
+ m2 = copy.copy(m)
+ rc += 1
+ self.assertEqual(grc(s), rc)
+ buf = m.buffer
+ rc += view_rc
+ self.assertEqual(grc(s), rc)
+ self.assertEqual(s, b(str(m)))
+ self.assertEqual(s, bytes(m2))
+ self.assertEqual(s, m2.bytes)
+ self.assertEqual(s, m.bytes)
+ # self.assert_(s is str(m))
+ # self.assert_(s is str(m2))
+ del buf
+ self.assertEqual(grc(s), rc)
+ del m
+ # m.buffer is kept until m is del'd
+ rc -= view_rc
+ rc -= 1
+ self.assertEqual(grc(s), rc)
+ del m2
+ rc -= 2
+ await_gc(s, rc)
+ self.assertEqual(grc(s), rc)
+ self.assertEqual(rc, 2)
+ del s
+
+ @skip_pypy
+ def test_tracker(self):
+ m = zmq.Frame(b'asdf', track=True)
+ self.assertFalse(m.tracker.done)
+ pm = zmq.MessageTracker(m)
+ self.assertFalse(pm.done)
+ del m
+ for i in range(10):
+ if pm.done:
+ break
+ time.sleep(0.1)
+ self.assertTrue(pm.done)
+
+ def test_no_tracker(self):
+ m = zmq.Frame(b'asdf', track=False)
+ self.assertEqual(m.tracker, None)
+ m2 = copy.copy(m)
+ self.assertEqual(m2.tracker, None)
+ self.assertRaises(ValueError, zmq.MessageTracker, m)
+
+ @skip_pypy
+ def test_multi_tracker(self):
+ m = zmq.Frame(b'asdf', track=True)
+ m2 = zmq.Frame(b'whoda', track=True)
+ mt = zmq.MessageTracker(m,m2)
+ self.assertFalse(m.tracker.done)
+ self.assertFalse(mt.done)
+ self.assertRaises(zmq.NotDone, mt.wait, 0.1)
+ del m
+ time.sleep(0.1)
+ self.assertRaises(zmq.NotDone, mt.wait, 0.1)
+ self.assertFalse(mt.done)
+ del m2
+ self.assertTrue(mt.wait() is None)
+ self.assertTrue(mt.done)
+
+
+ def test_buffer_in(self):
+ """test using a buffer as input"""
+ ins = b("§§¶•ªº˜µ¬˚…∆˙åß∂©œ∑´†≈ç√")
+ m = zmq.Frame(view(ins))
+
+ def test_bad_buffer_in(self):
+ """test using a bad object"""
+ self.assertRaises(TypeError, zmq.Frame, 5)
+ self.assertRaises(TypeError, zmq.Frame, object())
+
+ def test_buffer_out(self):
+ """receiving buffered output"""
+ ins = b("§§¶•ªº˜µ¬˚…∆˙åß∂©œ∑´†≈ç√")
+ m = zmq.Frame(ins)
+ outb = m.buffer
+ self.assertTrue(isinstance(outb, view))
+ self.assert_(outb is m.buffer)
+ self.assert_(m.buffer is m.buffer)
+
+ def test_multisend(self):
+ """ensure that a message remains intact after multiple sends"""
+ a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ s = b"message"
+ m = zmq.Frame(s)
+ self.assertEqual(s, m.bytes)
+
+ a.send(m, copy=False)
+ time.sleep(0.1)
+ self.assertEqual(s, m.bytes)
+ a.send(m, copy=False)
+ time.sleep(0.1)
+ self.assertEqual(s, m.bytes)
+ a.send(m, copy=True)
+ time.sleep(0.1)
+ self.assertEqual(s, m.bytes)
+ a.send(m, copy=True)
+ time.sleep(0.1)
+ self.assertEqual(s, m.bytes)
+ for i in range(4):
+ r = b.recv()
+ self.assertEqual(s,r)
+ self.assertEqual(s, m.bytes)
+
+ def test_buffer_numpy(self):
+ """test non-copying numpy array messages"""
+ try:
+ import numpy
+ except ImportError:
+ raise SkipTest("numpy required")
+ rand = numpy.random.randint
+ shapes = [ rand(2,16) for i in range(5) ]
+ for i in range(1,len(shapes)+1):
+ shape = shapes[:i]
+ A = numpy.random.random(shape)
+ m = zmq.Frame(A)
+ if view.__name__ == 'buffer':
+ self.assertEqual(A.data, m.buffer)
+ B = numpy.frombuffer(m.buffer,dtype=A.dtype).reshape(A.shape)
+ else:
+ self.assertEqual(memoryview(A), m.buffer)
+ B = numpy.array(m.buffer,dtype=A.dtype).reshape(A.shape)
+ self.assertEqual((A==B).all(), True)
+
+ def test_memoryview(self):
+ """test messages from memoryview"""
+ major,minor = sys.version_info[:2]
+ if not (major >= 3 or (major == 2 and minor >= 7)):
+ raise SkipTest("memoryviews only in python >= 2.7")
+
+ s = b'carrotjuice'
+ v = memoryview(s)
+ m = zmq.Frame(s)
+ buf = m.buffer
+ s2 = buf.tobytes()
+ self.assertEqual(s2,s)
+ self.assertEqual(m.bytes,s)
+
+ def test_noncopying_recv(self):
+ """check for clobbering message buffers"""
+ null = b'\0'*64
+ sa,sb = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ for i in range(32):
+ # try a few times
+ sb.send(null, copy=False)
+ m = sa.recv(copy=False)
+ mb = m.bytes
+ # buf = view(m)
+ buf = m.buffer
+ del m
+ for i in range(5):
+ ff=b'\xff'*(40 + i*10)
+ sb.send(ff, copy=False)
+ m2 = sa.recv(copy=False)
+ if view.__name__ == 'buffer':
+ b = bytes(buf)
+ else:
+ b = buf.tobytes()
+ self.assertEqual(b, null)
+ self.assertEqual(mb, null)
+ self.assertEqual(m2.bytes, ff)
+
+ @skip_pypy
+ def test_buffer_numpy(self):
+ """test non-copying numpy array messages"""
+ try:
+ import numpy
+ except ImportError:
+ raise SkipTest("requires numpy")
+ if sys.version_info < (2,7):
+ raise SkipTest("requires new-style buffer interface (py >= 2.7)")
+ rand = numpy.random.randint
+ shapes = [ rand(2,5) for i in range(5) ]
+ a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ dtypes = [int, float, '>i4', 'B']
+ for i in range(1,len(shapes)+1):
+ shape = shapes[:i]
+ for dt in dtypes:
+ A = numpy.empty(shape, dtype=dt)
+ while numpy.isnan(A).any():
+ # don't let nan sneak in
+ A = numpy.ndarray(shape, dtype=dt)
+ a.send(A, copy=False)
+ msg = b.recv(copy=False)
+
+ B = numpy.frombuffer(msg, A.dtype).reshape(A.shape)
+ self.assertEqual(A.shape, B.shape)
+ self.assertTrue((A==B).all())
+ A = numpy.empty(shape, dtype=[('a', int), ('b', float), ('c', 'a32')])
+ A['a'] = 1024
+ A['b'] = 1e9
+ A['c'] = 'hello there'
+ a.send(A, copy=False)
+ msg = b.recv(copy=False)
+
+ B = numpy.frombuffer(msg, A.dtype).reshape(A.shape)
+ self.assertEqual(A.shape, B.shape)
+ self.assertTrue((A==B).all())
+
+ def test_frame_more(self):
+ """test Frame.more attribute"""
+ frame = zmq.Frame(b"hello")
+ self.assertFalse(frame.more)
+ sa,sb = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ sa.send_multipart([b'hi', b'there'])
+ frame = self.recv(sb, copy=False)
+ self.assertTrue(frame.more)
+ if zmq.zmq_version_info()[0] >= 3 and not PYPY:
+ self.assertTrue(frame.get(zmq.MORE))
+ frame = self.recv(sb, copy=False)
+ self.assertFalse(frame.more)
+ if zmq.zmq_version_info()[0] >= 3 and not PYPY:
+ self.assertFalse(frame.get(zmq.MORE))
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_monitor.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_monitor.py
new file mode 100644
index 00000000..4f035388
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_monitor.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import sys
+import time
+import struct
+
+from unittest import TestCase
+
+import zmq
+from zmq.tests import BaseZMQTestCase, skip_if, skip_pypy
+from zmq.utils.monitor import recv_monitor_message
+
+skip_lt_4 = skip_if(zmq.zmq_version_info() < (4,), "requires zmq >= 4")
+
+class TestSocketMonitor(BaseZMQTestCase):
+
+ @skip_lt_4
+ def test_monitor(self):
+ """Test monitoring interface for sockets."""
+ s_rep = self.context.socket(zmq.REP)
+ s_req = self.context.socket(zmq.REQ)
+ self.sockets.extend([s_rep, s_req])
+ s_req.bind("tcp://127.0.0.1:6666")
+ # try monitoring the REP socket
+
+ s_rep.monitor("inproc://monitor.rep", zmq.EVENT_ALL)
+ # create listening socket for monitor
+ s_event = self.context.socket(zmq.PAIR)
+ self.sockets.append(s_event)
+ s_event.connect("inproc://monitor.rep")
+ s_event.linger = 0
+ # test receive event for connect event
+ s_rep.connect("tcp://127.0.0.1:6666")
+ m = recv_monitor_message(s_event)
+ if m['event'] == zmq.EVENT_CONNECT_DELAYED:
+ self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6666")
+ # test receive event for connected event
+ m = recv_monitor_message(s_event)
+ self.assertEqual(m['event'], zmq.EVENT_CONNECTED)
+ self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6666")
+
+ # test monitor can be disabled.
+ s_rep.disable_monitor()
+ m = recv_monitor_message(s_event)
+ self.assertEqual(m['event'], zmq.EVENT_MONITOR_STOPPED)
+
+
+ @skip_lt_4
+ def test_monitor_connected(self):
+ """Test connected monitoring socket."""
+ s_rep = self.context.socket(zmq.REP)
+ s_req = self.context.socket(zmq.REQ)
+ self.sockets.extend([s_rep, s_req])
+ s_req.bind("tcp://127.0.0.1:6667")
+ # try monitoring the REP socket
+ # create listening socket for monitor
+ s_event = s_rep.get_monitor_socket()
+ s_event.linger = 0
+ self.sockets.append(s_event)
+ # test receive event for connect event
+ s_rep.connect("tcp://127.0.0.1:6667")
+ m = recv_monitor_message(s_event)
+ if m['event'] == zmq.EVENT_CONNECT_DELAYED:
+ self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6667")
+ # test receive event for connected event
+ m = recv_monitor_message(s_event)
+ self.assertEqual(m['event'], zmq.EVENT_CONNECTED)
+ self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6667")
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_monqueue.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_monqueue.py
new file mode 100644
index 00000000..e855602e
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_monqueue.py
@@ -0,0 +1,227 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import time
+from unittest import TestCase
+
+import zmq
+from zmq import devices
+
+from zmq.tests import BaseZMQTestCase, SkipTest, PYPY
+from zmq.utils.strtypes import unicode
+
+
+if PYPY or zmq.zmq_version_info() >= (4,1):
+ # cleanup of shared Context doesn't work on PyPy
+ # there also seems to be a bug in cleanup in libzmq-4.1 (zeromq/libzmq#1052)
+ devices.Device.context_factory = zmq.Context
+
+
+class TestMonitoredQueue(BaseZMQTestCase):
+
+ sockets = []
+
+ def build_device(self, mon_sub=b"", in_prefix=b'in', out_prefix=b'out'):
+ self.device = devices.ThreadMonitoredQueue(zmq.PAIR, zmq.PAIR, zmq.PUB,
+ in_prefix, out_prefix)
+ alice = self.context.socket(zmq.PAIR)
+ bob = self.context.socket(zmq.PAIR)
+ mon = self.context.socket(zmq.SUB)
+
+ aport = alice.bind_to_random_port('tcp://127.0.0.1')
+ bport = bob.bind_to_random_port('tcp://127.0.0.1')
+ mport = mon.bind_to_random_port('tcp://127.0.0.1')
+ mon.setsockopt(zmq.SUBSCRIBE, mon_sub)
+
+ self.device.connect_in("tcp://127.0.0.1:%i"%aport)
+ self.device.connect_out("tcp://127.0.0.1:%i"%bport)
+ self.device.connect_mon("tcp://127.0.0.1:%i"%mport)
+ self.device.start()
+ time.sleep(.2)
+ try:
+ # this is currenlty necessary to ensure no dropped monitor messages
+ # see LIBZMQ-248 for more info
+ mon.recv_multipart(zmq.NOBLOCK)
+ except zmq.ZMQError:
+ pass
+ self.sockets.extend([alice, bob, mon])
+ return alice, bob, mon
+
+
+ def teardown_device(self):
+ for socket in self.sockets:
+ socket.close()
+ del socket
+ del self.device
+
+ def test_reply(self):
+ alice, bob, mon = self.build_device()
+ alices = b"hello bob".split()
+ alice.send_multipart(alices)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices, bobs)
+ bobs = b"hello alice".split()
+ bob.send_multipart(bobs)
+ alices = self.recv_multipart(alice)
+ self.assertEqual(alices, bobs)
+ self.teardown_device()
+
+ def test_queue(self):
+ alice, bob, mon = self.build_device()
+ alices = b"hello bob".split()
+ alice.send_multipart(alices)
+ alices2 = b"hello again".split()
+ alice.send_multipart(alices2)
+ alices3 = b"hello again and again".split()
+ alice.send_multipart(alices3)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices, bobs)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices2, bobs)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices3, bobs)
+ bobs = b"hello alice".split()
+ bob.send_multipart(bobs)
+ alices = self.recv_multipart(alice)
+ self.assertEqual(alices, bobs)
+ self.teardown_device()
+
+ def test_monitor(self):
+ alice, bob, mon = self.build_device()
+ alices = b"hello bob".split()
+ alice.send_multipart(alices)
+ alices2 = b"hello again".split()
+ alice.send_multipart(alices2)
+ alices3 = b"hello again and again".split()
+ alice.send_multipart(alices3)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices, bobs)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'in']+bobs, mons)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices2, bobs)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices3, bobs)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'in']+alices2, mons)
+ bobs = b"hello alice".split()
+ bob.send_multipart(bobs)
+ alices = self.recv_multipart(alice)
+ self.assertEqual(alices, bobs)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'in']+alices3, mons)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'out']+bobs, mons)
+ self.teardown_device()
+
+ def test_prefix(self):
+ alice, bob, mon = self.build_device(b"", b'foo', b'bar')
+ alices = b"hello bob".split()
+ alice.send_multipart(alices)
+ alices2 = b"hello again".split()
+ alice.send_multipart(alices2)
+ alices3 = b"hello again and again".split()
+ alice.send_multipart(alices3)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices, bobs)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'foo']+bobs, mons)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices2, bobs)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices3, bobs)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'foo']+alices2, mons)
+ bobs = b"hello alice".split()
+ bob.send_multipart(bobs)
+ alices = self.recv_multipart(alice)
+ self.assertEqual(alices, bobs)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'foo']+alices3, mons)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'bar']+bobs, mons)
+ self.teardown_device()
+
+ def test_monitor_subscribe(self):
+ alice, bob, mon = self.build_device(b"out")
+ alices = b"hello bob".split()
+ alice.send_multipart(alices)
+ alices2 = b"hello again".split()
+ alice.send_multipart(alices2)
+ alices3 = b"hello again and again".split()
+ alice.send_multipart(alices3)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices, bobs)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices2, bobs)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices3, bobs)
+ bobs = b"hello alice".split()
+ bob.send_multipart(bobs)
+ alices = self.recv_multipart(alice)
+ self.assertEqual(alices, bobs)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'out']+bobs, mons)
+ self.teardown_device()
+
+ def test_router_router(self):
+ """test router-router MQ devices"""
+ dev = devices.ThreadMonitoredQueue(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'in', b'out')
+ self.device = dev
+ dev.setsockopt_in(zmq.LINGER, 0)
+ dev.setsockopt_out(zmq.LINGER, 0)
+ dev.setsockopt_mon(zmq.LINGER, 0)
+
+ binder = self.context.socket(zmq.DEALER)
+ porta = binder.bind_to_random_port('tcp://127.0.0.1')
+ portb = binder.bind_to_random_port('tcp://127.0.0.1')
+ binder.close()
+ time.sleep(0.1)
+ a = self.context.socket(zmq.DEALER)
+ a.identity = b'a'
+ b = self.context.socket(zmq.DEALER)
+ b.identity = b'b'
+ self.sockets.extend([a, b])
+
+ a.connect('tcp://127.0.0.1:%i'%porta)
+ dev.bind_in('tcp://127.0.0.1:%i'%porta)
+ b.connect('tcp://127.0.0.1:%i'%portb)
+ dev.bind_out('tcp://127.0.0.1:%i'%portb)
+ dev.start()
+ time.sleep(0.2)
+ if zmq.zmq_version_info() >= (3,1,0):
+ # flush erroneous poll state, due to LIBZMQ-280
+ ping_msg = [ b'ping', b'pong' ]
+ for s in (a,b):
+ s.send_multipart(ping_msg)
+ try:
+ s.recv(zmq.NOBLOCK)
+ except zmq.ZMQError:
+ pass
+ msg = [ b'hello', b'there' ]
+ a.send_multipart([b'b']+msg)
+ bmsg = self.recv_multipart(b)
+ self.assertEqual(bmsg, [b'a']+msg)
+ b.send_multipart(bmsg)
+ amsg = self.recv_multipart(a)
+ self.assertEqual(amsg, [b'b']+msg)
+ self.teardown_device()
+
+ def test_default_mq_args(self):
+ self.device = dev = devices.ThreadMonitoredQueue(zmq.ROUTER, zmq.DEALER, zmq.PUB)
+ dev.setsockopt_in(zmq.LINGER, 0)
+ dev.setsockopt_out(zmq.LINGER, 0)
+ dev.setsockopt_mon(zmq.LINGER, 0)
+ # this will raise if default args are wrong
+ dev.start()
+ self.teardown_device()
+
+ def test_mq_check_prefix(self):
+ ins = self.context.socket(zmq.ROUTER)
+ outs = self.context.socket(zmq.DEALER)
+ mons = self.context.socket(zmq.PUB)
+ self.sockets.extend([ins, outs, mons])
+
+ ins = unicode('in')
+ outs = unicode('out')
+ self.assertRaises(TypeError, devices.monitoredqueue, ins, outs, mons)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_multipart.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_multipart.py
new file mode 100644
index 00000000..24d41be0
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_multipart.py
@@ -0,0 +1,35 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import zmq
+
+
+from zmq.tests import BaseZMQTestCase, SkipTest, have_gevent, GreenTest
+
+
+class TestMultipart(BaseZMQTestCase):
+
+ def test_router_dealer(self):
+ router, dealer = self.create_bound_pair(zmq.ROUTER, zmq.DEALER)
+
+ msg1 = b'message1'
+ dealer.send(msg1)
+ ident = self.recv(router)
+ more = router.rcvmore
+ self.assertEqual(more, True)
+ msg2 = self.recv(router)
+ self.assertEqual(msg1, msg2)
+ more = router.rcvmore
+ self.assertEqual(more, False)
+
+ def test_basic_multipart(self):
+ a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ msg = [ b'hi', b'there', b'b']
+ a.send_multipart(msg)
+ recvd = b.recv_multipart()
+ self.assertEqual(msg, recvd)
+
+if have_gevent:
+ class TestMultipartGreen(GreenTest, TestMultipart):
+ pass
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_pair.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_pair.py
new file mode 100644
index 00000000..e88c1e8b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_pair.py
@@ -0,0 +1,53 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import zmq
+
+
+from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest
+
+
+x = b' '
+class TestPair(BaseZMQTestCase):
+
+ def test_basic(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+
+ msg1 = b'message1'
+ msg2 = self.ping_pong(s1, s2, msg1)
+ self.assertEqual(msg1, msg2)
+
+ def test_multiple(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+
+ for i in range(10):
+ msg = i*x
+ s1.send(msg)
+
+ for i in range(10):
+ msg = i*x
+ s2.send(msg)
+
+ for i in range(10):
+ msg = s1.recv()
+ self.assertEqual(msg, i*x)
+
+ for i in range(10):
+ msg = s2.recv()
+ self.assertEqual(msg, i*x)
+
+ def test_json(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ o = dict(a=10,b=list(range(10)))
+ o2 = self.ping_pong_json(s1, s2, o)
+
+ def test_pyobj(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ o = dict(a=10,b=range(10))
+ o2 = self.ping_pong_pyobj(s1, s2, o)
+
+if have_gevent:
+ class TestReqRepGreen(GreenTest, TestPair):
+ pass
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_poll.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_poll.py
new file mode 100644
index 00000000..57346c89
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_poll.py
@@ -0,0 +1,229 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import time
+from unittest import TestCase
+
+import zmq
+
+from zmq.tests import PollZMQTestCase, have_gevent, GreenTest
+
+def wait():
+ time.sleep(.25)
+
+
+class TestPoll(PollZMQTestCase):
+
+ Poller = zmq.Poller
+
+ # This test is failing due to this issue:
+ # http://github.com/sustrik/zeromq2/issues#issue/26
+ def test_pair(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+
+ # Sleep to allow sockets to connect.
+ wait()
+
+ poller = self.Poller()
+ poller.register(s1, zmq.POLLIN|zmq.POLLOUT)
+ poller.register(s2, zmq.POLLIN|zmq.POLLOUT)
+ # Poll result should contain both sockets
+ socks = dict(poller.poll())
+ # Now make sure that both are send ready.
+ self.assertEqual(socks[s1], zmq.POLLOUT)
+ self.assertEqual(socks[s2], zmq.POLLOUT)
+ # Now do a send on both, wait and test for zmq.POLLOUT|zmq.POLLIN
+ s1.send(b'msg1')
+ s2.send(b'msg2')
+ wait()
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s1], zmq.POLLOUT|zmq.POLLIN)
+ self.assertEqual(socks[s2], zmq.POLLOUT|zmq.POLLIN)
+ # Make sure that both are in POLLOUT after recv.
+ s1.recv()
+ s2.recv()
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s1], zmq.POLLOUT)
+ self.assertEqual(socks[s2], zmq.POLLOUT)
+
+ poller.unregister(s1)
+ poller.unregister(s2)
+
+ # Wait for everything to finish.
+ wait()
+
+ def test_reqrep(self):
+ s1, s2 = self.create_bound_pair(zmq.REP, zmq.REQ)
+
+ # Sleep to allow sockets to connect.
+ wait()
+
+ poller = self.Poller()
+ poller.register(s1, zmq.POLLIN|zmq.POLLOUT)
+ poller.register(s2, zmq.POLLIN|zmq.POLLOUT)
+
+ # Make sure that s1 is in state 0 and s2 is in POLLOUT
+ socks = dict(poller.poll())
+ self.assertEqual(s1 in socks, 0)
+ self.assertEqual(socks[s2], zmq.POLLOUT)
+
+ # Make sure that s2 goes immediately into state 0 after send.
+ s2.send(b'msg1')
+ socks = dict(poller.poll())
+ self.assertEqual(s2 in socks, 0)
+
+ # Make sure that s1 goes into POLLIN state after a time.sleep().
+ time.sleep(0.5)
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s1], zmq.POLLIN)
+
+ # Make sure that s1 goes into POLLOUT after recv.
+ s1.recv()
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s1], zmq.POLLOUT)
+
+ # Make sure s1 goes into state 0 after send.
+ s1.send(b'msg2')
+ socks = dict(poller.poll())
+ self.assertEqual(s1 in socks, 0)
+
+ # Wait and then see that s2 is in POLLIN.
+ time.sleep(0.5)
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s2], zmq.POLLIN)
+
+ # Make sure that s2 is in POLLOUT after recv.
+ s2.recv()
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s2], zmq.POLLOUT)
+
+ poller.unregister(s1)
+ poller.unregister(s2)
+
+ # Wait for everything to finish.
+ wait()
+
+ def test_no_events(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ poller = self.Poller()
+ poller.register(s1, zmq.POLLIN|zmq.POLLOUT)
+ poller.register(s2, 0)
+ self.assertTrue(s1 in poller)
+ self.assertFalse(s2 in poller)
+ poller.register(s1, 0)
+ self.assertFalse(s1 in poller)
+
+ def test_pubsub(self):
+ s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ s2.setsockopt(zmq.SUBSCRIBE, b'')
+
+ # Sleep to allow sockets to connect.
+ wait()
+
+ poller = self.Poller()
+ poller.register(s1, zmq.POLLIN|zmq.POLLOUT)
+ poller.register(s2, zmq.POLLIN)
+
+ # Now make sure that both are send ready.
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s1], zmq.POLLOUT)
+ self.assertEqual(s2 in socks, 0)
+ # Make sure that s1 stays in POLLOUT after a send.
+ s1.send(b'msg1')
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s1], zmq.POLLOUT)
+
+ # Make sure that s2 is POLLIN after waiting.
+ wait()
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s2], zmq.POLLIN)
+
+ # Make sure that s2 goes into 0 after recv.
+ s2.recv()
+ socks = dict(poller.poll())
+ self.assertEqual(s2 in socks, 0)
+
+ poller.unregister(s1)
+ poller.unregister(s2)
+
+ # Wait for everything to finish.
+ wait()
+ def test_timeout(self):
+ """make sure Poller.poll timeout has the right units (milliseconds)."""
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ poller = self.Poller()
+ poller.register(s1, zmq.POLLIN)
+ tic = time.time()
+ evt = poller.poll(.005)
+ toc = time.time()
+ self.assertTrue(toc-tic < 0.1)
+ tic = time.time()
+ evt = poller.poll(5)
+ toc = time.time()
+ self.assertTrue(toc-tic < 0.1)
+ self.assertTrue(toc-tic > .001)
+ tic = time.time()
+ evt = poller.poll(500)
+ toc = time.time()
+ self.assertTrue(toc-tic < 1)
+ self.assertTrue(toc-tic > 0.1)
+
+class TestSelect(PollZMQTestCase):
+
+ def test_pair(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+
+ # Sleep to allow sockets to connect.
+ wait()
+
+ rlist, wlist, xlist = zmq.select([s1, s2], [s1, s2], [s1, s2])
+ self.assert_(s1 in wlist)
+ self.assert_(s2 in wlist)
+ self.assert_(s1 not in rlist)
+ self.assert_(s2 not in rlist)
+
+ def test_timeout(self):
+ """make sure select timeout has the right units (seconds)."""
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ tic = time.time()
+ r,w,x = zmq.select([s1,s2],[],[],.005)
+ toc = time.time()
+ self.assertTrue(toc-tic < 1)
+ self.assertTrue(toc-tic > 0.001)
+ tic = time.time()
+ r,w,x = zmq.select([s1,s2],[],[],.25)
+ toc = time.time()
+ self.assertTrue(toc-tic < 1)
+ self.assertTrue(toc-tic > 0.1)
+
+
+if have_gevent:
+ import gevent
+ from zmq import green as gzmq
+
+ class TestPollGreen(GreenTest, TestPoll):
+ Poller = gzmq.Poller
+
+ def test_wakeup(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ poller = self.Poller()
+ poller.register(s2, zmq.POLLIN)
+
+ tic = time.time()
+ r = gevent.spawn(lambda: poller.poll(10000))
+ s = gevent.spawn(lambda: s1.send(b'msg1'))
+ r.join()
+ toc = time.time()
+ self.assertTrue(toc-tic < 1)
+
+ def test_socket_poll(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+
+ tic = time.time()
+ r = gevent.spawn(lambda: s2.poll(10000))
+ s = gevent.spawn(lambda: s1.send(b'msg1'))
+ r.join()
+ toc = time.time()
+ self.assertTrue(toc-tic < 1)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_pubsub.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_pubsub.py
new file mode 100644
index 00000000..a3ee22aa
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_pubsub.py
@@ -0,0 +1,41 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import time
+from unittest import TestCase
+
+import zmq
+
+from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest
+
+
+class TestPubSub(BaseZMQTestCase):
+
+ pass
+
+ # We are disabling this test while an issue is being resolved.
+ def test_basic(self):
+ s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ s2.setsockopt(zmq.SUBSCRIBE,b'')
+ time.sleep(0.1)
+ msg1 = b'message'
+ s1.send(msg1)
+ msg2 = s2.recv() # This is blocking!
+ self.assertEqual(msg1, msg2)
+
+ def test_topic(self):
+ s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ s2.setsockopt(zmq.SUBSCRIBE, b'x')
+ time.sleep(0.1)
+ msg1 = b'message'
+ s1.send(msg1)
+ self.assertRaisesErrno(zmq.EAGAIN, s2.recv, zmq.NOBLOCK)
+ msg1 = b'xmessage'
+ s1.send(msg1)
+ msg2 = s2.recv()
+ self.assertEqual(msg1, msg2)
+
+if have_gevent:
+ class TestPubSubGreen(GreenTest, TestPubSub):
+ pass
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_reqrep.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_reqrep.py
new file mode 100644
index 00000000..de17f2b3
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_reqrep.py
@@ -0,0 +1,62 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from unittest import TestCase
+
+import zmq
+from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest
+
+
+class TestReqRep(BaseZMQTestCase):
+
+ def test_basic(self):
+ s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+
+ msg1 = b'message 1'
+ msg2 = self.ping_pong(s1, s2, msg1)
+ self.assertEqual(msg1, msg2)
+
+ def test_multiple(self):
+ s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+
+ for i in range(10):
+ msg1 = i*b' '
+ msg2 = self.ping_pong(s1, s2, msg1)
+ self.assertEqual(msg1, msg2)
+
+ def test_bad_send_recv(self):
+ s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+
+ if zmq.zmq_version() != '2.1.8':
+ # this doesn't work on 2.1.8
+ for copy in (True,False):
+ self.assertRaisesErrno(zmq.EFSM, s1.recv, copy=copy)
+ self.assertRaisesErrno(zmq.EFSM, s2.send, b'asdf', copy=copy)
+
+ # I have to have this or we die on an Abort trap.
+ msg1 = b'asdf'
+ msg2 = self.ping_pong(s1, s2, msg1)
+ self.assertEqual(msg1, msg2)
+
+ def test_json(self):
+ s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+ o = dict(a=10,b=list(range(10)))
+ o2 = self.ping_pong_json(s1, s2, o)
+
+ def test_pyobj(self):
+ s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+ o = dict(a=10,b=range(10))
+ o2 = self.ping_pong_pyobj(s1, s2, o)
+
+ def test_large_msg(self):
+ s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+ msg1 = 10000*b'X'
+
+ for i in range(10):
+ msg2 = self.ping_pong(s1, s2, msg1)
+ self.assertEqual(msg1, msg2)
+
+if have_gevent:
+ class TestReqRepGreen(GreenTest, TestReqRep):
+ pass
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_security.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_security.py
new file mode 100644
index 00000000..687b7e0f
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_security.py
@@ -0,0 +1,212 @@
+"""Test libzmq security (libzmq >= 3.3.0)"""
+# -*- coding: utf8 -*-
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import os
+from threading import Thread
+
+import zmq
+from zmq.tests import (
+ BaseZMQTestCase, SkipTest, PYPY
+)
+from zmq.utils import z85
+
+
+USER = b"admin"
+PASS = b"password"
+
+class TestSecurity(BaseZMQTestCase):
+
+ def setUp(self):
+ if zmq.zmq_version_info() < (4,0):
+ raise SkipTest("security is new in libzmq 4.0")
+ try:
+ zmq.curve_keypair()
+ except zmq.ZMQError:
+ raise SkipTest("security requires libzmq to be linked against libsodium")
+ super(TestSecurity, self).setUp()
+
+
+ def zap_handler(self):
+ socket = self.context.socket(zmq.REP)
+ socket.bind("inproc://zeromq.zap.01")
+ try:
+ msg = self.recv_multipart(socket)
+
+ version, sequence, domain, address, identity, mechanism = msg[:6]
+ if mechanism == b'PLAIN':
+ username, password = msg[6:]
+ elif mechanism == b'CURVE':
+ key = msg[6]
+
+ self.assertEqual(version, b"1.0")
+ self.assertEqual(identity, b"IDENT")
+ reply = [version, sequence]
+ if mechanism == b'CURVE' or \
+ (mechanism == b'PLAIN' and username == USER and password == PASS) or \
+ (mechanism == b'NULL'):
+ reply.extend([
+ b"200",
+ b"OK",
+ b"anonymous",
+ b"\5Hello\0\0\0\5World",
+ ])
+ else:
+ reply.extend([
+ b"400",
+ b"Invalid username or password",
+ b"",
+ b"",
+ ])
+ socket.send_multipart(reply)
+ finally:
+ socket.close()
+
+ def start_zap(self):
+ self.zap_thread = Thread(target=self.zap_handler)
+ self.zap_thread.start()
+
+ def stop_zap(self):
+ self.zap_thread.join()
+
+ def bounce(self, server, client, test_metadata=True):
+ msg = [os.urandom(64), os.urandom(64)]
+ client.send_multipart(msg)
+ frames = self.recv_multipart(server, copy=False)
+ recvd = list(map(lambda x: x.bytes, frames))
+
+ try:
+ if test_metadata and not PYPY:
+ for frame in frames:
+ self.assertEqual(frame.get('User-Id'), 'anonymous')
+ self.assertEqual(frame.get('Hello'), 'World')
+ self.assertEqual(frame['Socket-Type'], 'DEALER')
+ except zmq.ZMQVersionError:
+ pass
+
+ self.assertEqual(recvd, msg)
+ server.send_multipart(recvd)
+ msg2 = self.recv_multipart(client)
+ self.assertEqual(msg2, msg)
+
+ def test_null(self):
+ """test NULL (default) security"""
+ server = self.socket(zmq.DEALER)
+ client = self.socket(zmq.DEALER)
+ self.assertEqual(client.MECHANISM, zmq.NULL)
+ self.assertEqual(server.mechanism, zmq.NULL)
+ self.assertEqual(client.plain_server, 0)
+ self.assertEqual(server.plain_server, 0)
+ iface = 'tcp://127.0.0.1'
+ port = server.bind_to_random_port(iface)
+ client.connect("%s:%i" % (iface, port))
+ self.bounce(server, client, False)
+
+ def test_plain(self):
+ """test PLAIN authentication"""
+ server = self.socket(zmq.DEALER)
+ server.identity = b'IDENT'
+ client = self.socket(zmq.DEALER)
+ self.assertEqual(client.plain_username, b'')
+ self.assertEqual(client.plain_password, b'')
+ client.plain_username = USER
+ client.plain_password = PASS
+ self.assertEqual(client.getsockopt(zmq.PLAIN_USERNAME), USER)
+ self.assertEqual(client.getsockopt(zmq.PLAIN_PASSWORD), PASS)
+ self.assertEqual(client.plain_server, 0)
+ self.assertEqual(server.plain_server, 0)
+ server.plain_server = True
+ self.assertEqual(server.mechanism, zmq.PLAIN)
+ self.assertEqual(client.mechanism, zmq.PLAIN)
+
+ assert not client.plain_server
+ assert server.plain_server
+
+ self.start_zap()
+
+ iface = 'tcp://127.0.0.1'
+ port = server.bind_to_random_port(iface)
+ client.connect("%s:%i" % (iface, port))
+ self.bounce(server, client)
+ self.stop_zap()
+
+ def skip_plain_inauth(self):
+ """test PLAIN failed authentication"""
+ server = self.socket(zmq.DEALER)
+ server.identity = b'IDENT'
+ client = self.socket(zmq.DEALER)
+ self.sockets.extend([server, client])
+ client.plain_username = USER
+ client.plain_password = b'incorrect'
+ server.plain_server = True
+ self.assertEqual(server.mechanism, zmq.PLAIN)
+ self.assertEqual(client.mechanism, zmq.PLAIN)
+
+ self.start_zap()
+
+ iface = 'tcp://127.0.0.1'
+ port = server.bind_to_random_port(iface)
+ client.connect("%s:%i" % (iface, port))
+ client.send(b'ping')
+ server.rcvtimeo = 250
+ self.assertRaisesErrno(zmq.EAGAIN, server.recv)
+ self.stop_zap()
+
+ def test_keypair(self):
+ """test curve_keypair"""
+ try:
+ public, secret = zmq.curve_keypair()
+ except zmq.ZMQError:
+ raise SkipTest("CURVE unsupported")
+
+ self.assertEqual(type(secret), bytes)
+ self.assertEqual(type(public), bytes)
+ self.assertEqual(len(secret), 40)
+ self.assertEqual(len(public), 40)
+
+ # verify that it is indeed Z85
+ bsecret, bpublic = [ z85.decode(key) for key in (public, secret) ]
+ self.assertEqual(type(bsecret), bytes)
+ self.assertEqual(type(bpublic), bytes)
+ self.assertEqual(len(bsecret), 32)
+ self.assertEqual(len(bpublic), 32)
+
+
+ def test_curve(self):
+ """test CURVE encryption"""
+ server = self.socket(zmq.DEALER)
+ server.identity = b'IDENT'
+ client = self.socket(zmq.DEALER)
+ self.sockets.extend([server, client])
+ try:
+ server.curve_server = True
+ except zmq.ZMQError as e:
+ # will raise EINVAL if not linked against libsodium
+ if e.errno == zmq.EINVAL:
+ raise SkipTest("CURVE unsupported")
+
+ server_public, server_secret = zmq.curve_keypair()
+ client_public, client_secret = zmq.curve_keypair()
+
+ server.curve_secretkey = server_secret
+ server.curve_publickey = server_public
+ client.curve_serverkey = server_public
+ client.curve_publickey = client_public
+ client.curve_secretkey = client_secret
+
+ self.assertEqual(server.mechanism, zmq.CURVE)
+ self.assertEqual(client.mechanism, zmq.CURVE)
+
+ self.assertEqual(server.get(zmq.CURVE_SERVER), True)
+ self.assertEqual(client.get(zmq.CURVE_SERVER), False)
+
+ self.start_zap()
+
+ iface = 'tcp://127.0.0.1'
+ port = server.bind_to_random_port(iface)
+ client.connect("%s:%i" % (iface, port))
+ self.bounce(server, client)
+ self.stop_zap()
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_socket.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_socket.py
new file mode 100644
index 00000000..5c842edc
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_socket.py
@@ -0,0 +1,450 @@
+# -*- coding: utf8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import time
+import warnings
+
+import zmq
+from zmq.tests import (
+ BaseZMQTestCase, SkipTest, have_gevent, GreenTest, skip_pypy, skip_if
+)
+from zmq.utils.strtypes import bytes, unicode
+
+
+class TestSocket(BaseZMQTestCase):
+
+ def test_create(self):
+ ctx = self.Context()
+ s = ctx.socket(zmq.PUB)
+ # Superluminal protocol not yet implemented
+ self.assertRaisesErrno(zmq.EPROTONOSUPPORT, s.bind, 'ftl://a')
+ self.assertRaisesErrno(zmq.EPROTONOSUPPORT, s.connect, 'ftl://a')
+ self.assertRaisesErrno(zmq.EINVAL, s.bind, 'tcp://')
+ s.close()
+ del ctx
+
+ def test_context_manager(self):
+ url = 'inproc://a'
+ with self.Context() as ctx:
+ with ctx.socket(zmq.PUSH) as a:
+ a.bind(url)
+ with ctx.socket(zmq.PULL) as b:
+ b.connect(url)
+ msg = b'hi'
+ a.send(msg)
+ rcvd = self.recv(b)
+ self.assertEqual(rcvd, msg)
+ self.assertEqual(b.closed, True)
+ self.assertEqual(a.closed, True)
+ self.assertEqual(ctx.closed, True)
+
+ def test_dir(self):
+ ctx = self.Context()
+ s = ctx.socket(zmq.PUB)
+ self.assertTrue('send' in dir(s))
+ self.assertTrue('IDENTITY' in dir(s))
+ self.assertTrue('AFFINITY' in dir(s))
+ self.assertTrue('FD' in dir(s))
+ s.close()
+ ctx.term()
+
+ def test_bind_unicode(self):
+ s = self.socket(zmq.PUB)
+ p = s.bind_to_random_port(unicode("tcp://*"))
+
+ def test_connect_unicode(self):
+ s = self.socket(zmq.PUB)
+ s.connect(unicode("tcp://127.0.0.1:5555"))
+
+ def test_bind_to_random_port(self):
+        # Check that bind_to_random_port does not hide useful exceptions
+ ctx = self.Context()
+ c = ctx.socket(zmq.PUB)
+ # Invalid format
+ try:
+ c.bind_to_random_port('tcp:*')
+ except zmq.ZMQError as e:
+ self.assertEqual(e.errno, zmq.EINVAL)
+ # Invalid protocol
+ try:
+ c.bind_to_random_port('rand://*')
+ except zmq.ZMQError as e:
+ self.assertEqual(e.errno, zmq.EPROTONOSUPPORT)
+
+ def test_identity(self):
+ s = self.context.socket(zmq.PULL)
+ self.sockets.append(s)
+ ident = b'identity\0\0'
+ s.identity = ident
+ self.assertEqual(s.get(zmq.IDENTITY), ident)
+
+ def test_unicode_sockopts(self):
+ """test setting/getting sockopts with unicode strings"""
+ topic = "tést"
+ if str is not unicode:
+ topic = topic.decode('utf8')
+ p,s = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ self.assertEqual(s.send_unicode, s.send_unicode)
+ self.assertEqual(p.recv_unicode, p.recv_unicode)
+ self.assertRaises(TypeError, s.setsockopt, zmq.SUBSCRIBE, topic)
+ self.assertRaises(TypeError, s.setsockopt, zmq.IDENTITY, topic)
+ s.setsockopt_unicode(zmq.IDENTITY, topic, 'utf16')
+ self.assertRaises(TypeError, s.setsockopt, zmq.AFFINITY, topic)
+ s.setsockopt_unicode(zmq.SUBSCRIBE, topic)
+ self.assertRaises(TypeError, s.getsockopt_unicode, zmq.AFFINITY)
+ self.assertRaisesErrno(zmq.EINVAL, s.getsockopt_unicode, zmq.SUBSCRIBE)
+
+ identb = s.getsockopt(zmq.IDENTITY)
+ identu = identb.decode('utf16')
+ identu2 = s.getsockopt_unicode(zmq.IDENTITY, 'utf16')
+ self.assertEqual(identu, identu2)
+ time.sleep(0.1) # wait for connection/subscription
+ p.send_unicode(topic,zmq.SNDMORE)
+ p.send_unicode(topic*2, encoding='latin-1')
+ self.assertEqual(topic, s.recv_unicode())
+ self.assertEqual(topic*2, s.recv_unicode(encoding='latin-1'))
+
+ def test_int_sockopts(self):
+ "test integer sockopts"
+ v = zmq.zmq_version_info()
+ if v < (3,0):
+ default_hwm = 0
+ else:
+ default_hwm = 1000
+ p,s = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ p.setsockopt(zmq.LINGER, 0)
+ self.assertEqual(p.getsockopt(zmq.LINGER), 0)
+ p.setsockopt(zmq.LINGER, -1)
+ self.assertEqual(p.getsockopt(zmq.LINGER), -1)
+ self.assertEqual(p.hwm, default_hwm)
+ p.hwm = 11
+ self.assertEqual(p.hwm, 11)
+ # p.setsockopt(zmq.EVENTS, zmq.POLLIN)
+ self.assertEqual(p.getsockopt(zmq.EVENTS), zmq.POLLOUT)
+ self.assertRaisesErrno(zmq.EINVAL, p.setsockopt,zmq.EVENTS, 2**7-1)
+ self.assertEqual(p.getsockopt(zmq.TYPE), p.socket_type)
+ self.assertEqual(p.getsockopt(zmq.TYPE), zmq.PUB)
+ self.assertEqual(s.getsockopt(zmq.TYPE), s.socket_type)
+ self.assertEqual(s.getsockopt(zmq.TYPE), zmq.SUB)
+
+ # check for overflow / wrong type:
+ errors = []
+ backref = {}
+ constants = zmq.constants
+ for name in constants.__all__:
+ value = getattr(constants, name)
+ if isinstance(value, int):
+ backref[value] = name
+ for opt in zmq.constants.int_sockopts.union(zmq.constants.int64_sockopts):
+ sopt = backref[opt]
+ if sopt.startswith((
+ 'ROUTER', 'XPUB', 'TCP', 'FAIL',
+ 'REQ_', 'CURVE_', 'PROBE_ROUTER',
+ 'IPC_FILTER', 'GSSAPI',
+ )):
+ # some sockopts are write-only
+ continue
+ try:
+ n = p.getsockopt(opt)
+ except zmq.ZMQError as e:
+ errors.append("getsockopt(zmq.%s) raised '%s'."%(sopt, e))
+ else:
+ if n > 2**31:
+ errors.append("getsockopt(zmq.%s) returned a ridiculous value."
+ " It is probably the wrong type."%sopt)
+ if errors:
+ self.fail('\n'.join([''] + errors))
+
+ def test_bad_sockopts(self):
+ """Test that appropriate errors are raised on bad socket options"""
+ s = self.context.socket(zmq.PUB)
+ self.sockets.append(s)
+ s.setsockopt(zmq.LINGER, 0)
+ # unrecognized int sockopts pass through to libzmq, and should raise EINVAL
+ self.assertRaisesErrno(zmq.EINVAL, s.setsockopt, 9999, 5)
+ self.assertRaisesErrno(zmq.EINVAL, s.getsockopt, 9999)
+ # but only int sockopts are allowed through this way, otherwise raise a TypeError
+ self.assertRaises(TypeError, s.setsockopt, 9999, b"5")
+ # some sockopts are valid in general, but not on every socket:
+ self.assertRaisesErrno(zmq.EINVAL, s.setsockopt, zmq.SUBSCRIBE, b'hi')
+
+ def test_sockopt_roundtrip(self):
+ "test set/getsockopt roundtrip."
+ p = self.context.socket(zmq.PUB)
+ self.sockets.append(p)
+ p.setsockopt(zmq.LINGER, 11)
+ self.assertEqual(p.getsockopt(zmq.LINGER), 11)
+
+ def test_send_unicode(self):
+ "test sending unicode objects"
+ a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ self.sockets.extend([a,b])
+ u = "çπ§"
+ if str is not unicode:
+ u = u.decode('utf8')
+ self.assertRaises(TypeError, a.send, u,copy=False)
+ self.assertRaises(TypeError, a.send, u,copy=True)
+ a.send_unicode(u)
+ s = b.recv()
+ self.assertEqual(s,u.encode('utf8'))
+ self.assertEqual(s.decode('utf8'),u)
+ a.send_unicode(u,encoding='utf16')
+ s = b.recv_unicode(encoding='utf16')
+ self.assertEqual(s,u)
+
+ @skip_pypy
+ def test_tracker(self):
+ "test the MessageTracker object for tracking when zmq is done with a buffer"
+ addr = 'tcp://127.0.0.1'
+ a = self.context.socket(zmq.PUB)
+ port = a.bind_to_random_port(addr)
+ a.close()
+ iface = "%s:%i"%(addr,port)
+ a = self.context.socket(zmq.PAIR)
+ # a.setsockopt(zmq.IDENTITY, b"a")
+ b = self.context.socket(zmq.PAIR)
+ self.sockets.extend([a,b])
+ a.connect(iface)
+ time.sleep(0.1)
+ p1 = a.send(b'something', copy=False, track=True)
+ self.assertTrue(isinstance(p1, zmq.MessageTracker))
+ self.assertFalse(p1.done)
+ p2 = a.send_multipart([b'something', b'else'], copy=False, track=True)
+ self.assert_(isinstance(p2, zmq.MessageTracker))
+ self.assertEqual(p2.done, False)
+ self.assertEqual(p1.done, False)
+
+ b.bind(iface)
+ msg = b.recv_multipart()
+ for i in range(10):
+ if p1.done:
+ break
+ time.sleep(0.1)
+ self.assertEqual(p1.done, True)
+ self.assertEqual(msg, [b'something'])
+ msg = b.recv_multipart()
+ for i in range(10):
+ if p2.done:
+ break
+ time.sleep(0.1)
+ self.assertEqual(p2.done, True)
+ self.assertEqual(msg, [b'something', b'else'])
+ m = zmq.Frame(b"again", track=True)
+ self.assertEqual(m.tracker.done, False)
+ p1 = a.send(m, copy=False)
+ p2 = a.send(m, copy=False)
+ self.assertEqual(m.tracker.done, False)
+ self.assertEqual(p1.done, False)
+ self.assertEqual(p2.done, False)
+ msg = b.recv_multipart()
+ self.assertEqual(m.tracker.done, False)
+ self.assertEqual(msg, [b'again'])
+ msg = b.recv_multipart()
+ self.assertEqual(m.tracker.done, False)
+ self.assertEqual(msg, [b'again'])
+ self.assertEqual(p1.done, False)
+ self.assertEqual(p2.done, False)
+ pm = m.tracker
+ del m
+ for i in range(10):
+ if p1.done:
+ break
+ time.sleep(0.1)
+ self.assertEqual(p1.done, True)
+ self.assertEqual(p2.done, True)
+ m = zmq.Frame(b'something', track=False)
+ self.assertRaises(ValueError, a.send, m, copy=False, track=True)
+
+
+ def test_close(self):
+ ctx = self.Context()
+ s = ctx.socket(zmq.PUB)
+ s.close()
+ self.assertRaisesErrno(zmq.ENOTSOCK, s.bind, b'')
+ self.assertRaisesErrno(zmq.ENOTSOCK, s.connect, b'')
+ self.assertRaisesErrno(zmq.ENOTSOCK, s.setsockopt, zmq.SUBSCRIBE, b'')
+ self.assertRaisesErrno(zmq.ENOTSOCK, s.send, b'asdf')
+ self.assertRaisesErrno(zmq.ENOTSOCK, s.recv)
+ del ctx
+
+ def test_attr(self):
+ """set setting/getting sockopts as attributes"""
+ s = self.context.socket(zmq.DEALER)
+ self.sockets.append(s)
+ linger = 10
+ s.linger = linger
+ self.assertEqual(linger, s.linger)
+ self.assertEqual(linger, s.getsockopt(zmq.LINGER))
+ self.assertEqual(s.fd, s.getsockopt(zmq.FD))
+
+ def test_bad_attr(self):
+ s = self.context.socket(zmq.DEALER)
+ self.sockets.append(s)
+ try:
+ s.apple='foo'
+ except AttributeError:
+ pass
+ else:
+ self.fail("bad setattr should have raised AttributeError")
+ try:
+ s.apple
+ except AttributeError:
+ pass
+ else:
+ self.fail("bad getattr should have raised AttributeError")
+
+ def test_subclass(self):
+ """subclasses can assign attributes"""
+ class S(zmq.Socket):
+ a = None
+ def __init__(self, *a, **kw):
+ self.a=-1
+ super(S, self).__init__(*a, **kw)
+
+ s = S(self.context, zmq.REP)
+ self.sockets.append(s)
+ self.assertEqual(s.a, -1)
+ s.a=1
+ self.assertEqual(s.a, 1)
+ a=s.a
+ self.assertEqual(a, 1)
+
+ def test_recv_multipart(self):
+ a,b = self.create_bound_pair()
+ msg = b'hi'
+ for i in range(3):
+ a.send(msg)
+ time.sleep(0.1)
+ for i in range(3):
+ self.assertEqual(b.recv_multipart(), [msg])
+
+ def test_close_after_destroy(self):
+ """s.close() after ctx.destroy() should be fine"""
+ ctx = self.Context()
+ s = ctx.socket(zmq.REP)
+ ctx.destroy()
+ # reaper is not instantaneous
+ time.sleep(1e-2)
+ s.close()
+ self.assertTrue(s.closed)
+
+ def test_poll(self):
+ a,b = self.create_bound_pair()
+ tic = time.time()
+ evt = a.poll(50)
+ self.assertEqual(evt, 0)
+ evt = a.poll(50, zmq.POLLOUT)
+ self.assertEqual(evt, zmq.POLLOUT)
+ msg = b'hi'
+ a.send(msg)
+ evt = b.poll(50)
+ self.assertEqual(evt, zmq.POLLIN)
+ msg2 = self.recv(b)
+ evt = b.poll(50)
+ self.assertEqual(evt, 0)
+ self.assertEqual(msg2, msg)
+
+ def test_ipc_path_max_length(self):
+ """IPC_PATH_MAX_LEN is a sensible value"""
+ if zmq.IPC_PATH_MAX_LEN == 0:
+ raise SkipTest("IPC_PATH_MAX_LEN undefined")
+
+ msg = "Surprising value for IPC_PATH_MAX_LEN: %s" % zmq.IPC_PATH_MAX_LEN
+ self.assertTrue(zmq.IPC_PATH_MAX_LEN > 30, msg)
+ self.assertTrue(zmq.IPC_PATH_MAX_LEN < 1025, msg)
+
+ def test_ipc_path_max_length_msg(self):
+ if zmq.IPC_PATH_MAX_LEN == 0:
+ raise SkipTest("IPC_PATH_MAX_LEN undefined")
+
+ s = self.context.socket(zmq.PUB)
+ self.sockets.append(s)
+ try:
+ s.bind('ipc://{0}'.format('a' * (zmq.IPC_PATH_MAX_LEN + 1)))
+ except zmq.ZMQError as e:
+ self.assertTrue(str(zmq.IPC_PATH_MAX_LEN) in e.strerror)
+
+ def test_hwm(self):
+ zmq3 = zmq.zmq_version_info()[0] >= 3
+ for stype in (zmq.PUB, zmq.ROUTER, zmq.SUB, zmq.REQ, zmq.DEALER):
+ s = self.context.socket(stype)
+ s.hwm = 100
+ self.assertEqual(s.hwm, 100)
+ if zmq3:
+ try:
+ self.assertEqual(s.sndhwm, 100)
+ except AttributeError:
+ pass
+ try:
+ self.assertEqual(s.rcvhwm, 100)
+ except AttributeError:
+ pass
+ s.close()
+
+ def test_shadow(self):
+ p = self.socket(zmq.PUSH)
+ p.bind("tcp://127.0.0.1:5555")
+ p2 = zmq.Socket.shadow(p.underlying)
+ self.assertEqual(p.underlying, p2.underlying)
+ s = self.socket(zmq.PULL)
+ s2 = zmq.Socket.shadow(s.underlying)
+ self.assertNotEqual(s.underlying, p.underlying)
+ self.assertEqual(s.underlying, s2.underlying)
+ s2.connect("tcp://127.0.0.1:5555")
+ sent = b'hi'
+ p2.send(sent)
+ rcvd = self.recv(s2)
+ self.assertEqual(rcvd, sent)
+
+ def test_shadow_pyczmq(self):
+ try:
+ from pyczmq import zctx, zsocket
+ except Exception:
+ raise SkipTest("Requires pyczmq")
+
+ ctx = zctx.new()
+ ca = zsocket.new(ctx, zmq.PUSH)
+ cb = zsocket.new(ctx, zmq.PULL)
+ a = zmq.Socket.shadow(ca)
+ b = zmq.Socket.shadow(cb)
+ a.bind("inproc://a")
+ b.connect("inproc://a")
+ a.send(b'hi')
+ rcvd = self.recv(b)
+ self.assertEqual(rcvd, b'hi')
+
+
+if have_gevent:
+ import gevent
+
+ class TestSocketGreen(GreenTest, TestSocket):
+ test_bad_attr = GreenTest.skip_green
+ test_close_after_destroy = GreenTest.skip_green
+
+ def test_timeout(self):
+ a,b = self.create_bound_pair()
+ g = gevent.spawn_later(0.5, lambda: a.send(b'hi'))
+ timeout = gevent.Timeout(0.1)
+ timeout.start()
+ self.assertRaises(gevent.Timeout, b.recv)
+ g.kill()
+
+ @skip_if(not hasattr(zmq, 'RCVTIMEO'))
+ def test_warn_set_timeo(self):
+ s = self.context.socket(zmq.REQ)
+ with warnings.catch_warnings(record=True) as w:
+ s.rcvtimeo = 5
+ s.close()
+ self.assertEqual(len(w), 1)
+ self.assertEqual(w[0].category, UserWarning)
+
+
+ @skip_if(not hasattr(zmq, 'SNDTIMEO'))
+ def test_warn_get_timeo(self):
+ s = self.context.socket(zmq.REQ)
+ with warnings.catch_warnings(record=True) as w:
+ s.sndtimeo
+ s.close()
+ self.assertEqual(len(w), 1)
+ self.assertEqual(w[0].category, UserWarning)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_stopwatch.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_stopwatch.py
new file mode 100644
index 00000000..49fb79f2
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_stopwatch.py
@@ -0,0 +1,42 @@
+# -*- coding: utf8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import sys
+import time
+
+from unittest import TestCase
+
+from zmq import Stopwatch, ZMQError
+
+if sys.version_info[0] >= 3:
+ long = int
+
+class TestStopWatch(TestCase):
+
+ def test_stop_long(self):
+ """Ensure stop returns a long int."""
+ watch = Stopwatch()
+ watch.start()
+ us = watch.stop()
+ self.assertTrue(isinstance(us, long))
+
+ def test_stop_microseconds(self):
+ """Test that stop/sleep have right units."""
+ watch = Stopwatch()
+ watch.start()
+ tic = time.time()
+ watch.sleep(1)
+ us = watch.stop()
+ toc = time.time()
+ self.assertAlmostEqual(us/1e6,(toc-tic),places=0)
+
+ def test_double_stop(self):
+ """Test error raised on multiple calls to stop."""
+ watch = Stopwatch()
+ watch.start()
+ watch.stop()
+ self.assertRaises(ZMQError, watch.stop)
+ self.assertRaises(ZMQError, watch.stop)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_version.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_version.py
new file mode 100644
index 00000000..6ebebf30
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_version.py
@@ -0,0 +1,44 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from unittest import TestCase
+import zmq
+from zmq.sugar import version
+
+
+class TestVersion(TestCase):
+
+ def test_pyzmq_version(self):
+ vs = zmq.pyzmq_version()
+ vs2 = zmq.__version__
+ self.assertTrue(isinstance(vs, str))
+ if zmq.__revision__:
+            self.assertEqual(vs, '@'.join([vs2, zmq.__revision__]))
+ else:
+ self.assertEqual(vs, vs2)
+ if version.VERSION_EXTRA:
+ self.assertTrue(version.VERSION_EXTRA in vs)
+ self.assertTrue(version.VERSION_EXTRA in vs2)
+
+ def test_pyzmq_version_info(self):
+ info = zmq.pyzmq_version_info()
+ self.assertTrue(isinstance(info, tuple))
+ for n in info[:3]:
+ self.assertTrue(isinstance(n, int))
+ if version.VERSION_EXTRA:
+ self.assertEqual(len(info), 4)
+ self.assertEqual(info[-1], float('inf'))
+ else:
+ self.assertEqual(len(info), 3)
+
+ def test_zmq_version_info(self):
+ info = zmq.zmq_version_info()
+ self.assertTrue(isinstance(info, tuple))
+ for n in info[:3]:
+ self.assertTrue(isinstance(n, int))
+
+ def test_zmq_version(self):
+ v = zmq.zmq_version()
+ self.assertTrue(isinstance(v, str))
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_win32_shim.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_win32_shim.py
new file mode 100644
index 00000000..55657bda
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_win32_shim.py
@@ -0,0 +1,56 @@
+from __future__ import print_function
+
+import os
+
+from functools import wraps
+from zmq.tests import BaseZMQTestCase
+from zmq.utils.win32 import allow_interrupt
+
+
+def count_calls(f):
+ @wraps(f)
+ def _(*args, **kwds):
+ try:
+ return f(*args, **kwds)
+ finally:
+ _.__calls__ += 1
+ _.__calls__ = 0
+ return _
+
+
+class TestWindowsConsoleControlHandler(BaseZMQTestCase):
+
+ def test_handler(self):
+ @count_calls
+ def interrupt_polling():
+ print('Caught CTRL-C!')
+
+ if os.name == 'nt':
+ from ctypes import windll
+ from ctypes.wintypes import BOOL, DWORD
+
+ kernel32 = windll.LoadLibrary('kernel32')
+
+ # <http://msdn.microsoft.com/en-us/library/ms683155.aspx>
+ GenerateConsoleCtrlEvent = kernel32.GenerateConsoleCtrlEvent
+ GenerateConsoleCtrlEvent.argtypes = (DWORD, DWORD)
+ GenerateConsoleCtrlEvent.restype = BOOL
+
+ try:
+ # Simulate CTRL-C event while handler is active.
+ with allow_interrupt(interrupt_polling):
+ result = GenerateConsoleCtrlEvent(0, 0)
+ if result == 0:
+ raise WindowsError
+ except KeyboardInterrupt:
+ pass
+ else:
+ self.fail('Expecting `KeyboardInterrupt` exception!')
+
+ # Make sure our handler was called.
+ self.assertEqual(interrupt_polling.__calls__, 1)
+ else:
+ # On non-Windows systems, this utility is just a no-op!
+ with allow_interrupt(interrupt_polling):
+ pass
+ self.assertEqual(interrupt_polling.__calls__, 0)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_z85.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_z85.py
new file mode 100644
index 00000000..8a73cb4d
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_z85.py
@@ -0,0 +1,63 @@
+# -*- coding: utf8 -*-
+"""Test Z85 encoding
+
+confirm values and roundtrip with test values from the reference implementation.
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from unittest import TestCase
+from zmq.utils import z85
+
+
+class TestZ85(TestCase):
+
+ def test_client_public(self):
+ client_public = \
+ b"\xBB\x88\x47\x1D\x65\xE2\x65\x9B" \
+ b"\x30\xC5\x5A\x53\x21\xCE\xBB\x5A" \
+ b"\xAB\x2B\x70\xA3\x98\x64\x5C\x26" \
+ b"\xDC\xA2\xB2\xFC\xB4\x3F\xC5\x18"
+ encoded = z85.encode(client_public)
+
+ self.assertEqual(encoded, b"Yne@$w-vo<fVvi]a<NY6T1ed:M$fCG*[IaLV{hID")
+ decoded = z85.decode(encoded)
+ self.assertEqual(decoded, client_public)
+
+ def test_client_secret(self):
+ client_secret = \
+ b"\x7B\xB8\x64\xB4\x89\xAF\xA3\x67" \
+ b"\x1F\xBE\x69\x10\x1F\x94\xB3\x89" \
+ b"\x72\xF2\x48\x16\xDF\xB0\x1B\x51" \
+ b"\x65\x6B\x3F\xEC\x8D\xFD\x08\x88"
+ encoded = z85.encode(client_secret)
+
+ self.assertEqual(encoded, b"D:)Q[IlAW!ahhC2ac:9*A}h:p?([4%wOTJ%JR%cs")
+ decoded = z85.decode(encoded)
+ self.assertEqual(decoded, client_secret)
+
+ def test_server_public(self):
+ server_public = \
+ b"\x54\xFC\xBA\x24\xE9\x32\x49\x96" \
+ b"\x93\x16\xFB\x61\x7C\x87\x2B\xB0" \
+ b"\xC1\xD1\xFF\x14\x80\x04\x27\xC5" \
+ b"\x94\xCB\xFA\xCF\x1B\xC2\xD6\x52"
+ encoded = z85.encode(server_public)
+
+ self.assertEqual(encoded, b"rq:rM>}U?@Lns47E1%kR.o@n%FcmmsL/@{H8]yf7")
+ decoded = z85.decode(encoded)
+ self.assertEqual(decoded, server_public)
+
+ def test_server_secret(self):
+ server_secret = \
+ b"\x8E\x0B\xDD\x69\x76\x28\xB9\x1D" \
+ b"\x8F\x24\x55\x87\xEE\x95\xC5\xB0" \
+ b"\x4D\x48\x96\x3F\x79\x25\x98\x77" \
+ b"\xB4\x9C\xD9\x06\x3A\xEA\xD3\xB7"
+ encoded = z85.encode(server_secret)
+
+ self.assertEqual(encoded, b"JTKVSB%%)wK0E.X)V>+}o?pNmC{O&4W4b!Ni{Lh6")
+ decoded = z85.decode(encoded)
+ self.assertEqual(decoded, server_secret)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_zmqstream.py
new file mode 100644
index 00000000..cdb3a171
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_zmqstream.py
@@ -0,0 +1,34 @@
+# -*- coding: utf8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import sys
+import time
+
+from unittest import TestCase
+
+import zmq
+from zmq.eventloop import ioloop, zmqstream
+
+class TestZMQStream(TestCase):
+
+ def setUp(self):
+ self.context = zmq.Context()
+ self.socket = self.context.socket(zmq.REP)
+ self.loop = ioloop.IOLoop.instance()
+ self.stream = zmqstream.ZMQStream(self.socket)
+
+ def tearDown(self):
+ self.socket.close()
+ self.context.term()
+
+ def test_callable_check(self):
+ """Ensure callable check works (py3k)."""
+
+ self.stream.on_send(lambda *args: None)
+ self.stream.on_recv(lambda *args: None)
+ self.assertRaises(AssertionError, self.stream.on_recv, 1)
+ self.assertRaises(AssertionError, self.stream.on_send, 1)
+ self.assertRaises(AssertionError, self.stream.on_recv, zmq)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/__init__.py
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/buffers.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/buffers.pxd
new file mode 100644
index 00000000..998aa551
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/buffers.pxd
@@ -0,0 +1,313 @@
+"""Python version-independent methods for C/Python buffers.
+
+This file was copied and adapted from mpi4py.
+
+Authors
+-------
+* MinRK
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2010 Lisandro Dalcin
+# All rights reserved.
+# Used under BSD License: http://www.opensource.org/licenses/bsd-license.php
+#
+# Retrieval:
+# Jul 23, 2010 18:00 PST (r539)
+# http://code.google.com/p/mpi4py/source/browse/trunk/src/MPI/asbuffer.pxi
+#
+# Modifications from original:
+# Copyright (c) 2010-2012 Brian Granger, Min Ragan-Kelley
+#
+# Distributed under the terms of the New BSD License. The full license is in
+# the file COPYING.BSD, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+
+#-----------------------------------------------------------------------------
+# Python includes.
+#-----------------------------------------------------------------------------
+
+# get version-independent aliases:
+cdef extern from "pyversion_compat.h":
+ pass
+
+# Python 3 buffer interface (PEP 3118)
+cdef extern from "Python.h":
+ int PY_MAJOR_VERSION
+ int PY_MINOR_VERSION
+ ctypedef int Py_ssize_t
+ ctypedef struct PyMemoryViewObject:
+ pass
+ ctypedef struct Py_buffer:
+ void *buf
+ Py_ssize_t len
+ int readonly
+ char *format
+ int ndim
+ Py_ssize_t *shape
+ Py_ssize_t *strides
+ Py_ssize_t *suboffsets
+ Py_ssize_t itemsize
+ void *internal
+ cdef enum:
+ PyBUF_SIMPLE
+ PyBUF_WRITABLE
+ PyBUF_FORMAT
+ PyBUF_ANY_CONTIGUOUS
+ int PyObject_CheckBuffer(object)
+ int PyObject_GetBuffer(object, Py_buffer *, int) except -1
+ void PyBuffer_Release(Py_buffer *)
+
+ int PyBuffer_FillInfo(Py_buffer *view, object obj, void *buf,
+ Py_ssize_t len, int readonly, int infoflags) except -1
+ object PyMemoryView_FromBuffer(Py_buffer *info)
+
+ object PyMemoryView_FromObject(object)
+
+# Python 2 buffer interface (legacy)
+cdef extern from "Python.h":
+ ctypedef void const_void "const void"
+ Py_ssize_t Py_END_OF_BUFFER
+ int PyObject_CheckReadBuffer(object)
+ int PyObject_AsReadBuffer (object, const_void **, Py_ssize_t *) except -1
+ int PyObject_AsWriteBuffer(object, void **, Py_ssize_t *) except -1
+
+ object PyBuffer_FromMemory(void *ptr, Py_ssize_t s)
+ object PyBuffer_FromReadWriteMemory(void *ptr, Py_ssize_t s)
+
+ object PyBuffer_FromObject(object, Py_ssize_t offset, Py_ssize_t size)
+ object PyBuffer_FromReadWriteObject(object, Py_ssize_t offset, Py_ssize_t size)
+
+
+#-----------------------------------------------------------------------------
+# asbuffer: C buffer from python object
+#-----------------------------------------------------------------------------
+
+
+cdef inline int memoryview_available():
+ return PY_MAJOR_VERSION >= 3 or (PY_MAJOR_VERSION >=2 and PY_MINOR_VERSION >= 7)
+
+cdef inline int oldstyle_available():
+ return PY_MAJOR_VERSION < 3
+
+
+cdef inline int check_buffer(object ob):
+ """Version independent check for whether an object is a buffer.
+
+ Parameters
+ ----------
+ object : object
+ Any Python object
+
+ Returns
+ -------
+ int : 0 if no buffer interface, 3 if newstyle buffer interface, 2 if oldstyle.
+ """
+ if PyObject_CheckBuffer(ob):
+ return 3
+ if oldstyle_available():
+ return PyObject_CheckReadBuffer(ob) and 2
+ return 0
+
+
+cdef inline object asbuffer(object ob, int writable, int format,
+ void **base, Py_ssize_t *size,
+ Py_ssize_t *itemsize):
+ """Turn an object into a C buffer in a Python version-independent way.
+
+ Parameters
+ ----------
+ ob : object
+ The object to be turned into a buffer.
+ Must provide a Python Buffer interface
+ writable : int
+ Whether the resulting buffer should be allowed to write
+ to the object.
+ format : int
+ The format of the buffer. See Python buffer docs.
+ base : void **
+ The pointer that will be used to store the resulting C buffer.
+ size : Py_ssize_t *
+ The size of the buffer(s).
+ itemsize : Py_ssize_t *
+ The size of an item, if the buffer is non-contiguous.
+
+ Returns
+ -------
+ An object describing the buffer format. Generally a str, such as 'B'.
+ """
+
+ cdef void *bptr = NULL
+ cdef Py_ssize_t blen = 0, bitemlen = 0
+ cdef Py_buffer view
+ cdef int flags = PyBUF_SIMPLE
+ cdef int mode = 0
+
+ bfmt = None
+
+ mode = check_buffer(ob)
+ if mode == 0:
+ raise TypeError("%r does not provide a buffer interface."%ob)
+
+ if mode == 3:
+ flags = PyBUF_ANY_CONTIGUOUS
+ if writable:
+ flags |= PyBUF_WRITABLE
+ if format:
+ flags |= PyBUF_FORMAT
+ PyObject_GetBuffer(ob, &view, flags)
+ bptr = view.buf
+ blen = view.len
+ if format:
+ if view.format != NULL:
+ bfmt = view.format
+ bitemlen = view.itemsize
+ PyBuffer_Release(&view)
+ else: # oldstyle
+ if writable:
+ PyObject_AsWriteBuffer(ob, &bptr, &blen)
+ else:
+ PyObject_AsReadBuffer(ob, <const_void **>&bptr, &blen)
+ if format:
+ try: # numpy.ndarray
+ dtype = ob.dtype
+ bfmt = dtype.char
+ bitemlen = dtype.itemsize
+ except AttributeError:
+ try: # array.array
+ bfmt = ob.typecode
+ bitemlen = ob.itemsize
+ except AttributeError:
+ if isinstance(ob, bytes):
+ bfmt = b"B"
+ bitemlen = 1
+ else:
+ # nothing found
+ bfmt = None
+ bitemlen = 0
+ if base: base[0] = <void *>bptr
+ if size: size[0] = <Py_ssize_t>blen
+ if itemsize: itemsize[0] = <Py_ssize_t>bitemlen
+
+ if PY_MAJOR_VERSION >= 3 and bfmt is not None:
+ return bfmt.decode('ascii')
+ return bfmt
+
+
+cdef inline object asbuffer_r(object ob, void **base, Py_ssize_t *size):
+ """Wrapper for standard calls to asbuffer with a readonly buffer."""
+ asbuffer(ob, 0, 0, base, size, NULL)
+ return ob
+
+
+cdef inline object asbuffer_w(object ob, void **base, Py_ssize_t *size):
+ """Wrapper for standard calls to asbuffer with a writable buffer."""
+ asbuffer(ob, 1, 0, base, size, NULL)
+ return ob
+
+#------------------------------------------------------------------------------
+# frombuffer: python buffer/view from C buffer
+#------------------------------------------------------------------------------
+
+
+cdef inline object frombuffer_3(void *ptr, Py_ssize_t s, int readonly):
+ """Python 3 version of frombuffer.
+
+ This is the Python 3 model, but will work on Python >= 2.6. Currently,
+ we use it only on >= 3.0.
+ """
+ cdef Py_buffer pybuf
+ cdef Py_ssize_t *shape = [s]
+ cdef str astr=""
+ PyBuffer_FillInfo(&pybuf, astr, ptr, s, readonly, PyBUF_SIMPLE)
+ pybuf.format = "B"
+ pybuf.shape = shape
+ return PyMemoryView_FromBuffer(&pybuf)
+
+
+cdef inline object frombuffer_2(void *ptr, Py_ssize_t s, int readonly):
+ """Python 2 version of frombuffer.
+
+ This must be used for Python <= 2.6, but we use it for all Python < 3.
+ """
+
+ if oldstyle_available():
+ if readonly:
+ return PyBuffer_FromMemory(ptr, s)
+ else:
+ return PyBuffer_FromReadWriteMemory(ptr, s)
+ else:
+ raise NotImplementedError("Old style buffers not available.")
+
+
+cdef inline object frombuffer(void *ptr, Py_ssize_t s, int readonly):
+ """Create a Python Buffer/View of a C array.
+
+ Parameters
+ ----------
+ ptr : void *
+ Pointer to the array to be copied.
+ s : size_t
+ Length of the buffer.
+ readonly : int
+ whether the resulting object should be allowed to write to the buffer.
+
+ Returns
+ -------
+ Python Buffer/View of the C buffer.
+ """
+ # oldstyle first priority for now
+ if oldstyle_available():
+ return frombuffer_2(ptr, s, readonly)
+ else:
+ return frombuffer_3(ptr, s, readonly)
+
+
+cdef inline object frombuffer_r(void *ptr, Py_ssize_t s):
+ """Wrapper for readonly view frombuffer."""
+ return frombuffer(ptr, s, 1)
+
+
+cdef inline object frombuffer_w(void *ptr, Py_ssize_t s):
+ """Wrapper for writable view frombuffer."""
+ return frombuffer(ptr, s, 0)
+
+#------------------------------------------------------------------------------
+# viewfromobject: python buffer/view from python object, refcounts intact
+# frombuffer(asbuffer(obj)) would lose track of refs
+#------------------------------------------------------------------------------
+
+cdef inline object viewfromobject(object obj, int readonly):
+ """Construct a Python Buffer/View object from another Python object.
+
+    This works in a Python version-independent manner.
+
+ Parameters
+ ----------
+ obj : object
+ The input object to be cast as a buffer
+ readonly : int
+ Whether the result should be prevented from overwriting the original.
+
+ Returns
+ -------
+ Buffer/View of the original object.
+ """
+ if not memoryview_available():
+ if readonly:
+ return PyBuffer_FromObject(obj, 0, Py_END_OF_BUFFER)
+ else:
+ return PyBuffer_FromReadWriteObject(obj, 0, Py_END_OF_BUFFER)
+ else:
+ return PyMemoryView_FromObject(obj)
+
+
+cdef inline object viewfromobject_r(object obj):
+ """Wrapper for readonly viewfromobject."""
+ return viewfromobject(obj, 1)
+
+
+cdef inline object viewfromobject_w(object obj):
+ """Wrapper for writable viewfromobject."""
+ return viewfromobject(obj, 0)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/compiler.json b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/compiler.json
new file mode 100644
index 00000000..de3c54c1
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/compiler.json
@@ -0,0 +1,19 @@
+{
+ "include_dirs": [
+ "/tmp/zmq/zmq-bin/include",
+ "zmq/utils",
+ "zmq/backend/cython",
+ "zmq/devices"
+ ],
+ "define_macros": [],
+ "library_dirs": [
+ "/tmp/zmq/zmq-bin/lib"
+ ],
+ "runtime_library_dirs": [
+ "/tmp/zmq/zmq-bin/lib"
+ ],
+ "libraries": [
+ "zmq"
+ ],
+ "extra_link_args": []
+} \ No newline at end of file
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/config.json b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/config.json
new file mode 100644
index 00000000..f332fd20
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/config.json
@@ -0,0 +1,13 @@
+{
+ "skip_check_zmq": false,
+ "zmq_prefix": "/tmp/zmq/zmq-bin",
+ "libzmq_extension": false,
+ "bdist_egg": {},
+ "have_sys_un_h": false,
+ "no_libzmq_extension": true,
+ "build_ext": {
+ "libraries": "python2.7 util dl",
+ "include_dirs": "/tmp/zmq/zmq-bin/include:/sw/packages/python/2.7.6/include",
+ "library_dirs": "/tmp/zmq/zmq-bin/lib:/sw/packages/python/2.7.6/lib"
+ }
+} \ No newline at end of file
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/constant_names.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/constant_names.py
new file mode 100644
index 00000000..47da9dc2
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/constant_names.py
@@ -0,0 +1,365 @@
+"""0MQ Constant names"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+# dictionaries of constants new or removed in particular versions
+
+new_in = {
+ (2,2,0) : [
+ 'RCVTIMEO',
+ 'SNDTIMEO',
+ ],
+ (3,2,2) : [
+ # errnos
+ 'EMSGSIZE',
+ 'EAFNOSUPPORT',
+ 'ENETUNREACH',
+ 'ECONNABORTED',
+ 'ECONNRESET',
+ 'ENOTCONN',
+ 'ETIMEDOUT',
+ 'EHOSTUNREACH',
+ 'ENETRESET',
+
+ # ctx opts
+ 'IO_THREADS',
+ 'MAX_SOCKETS',
+ 'IO_THREADS_DFLT',
+ 'MAX_SOCKETS_DFLT',
+
+ # socket opts
+ 'ROUTER_BEHAVIOR',
+ 'ROUTER_MANDATORY',
+ 'FAIL_UNROUTABLE',
+ 'TCP_KEEPALIVE',
+ 'TCP_KEEPALIVE_CNT',
+ 'TCP_KEEPALIVE_IDLE',
+ 'TCP_KEEPALIVE_INTVL',
+ 'DELAY_ATTACH_ON_CONNECT',
+ 'XPUB_VERBOSE',
+
+ # msg opts
+ 'MORE',
+
+ 'EVENT_CONNECTED',
+ 'EVENT_CONNECT_DELAYED',
+ 'EVENT_CONNECT_RETRIED',
+ 'EVENT_LISTENING',
+ 'EVENT_BIND_FAILED',
+ 'EVENT_ACCEPTED',
+ 'EVENT_ACCEPT_FAILED',
+ 'EVENT_CLOSED',
+ 'EVENT_CLOSE_FAILED',
+ 'EVENT_DISCONNECTED',
+ 'EVENT_ALL',
+ ],
+ (4,0,0) : [
+ # socket types
+ 'STREAM',
+
+ # socket opts
+ 'IMMEDIATE',
+ 'ROUTER_RAW',
+ 'IPV6',
+ 'MECHANISM',
+ 'PLAIN_SERVER',
+ 'PLAIN_USERNAME',
+ 'PLAIN_PASSWORD',
+ 'CURVE_SERVER',
+ 'CURVE_PUBLICKEY',
+ 'CURVE_SECRETKEY',
+ 'CURVE_SERVERKEY',
+ 'PROBE_ROUTER',
+ 'REQ_RELAXED',
+ 'REQ_CORRELATE',
+ 'CONFLATE',
+ 'ZAP_DOMAIN',
+
+ # security
+ 'NULL',
+ 'PLAIN',
+ 'CURVE',
+
+ # events
+ 'EVENT_MONITOR_STOPPED',
+ ],
+ (4,1,0) : [
+ # ctx opts
+ 'SOCKET_LIMIT',
+ 'THREAD_PRIORITY',
+ 'THREAD_PRIORITY_DFLT',
+ 'THREAD_SCHED_POLICY',
+ 'THREAD_SCHED_POLICY_DFLT',
+
+ # socket opts
+ 'ROUTER_HANDOVER',
+ 'TOS',
+ 'IPC_FILTER_PID',
+ 'IPC_FILTER_UID',
+ 'IPC_FILTER_GID',
+ 'CONNECT_RID',
+ 'GSSAPI_SERVER',
+ 'GSSAPI_PRINCIPAL',
+ 'GSSAPI_SERVICE_PRINCIPAL',
+ 'GSSAPI_PLAINTEXT',
+ 'HANDSHAKE_IVL',
+ 'IDENTITY_FD',
+ 'XPUB_NODROP',
+ 'SOCKS_PROXY',
+
+ # msg opts
+ 'SRCFD',
+ 'SHARED',
+
+ # security
+ 'GSSAPI',
+
+ ],
+}
+
+
+removed_in = {
+ (3,2,2) : [
+ 'UPSTREAM',
+ 'DOWNSTREAM',
+
+ 'HWM',
+ 'SWAP',
+ 'MCAST_LOOP',
+ 'RECOVERY_IVL_MSEC',
+ ]
+}
+
+# collections of zmq constant names based on their role
+# base names have no specific use
+# opt names are validated in get/set methods of various objects
+
+base_names = [
+ # base
+ 'VERSION',
+ 'VERSION_MAJOR',
+ 'VERSION_MINOR',
+ 'VERSION_PATCH',
+ 'NOBLOCK',
+ 'DONTWAIT',
+
+ 'POLLIN',
+ 'POLLOUT',
+ 'POLLERR',
+
+ 'SNDMORE',
+
+ 'STREAMER',
+ 'FORWARDER',
+ 'QUEUE',
+
+ 'IO_THREADS_DFLT',
+ 'MAX_SOCKETS_DFLT',
+ 'POLLITEMS_DFLT',
+ 'THREAD_PRIORITY_DFLT',
+ 'THREAD_SCHED_POLICY_DFLT',
+
+ # socktypes
+ 'PAIR',
+ 'PUB',
+ 'SUB',
+ 'REQ',
+ 'REP',
+ 'DEALER',
+ 'ROUTER',
+ 'XREQ',
+ 'XREP',
+ 'PULL',
+ 'PUSH',
+ 'XPUB',
+ 'XSUB',
+ 'UPSTREAM',
+ 'DOWNSTREAM',
+ 'STREAM',
+
+ # events
+ 'EVENT_CONNECTED',
+ 'EVENT_CONNECT_DELAYED',
+ 'EVENT_CONNECT_RETRIED',
+ 'EVENT_LISTENING',
+ 'EVENT_BIND_FAILED',
+ 'EVENT_ACCEPTED',
+ 'EVENT_ACCEPT_FAILED',
+ 'EVENT_CLOSED',
+ 'EVENT_CLOSE_FAILED',
+ 'EVENT_DISCONNECTED',
+ 'EVENT_ALL',
+ 'EVENT_MONITOR_STOPPED',
+
+ # security
+ 'NULL',
+ 'PLAIN',
+ 'CURVE',
+ 'GSSAPI',
+
+ ## ERRNO
+ # Often used (these are also in errno.)
+ 'EAGAIN',
+ 'EINVAL',
+ 'EFAULT',
+ 'ENOMEM',
+ 'ENODEV',
+ 'EMSGSIZE',
+ 'EAFNOSUPPORT',
+ 'ENETUNREACH',
+ 'ECONNABORTED',
+ 'ECONNRESET',
+ 'ENOTCONN',
+ 'ETIMEDOUT',
+ 'EHOSTUNREACH',
+ 'ENETRESET',
+
+ # For Windows compatibility
+ 'HAUSNUMERO',
+ 'ENOTSUP',
+ 'EPROTONOSUPPORT',
+ 'ENOBUFS',
+ 'ENETDOWN',
+ 'EADDRINUSE',
+ 'EADDRNOTAVAIL',
+ 'ECONNREFUSED',
+ 'EINPROGRESS',
+ 'ENOTSOCK',
+
+ # 0MQ Native
+ 'EFSM',
+ 'ENOCOMPATPROTO',
+ 'ETERM',
+ 'EMTHREAD',
+]
+
+int64_sockopt_names = [
+ 'AFFINITY',
+ 'MAXMSGSIZE',
+
+ # sockopts removed in 3.0.0
+ 'HWM',
+ 'SWAP',
+ 'MCAST_LOOP',
+ 'RECOVERY_IVL_MSEC',
+]
+
+bytes_sockopt_names = [
+ 'IDENTITY',
+ 'SUBSCRIBE',
+ 'UNSUBSCRIBE',
+ 'LAST_ENDPOINT',
+ 'TCP_ACCEPT_FILTER',
+
+ 'PLAIN_USERNAME',
+ 'PLAIN_PASSWORD',
+
+ 'CURVE_PUBLICKEY',
+ 'CURVE_SECRETKEY',
+ 'CURVE_SERVERKEY',
+ 'ZAP_DOMAIN',
+ 'CONNECT_RID',
+ 'GSSAPI_PRINCIPAL',
+ 'GSSAPI_SERVICE_PRINCIPAL',
+ 'SOCKS_PROXY',
+]
+
+fd_sockopt_names = [
+ 'FD',
+ 'IDENTITY_FD',
+]
+
+int_sockopt_names = [
+ # sockopts
+ 'RECONNECT_IVL_MAX',
+
+ # sockopts new in 2.2.0
+ 'SNDTIMEO',
+ 'RCVTIMEO',
+
+ # new in 3.x
+ 'SNDHWM',
+ 'RCVHWM',
+ 'MULTICAST_HOPS',
+ 'IPV4ONLY',
+
+ 'ROUTER_BEHAVIOR',
+ 'TCP_KEEPALIVE',
+ 'TCP_KEEPALIVE_CNT',
+ 'TCP_KEEPALIVE_IDLE',
+ 'TCP_KEEPALIVE_INTVL',
+ 'DELAY_ATTACH_ON_CONNECT',
+ 'XPUB_VERBOSE',
+
+ 'EVENTS',
+ 'TYPE',
+ 'LINGER',
+ 'RECONNECT_IVL',
+ 'BACKLOG',
+
+ 'ROUTER_MANDATORY',
+ 'FAIL_UNROUTABLE',
+
+ 'ROUTER_RAW',
+ 'IMMEDIATE',
+ 'IPV6',
+ 'MECHANISM',
+ 'PLAIN_SERVER',
+ 'CURVE_SERVER',
+ 'PROBE_ROUTER',
+ 'REQ_RELAXED',
+ 'REQ_CORRELATE',
+ 'CONFLATE',
+ 'ROUTER_HANDOVER',
+ 'TOS',
+ 'IPC_FILTER_PID',
+ 'IPC_FILTER_UID',
+ 'IPC_FILTER_GID',
+ 'GSSAPI_SERVER',
+ 'GSSAPI_PLAINTEXT',
+ 'HANDSHAKE_IVL',
+ 'XPUB_NODROP',
+]
+
+switched_sockopt_names = [
+ 'RATE',
+ 'RECOVERY_IVL',
+ 'SNDBUF',
+ 'RCVBUF',
+ 'RCVMORE',
+]
+
+ctx_opt_names = [
+ 'IO_THREADS',
+ 'MAX_SOCKETS',
+ 'SOCKET_LIMIT',
+ 'THREAD_PRIORITY',
+ 'THREAD_SCHED_POLICY',
+]
+
+msg_opt_names = [
+ 'MORE',
+ 'SRCFD',
+ 'SHARED',
+]
+
+from itertools import chain
+
+all_names = list(chain(
+ base_names,
+ ctx_opt_names,
+ bytes_sockopt_names,
+ fd_sockopt_names,
+ int_sockopt_names,
+ int64_sockopt_names,
+ switched_sockopt_names,
+ msg_opt_names,
+))
+
+del chain
+
+def no_prefix(name):
+ """does the given constant have a ZMQ_ prefix?"""
+ return name.startswith('E') and not name.startswith('EVENT')
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/garbage.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/garbage.py
new file mode 100644
index 00000000..80a8725a
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/garbage.py
@@ -0,0 +1,180 @@
+"""Garbage collection thread for representing zmq refcount of Python objects
+used in zero-copy sends.
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import atexit
+import struct
+
+from os import getpid
+from collections import namedtuple
+from threading import Thread, Event, Lock
+import warnings
+
+import zmq
+
+
+gcref = namedtuple('gcref', ['obj', 'event'])
+
+class GarbageCollectorThread(Thread):
+ """Thread in which garbage collection actually happens."""
+ def __init__(self, gc):
+ super(GarbageCollectorThread, self).__init__()
+ self.gc = gc
+ self.daemon = True
+ self.pid = getpid()
+ self.ready = Event()
+
+ def run(self):
+ # detect fork at beginning of the thread
+ if getpid is None or getpid() != self.pid:
+ self.ready.set()
+ return
+ try:
+ s = self.gc.context.socket(zmq.PULL)
+ s.linger = 0
+ s.bind(self.gc.url)
+ finally:
+ self.ready.set()
+
+ while True:
+ # detect fork
+ if getpid is None or getpid() != self.pid:
+ return
+ msg = s.recv()
+ if msg == b'DIE':
+ break
+ fmt = 'L' if len(msg) == 4 else 'Q'
+ key = struct.unpack(fmt, msg)[0]
+ tup = self.gc.refs.pop(key, None)
+ if tup and tup.event:
+ tup.event.set()
+ del tup
+ s.close()
+
+
+class GarbageCollector(object):
+ """PyZMQ Garbage Collector
+
+ Used for representing the reference held by libzmq during zero-copy sends.
+ This object holds a dictionary, keyed by Python id,
+ of the Python objects whose memory are currently in use by zeromq.
+
+ When zeromq is done with the memory, it sends a message on an inproc PUSH socket
+ containing the packed size_t (32 or 64-bit unsigned int),
+ which is the key in the dict.
+ When the PULL socket in the gc thread receives that message,
+ the reference is popped from the dict,
+ and any tracker events that should be signaled fire.
+ """
+
+ refs = None
+ _context = None
+ _lock = None
+ url = "inproc://pyzmq.gc.01"
+
+ def __init__(self, context=None):
+ super(GarbageCollector, self).__init__()
+ self.refs = {}
+ self.pid = None
+ self.thread = None
+ self._context = context
+ self._lock = Lock()
+ self._stay_down = False
+ atexit.register(self._atexit)
+
+ @property
+ def context(self):
+ if self._context is None:
+ self._context = zmq.Context()
+ return self._context
+
+ @context.setter
+ def context(self, ctx):
+ if self.is_alive():
+ if self.refs:
+ warnings.warn("Replacing gc context while gc is running", RuntimeWarning)
+ self.stop()
+ self._context = ctx
+
+ def _atexit(self):
+ """atexit callback
+
+ sets _stay_down flag so that gc doesn't try to start up again in other atexit handlers
+ """
+ self._stay_down = True
+ self.stop()
+
+ def stop(self):
+ """stop the garbage-collection thread"""
+ if not self.is_alive():
+ return
+ self._stop()
+
+ def _stop(self):
+ push = self.context.socket(zmq.PUSH)
+ push.connect(self.url)
+ push.send(b'DIE')
+ push.close()
+ self.thread.join()
+ self.context.term()
+ self.refs.clear()
+ self.context = None
+
+ def start(self):
+ """Start a new garbage collection thread.
+
+ Creates a new zmq Context used for garbage collection.
+ Under most circumstances, this will only be called once per process.
+ """
+ if self.thread is not None and self.pid != getpid():
+ # It's re-starting, must free earlier thread's context
+ # since a fork probably broke it
+ self._stop()
+ self.pid = getpid()
+ self.refs = {}
+ self.thread = GarbageCollectorThread(self)
+ self.thread.start()
+ self.thread.ready.wait()
+
+ def is_alive(self):
+ """Is the garbage collection thread currently running?
+
+ Includes checks for process shutdown or fork.
+ """
+ if (getpid is None or
+ getpid() != self.pid or
+ self.thread is None or
+ not self.thread.is_alive()
+ ):
+ return False
+ return True
+
+ def store(self, obj, event=None):
+ """store an object and (optionally) event for zero-copy"""
+ if not self.is_alive():
+ if self._stay_down:
+ return 0
+ # safely start the gc thread
+ # use lock and double check,
+ # so we don't start multiple threads
+ with self._lock:
+ if not self.is_alive():
+ self.start()
+ tup = gcref(obj, event)
+ theid = id(tup)
+ self.refs[theid] = tup
+ return theid
+
+ def __del__(self):
+ if not self.is_alive():
+ return
+ try:
+ self.stop()
+ except Exception as e:
+ raise (e)
+
+gc = GarbageCollector()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/getpid_compat.h b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/getpid_compat.h
new file mode 100644
index 00000000..47ce90fa
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/getpid_compat.h
@@ -0,0 +1,6 @@
+#ifdef _WIN32
+ #include <process.h>
+ #define getpid _getpid
+#else
+ #include <unistd.h>
+#endif
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/interop.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/interop.py
new file mode 100644
index 00000000..26c01969
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/interop.py
@@ -0,0 +1,33 @@
+"""Utils for interoperability with other libraries.
+
+Just CFFI pointer casting for now.
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+try:
+ long
+except NameError:
+ long = int # Python 3
+
+
+def cast_int_addr(n):
+ """Cast an address to a Python int
+
+ This could be a Python integer or a CFFI pointer
+ """
+ if isinstance(n, (int, long)):
+ return n
+ try:
+ import cffi
+ except ImportError:
+ pass
+ else:
+ # from pyzmq, this is an FFI void *
+ ffi = cffi.FFI()
+ if isinstance(n, ffi.CData):
+ return int(ffi.cast("size_t", n))
+
+ raise ValueError("Cannot cast %r to int" % n)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/ipcmaxlen.h b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/ipcmaxlen.h
new file mode 100644
index 00000000..7218db78
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/ipcmaxlen.h
@@ -0,0 +1,21 @@
+/*
+
+Platform-independent detection of IPC path max length
+
+Copyright (c) 2012 Godefroid Chapelle
+
+Distributed under the terms of the New BSD License. The full license is in
+the file COPYING.BSD, distributed as part of this software.
+ */
+
+#if defined(HAVE_SYS_UN_H)
+#include "sys/un.h"
+int get_ipc_path_max_len(void) {
+ struct sockaddr_un *dummy;
+ return sizeof(dummy->sun_path) - 1;
+}
+#else
+int get_ipc_path_max_len(void) {
+ return 0;
+}
+#endif
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/jsonapi.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/jsonapi.py
new file mode 100644
index 00000000..865ca6d5
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/jsonapi.py
@@ -0,0 +1,59 @@
+"""Priority based json library imports.
+
+Always serializes to bytes instead of unicode for zeromq compatibility
+on Python 2 and 3.
+
+Use ``jsonapi.loads()`` and ``jsonapi.dumps()`` for guaranteed symmetry.
+
+Priority: ``simplejson`` > ``jsonlib2`` > stdlib ``json``
+
+``jsonapi.loads/dumps`` provide kwarg-compatibility with stdlib json.
+
+``jsonapi.jsonmod`` will be the module of the actual underlying implementation.
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from zmq.utils.strtypes import bytes, unicode
+
+jsonmod = None
+
+priority = ['simplejson', 'jsonlib2', 'json']
+for mod in priority:
+ try:
+ jsonmod = __import__(mod)
+ except ImportError:
+ pass
+ else:
+ break
+
+def dumps(o, **kwargs):
+ """Serialize object to JSON bytes (utf-8).
+
+ See jsonapi.jsonmod.dumps for details on kwargs.
+ """
+
+ if 'separators' not in kwargs:
+ kwargs['separators'] = (',', ':')
+
+ s = jsonmod.dumps(o, **kwargs)
+
+ if isinstance(s, unicode):
+ s = s.encode('utf8')
+
+ return s
+
+def loads(s, **kwargs):
+ """Load object from JSON bytes (utf-8).
+
+ See jsonapi.jsonmod.loads for details on kwargs.
+ """
+
+ if str is unicode and isinstance(s, bytes):
+ s = s.decode('utf8')
+
+ return jsonmod.loads(s, **kwargs)
+
+__all__ = ['jsonmod', 'dumps', 'loads']
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/monitor.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/monitor.py
new file mode 100644
index 00000000..734d54b1
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/monitor.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+"""Module holding utility and convenience functions for zmq event monitoring."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import struct
+import zmq
+from zmq.error import _check_version
+
+def parse_monitor_message(msg):
+ """decode zmq_monitor event messages.
+
+ Parameters
+ ----------
+ msg : list(bytes)
+ zmq multipart message that has arrived on a monitor PAIR socket.
+
+ First frame is::
+
+ 16 bit event id
+ 32 bit event value
+ no padding
+
+ Second frame is the endpoint as a bytestring
+
+ Returns
+ -------
+ event : dict
+ event description as dict with the keys `event`, `value`, and `endpoint`.
+ """
+
+ if len(msg) != 2 or len(msg[0]) != 6:
+ raise RuntimeError("Invalid event message format: %s" % msg)
+ event = {}
+ event['event'], event['value'] = struct.unpack("=hi", msg[0])
+ event['endpoint'] = msg[1]
+ return event
+
+def recv_monitor_message(socket, flags=0):
+ """Receive and decode the given raw message from the monitoring socket and return a dict.
+
+ Requires libzmq ≥ 4.0
+
+ The returned dict will have the following entries:
+ event : int, the event id as described in libzmq.zmq_socket_monitor
+ value : int, the event value associated with the event, see libzmq.zmq_socket_monitor
+ endpoint : string, the affected endpoint
+
+ Parameters
+ ----------
+ socket : zmq PAIR socket
+ The PAIR socket (created by other.get_monitor_socket()) on which to recv the message
+ flags : bitfield (int)
+ standard zmq recv flags
+
+ Returns
+ -------
+ event : dict
+ event description as dict with the keys `event`, `value`, and `endpoint`.
+ """
+ _check_version((4,0), 'libzmq event API')
+ # will always return a list
+ msg = socket.recv_multipart(flags)
+ # 4.0-style event API
+ return parse_monitor_message(msg)
+
+__all__ = ['parse_monitor_message', 'recv_monitor_message']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/pyversion_compat.h b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/pyversion_compat.h
new file mode 100644
index 00000000..fac09046
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/pyversion_compat.h
@@ -0,0 +1,25 @@
+#include "Python.h"
+
+#if PY_VERSION_HEX < 0x02070000
+ #define PyMemoryView_FromBuffer(info) (PyErr_SetString(PyExc_NotImplementedError, \
+ "new buffer interface is not available"), (PyObject *)NULL)
+ #define PyMemoryView_FromObject(object) (PyErr_SetString(PyExc_NotImplementedError, \
+ "new buffer interface is not available"), (PyObject *)NULL)
+#endif
+
+#if PY_VERSION_HEX >= 0x03000000
+ // for buffers
+ #define Py_END_OF_BUFFER ((Py_ssize_t) 0)
+
+ #define PyObject_CheckReadBuffer(object) (0)
+
+ #define PyBuffer_FromMemory(ptr, s) (PyErr_SetString(PyExc_NotImplementedError, \
+ "old buffer interface is not available"), (PyObject *)NULL)
+ #define PyBuffer_FromReadWriteMemory(ptr, s) (PyErr_SetString(PyExc_NotImplementedError, \
+ "old buffer interface is not available"), (PyObject *)NULL)
+ #define PyBuffer_FromObject(object, offset, size) (PyErr_SetString(PyExc_NotImplementedError, \
+ "old buffer interface is not available"), (PyObject *)NULL)
+ #define PyBuffer_FromReadWriteObject(object, offset, size) (PyErr_SetString(PyExc_NotImplementedError, \
+ "old buffer interface is not available"), (PyObject *)NULL)
+
+#endif
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/sixcerpt.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/sixcerpt.py
new file mode 100644
index 00000000..5492fd59
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/sixcerpt.py
@@ -0,0 +1,52 @@
+"""Excerpts of six.py"""
+
+# Copyright (C) 2010-2014 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import sys
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+
+ def reraise(tp, value, tb=None):
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+
+ exec_("""def reraise(tp, value, tb=None):
+ raise tp, value, tb
+""")
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/strtypes.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/strtypes.py
new file mode 100644
index 00000000..548410dc
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/strtypes.py
@@ -0,0 +1,45 @@
+"""Declare basic string types unambiguously for various Python versions.
+
+Authors
+-------
+* MinRK
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+
+if sys.version_info[0] >= 3:
+ bytes = bytes
+ unicode = str
+ basestring = (bytes, unicode)
+else:
+ unicode = unicode
+ bytes = str
+ basestring = basestring
+
+def cast_bytes(s, encoding='utf8', errors='strict'):
+ """cast unicode or bytes to bytes"""
+ if isinstance(s, bytes):
+ return s
+ elif isinstance(s, unicode):
+ return s.encode(encoding, errors)
+ else:
+ raise TypeError("Expected unicode or bytes, got %r" % s)
+
+def cast_unicode(s, encoding='utf8', errors='strict'):
+ """cast bytes or unicode to unicode"""
+ if isinstance(s, bytes):
+ return s.decode(encoding, errors)
+ elif isinstance(s, unicode):
+ return s
+ else:
+ raise TypeError("Expected unicode or bytes, got %r" % s)
+
+# give short 'b' alias for cast_bytes, so that we can use fake b('stuff')
+# to simulate b'stuff'
+b = asbytes = cast_bytes
+u = cast_unicode
+
+__all__ = ['asbytes', 'bytes', 'unicode', 'basestring', 'b', 'u', 'cast_bytes', 'cast_unicode']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/win32.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/win32.py
new file mode 100644
index 00000000..ea758299
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/win32.py
@@ -0,0 +1,132 @@
+"""Win32 compatibility utilities."""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+#-----------------------------------------------------------------------------
+
+import os
+
+# No-op implementation for other platforms.
+class _allow_interrupt(object):
+ """Utility for fixing CTRL-C events on Windows.
+
+ On Windows, the Python interpreter intercepts CTRL-C events in order to
+ translate them into ``KeyboardInterrupt`` exceptions. It (presumably)
+ does this by setting a flag in its "control control handler" and
+ checking it later at a convenient location in the interpreter.
+
+ However, when the Python interpreter is blocked waiting for the ZMQ
+ poll operation to complete, it must wait for ZMQ's ``select()``
+ operation to complete before translating the CTRL-C event into the
+ ``KeyboardInterrupt`` exception.
+
+ The only way to fix this seems to be to add our own "console control
+ handler" and perform some application-defined operation that will
+ unblock the ZMQ polling operation in order to force ZMQ to pass control
+ back to the Python interpreter.
+
+ This context manager performs all that Windows-y stuff, providing you
+ with a hook that is called when a CTRL-C event is intercepted. This
+ hook allows you to unblock your ZMQ poll operation immediately, which
+ will then result in the expected ``KeyboardInterrupt`` exception.
+
+ Without this context manager, your ZMQ-based application will not
+ respond normally to CTRL-C events on Windows. If a CTRL-C event occurs
+ while blocked on ZMQ socket polling, the translation to a
+ ``KeyboardInterrupt`` exception will be delayed until the I/O completes
+ and control returns to the Python interpreter (this may never happen if
+ you use an infinite timeout).
+
+ A no-op implementation is provided on non-Win32 systems to avoid the
+ application from having to conditionally use it.
+
+ Example usage:
+
+ .. sourcecode:: python
+
+ def stop_my_application():
+ # ...
+
+ with allow_interrupt(stop_my_application):
+ # main polling loop.
+
+ In a typical ZMQ application, you would use the "self pipe trick" to
+ send message to a ``PAIR`` socket in order to interrupt your blocking
+ socket polling operation.
+
+ In a Tornado event loop, you can use the ``IOLoop.stop`` method to
+ unblock your I/O loop.
+ """
+
+ def __init__(self, action=None):
+ """Translate ``action`` into a CTRL-C handler.
+
+ ``action`` is a callable that takes no arguments and returns no
+ value (returned value is ignored). It must *NEVER* raise an
+ exception.
+
+ If unspecified, a no-op will be used.
+ """
+ self._init_action(action)
+
+ def _init_action(self, action):
+ pass
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ return
+
+if os.name == 'nt':
+ from ctypes import WINFUNCTYPE, windll
+ from ctypes.wintypes import BOOL, DWORD
+
+ kernel32 = windll.LoadLibrary('kernel32')
+
+ # <http://msdn.microsoft.com/en-us/library/ms686016.aspx>
+ PHANDLER_ROUTINE = WINFUNCTYPE(BOOL, DWORD)
+ SetConsoleCtrlHandler = kernel32.SetConsoleCtrlHandler
+ SetConsoleCtrlHandler.argtypes = (PHANDLER_ROUTINE, BOOL)
+ SetConsoleCtrlHandler.restype = BOOL
+
+ class allow_interrupt(_allow_interrupt):
+ __doc__ = _allow_interrupt.__doc__
+
+ def _init_action(self, action):
+ if action is None:
+ action = lambda: None
+ self.action = action
+ @PHANDLER_ROUTINE
+ def handle(event):
+ if event == 0: # CTRL_C_EVENT
+ action()
+ # Typical C implementations would return 1 to indicate that
+ # the event was processed and other control handlers in the
+ # stack should not be executed. However, that would
+ # prevent the Python interpreter's handler from translating
+ # CTRL-C to a `KeyboardInterrupt` exception, so we pretend
+ # that we didn't handle it.
+ return 0
+ self.handle = handle
+
+ def __enter__(self):
+ """Install the custom CTRL-C handler."""
+ result = SetConsoleCtrlHandler(self.handle, 1)
+ if result == 0:
+ # Have standard library automatically call `GetLastError()` and
+ # `FormatMessage()` into a nice exception object :-)
+ raise WindowsError()
+
+ def __exit__(self, *args):
+ """Remove the custom CTRL-C handler."""
+ result = SetConsoleCtrlHandler(self.handle, 0)
+ if result == 0:
+ # Have standard library automatically call `GetLastError()` and
+ # `FormatMessage()` into a nice exception object :-)
+ raise WindowsError()
+else:
+ class allow_interrupt(_allow_interrupt):
+ __doc__ = _allow_interrupt.__doc__
+ pass
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/z85.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/z85.py
new file mode 100644
index 00000000..1bb1784e
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/z85.py
@@ -0,0 +1,56 @@
+"""Python implementation of Z85 85-bit encoding
+
+Z85 encoding is a plaintext encoding for a bytestring interpreted as 32bit integers.
+Since the chunks are 32bit, a bytestring must be a multiple of 4 bytes.
+See ZMQ RFC 32 for details.
+
+
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+import struct
+
+PY3 = sys.version_info[0] >= 3
+# Z85CHARS is the base 85 symbol table
+Z85CHARS = b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.-:+=^!/*?&<>()[]{}@%$#"
+# Z85MAP maps integers in [0,84] to the appropriate character in Z85CHARS
+Z85MAP = dict([(c, idx) for idx, c in enumerate(Z85CHARS)])
+
+_85s = [ 85**i for i in range(5) ][::-1]
+
+def encode(rawbytes):
+ """encode raw bytes into Z85"""
+ # Accepts only byte arrays bounded to 4 bytes
+ if len(rawbytes) % 4:
+ raise ValueError("length must be multiple of 4, not %i" % len(rawbytes))
+
+ nvalues = len(rawbytes) / 4
+
+ values = struct.unpack('>%dI' % nvalues, rawbytes)
+ encoded = []
+ for v in values:
+ for offset in _85s:
+ encoded.append(Z85CHARS[(v // offset) % 85])
+
+ # In Python 3, encoded is a list of integers (obviously?!)
+ if PY3:
+ return bytes(encoded)
+ else:
+ return b''.join(encoded)
+
+def decode(z85bytes):
+ """decode Z85 bytes to raw bytes"""
+ if len(z85bytes) % 5:
+ raise ValueError("Z85 length must be multiple of 5, not %i" % len(z85bytes))
+
+ nvalues = len(z85bytes) / 5
+ values = []
+ for i in range(0, len(z85bytes), 5):
+ value = 0
+ for j, offset in enumerate(_85s):
+ value += Z85MAP[z85bytes[i+j]] * offset
+ values.append(value)
+ return struct.pack('>%dI' % nvalues, *values)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/zmq_compat.h b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/zmq_compat.h
new file mode 100644
index 00000000..81c57b69
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/zmq_compat.h
@@ -0,0 +1,80 @@
+//-----------------------------------------------------------------------------
+// Copyright (c) 2010 Brian Granger, Min Ragan-Kelley
+//
+// Distributed under the terms of the New BSD License. The full license is in
+// the file COPYING.BSD, distributed as part of this software.
+//-----------------------------------------------------------------------------
+
+#if defined(_MSC_VER)
+#define pyzmq_int64_t __int64
+#else
+#include <stdint.h>
+#define pyzmq_int64_t int64_t
+#endif
+
+
+#include "zmq.h"
+// version compatibility for constants:
+#include "zmq_constants.h"
+
+#define _missing (-1)
+
+
+// define fd type (from libzmq's fd.hpp)
+#ifdef _WIN32
+ #ifdef _MSC_VER && _MSC_VER <= 1400
+ #define ZMQ_FD_T UINT_PTR
+ #else
+ #define ZMQ_FD_T SOCKET
+ #endif
+#else
+ #define ZMQ_FD_T int
+#endif
+
+// use unambiguous aliases for zmq_send/recv functions
+
+#if ZMQ_VERSION_MAJOR >= 4
+// nothing to remove
+#else
+ #define zmq_curve_keypair(z85_public_key, z85_secret_key) _missing
+#endif
+
+#if ZMQ_VERSION_MAJOR >= 4 && ZMQ_VERSION_MINOR >= 1
+// nothing to remove
+#else
+ #define zmq_msg_gets(msg, prop) _missing
+ #define zmq_has(capability) _missing
+#endif
+
+#if ZMQ_VERSION_MAJOR >= 3
+ #define zmq_sendbuf zmq_send
+ #define zmq_recvbuf zmq_recv
+
+ // 3.x deprecations - these symbols haven't been removed,
+ // but let's protect against their planned removal
+ #define zmq_device(device_type, isocket, osocket) _missing
+ #define zmq_init(io_threads) ((void*)NULL)
+ #define zmq_term zmq_ctx_destroy
+#else
+ #define zmq_ctx_set(ctx, opt, val) _missing
+ #define zmq_ctx_get(ctx, opt) _missing
+ #define zmq_ctx_destroy zmq_term
+ #define zmq_ctx_new() ((void*)NULL)
+
+ #define zmq_proxy(a,b,c) _missing
+
+ #define zmq_disconnect(s, addr) _missing
+ #define zmq_unbind(s, addr) _missing
+
+ #define zmq_msg_more(msg) _missing
+ #define zmq_msg_get(msg, opt) _missing
+ #define zmq_msg_set(msg, opt, val) _missing
+ #define zmq_msg_send(msg, s, flags) zmq_send(s, msg, flags)
+ #define zmq_msg_recv(msg, s, flags) zmq_recv(s, msg, flags)
+
+ #define zmq_sendbuf(s, buf, len, flags) _missing
+ #define zmq_recvbuf(s, buf, len, flags) _missing
+
+ #define zmq_socket_monitor(s, addr, flags) _missing
+
+#endif
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/zmq_constants.h b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/zmq_constants.h
new file mode 100644
index 00000000..97683022
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/utils/zmq_constants.h
@@ -0,0 +1,622 @@
+#ifndef _PYZMQ_CONSTANT_DEFS
+#define _PYZMQ_CONSTANT_DEFS
+
+#define _PYZMQ_UNDEFINED (-9999)
+#ifndef ZMQ_VERSION
+ #define ZMQ_VERSION (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_VERSION_MAJOR
+ #define ZMQ_VERSION_MAJOR (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_VERSION_MINOR
+ #define ZMQ_VERSION_MINOR (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_VERSION_PATCH
+ #define ZMQ_VERSION_PATCH (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_NOBLOCK
+ #define ZMQ_NOBLOCK (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_DONTWAIT
+ #define ZMQ_DONTWAIT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_POLLIN
+ #define ZMQ_POLLIN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_POLLOUT
+ #define ZMQ_POLLOUT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_POLLERR
+ #define ZMQ_POLLERR (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SNDMORE
+ #define ZMQ_SNDMORE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_STREAMER
+ #define ZMQ_STREAMER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_FORWARDER
+ #define ZMQ_FORWARDER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_QUEUE
+ #define ZMQ_QUEUE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IO_THREADS_DFLT
+ #define ZMQ_IO_THREADS_DFLT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MAX_SOCKETS_DFLT
+ #define ZMQ_MAX_SOCKETS_DFLT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_POLLITEMS_DFLT
+ #define ZMQ_POLLITEMS_DFLT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_THREAD_PRIORITY_DFLT
+ #define ZMQ_THREAD_PRIORITY_DFLT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_THREAD_SCHED_POLICY_DFLT
+ #define ZMQ_THREAD_SCHED_POLICY_DFLT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PAIR
+ #define ZMQ_PAIR (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PUB
+ #define ZMQ_PUB (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SUB
+ #define ZMQ_SUB (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_REQ
+ #define ZMQ_REQ (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_REP
+ #define ZMQ_REP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_DEALER
+ #define ZMQ_DEALER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ROUTER
+ #define ZMQ_ROUTER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XREQ
+ #define ZMQ_XREQ (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XREP
+ #define ZMQ_XREP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PULL
+ #define ZMQ_PULL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PUSH
+ #define ZMQ_PUSH (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XPUB
+ #define ZMQ_XPUB (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XSUB
+ #define ZMQ_XSUB (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_UPSTREAM
+ #define ZMQ_UPSTREAM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_DOWNSTREAM
+ #define ZMQ_DOWNSTREAM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_STREAM
+ #define ZMQ_STREAM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_CONNECTED
+ #define ZMQ_EVENT_CONNECTED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_CONNECT_DELAYED
+ #define ZMQ_EVENT_CONNECT_DELAYED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_CONNECT_RETRIED
+ #define ZMQ_EVENT_CONNECT_RETRIED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_LISTENING
+ #define ZMQ_EVENT_LISTENING (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_BIND_FAILED
+ #define ZMQ_EVENT_BIND_FAILED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_ACCEPTED
+ #define ZMQ_EVENT_ACCEPTED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_ACCEPT_FAILED
+ #define ZMQ_EVENT_ACCEPT_FAILED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_CLOSED
+ #define ZMQ_EVENT_CLOSED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_CLOSE_FAILED
+ #define ZMQ_EVENT_CLOSE_FAILED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_DISCONNECTED
+ #define ZMQ_EVENT_DISCONNECTED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_ALL
+ #define ZMQ_EVENT_ALL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_MONITOR_STOPPED
+ #define ZMQ_EVENT_MONITOR_STOPPED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_NULL
+ #define ZMQ_NULL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PLAIN
+ #define ZMQ_PLAIN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CURVE
+ #define ZMQ_CURVE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_GSSAPI
+ #define ZMQ_GSSAPI (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EAGAIN
+ #define EAGAIN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EINVAL
+ #define EINVAL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EFAULT
+ #define EFAULT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOMEM
+ #define ENOMEM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENODEV
+ #define ENODEV (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EMSGSIZE
+ #define EMSGSIZE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EAFNOSUPPORT
+ #define EAFNOSUPPORT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENETUNREACH
+ #define ENETUNREACH (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ECONNABORTED
+ #define ECONNABORTED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ECONNRESET
+ #define ECONNRESET (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOTCONN
+ #define ENOTCONN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ETIMEDOUT
+ #define ETIMEDOUT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EHOSTUNREACH
+ #define EHOSTUNREACH (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENETRESET
+ #define ENETRESET (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_HAUSNUMERO
+ #define ZMQ_HAUSNUMERO (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOTSUP
+ #define ENOTSUP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EPROTONOSUPPORT
+ #define EPROTONOSUPPORT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOBUFS
+ #define ENOBUFS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENETDOWN
+ #define ENETDOWN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EADDRINUSE
+ #define EADDRINUSE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EADDRNOTAVAIL
+ #define EADDRNOTAVAIL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ECONNREFUSED
+ #define ECONNREFUSED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EINPROGRESS
+ #define EINPROGRESS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOTSOCK
+ #define ENOTSOCK (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EFSM
+ #define EFSM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOCOMPATPROTO
+ #define ENOCOMPATPROTO (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ETERM
+ #define ETERM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EMTHREAD
+ #define EMTHREAD (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IO_THREADS
+ #define ZMQ_IO_THREADS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MAX_SOCKETS
+ #define ZMQ_MAX_SOCKETS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SOCKET_LIMIT
+ #define ZMQ_SOCKET_LIMIT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_THREAD_PRIORITY
+ #define ZMQ_THREAD_PRIORITY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_THREAD_SCHED_POLICY
+ #define ZMQ_THREAD_SCHED_POLICY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IDENTITY
+ #define ZMQ_IDENTITY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SUBSCRIBE
+ #define ZMQ_SUBSCRIBE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_UNSUBSCRIBE
+ #define ZMQ_UNSUBSCRIBE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_LAST_ENDPOINT
+ #define ZMQ_LAST_ENDPOINT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TCP_ACCEPT_FILTER
+ #define ZMQ_TCP_ACCEPT_FILTER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PLAIN_USERNAME
+ #define ZMQ_PLAIN_USERNAME (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PLAIN_PASSWORD
+ #define ZMQ_PLAIN_PASSWORD (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CURVE_PUBLICKEY
+ #define ZMQ_CURVE_PUBLICKEY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CURVE_SECRETKEY
+ #define ZMQ_CURVE_SECRETKEY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CURVE_SERVERKEY
+ #define ZMQ_CURVE_SERVERKEY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ZAP_DOMAIN
+ #define ZMQ_ZAP_DOMAIN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CONNECT_RID
+ #define ZMQ_CONNECT_RID (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_GSSAPI_PRINCIPAL
+ #define ZMQ_GSSAPI_PRINCIPAL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_GSSAPI_SERVICE_PRINCIPAL
+ #define ZMQ_GSSAPI_SERVICE_PRINCIPAL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SOCKS_PROXY
+ #define ZMQ_SOCKS_PROXY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_FD
+ #define ZMQ_FD (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IDENTITY_FD
+ #define ZMQ_IDENTITY_FD (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RECONNECT_IVL_MAX
+ #define ZMQ_RECONNECT_IVL_MAX (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SNDTIMEO
+ #define ZMQ_SNDTIMEO (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RCVTIMEO
+ #define ZMQ_RCVTIMEO (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SNDHWM
+ #define ZMQ_SNDHWM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RCVHWM
+ #define ZMQ_RCVHWM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MULTICAST_HOPS
+ #define ZMQ_MULTICAST_HOPS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IPV4ONLY
+ #define ZMQ_IPV4ONLY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ROUTER_BEHAVIOR
+ #define ZMQ_ROUTER_BEHAVIOR (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TCP_KEEPALIVE
+ #define ZMQ_TCP_KEEPALIVE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TCP_KEEPALIVE_CNT
+ #define ZMQ_TCP_KEEPALIVE_CNT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TCP_KEEPALIVE_IDLE
+ #define ZMQ_TCP_KEEPALIVE_IDLE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TCP_KEEPALIVE_INTVL
+ #define ZMQ_TCP_KEEPALIVE_INTVL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_DELAY_ATTACH_ON_CONNECT
+ #define ZMQ_DELAY_ATTACH_ON_CONNECT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XPUB_VERBOSE
+ #define ZMQ_XPUB_VERBOSE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENTS
+ #define ZMQ_EVENTS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TYPE
+ #define ZMQ_TYPE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_LINGER
+ #define ZMQ_LINGER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RECONNECT_IVL
+ #define ZMQ_RECONNECT_IVL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_BACKLOG
+ #define ZMQ_BACKLOG (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ROUTER_MANDATORY
+ #define ZMQ_ROUTER_MANDATORY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_FAIL_UNROUTABLE
+ #define ZMQ_FAIL_UNROUTABLE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ROUTER_RAW
+ #define ZMQ_ROUTER_RAW (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IMMEDIATE
+ #define ZMQ_IMMEDIATE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IPV6
+ #define ZMQ_IPV6 (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MECHANISM
+ #define ZMQ_MECHANISM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PLAIN_SERVER
+ #define ZMQ_PLAIN_SERVER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CURVE_SERVER
+ #define ZMQ_CURVE_SERVER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PROBE_ROUTER
+ #define ZMQ_PROBE_ROUTER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_REQ_RELAXED
+ #define ZMQ_REQ_RELAXED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_REQ_CORRELATE
+ #define ZMQ_REQ_CORRELATE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CONFLATE
+ #define ZMQ_CONFLATE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ROUTER_HANDOVER
+ #define ZMQ_ROUTER_HANDOVER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TOS
+ #define ZMQ_TOS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IPC_FILTER_PID
+ #define ZMQ_IPC_FILTER_PID (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IPC_FILTER_UID
+ #define ZMQ_IPC_FILTER_UID (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IPC_FILTER_GID
+ #define ZMQ_IPC_FILTER_GID (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_GSSAPI_SERVER
+ #define ZMQ_GSSAPI_SERVER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_GSSAPI_PLAINTEXT
+ #define ZMQ_GSSAPI_PLAINTEXT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_HANDSHAKE_IVL
+ #define ZMQ_HANDSHAKE_IVL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XPUB_NODROP
+ #define ZMQ_XPUB_NODROP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_AFFINITY
+ #define ZMQ_AFFINITY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MAXMSGSIZE
+ #define ZMQ_MAXMSGSIZE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_HWM
+ #define ZMQ_HWM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SWAP
+ #define ZMQ_SWAP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MCAST_LOOP
+ #define ZMQ_MCAST_LOOP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RECOVERY_IVL_MSEC
+ #define ZMQ_RECOVERY_IVL_MSEC (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RATE
+ #define ZMQ_RATE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RECOVERY_IVL
+ #define ZMQ_RECOVERY_IVL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SNDBUF
+ #define ZMQ_SNDBUF (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RCVBUF
+ #define ZMQ_RCVBUF (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RCVMORE
+ #define ZMQ_RCVMORE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MORE
+ #define ZMQ_MORE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SRCFD
+ #define ZMQ_SRCFD (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SHARED
+ #define ZMQ_SHARED (_PYZMQ_UNDEFINED)
+#endif
+
+
+#endif // ifndef _PYZMQ_CONSTANT_DEFS
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/__init__.py
new file mode 100644
index 00000000..3408b3ba
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/__init__.py
@@ -0,0 +1,64 @@
+"""Python bindings for 0MQ."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import os
+import sys
+import glob
+
+# load bundled libzmq, if there is one:
+
+here = os.path.dirname(__file__)
+
+bundled = []
+bundled_sodium = []
+for ext in ('pyd', 'so', 'dll', 'dylib'):
+ bundled_sodium.extend(glob.glob(os.path.join(here, 'libsodium*.%s*' % ext)))
+ bundled.extend(glob.glob(os.path.join(here, 'libzmq*.%s*' % ext)))
+
+if bundled:
+ import ctypes
+ if bundled_sodium:
+ if bundled[0].endswith('.pyd'):
+ # a Windows Extension
+ _libsodium = ctypes.cdll.LoadLibrary(bundled_sodium[0])
+ else:
+ _libsodium = ctypes.CDLL(bundled_sodium[0], mode=ctypes.RTLD_GLOBAL)
+ if bundled[0].endswith('.pyd'):
+ # a Windows Extension
+ _libzmq = ctypes.cdll.LoadLibrary(bundled[0])
+ else:
+ _libzmq = ctypes.CDLL(bundled[0], mode=ctypes.RTLD_GLOBAL)
+ del ctypes
+else:
+ import zipimport
+ try:
+ if isinstance(__loader__, zipimport.zipimporter):
+ # a zipped pyzmq egg
+ from zmq import libzmq as _libzmq
+ except (NameError, ImportError):
+ pass
+ finally:
+ del zipimport
+
+del os, sys, glob, here, bundled, bundled_sodium, ext
+
+# zmq top-level imports
+
+from zmq import backend
+from zmq.backend import *
+from zmq import sugar
+from zmq.sugar import *
+from zmq import devices
+
+def get_includes():
+ """Return a list of directories to include for linking against pyzmq with cython."""
+ from os.path import join, dirname, abspath, pardir
+ base = dirname(__file__)
+ parent = abspath(join(base, pardir))
+ return [ parent ] + [ join(parent, base, subdir) for subdir in ('utils',) ]
+
+
+__all__ = ['get_includes'] + sugar.__all__ + backend.__all__
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/__init__.py
new file mode 100644
index 00000000..11d3ad6b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/__init__.py
@@ -0,0 +1,10 @@
+"""Utilities for ZAP authentication.
+
+To run authentication in a background thread, see :mod:`zmq.auth.thread`.
+For integration with the tornado eventloop, see :mod:`zmq.auth.ioloop`.
+
+.. versionadded:: 14.1
+"""
+
+from .base import *
+from .certs import *
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/base.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/base.py
new file mode 100644
index 00000000..9b4aaed7
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/base.py
@@ -0,0 +1,272 @@
+"""Base implementation of 0MQ authentication."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import logging
+
+import zmq
+from zmq.utils import z85
+from zmq.utils.strtypes import bytes, unicode, b, u
+from zmq.error import _check_version
+
+from .certs import load_certificates
+
+
+CURVE_ALLOW_ANY = '*'
+VERSION = b'1.0'
+
+class Authenticator(object):
+ """Implementation of ZAP authentication for zmq connections.
+
+ Note:
+ - libzmq provides four levels of security: default NULL (which the Authenticator does
+ not see), and authenticated NULL, PLAIN, and CURVE, which the Authenticator can see.
+ - until you add policies, all incoming NULL connections are allowed
+ (classic ZeroMQ behavior), and all PLAIN and CURVE connections are denied.
+ """
+
+ def __init__(self, context=None, encoding='utf-8', log=None):
+ _check_version((4,0), "security")
+ self.context = context or zmq.Context.instance()
+ self.encoding = encoding
+ self.allow_any = False
+ self.zap_socket = None
+ self.whitelist = set()
+ self.blacklist = set()
+ # passwords is a dict keyed by domain and contains values
+ # of dicts with username:password pairs.
+ self.passwords = {}
+ # certs is dict keyed by domain and contains values
+ # of dicts keyed by the public keys from the specified location.
+ self.certs = {}
+ self.log = log or logging.getLogger('zmq.auth')
+
+ def start(self):
+ """Create and bind the ZAP socket"""
+ self.zap_socket = self.context.socket(zmq.REP)
+ self.zap_socket.linger = 1
+ self.zap_socket.bind("inproc://zeromq.zap.01")
+
+ def stop(self):
+ """Close the ZAP socket"""
+ if self.zap_socket:
+ self.zap_socket.close()
+ self.zap_socket = None
+
+ def allow(self, *addresses):
+ """Allow (whitelist) IP address(es).
+
+ Connections from addresses not in the whitelist will be rejected.
+
+ - For NULL, all clients from this address will be accepted.
+ - For PLAIN and CURVE, they will be allowed to continue with authentication.
+
+ whitelist is mutually exclusive with blacklist.
+ """
+ if self.blacklist:
+ raise ValueError("Only use a whitelist or a blacklist, not both")
+ self.whitelist.update(addresses)
+
+ def deny(self, *addresses):
+ """Deny (blacklist) IP address(es).
+
+ Addresses not in the blacklist will be allowed to continue with authentication.
+
+ Blacklist is mutually exclusive with whitelist.
+ """
+ if self.whitelist:
+ raise ValueError("Only use a whitelist or a blacklist, not both")
+ self.blacklist.update(addresses)
+
+ def configure_plain(self, domain='*', passwords=None):
+ """Configure PLAIN authentication for a given domain.
+
+ PLAIN authentication uses a plain-text password file.
+ To cover all domains, use "*".
+ You can modify the password file at any time; it is reloaded automatically.
+ """
+ if passwords:
+ self.passwords[domain] = passwords
+
+ def configure_curve(self, domain='*', location=None):
+ """Configure CURVE authentication for a given domain.
+
+ CURVE authentication uses a directory that holds all public client certificates,
+ i.e. their public keys.
+
+ To cover all domains, use "*".
+
+ You can add and remove certificates in that directory at any time.
+
+ To allow all client keys without checking, specify CURVE_ALLOW_ANY for the location.
+ """
+ # If location is CURVE_ALLOW_ANY then allow all clients. Otherwise
+ # treat location as a directory that holds the certificates.
+ if location == CURVE_ALLOW_ANY:
+ self.allow_any = True
+ else:
+ self.allow_any = False
+ try:
+ self.certs[domain] = load_certificates(location)
+ except Exception as e:
+ self.log.error("Failed to load CURVE certs from %s: %s", location, e)
+
+ def handle_zap_message(self, msg):
+ """Perform ZAP authentication"""
+ if len(msg) < 6:
+ self.log.error("Invalid ZAP message, not enough frames: %r", msg)
+ if len(msg) < 2:
+ self.log.error("Not enough information to reply")
+ else:
+ self._send_zap_reply(msg[1], b"400", b"Not enough frames")
+ return
+
+ version, request_id, domain, address, identity, mechanism = msg[:6]
+ credentials = msg[6:]
+
+ domain = u(domain, self.encoding, 'replace')
+ address = u(address, self.encoding, 'replace')
+
+ if (version != VERSION):
+ self.log.error("Invalid ZAP version: %r", msg)
+ self._send_zap_reply(request_id, b"400", b"Invalid version")
+ return
+
+ self.log.debug("version: %r, request_id: %r, domain: %r,"
+ " address: %r, identity: %r, mechanism: %r",
+ version, request_id, domain,
+ address, identity, mechanism,
+ )
+
+
+ # Is address is explicitly whitelisted or blacklisted?
+ allowed = False
+ denied = False
+ reason = b"NO ACCESS"
+
+ if self.whitelist:
+ if address in self.whitelist:
+ allowed = True
+ self.log.debug("PASSED (whitelist) address=%s", address)
+ else:
+ denied = True
+ reason = b"Address not in whitelist"
+ self.log.debug("DENIED (not in whitelist) address=%s", address)
+
+ elif self.blacklist:
+ if address in self.blacklist:
+ denied = True
+ reason = b"Address is blacklisted"
+ self.log.debug("DENIED (blacklist) address=%s", address)
+ else:
+ allowed = True
+ self.log.debug("PASSED (not in blacklist) address=%s", address)
+
+ # Perform authentication mechanism-specific checks if necessary
+ username = u("user")
+ if not denied:
+
+ if mechanism == b'NULL' and not allowed:
+ # For NULL, we allow if the address wasn't blacklisted
+ self.log.debug("ALLOWED (NULL)")
+ allowed = True
+
+ elif mechanism == b'PLAIN':
+ # For PLAIN, even a whitelisted address must authenticate
+ if len(credentials) != 2:
+ self.log.error("Invalid PLAIN credentials: %r", credentials)
+ self._send_zap_reply(request_id, b"400", b"Invalid credentials")
+ return
+ username, password = [ u(c, self.encoding, 'replace') for c in credentials ]
+ allowed, reason = self._authenticate_plain(domain, username, password)
+
+ elif mechanism == b'CURVE':
+ # For CURVE, even a whitelisted address must authenticate
+ if len(credentials) != 1:
+ self.log.error("Invalid CURVE credentials: %r", credentials)
+ self._send_zap_reply(request_id, b"400", b"Invalid credentials")
+ return
+ key = credentials[0]
+ allowed, reason = self._authenticate_curve(domain, key)
+
+ if allowed:
+ self._send_zap_reply(request_id, b"200", b"OK", username)
+ else:
+ self._send_zap_reply(request_id, b"400", reason)
+
+ def _authenticate_plain(self, domain, username, password):
+ """PLAIN ZAP authentication"""
+ allowed = False
+ reason = b""
+ if self.passwords:
+ # If no domain is not specified then use the default domain
+ if not domain:
+ domain = '*'
+
+ if domain in self.passwords:
+ if username in self.passwords[domain]:
+ if password == self.passwords[domain][username]:
+ allowed = True
+ else:
+ reason = b"Invalid password"
+ else:
+ reason = b"Invalid username"
+ else:
+ reason = b"Invalid domain"
+
+ if allowed:
+ self.log.debug("ALLOWED (PLAIN) domain=%s username=%s password=%s",
+ domain, username, password,
+ )
+ else:
+ self.log.debug("DENIED %s", reason)
+
+ else:
+ reason = b"No passwords defined"
+ self.log.debug("DENIED (PLAIN) %s", reason)
+
+ return allowed, reason
+
+ def _authenticate_curve(self, domain, client_key):
+ """CURVE ZAP authentication"""
+ allowed = False
+ reason = b""
+ if self.allow_any:
+ allowed = True
+ reason = b"OK"
+ self.log.debug("ALLOWED (CURVE allow any client)")
+ else:
+ # If no explicit domain is specified then use the default domain
+ if not domain:
+ domain = '*'
+
+ if domain in self.certs:
+ # The certs dict stores keys in z85 format, convert binary key to z85 bytes
+ z85_client_key = z85.encode(client_key)
+ if z85_client_key in self.certs[domain] or self.certs[domain] == b'OK':
+ allowed = True
+ reason = b"OK"
+ else:
+ reason = b"Unknown key"
+
+ status = "ALLOWED" if allowed else "DENIED"
+ self.log.debug("%s (CURVE) domain=%s client_key=%s",
+ status, domain, z85_client_key,
+ )
+ else:
+ reason = b"Unknown domain"
+
+ return allowed, reason
+
+ def _send_zap_reply(self, request_id, status_code, status_text, user_id='user'):
+ """Send a ZAP reply to finish the authentication."""
+ user_id = user_id if status_code == b'200' else b''
+ if isinstance(user_id, unicode):
+ user_id = user_id.encode(self.encoding, 'replace')
+ metadata = b'' # not currently used
+ self.log.debug("ZAP reply code=%s text=%s", status_code, status_text)
+ reply = [VERSION, request_id, status_code, status_text, user_id, metadata]
+ self.zap_socket.send_multipart(reply)
+
+__all__ = ['Authenticator', 'CURVE_ALLOW_ANY']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/certs.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/certs.py
new file mode 100644
index 00000000..4d26ad7b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/certs.py
@@ -0,0 +1,119 @@
+"""0MQ authentication related functions and classes."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import datetime
+import glob
+import io
+import os
+import zmq
+from zmq.utils.strtypes import bytes, unicode, b, u
+
+
+_cert_secret_banner = u("""# **** Generated on {0} by pyzmq ****
+# ZeroMQ CURVE **Secret** Certificate
+# DO NOT PROVIDE THIS FILE TO OTHER USERS nor change its permissions.
+
+""")
+
+_cert_public_banner = u("""# **** Generated on {0} by pyzmq ****
+# ZeroMQ CURVE Public Certificate
+# Exchange securely, or use a secure mechanism to verify the contents
+# of this file after exchange. Store public certificates in your home
+# directory, in the .curve subdirectory.
+
+""")
+
+def _write_key_file(key_filename, banner, public_key, secret_key=None, metadata=None, encoding='utf-8'):
+ """Create a certificate file"""
+ if isinstance(public_key, bytes):
+ public_key = public_key.decode(encoding)
+ if isinstance(secret_key, bytes):
+ secret_key = secret_key.decode(encoding)
+ with io.open(key_filename, 'w', encoding='utf8') as f:
+ f.write(banner.format(datetime.datetime.now()))
+
+ f.write(u('metadata\n'))
+ if metadata:
+ for k, v in metadata.items():
+ if isinstance(v, bytes):
+ v = v.decode(encoding)
+ f.write(u(" {0} = {1}\n").format(k, v))
+
+ f.write(u('curve\n'))
+ f.write(u(" public-key = \"{0}\"\n").format(public_key))
+
+ if secret_key:
+ f.write(u(" secret-key = \"{0}\"\n").format(secret_key))
+
+
+def create_certificates(key_dir, name, metadata=None):
+ """Create zmq certificates.
+
+ Returns the file paths to the public and secret certificate files.
+ """
+ public_key, secret_key = zmq.curve_keypair()
+ base_filename = os.path.join(key_dir, name)
+ secret_key_file = "{0}.key_secret".format(base_filename)
+ public_key_file = "{0}.key".format(base_filename)
+ now = datetime.datetime.now()
+
+ _write_key_file(public_key_file,
+ _cert_public_banner.format(now),
+ public_key)
+
+ _write_key_file(secret_key_file,
+ _cert_secret_banner.format(now),
+ public_key,
+ secret_key=secret_key,
+ metadata=metadata)
+
+ return public_key_file, secret_key_file
+
+
+def load_certificate(filename):
+ """Load public and secret key from a zmq certificate.
+
+ Returns (public_key, secret_key)
+
+ If the certificate file only contains the public key,
+ secret_key will be None.
+ """
+ public_key = None
+ secret_key = None
+ if not os.path.exists(filename):
+ raise IOError("Invalid certificate file: {0}".format(filename))
+
+ with open(filename, 'rb') as f:
+ for line in f:
+ line = line.strip()
+ if line.startswith(b'#'):
+ continue
+ if line.startswith(b'public-key'):
+ public_key = line.split(b"=", 1)[1].strip(b' \t\'"')
+ if line.startswith(b'secret-key'):
+ secret_key = line.split(b"=", 1)[1].strip(b' \t\'"')
+ if public_key and secret_key:
+ break
+
+ return public_key, secret_key
+
+
+def load_certificates(directory='.'):
+ """Load public keys from all certificates in a directory"""
+ certs = {}
+ if not os.path.isdir(directory):
+ raise IOError("Invalid certificate directory: {0}".format(directory))
+ # Follow czmq pattern of public keys stored in *.key files.
+ glob_string = os.path.join(directory, "*.key")
+
+ cert_files = glob.glob(glob_string)
+ for cert_file in cert_files:
+ public_key, _ = load_certificate(cert_file)
+ if public_key:
+ certs[public_key] = 'OK'
+ return certs
+
+__all__ = ['create_certificates', 'load_certificate', 'load_certificates']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/ioloop.py
new file mode 100644
index 00000000..1f448b47
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/ioloop.py
@@ -0,0 +1,34 @@
+"""ZAP Authenticator integrated with the tornado IOLoop.
+
+.. versionadded:: 14.1
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from zmq.eventloop import ioloop, zmqstream
+from .base import Authenticator
+
+
+class IOLoopAuthenticator(Authenticator):
+ """ZAP authentication for use in the tornado IOLoop"""
+
+ def __init__(self, context=None, encoding='utf-8', log=None, io_loop=None):
+ super(IOLoopAuthenticator, self).__init__(context)
+ self.zap_stream = None
+ self.io_loop = io_loop or ioloop.IOLoop.instance()
+
+ def start(self):
+ """Start ZAP authentication"""
+ super(IOLoopAuthenticator, self).start()
+ self.zap_stream = zmqstream.ZMQStream(self.zap_socket, self.io_loop)
+ self.zap_stream.on_recv(self.handle_zap_message)
+
+ def stop(self):
+ """Stop ZAP authentication"""
+ if self.zap_stream:
+ self.zap_stream.close()
+ self.zap_stream = None
+ super(IOLoopAuthenticator, self).stop()
+
+__all__ = ['IOLoopAuthenticator']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/thread.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/thread.py
new file mode 100644
index 00000000..8c3355a9
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/auth/thread.py
@@ -0,0 +1,184 @@
+"""ZAP Authenticator in a Python Thread.
+
+.. versionadded:: 14.1
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import logging
+from threading import Thread
+
+import zmq
+from zmq.utils import jsonapi
+from zmq.utils.strtypes import bytes, unicode, b, u
+
+from .base import Authenticator
+
+class AuthenticationThread(Thread):
+ """A Thread for running a zmq Authenticator
+
+ This is run in the background by ThreadedAuthenticator
+ """
+
+ def __init__(self, context, endpoint, encoding='utf-8', log=None):
+ super(AuthenticationThread, self).__init__()
+ self.context = context or zmq.Context.instance()
+ self.encoding = encoding
+ self.log = log = log or logging.getLogger('zmq.auth')
+ self.authenticator = Authenticator(context, encoding=encoding, log=log)
+
+ # create a socket to communicate back to main thread.
+ self.pipe = context.socket(zmq.PAIR)
+ self.pipe.linger = 1
+ self.pipe.connect(endpoint)
+
+ def run(self):
+ """ Start the Authentication Agent thread task """
+ self.authenticator.start()
+ zap = self.authenticator.zap_socket
+ poller = zmq.Poller()
+ poller.register(self.pipe, zmq.POLLIN)
+ poller.register(zap, zmq.POLLIN)
+ while True:
+ try:
+ socks = dict(poller.poll())
+ except zmq.ZMQError:
+ break # interrupted
+
+ if self.pipe in socks and socks[self.pipe] == zmq.POLLIN:
+ terminate = self._handle_pipe()
+ if terminate:
+ break
+
+ if zap in socks and socks[zap] == zmq.POLLIN:
+ self._handle_zap()
+
+ self.pipe.close()
+ self.authenticator.stop()
+
+ def _handle_zap(self):
+ """
+ Handle a message from the ZAP socket.
+ """
+ msg = self.authenticator.zap_socket.recv_multipart()
+ if not msg: return
+ self.authenticator.handle_zap_message(msg)
+
+ def _handle_pipe(self):
+ """
+ Handle a message from front-end API.
+ """
+ terminate = False
+
+ # Get the whole message off the pipe in one go
+ msg = self.pipe.recv_multipart()
+
+ if msg is None:
+ terminate = True
+ return terminate
+
+ command = msg[0]
+ self.log.debug("auth received API command %r", command)
+
+ if command == b'ALLOW':
+ addresses = [u(m, self.encoding) for m in msg[1:]]
+ try:
+ self.authenticator.allow(*addresses)
+ except Exception as e:
+ self.log.exception("Failed to allow %s", addresses)
+
+ elif command == b'DENY':
+ addresses = [u(m, self.encoding) for m in msg[1:]]
+ try:
+ self.authenticator.deny(*addresses)
+ except Exception as e:
+ self.log.exception("Failed to deny %s", addresses)
+
+ elif command == b'PLAIN':
+ domain = u(msg[1], self.encoding)
+ json_passwords = msg[2]
+ self.authenticator.configure_plain(domain, jsonapi.loads(json_passwords))
+
+ elif command == b'CURVE':
+ # For now we don't do anything with domains
+ domain = u(msg[1], self.encoding)
+
+ # If location is CURVE_ALLOW_ANY, allow all clients. Otherwise
+ # treat location as a directory that holds the certificates.
+ location = u(msg[2], self.encoding)
+ self.authenticator.configure_curve(domain, location)
+
+ elif command == b'TERMINATE':
+ terminate = True
+
+ else:
+ self.log.error("Invalid auth command from API: %r", command)
+
+ return terminate
+
+def _inherit_docstrings(cls):
+ """inherit docstrings from Authenticator, so we don't duplicate them"""
+ for name, method in cls.__dict__.items():
+ if name.startswith('_'):
+ continue
+ upstream_method = getattr(Authenticator, name, None)
+ if not method.__doc__:
+ method.__doc__ = upstream_method.__doc__
+ return cls
+
+@_inherit_docstrings
+class ThreadAuthenticator(object):
+ """Run ZAP authentication in a background thread"""
+
+ def __init__(self, context=None, encoding='utf-8', log=None):
+ self.context = context or zmq.Context.instance()
+ self.log = log
+ self.encoding = encoding
+ self.pipe = None
+ self.pipe_endpoint = "inproc://{0}.inproc".format(id(self))
+ self.thread = None
+
+ def allow(self, *addresses):
+ self.pipe.send_multipart([b'ALLOW'] + [b(a, self.encoding) for a in addresses])
+
+ def deny(self, *addresses):
+ self.pipe.send_multipart([b'DENY'] + [b(a, self.encoding) for a in addresses])
+
+ def configure_plain(self, domain='*', passwords=None):
+ self.pipe.send_multipart([b'PLAIN', b(domain, self.encoding), jsonapi.dumps(passwords or {})])
+
+ def configure_curve(self, domain='*', location=''):
+ domain = b(domain, self.encoding)
+ location = b(location, self.encoding)
+ self.pipe.send_multipart([b'CURVE', domain, location])
+
+ def start(self):
+ """Start the authentication thread"""
+ # create a socket to communicate with auth thread.
+ self.pipe = self.context.socket(zmq.PAIR)
+ self.pipe.linger = 1
+ self.pipe.bind(self.pipe_endpoint)
+ self.thread = AuthenticationThread(self.context, self.pipe_endpoint, encoding=self.encoding, log=self.log)
+ self.thread.start()
+
+ def stop(self):
+ """Stop the authentication thread"""
+ if self.pipe:
+ self.pipe.send(b'TERMINATE')
+ if self.is_alive():
+ self.thread.join()
+ self.thread = None
+ self.pipe.close()
+ self.pipe = None
+
+ def is_alive(self):
+ """Is the ZAP thread currently running?"""
+ if self.thread and self.thread.is_alive():
+ return True
+ return False
+
+ def __del__(self):
+ self.stop()
+
+__all__ = ['ThreadAuthenticator']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/__init__.py
new file mode 100644
index 00000000..7cac725c
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/__init__.py
@@ -0,0 +1,45 @@
+"""Import basic exposure of libzmq C API as a backend"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import os
+import platform
+import sys
+
+from zmq.utils.sixcerpt import reraise
+
+from .select import public_api, select_backend
+
+if 'PYZMQ_BACKEND' in os.environ:
+ backend = os.environ['PYZMQ_BACKEND']
+ if backend in ('cython', 'cffi'):
+ backend = 'zmq.backend.%s' % backend
+ _ns = select_backend(backend)
+else:
+ # default to cython, fallback to cffi
+ # (reverse on PyPy)
+ if platform.python_implementation() == 'PyPy':
+ first, second = ('zmq.backend.cffi', 'zmq.backend.cython')
+ else:
+ first, second = ('zmq.backend.cython', 'zmq.backend.cffi')
+
+ try:
+ _ns = select_backend(first)
+ except Exception:
+ exc_info = sys.exc_info()
+ exc = exc_info[1]
+ try:
+ _ns = select_backend(second)
+ except ImportError:
+ # prevent 'During handling of the above exception...' on py3
+ # can't use `raise ... from` on Python 2
+ if hasattr(exc, '__cause__'):
+ exc.__cause__ = None
+ # raise the *first* error, not the fallback
+ reraise(*exc_info)
+
+globals().update(_ns)
+
+__all__ = public_api
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/__init__.py
new file mode 100644
index 00000000..ca3164d3
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/__init__.py
@@ -0,0 +1,22 @@
+"""CFFI backend (for PyPy)"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from zmq.backend.cffi import (constants, error, message, context, socket,
+ _poll, devices, utils)
+
+__all__ = []
+for submod in (constants, error, message, context, socket,
+ _poll, devices, utils):
+ __all__.extend(submod.__all__)
+
+from .constants import *
+from .error import *
+from .message import *
+from .context import *
+from .socket import *
+from .devices import *
+from ._poll import *
+from ._cffi import zmq_version_info, ffi
+from .utils import *
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/_cdefs.h b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/_cdefs.h
new file mode 100644
index 00000000..d3300575
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/_cdefs.h
@@ -0,0 +1,68 @@
+void zmq_version(int *major, int *minor, int *patch);
+
+void* zmq_socket(void *context, int type);
+int zmq_close(void *socket);
+
+int zmq_bind(void *socket, const char *endpoint);
+int zmq_connect(void *socket, const char *endpoint);
+
+int zmq_errno(void);
+const char * zmq_strerror(int errnum);
+
+void* zmq_stopwatch_start(void);
+unsigned long zmq_stopwatch_stop(void *watch);
+void zmq_sleep(int seconds_);
+int zmq_device(int device, void *frontend, void *backend);
+
+int zmq_unbind(void *socket, const char *endpoint);
+int zmq_disconnect(void *socket, const char *endpoint);
+void* zmq_ctx_new();
+int zmq_ctx_destroy(void *context);
+int zmq_ctx_get(void *context, int opt);
+int zmq_ctx_set(void *context, int opt, int optval);
+int zmq_proxy(void *frontend, void *backend, void *capture);
+int zmq_socket_monitor(void *socket, const char *addr, int events);
+
+int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key);
+int zmq_has (const char *capability);
+
+typedef struct { ...; } zmq_msg_t;
+typedef ... zmq_free_fn;
+
+int zmq_msg_init(zmq_msg_t *msg);
+int zmq_msg_init_size(zmq_msg_t *msg, size_t size);
+int zmq_msg_init_data(zmq_msg_t *msg,
+ void *data,
+ size_t size,
+ zmq_free_fn *ffn,
+ void *hint);
+
+size_t zmq_msg_size(zmq_msg_t *msg);
+void *zmq_msg_data(zmq_msg_t *msg);
+int zmq_msg_close(zmq_msg_t *msg);
+
+int zmq_msg_send(zmq_msg_t *msg, void *socket, int flags);
+int zmq_msg_recv(zmq_msg_t *msg, void *socket, int flags);
+
+int zmq_getsockopt(void *socket,
+ int option_name,
+ void *option_value,
+ size_t *option_len);
+
+int zmq_setsockopt(void *socket,
+ int option_name,
+ const void *option_value,
+ size_t option_len);
+typedef struct
+{
+ void *socket;
+ int fd;
+ short events;
+ short revents;
+} zmq_pollitem_t;
+
+int zmq_poll(zmq_pollitem_t *items, int nitems, long timeout);
+
+// miscellany
+void * memcpy(void *restrict s1, const void *restrict s2, size_t n);
+int get_ipc_path_max_len(void);
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/_cffi.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/_cffi.py
new file mode 100644
index 00000000..c73ebf83
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/_cffi.py
@@ -0,0 +1,127 @@
+# coding: utf-8
+"""The main CFFI wrapping of libzmq"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import json
+import os
+from os.path import dirname, join
+from cffi import FFI
+
+from zmq.utils.constant_names import all_names, no_prefix
+
+
+base_zmq_version = (3,2,2)
+
+def load_compiler_config():
+ """load pyzmq compiler arguments"""
+ import zmq
+ zmq_dir = dirname(zmq.__file__)
+ zmq_parent = dirname(zmq_dir)
+
+ fname = join(zmq_dir, 'utils', 'compiler.json')
+ if os.path.exists(fname):
+ with open(fname) as f:
+ cfg = json.load(f)
+ else:
+ cfg = {}
+
+ cfg.setdefault("include_dirs", [])
+ cfg.setdefault("library_dirs", [])
+ cfg.setdefault("runtime_library_dirs", [])
+ cfg.setdefault("libraries", ["zmq"])
+
+ # cast to str, because cffi can't handle unicode paths (?!)
+ cfg['libraries'] = [str(lib) for lib in cfg['libraries']]
+ for key in ("include_dirs", "library_dirs", "runtime_library_dirs"):
+ # interpret paths relative to parent of zmq (like source tree)
+ abs_paths = []
+ for p in cfg[key]:
+ if p.startswith('zmq'):
+ p = join(zmq_parent, p)
+ abs_paths.append(str(p))
+ cfg[key] = abs_paths
+ return cfg
+
+
+def zmq_version_info():
+ """Get libzmq version as tuple of ints"""
+ major = ffi.new('int*')
+ minor = ffi.new('int*')
+ patch = ffi.new('int*')
+
+ C.zmq_version(major, minor, patch)
+
+ return (int(major[0]), int(minor[0]), int(patch[0]))
+
+
+cfg = load_compiler_config()
+ffi = FFI()
+
+def _make_defines(names):
+ _names = []
+ for name in names:
+ define_line = "#define %s ..." % (name)
+ _names.append(define_line)
+
+ return "\n".join(_names)
+
+c_constant_names = []
+for name in all_names:
+ if no_prefix(name):
+ c_constant_names.append(name)
+ else:
+ c_constant_names.append("ZMQ_" + name)
+
+# load ffi definitions
+here = os.path.dirname(__file__)
+with open(os.path.join(here, '_cdefs.h')) as f:
+ _cdefs = f.read()
+
+with open(os.path.join(here, '_verify.c')) as f:
+ _verify = f.read()
+
+ffi.cdef(_cdefs)
+ffi.cdef(_make_defines(c_constant_names))
+
+try:
+ C = ffi.verify(_verify,
+ modulename='_cffi_ext',
+ libraries=cfg['libraries'],
+ include_dirs=cfg['include_dirs'],
+ library_dirs=cfg['library_dirs'],
+ runtime_library_dirs=cfg['runtime_library_dirs'],
+ )
+ _version_info = zmq_version_info()
+except Exception as e:
+ raise ImportError("PyZMQ CFFI backend couldn't find zeromq: %s\n"
+ "Please check that you have zeromq headers and libraries." % e)
+
+if _version_info < (3,2,2):
+ raise ImportError("PyZMQ CFFI backend requires zeromq >= 3.2.2,"
+ " but found %i.%i.%i" % _version_info
+ )
+
+nsp = new_sizet_pointer = lambda length: ffi.new('size_t*', length)
+
+new_uint64_pointer = lambda: (ffi.new('uint64_t*'),
+ nsp(ffi.sizeof('uint64_t')))
+new_int64_pointer = lambda: (ffi.new('int64_t*'),
+ nsp(ffi.sizeof('int64_t')))
+new_int_pointer = lambda: (ffi.new('int*'),
+ nsp(ffi.sizeof('int')))
+new_binary_data = lambda length: (ffi.new('char[%d]' % (length)),
+ nsp(ffi.sizeof('char') * length))
+
+value_uint64_pointer = lambda val : (ffi.new('uint64_t*', val),
+ ffi.sizeof('uint64_t'))
+value_int64_pointer = lambda val: (ffi.new('int64_t*', val),
+ ffi.sizeof('int64_t'))
+value_int_pointer = lambda val: (ffi.new('int*', val),
+ ffi.sizeof('int'))
+value_binary_data = lambda val, length: (ffi.new('char[%d]' % (length + 1), val),
+ ffi.sizeof('char') * length)
+
+IPC_PATH_MAX_LEN = C.get_ipc_path_max_len()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/_poll.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/_poll.py
new file mode 100644
index 00000000..9bca34ca
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/_poll.py
@@ -0,0 +1,56 @@
+# coding: utf-8
+"""zmq poll function"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from ._cffi import C, ffi, zmq_version_info
+
+from .constants import *
+
+from zmq.error import _check_rc
+
+
+def _make_zmq_pollitem(socket, flags):
+ zmq_socket = socket._zmq_socket
+ zmq_pollitem = ffi.new('zmq_pollitem_t*')
+ zmq_pollitem.socket = zmq_socket
+ zmq_pollitem.fd = 0
+ zmq_pollitem.events = flags
+ zmq_pollitem.revents = 0
+ return zmq_pollitem[0]
+
+def _make_zmq_pollitem_fromfd(socket_fd, flags):
+ zmq_pollitem = ffi.new('zmq_pollitem_t*')
+ zmq_pollitem.socket = ffi.NULL
+ zmq_pollitem.fd = socket_fd
+ zmq_pollitem.events = flags
+ zmq_pollitem.revents = 0
+ return zmq_pollitem[0]
+
+def zmq_poll(sockets, timeout):
+ cffi_pollitem_list = []
+ low_level_to_socket_obj = {}
+ for item in sockets:
+ if isinstance(item[0], int):
+ low_level_to_socket_obj[item[0]] = item
+ cffi_pollitem_list.append(_make_zmq_pollitem_fromfd(item[0], item[1]))
+ else:
+ low_level_to_socket_obj[item[0]._zmq_socket] = item
+ cffi_pollitem_list.append(_make_zmq_pollitem(item[0], item[1]))
+ items = ffi.new('zmq_pollitem_t[]', cffi_pollitem_list)
+ list_length = ffi.cast('int', len(cffi_pollitem_list))
+ c_timeout = ffi.cast('long', timeout)
+ rc = C.zmq_poll(items, list_length, c_timeout)
+ _check_rc(rc)
+ result = []
+ for index in range(len(items)):
+ if not items[index].socket == ffi.NULL:
+ if items[index].revents > 0:
+ result.append((low_level_to_socket_obj[items[index].socket][0],
+ items[index].revents))
+ else:
+ result.append((items[index].fd, items[index].revents))
+ return result
+
+__all__ = ['zmq_poll']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/_verify.c b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/_verify.c
new file mode 100644
index 00000000..547840eb
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/_verify.c
@@ -0,0 +1,12 @@
+#include <stdio.h>
+#include <sys/un.h>
+#include <string.h>
+
+#include <zmq.h>
+#include <zmq_utils.h>
+#include "zmq_compat.h"
+
+int get_ipc_path_max_len(void) {
+ struct sockaddr_un *dummy;
+ return sizeof(dummy->sun_path) - 1;
+}
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/constants.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/constants.py
new file mode 100644
index 00000000..ee293e74
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/constants.py
@@ -0,0 +1,15 @@
+# coding: utf-8
+"""zmq constants"""
+
+from ._cffi import C, c_constant_names
+from zmq.utils.constant_names import all_names
+
+g = globals()
+for cname in c_constant_names:
+ if cname.startswith("ZMQ_"):
+ name = cname[4:]
+ else:
+ name = cname
+ g[name] = getattr(C, cname)
+
+__all__ = all_names
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/context.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/context.py
new file mode 100644
index 00000000..16a7b257
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/context.py
@@ -0,0 +1,100 @@
+# coding: utf-8
+"""zmq Context class"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import weakref
+
+from ._cffi import C, ffi
+
+from .socket import *
+from .constants import *
+
+from zmq.error import ZMQError, _check_rc
+
+class Context(object):
+ _zmq_ctx = None
+ _iothreads = None
+ _closed = None
+ _sockets = None
+ _shadow = False
+
+ def __init__(self, io_threads=1, shadow=None):
+
+ if shadow:
+ self._zmq_ctx = ffi.cast("void *", shadow)
+ self._shadow = True
+ else:
+ self._shadow = False
+ if not io_threads >= 0:
+ raise ZMQError(EINVAL)
+
+ self._zmq_ctx = C.zmq_ctx_new()
+ if self._zmq_ctx == ffi.NULL:
+ raise ZMQError(C.zmq_errno())
+ if not shadow:
+ C.zmq_ctx_set(self._zmq_ctx, IO_THREADS, io_threads)
+ self._closed = False
+ self._sockets = set()
+
+ @property
+ def underlying(self):
+ """The address of the underlying libzmq context"""
+ return int(ffi.cast('size_t', self._zmq_ctx))
+
+ @property
+ def closed(self):
+ return self._closed
+
+ def _add_socket(self, socket):
+ ref = weakref.ref(socket)
+ self._sockets.add(ref)
+ return ref
+
+ def _rm_socket(self, ref):
+ if ref in self._sockets:
+ self._sockets.remove(ref)
+
+ def set(self, option, value):
+ """set a context option
+
+ see zmq_ctx_set
+ """
+ rc = C.zmq_ctx_set(self._zmq_ctx, option, value)
+ _check_rc(rc)
+
+ def get(self, option):
+ """get context option
+
+ see zmq_ctx_get
+ """
+ rc = C.zmq_ctx_get(self._zmq_ctx, option)
+ _check_rc(rc)
+ return rc
+
+ def term(self):
+ if self.closed:
+ return
+
+ C.zmq_ctx_destroy(self._zmq_ctx)
+
+ self._zmq_ctx = None
+ self._closed = True
+
+ def destroy(self, linger=None):
+ if self.closed:
+ return
+
+ sockets = self._sockets
+ self._sockets = set()
+ for s in sockets:
+ s = s()
+ if s and not s.closed:
+ if linger:
+ s.setsockopt(LINGER, linger)
+ s.close()
+
+ self.term()
+
+__all__ = ['Context']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/devices.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/devices.py
new file mode 100644
index 00000000..c7a514a8
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/devices.py
@@ -0,0 +1,24 @@
+# coding: utf-8
+"""zmq device functions"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from ._cffi import C, ffi, zmq_version_info
+from .socket import Socket
+from zmq.error import ZMQError, _check_rc
+
+def device(device_type, frontend, backend):
+ rc = C.zmq_proxy(frontend._zmq_socket, backend._zmq_socket, ffi.NULL)
+ _check_rc(rc)
+
+def proxy(frontend, backend, capture=None):
+ if isinstance(capture, Socket):
+ capture = capture._zmq_socket
+ else:
+ capture = ffi.NULL
+
+ rc = C.zmq_proxy(frontend._zmq_socket, backend._zmq_socket, capture)
+ _check_rc(rc)
+
+__all__ = ['device', 'proxy']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/error.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/error.py
new file mode 100644
index 00000000..3bb64de0
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/error.py
@@ -0,0 +1,13 @@
+"""zmq error functions"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from ._cffi import C, ffi
+
+def strerror(errno):
+ return ffi.string(C.zmq_strerror(errno))
+
+zmq_errno = C.zmq_errno
+
+__all__ = ['strerror', 'zmq_errno']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/message.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/message.py
new file mode 100644
index 00000000..c35decb6
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/message.py
@@ -0,0 +1,69 @@
+"""Dummy Frame object"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from ._cffi import ffi, C
+
+import zmq
+from zmq.utils.strtypes import unicode
+
+try:
+ view = memoryview
+except NameError:
+ view = buffer
+
+_content = lambda x: x.tobytes() if type(x) == memoryview else x
+
+class Frame(object):
+ _data = None
+ tracker = None
+ closed = False
+ more = False
+ buffer = None
+
+
+ def __init__(self, data, track=False):
+ try:
+ view(data)
+ except TypeError:
+ raise
+
+ self._data = data
+
+ if isinstance(data, unicode):
+ raise TypeError("Unicode objects not allowed. Only: str/bytes, " +
+ "buffer interfaces.")
+
+ self.more = False
+ self.tracker = None
+ self.closed = False
+ if track:
+ self.tracker = zmq.MessageTracker()
+
+ self.buffer = view(self.bytes)
+
+ @property
+ def bytes(self):
+ data = _content(self._data)
+ return data
+
+ def __len__(self):
+ return len(self.bytes)
+
+ def __eq__(self, other):
+ return self.bytes == _content(other)
+
+ def __str__(self):
+ if str is unicode:
+ return self.bytes.decode()
+ else:
+ return self.bytes
+
+ @property
+ def done(self):
+ return True
+
+Message = Frame
+
+__all__ = ['Frame', 'Message']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/socket.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/socket.py
new file mode 100644
index 00000000..3c427739
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/socket.py
@@ -0,0 +1,244 @@
+# coding: utf-8
+"""zmq Socket class"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import random
+import codecs
+
+import errno as errno_mod
+
+from ._cffi import (C, ffi, new_uint64_pointer, new_int64_pointer,
+ new_int_pointer, new_binary_data, value_uint64_pointer,
+ value_int64_pointer, value_int_pointer, value_binary_data,
+ IPC_PATH_MAX_LEN)
+
+from .message import Frame
+from .constants import *
+
+import zmq
+from zmq.error import ZMQError, _check_rc, _check_version
+from zmq.utils.strtypes import unicode
+
+
+def new_pointer_from_opt(option, length=0):
+ from zmq.sugar.constants import (
+ int64_sockopts, bytes_sockopts,
+ )
+ if option in int64_sockopts:
+ return new_int64_pointer()
+ elif option in bytes_sockopts:
+ return new_binary_data(length)
+ else:
+ # default
+ return new_int_pointer()
+
+def value_from_opt_pointer(option, opt_pointer, length=0):
+ from zmq.sugar.constants import (
+ int64_sockopts, bytes_sockopts,
+ )
+ if option in int64_sockopts:
+ return int(opt_pointer[0])
+ elif option in bytes_sockopts:
+ return ffi.buffer(opt_pointer, length)[:]
+ else:
+ return int(opt_pointer[0])
+
+def initialize_opt_pointer(option, value, length=0):
+ from zmq.sugar.constants import (
+ int64_sockopts, bytes_sockopts,
+ )
+ if option in int64_sockopts:
+ return value_int64_pointer(value)
+ elif option in bytes_sockopts:
+ return value_binary_data(value, length)
+ else:
+ return value_int_pointer(value)
+
+
+class Socket(object):
+ context = None
+ socket_type = None
+ _zmq_socket = None
+ _closed = None
+ _ref = None
+ _shadow = False
+
+ def __init__(self, context=None, socket_type=None, shadow=None):
+ self.context = context
+ if shadow is not None:
+ self._zmq_socket = ffi.cast("void *", shadow)
+ self._shadow = True
+ else:
+ self._shadow = False
+ self._zmq_socket = C.zmq_socket(context._zmq_ctx, socket_type)
+ if self._zmq_socket == ffi.NULL:
+ raise ZMQError()
+ self._closed = False
+ if context:
+ self._ref = context._add_socket(self)
+
+ @property
+ def underlying(self):
+ """The address of the underlying libzmq socket"""
+ return int(ffi.cast('size_t', self._zmq_socket))
+
+ @property
+ def closed(self):
+ return self._closed
+
+ def close(self, linger=None):
+ rc = 0
+ if not self._closed and hasattr(self, '_zmq_socket'):
+ if self._zmq_socket is not None:
+ rc = C.zmq_close(self._zmq_socket)
+ self._closed = True
+ if self.context:
+ self.context._rm_socket(self._ref)
+ return rc
+
+ def bind(self, address):
+ if isinstance(address, unicode):
+ address = address.encode('utf8')
+ rc = C.zmq_bind(self._zmq_socket, address)
+ if rc < 0:
+ if IPC_PATH_MAX_LEN and C.zmq_errno() == errno_mod.ENAMETOOLONG:
+ # py3compat: address is bytes, but msg wants str
+ if str is unicode:
+ address = address.decode('utf-8', 'replace')
+ path = address.split('://', 1)[-1]
+ msg = ('ipc path "{0}" is longer than {1} '
+ 'characters (sizeof(sockaddr_un.sun_path)).'
+ .format(path, IPC_PATH_MAX_LEN))
+ raise ZMQError(C.zmq_errno(), msg=msg)
+ else:
+ _check_rc(rc)
+
+ def unbind(self, address):
+ _check_version((3,2), "unbind")
+ if isinstance(address, unicode):
+ address = address.encode('utf8')
+ rc = C.zmq_unbind(self._zmq_socket, address)
+ _check_rc(rc)
+
+ def connect(self, address):
+ if isinstance(address, unicode):
+ address = address.encode('utf8')
+ rc = C.zmq_connect(self._zmq_socket, address)
+ _check_rc(rc)
+
+ def disconnect(self, address):
+ _check_version((3,2), "disconnect")
+ if isinstance(address, unicode):
+ address = address.encode('utf8')
+ rc = C.zmq_disconnect(self._zmq_socket, address)
+ _check_rc(rc)
+
+ def set(self, option, value):
+ length = None
+ if isinstance(value, unicode):
+ raise TypeError("unicode not allowed, use bytes")
+
+ if isinstance(value, bytes):
+ if option not in zmq.constants.bytes_sockopts:
+ raise TypeError("not a bytes sockopt: %s" % option)
+ length = len(value)
+
+ c_data = initialize_opt_pointer(option, value, length)
+
+ c_value_pointer = c_data[0]
+ c_sizet = c_data[1]
+
+ rc = C.zmq_setsockopt(self._zmq_socket,
+ option,
+ ffi.cast('void*', c_value_pointer),
+ c_sizet)
+ _check_rc(rc)
+
+ def get(self, option):
+ c_data = new_pointer_from_opt(option, length=255)
+
+ c_value_pointer = c_data[0]
+ c_sizet_pointer = c_data[1]
+
+ rc = C.zmq_getsockopt(self._zmq_socket,
+ option,
+ c_value_pointer,
+ c_sizet_pointer)
+ _check_rc(rc)
+
+ sz = c_sizet_pointer[0]
+ v = value_from_opt_pointer(option, c_value_pointer, sz)
+ if option != zmq.IDENTITY and option in zmq.constants.bytes_sockopts and v.endswith(b'\0'):
+ v = v[:-1]
+ return v
+
+ def send(self, message, flags=0, copy=False, track=False):
+ if isinstance(message, unicode):
+ raise TypeError("Message must be in bytes, not an unicode Object")
+
+ if isinstance(message, Frame):
+ message = message.bytes
+
+ zmq_msg = ffi.new('zmq_msg_t*')
+ c_message = ffi.new('char[]', message)
+ rc = C.zmq_msg_init_size(zmq_msg, len(message))
+ C.memcpy(C.zmq_msg_data(zmq_msg), c_message, len(message))
+
+ rc = C.zmq_msg_send(zmq_msg, self._zmq_socket, flags)
+ C.zmq_msg_close(zmq_msg)
+ _check_rc(rc)
+
+ if track:
+ return zmq.MessageTracker()
+
+ def recv(self, flags=0, copy=True, track=False):
+ zmq_msg = ffi.new('zmq_msg_t*')
+ C.zmq_msg_init(zmq_msg)
+
+ rc = C.zmq_msg_recv(zmq_msg, self._zmq_socket, flags)
+
+ if rc < 0:
+ C.zmq_msg_close(zmq_msg)
+ _check_rc(rc)
+
+ _buffer = ffi.buffer(C.zmq_msg_data(zmq_msg), C.zmq_msg_size(zmq_msg))
+ value = _buffer[:]
+ C.zmq_msg_close(zmq_msg)
+
+ frame = Frame(value, track=track)
+ frame.more = self.getsockopt(RCVMORE)
+
+ if copy:
+ return frame.bytes
+ else:
+ return frame
+
+ def monitor(self, addr, events=-1):
+ """s.monitor(addr, flags)
+
+ Start publishing socket events on inproc.
+ See libzmq docs for zmq_monitor for details.
+
+ Note: requires libzmq >= 3.2
+
+ Parameters
+ ----------
+ addr : str
+ The inproc url used for monitoring. Passing None as
+ the addr will cause an existing socket monitor to be
+ deregistered.
+ events : int [default: zmq.EVENT_ALL]
+ The zmq event bitmask for which events will be sent to the monitor.
+ """
+
+ _check_version((3,2), "monitor")
+ if events < 0:
+ events = zmq.EVENT_ALL
+ if addr is None:
+ addr = ffi.NULL
+ rc = C.zmq_socket_monitor(self._zmq_socket, addr, events)
+
+
+__all__ = ['Socket', 'IPC_PATH_MAX_LEN']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/utils.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/utils.py
new file mode 100644
index 00000000..fde7827b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cffi/utils.py
@@ -0,0 +1,62 @@
+# coding: utf-8
+"""miscellaneous zmq_utils wrapping"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from ._cffi import ffi, C
+
+from zmq.error import ZMQError, _check_rc, _check_version
+from zmq.utils.strtypes import unicode
+
+def has(capability):
+ """Check for zmq capability by name (e.g. 'ipc', 'curve')
+
+ .. versionadded:: libzmq-4.1
+ .. versionadded:: 14.1
+ """
+ _check_version((4,1), 'zmq.has')
+ if isinstance(capability, unicode):
+ capability = capability.encode('utf8')
+ return bool(C.zmq_has(capability))
+
+def curve_keypair():
+ """generate a Z85 keypair for use with zmq.CURVE security
+
+ Requires libzmq (≥ 4.0) to have been linked with libsodium.
+
+ Returns
+ -------
+ (public, secret) : two bytestrings
+ The public and private keypair as 40 byte z85-encoded bytestrings.
+ """
+ _check_version((3,2), "monitor")
+ public = ffi.new('char[64]')
+ private = ffi.new('char[64]')
+ rc = C.zmq_curve_keypair(public, private)
+ _check_rc(rc)
+ return ffi.buffer(public)[:40], ffi.buffer(private)[:40]
+
+
+class Stopwatch(object):
+ def __init__(self):
+ self.watch = ffi.NULL
+
+ def start(self):
+ if self.watch == ffi.NULL:
+ self.watch = C.zmq_stopwatch_start()
+ else:
+ raise ZMQError('Stopwatch is already runing.')
+
+ def stop(self):
+ if self.watch == ffi.NULL:
+ raise ZMQError('Must start the Stopwatch before calling stop.')
+ else:
+ time = C.zmq_stopwatch_stop(self.watch)
+ self.watch = ffi.NULL
+ return time
+
+ def sleep(self, seconds):
+ C.zmq_sleep(seconds)
+
+__all__ = ['has', 'curve_keypair', 'Stopwatch']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/__init__.py
new file mode 100644
index 00000000..e5358185
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/__init__.py
@@ -0,0 +1,23 @@
+"""Python bindings for core 0MQ objects."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Lesser GNU Public License (LGPL).
+
+from . import (constants, error, message, context,
+ socket, utils, _poll, _version, _device )
+
+__all__ = []
+for submod in (constants, error, message, context,
+ socket, utils, _poll, _version, _device):
+ __all__.extend(submod.__all__)
+
+from .constants import *
+from .error import *
+from .message import *
+from .context import *
+from .socket import *
+from ._poll import *
+from .utils import *
+from ._device import *
+from ._version import *
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/_device.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/_device.cpython-34m.so
new file mode 100644
index 00000000..247f8a55
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/_device.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/_poll.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/_poll.cpython-34m.so
new file mode 100644
index 00000000..aff94217
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/_poll.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/_version.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/_version.cpython-34m.so
new file mode 100644
index 00000000..8ae81d79
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/_version.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/checkrc.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/checkrc.pxd
new file mode 100644
index 00000000..3bf69fc3
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/checkrc.pxd
@@ -0,0 +1,23 @@
+from libc.errno cimport EINTR, EAGAIN
+from cpython cimport PyErr_CheckSignals
+from libzmq cimport zmq_errno, ZMQ_ETERM
+
+cdef inline int _check_rc(int rc) except -1:
+ """internal utility for checking zmq return condition
+
+ and raising the appropriate Exception class
+ """
+ cdef int errno = zmq_errno()
+ PyErr_CheckSignals()
+ if rc < 0:
+ if errno == EAGAIN:
+ from zmq.error import Again
+ raise Again(errno)
+ elif errno == ZMQ_ETERM:
+ from zmq.error import ContextTerminated
+ raise ContextTerminated(errno)
+ else:
+ from zmq.error import ZMQError
+ raise ZMQError(errno)
+ # return -1
+ return 0
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/constants.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/constants.cpython-34m.so
new file mode 100644
index 00000000..2f89225f
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/constants.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/context.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/context.cpython-34m.so
new file mode 100644
index 00000000..35a33ecc
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/context.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/context.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/context.pxd
new file mode 100644
index 00000000..9c9267a5
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/context.pxd
@@ -0,0 +1,41 @@
+"""0MQ Context class declaration."""
+
+#
+# Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley
+#
+# This file is part of pyzmq.
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+cdef class Context:
+
+ cdef object __weakref__ # enable weakref
+ cdef void *handle # The C handle for the underlying zmq object.
+ cdef bint _shadow # whether the Context is a shadow wrapper of another
+    cdef void **_sockets            # A C-array containing socket handles
+ cdef size_t _n_sockets # the number of sockets
+ cdef size_t _max_sockets # the size of the _sockets array
+ cdef int _pid # the pid of the process which created me (for fork safety)
+
+ cdef public bint closed # bool property for a closed context.
+ cdef inline int _term(self)
+ # helpers for events on _sockets in Socket.__cinit__()/close()
+ cdef inline void _add_socket(self, void* handle)
+ cdef inline void _remove_socket(self, void* handle)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/error.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/error.cpython-34m.so
new file mode 100644
index 00000000..7774ada6
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/error.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/libzmq.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/libzmq.pxd
new file mode 100644
index 00000000..e42f6d6b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/libzmq.pxd
@@ -0,0 +1,110 @@
+"""All the C imports for 0MQ"""
+
+#
+# Copyright (c) 2010 Brian E. Granger & Min Ragan-Kelley
+#
+# This file is part of pyzmq.
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Import the C header files
+#-----------------------------------------------------------------------------
+
+cdef extern from *:
+ ctypedef void* const_void_ptr "const void *"
+ ctypedef char* const_char_ptr "const char *"
+
+cdef extern from "zmq_compat.h":
+ ctypedef signed long long int64_t "pyzmq_int64_t"
+
+include "constant_enums.pxi"
+
+cdef extern from "zmq.h" nogil:
+
+ void _zmq_version "zmq_version"(int *major, int *minor, int *patch)
+
+ ctypedef int fd_t "ZMQ_FD_T"
+
+ enum: errno
+ char *zmq_strerror (int errnum)
+ int zmq_errno()
+
+ void *zmq_ctx_new ()
+ int zmq_ctx_destroy (void *context)
+ int zmq_ctx_set (void *context, int option, int optval)
+ int zmq_ctx_get (void *context, int option)
+ void *zmq_init (int io_threads)
+ int zmq_term (void *context)
+
+ # blackbox def for zmq_msg_t
+ ctypedef void * zmq_msg_t "zmq_msg_t"
+
+ ctypedef void zmq_free_fn(void *data, void *hint)
+
+ int zmq_msg_init (zmq_msg_t *msg)
+ int zmq_msg_init_size (zmq_msg_t *msg, size_t size)
+ int zmq_msg_init_data (zmq_msg_t *msg, void *data,
+ size_t size, zmq_free_fn *ffn, void *hint)
+ int zmq_msg_send (zmq_msg_t *msg, void *s, int flags)
+ int zmq_msg_recv (zmq_msg_t *msg, void *s, int flags)
+ int zmq_msg_close (zmq_msg_t *msg)
+ int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src)
+ int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src)
+ void *zmq_msg_data (zmq_msg_t *msg)
+ size_t zmq_msg_size (zmq_msg_t *msg)
+ int zmq_msg_more (zmq_msg_t *msg)
+ int zmq_msg_get (zmq_msg_t *msg, int option)
+ int zmq_msg_set (zmq_msg_t *msg, int option, int optval)
+ const_char_ptr zmq_msg_gets (zmq_msg_t *msg, const_char_ptr property)
+ int zmq_has (const_char_ptr capability)
+
+ void *zmq_socket (void *context, int type)
+ int zmq_close (void *s)
+ int zmq_setsockopt (void *s, int option, void *optval, size_t optvallen)
+ int zmq_getsockopt (void *s, int option, void *optval, size_t *optvallen)
+ int zmq_bind (void *s, char *addr)
+ int zmq_connect (void *s, char *addr)
+ int zmq_unbind (void *s, char *addr)
+ int zmq_disconnect (void *s, char *addr)
+
+ int zmq_socket_monitor (void *s, char *addr, int flags)
+
+ # send/recv
+ int zmq_sendbuf (void *s, const_void_ptr buf, size_t n, int flags)
+ int zmq_recvbuf (void *s, void *buf, size_t n, int flags)
+
+ ctypedef struct zmq_pollitem_t:
+ void *socket
+ int fd
+ short events
+ short revents
+
+ int zmq_poll (zmq_pollitem_t *items, int nitems, long timeout)
+
+ int zmq_device (int device_, void *insocket_, void *outsocket_)
+ int zmq_proxy (void *frontend, void *backend, void *capture)
+
+cdef extern from "zmq_utils.h" nogil:
+
+ void *zmq_stopwatch_start ()
+ unsigned long zmq_stopwatch_stop (void *watch_)
+ void zmq_sleep (int seconds_)
+ int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/message.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/message.cpython-34m.so
new file mode 100644
index 00000000..2c3ca77f
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/message.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/message.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/message.pxd
new file mode 100644
index 00000000..4781195f
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/message.pxd
@@ -0,0 +1,63 @@
+"""0MQ Message related class declarations."""
+
+#
+# Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley
+#
+# This file is part of pyzmq.
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from cpython cimport PyBytes_FromStringAndSize
+
+from libzmq cimport zmq_msg_t, zmq_msg_data, zmq_msg_size
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+cdef class MessageTracker(object):
+
+ cdef set events # Message Event objects to track.
+ cdef set peers # Other Message or MessageTracker objects.
+
+
+cdef class Frame:
+
+ cdef zmq_msg_t zmq_msg
+ cdef object _data # The actual message data as a Python object.
+ cdef object _buffer # A Python Buffer/View of the message contents
+ cdef object _bytes # A bytes/str copy of the message.
+ cdef bint _failed_init # Flag to handle failed zmq_msg_init
+ cdef public object tracker_event # Event for use with zmq_free_fn.
+ cdef public object tracker # MessageTracker object.
+ cdef public bint more # whether RCVMORE was set
+
+ cdef Frame fast_copy(self) # Create shallow copy of Message object.
+ cdef object _getbuffer(self) # Construct self._buffer.
+
+
+cdef inline object copy_zmq_msg_bytes(zmq_msg_t *zmq_msg):
+ """ Copy the data from a zmq_msg_t """
+ cdef char *data_c = NULL
+ cdef Py_ssize_t data_len_c
+ data_c = <char *>zmq_msg_data(zmq_msg)
+ data_len_c = zmq_msg_size(zmq_msg)
+ return PyBytes_FromStringAndSize(data_c, data_len_c)
+
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/socket.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/socket.cpython-34m.so
new file mode 100644
index 00000000..daca1379
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/socket.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/socket.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/socket.pxd
new file mode 100644
index 00000000..b8a331e2
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/socket.pxd
@@ -0,0 +1,47 @@
+"""0MQ Socket class declaration."""
+
+#
+# Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley
+#
+# This file is part of pyzmq.
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from context cimport Context
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+
+cdef class Socket:
+
+ cdef object __weakref__ # enable weakref
+ cdef void *handle # The C handle for the underlying zmq object.
+ cdef bint _shadow # whether the Socket is a shadow wrapper of another
+ # Hold on to a reference to the context to make sure it is not garbage
+ # collected until the socket it done with it.
+ cdef public Context context # The zmq Context object that owns this.
+ cdef public bint _closed # bool property for a closed socket.
+ cdef int _pid # the pid of the process which created me (for fork safety)
+
+ # cpdef methods for direct-cython access:
+ cpdef object send(self, object data, int flags=*, copy=*, track=*)
+ cpdef object recv(self, int flags=*, copy=*, track=*)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/utils.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/utils.cpython-34m.so
new file mode 100644
index 00000000..74ed0a10
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/utils.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/utils.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/utils.pxd
new file mode 100644
index 00000000..1d7117f1
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/cython/utils.pxd
@@ -0,0 +1,29 @@
+"""Wrap zmq_utils.h"""
+
+#
+# Copyright (c) 2010 Brian E. Granger & Min Ragan-Kelley
+#
+# This file is part of pyzmq.
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+
+cdef class Stopwatch:
+ cdef void *watch # The C handle for the underlying zmq object
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/select.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/select.py
new file mode 100644
index 00000000..0a2e09a2
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/backend/select.py
@@ -0,0 +1,39 @@
+"""Import basic exposure of libzmq C API as a backend"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+public_api = [
+ 'Context',
+ 'Socket',
+ 'Frame',
+ 'Message',
+ 'Stopwatch',
+ 'device',
+ 'proxy',
+ 'zmq_poll',
+ 'strerror',
+ 'zmq_errno',
+ 'has',
+ 'curve_keypair',
+ 'constants',
+ 'zmq_version_info',
+ 'IPC_PATH_MAX_LEN',
+]
+
+def select_backend(name):
+ """Select the pyzmq backend"""
+ try:
+ mod = __import__(name, fromlist=public_api)
+ except ImportError:
+ raise
+ except Exception as e:
+ import sys
+ from zmq.utils.sixcerpt import reraise
+ exc_info = sys.exc_info()
+ reraise(ImportError, ImportError("Importing %s failed with %s" % (name, e)), exc_info[2])
+
+ ns = {}
+ for key in public_api:
+ ns[key] = getattr(mod, key)
+ return ns
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/__init__.py
new file mode 100644
index 00000000..23715963
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/__init__.py
@@ -0,0 +1,16 @@
+"""0MQ Device classes for running in background threads or processes."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from zmq import device
+from zmq.devices import basedevice, proxydevice, monitoredqueue, monitoredqueuedevice
+
+from zmq.devices.basedevice import *
+from zmq.devices.proxydevice import *
+from zmq.devices.monitoredqueue import *
+from zmq.devices.monitoredqueuedevice import *
+
+__all__ = ['device']
+for submod in (basedevice, proxydevice, monitoredqueue, monitoredqueuedevice):
+ __all__.extend(submod.__all__)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/basedevice.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/basedevice.py
new file mode 100644
index 00000000..7ba1b7ac
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/basedevice.py
@@ -0,0 +1,229 @@
+"""Classes for running 0MQ Devices in the background."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import time
+from threading import Thread
+from multiprocessing import Process
+
+from zmq import device, QUEUE, Context, ETERM, ZMQError
+
+
+class Device:
+ """A 0MQ Device to be run in the background.
+
+ You do not pass Socket instances to this, but rather Socket types::
+
+ Device(device_type, in_socket_type, out_socket_type)
+
+ For instance::
+
+ dev = Device(zmq.QUEUE, zmq.DEALER, zmq.ROUTER)
+
+ Similar to zmq.device, but socket types instead of sockets themselves are
+ passed, and the sockets are created in the work thread, to avoid issues
+ with thread safety. As a result, additional bind_{in|out} and
+ connect_{in|out} methods and setsockopt_{in|out} allow users to specify
+ connections for the sockets.
+
+ Parameters
+ ----------
+ device_type : int
+ The 0MQ Device type
+ {in|out}_type : int
+ zmq socket types, to be passed later to context.socket(). e.g.
+ zmq.PUB, zmq.SUB, zmq.REQ. If out_type is < 0, then in_socket is used
+ for both in_socket and out_socket.
+
+ Methods
+ -------
+ bind_{in_out}(iface)
+ passthrough for ``{in|out}_socket.bind(iface)``, to be called in the thread
+ connect_{in_out}(iface)
+ passthrough for ``{in|out}_socket.connect(iface)``, to be called in the
+ thread
+ setsockopt_{in_out}(opt,value)
+ passthrough for ``{in|out}_socket.setsockopt(opt, value)``, to be called in
+ the thread
+
+ Attributes
+ ----------
+ daemon : int
+ sets whether the thread should be run as a daemon
+ Default is true, because if it is false, the thread will not
+ exit unless it is killed
+ context_factory : callable (class attribute)
+ Function for creating the Context. This will be Context.instance
+ in ThreadDevices, and Context in ProcessDevices. The only reason
+ it is not instance() in ProcessDevices is that there may be a stale
+ Context instance already initialized, and the forked environment
+ should *never* try to use it.
+ """
+
+ context_factory = Context.instance
+ """Callable that returns a context. Typically either Context.instance or Context,
+ depending on whether the device should share the global instance or not.
+ """
+
+ def __init__(self, device_type=QUEUE, in_type=None, out_type=None):
+ self.device_type = device_type
+ if in_type is None:
+ raise TypeError("in_type must be specified")
+ if out_type is None:
+ raise TypeError("out_type must be specified")
+ self.in_type = in_type
+ self.out_type = out_type
+ self._in_binds = []
+ self._in_connects = []
+ self._in_sockopts = []
+ self._out_binds = []
+ self._out_connects = []
+ self._out_sockopts = []
+ self.daemon = True
+ self.done = False
+
+ def bind_in(self, addr):
+ """Enqueue ZMQ address for binding on in_socket.
+
+ See zmq.Socket.bind for details.
+ """
+ self._in_binds.append(addr)
+
+ def connect_in(self, addr):
+ """Enqueue ZMQ address for connecting on in_socket.
+
+ See zmq.Socket.connect for details.
+ """
+ self._in_connects.append(addr)
+
+ def setsockopt_in(self, opt, value):
+ """Enqueue setsockopt(opt, value) for in_socket
+
+ See zmq.Socket.setsockopt for details.
+ """
+ self._in_sockopts.append((opt, value))
+
+ def bind_out(self, addr):
+ """Enqueue ZMQ address for binding on out_socket.
+
+ See zmq.Socket.bind for details.
+ """
+ self._out_binds.append(addr)
+
+ def connect_out(self, addr):
+ """Enqueue ZMQ address for connecting on out_socket.
+
+ See zmq.Socket.connect for details.
+ """
+ self._out_connects.append(addr)
+
+ def setsockopt_out(self, opt, value):
+ """Enqueue setsockopt(opt, value) for out_socket
+
+ See zmq.Socket.setsockopt for details.
+ """
+ self._out_sockopts.append((opt, value))
+
+ def _setup_sockets(self):
+ ctx = self.context_factory()
+
+ self._context = ctx
+
+ # create the sockets
+ ins = ctx.socket(self.in_type)
+ if self.out_type < 0:
+ outs = ins
+ else:
+ outs = ctx.socket(self.out_type)
+
+ # set sockopts (must be done first, in case of zmq.IDENTITY)
+ for opt,value in self._in_sockopts:
+ ins.setsockopt(opt, value)
+ for opt,value in self._out_sockopts:
+ outs.setsockopt(opt, value)
+
+ for iface in self._in_binds:
+ ins.bind(iface)
+ for iface in self._out_binds:
+ outs.bind(iface)
+
+ for iface in self._in_connects:
+ ins.connect(iface)
+ for iface in self._out_connects:
+ outs.connect(iface)
+
+ return ins,outs
+
+ def run_device(self):
+ """The runner method.
+
+ Do not call me directly, instead call ``self.start()``, just like a Thread.
+ """
+ ins,outs = self._setup_sockets()
+ device(self.device_type, ins, outs)
+
+ def run(self):
+ """wrap run_device in try/catch ETERM"""
+ try:
+ self.run_device()
+ except ZMQError as e:
+ if e.errno == ETERM:
+ # silence TERM errors, because this should be a clean shutdown
+ pass
+ else:
+ raise
+ finally:
+ self.done = True
+
+ def start(self):
+ """Start the device. Override me in subclass for other launchers."""
+ return self.run()
+
+ def join(self,timeout=None):
+ """wait for me to finish, like Thread.join.
+
+ Reimplemented appropriately by subclasses."""
+ tic = time.time()
+ toc = tic
+ while not self.done and not (timeout is not None and toc-tic > timeout):
+ time.sleep(.001)
+ toc = time.time()
+
+
+class BackgroundDevice(Device):
+ """Base class for launching Devices in background processes and threads."""
+
+ launcher=None
+ _launch_class=None
+
+ def start(self):
+ self.launcher = self._launch_class(target=self.run)
+ self.launcher.daemon = self.daemon
+ return self.launcher.start()
+
+ def join(self, timeout=None):
+ return self.launcher.join(timeout=timeout)
+
+
+class ThreadDevice(BackgroundDevice):
+ """A Device that will be run in a background Thread.
+
+ See Device for details.
+ """
+ _launch_class=Thread
+
+class ProcessDevice(BackgroundDevice):
+ """A Device that will be run in a background Process.
+
+ See Device for details.
+ """
+ _launch_class=Process
+ context_factory = Context
+ """Callable that returns a context. Typically either Context.instance or Context,
+ depending on whether the device should share the global instance or not.
+ """
+
+
+__all__ = ['Device', 'ThreadDevice', 'ProcessDevice']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/monitoredqueue.cpython-34m.so b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/monitoredqueue.cpython-34m.so
new file mode 100644
index 00000000..fdc7655a
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/monitoredqueue.cpython-34m.so
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/monitoredqueue.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/monitoredqueue.pxd
new file mode 100644
index 00000000..1e26ed86
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/monitoredqueue.pxd
@@ -0,0 +1,177 @@
+"""MonitoredQueue class declarations.
+
+Authors
+-------
+* MinRK
+* Brian Granger
+"""
+
+#
+# Copyright (c) 2010 Min Ragan-Kelley, Brian Granger
+#
+# This file is part of pyzmq, but is derived and adapted from zmq_queue.cpp
+# originally from libzmq-2.1.6, used under LGPLv3
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from libzmq cimport *
+
+#-----------------------------------------------------------------------------
+# MonitoredQueue C functions
+#-----------------------------------------------------------------------------
+
+cdef inline int _relay(void *insocket_, void *outsocket_, void *sidesocket_,
+ zmq_msg_t msg, zmq_msg_t side_msg, zmq_msg_t id_msg,
+ bint swap_ids) nogil:
+ cdef int rc
+ cdef int64_t flag_2
+ cdef int flag_3
+ cdef int flags
+ cdef bint more
+ cdef size_t flagsz
+ cdef void * flag_ptr
+
+ if ZMQ_VERSION_MAJOR < 3:
+ flagsz = sizeof (int64_t)
+ flag_ptr = &flag_2
+ else:
+ flagsz = sizeof (int)
+ flag_ptr = &flag_3
+
+ if swap_ids:# both router, must send second identity first
+ # recv two ids into msg, id_msg
+ rc = zmq_msg_recv(&msg, insocket_, 0)
+ if rc < 0: return rc
+
+ rc = zmq_msg_recv(&id_msg, insocket_, 0)
+ if rc < 0: return rc
+
+ # send second id (id_msg) first
+ #!!!! always send a copy before the original !!!!
+ rc = zmq_msg_copy(&side_msg, &id_msg)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&side_msg, outsocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&id_msg, sidesocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ # send first id (msg) second
+ rc = zmq_msg_copy(&side_msg, &msg)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&side_msg, outsocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&msg, sidesocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ while (True):
+ rc = zmq_msg_recv(&msg, insocket_, 0)
+ if rc < 0: return rc
+ # assert (rc == 0)
+ rc = zmq_getsockopt (insocket_, ZMQ_RCVMORE, flag_ptr, &flagsz)
+ if rc < 0: return rc
+ flags = 0
+ if ZMQ_VERSION_MAJOR < 3:
+ if flag_2:
+ flags |= ZMQ_SNDMORE
+ else:
+ if flag_3:
+ flags |= ZMQ_SNDMORE
+ # LABEL has been removed:
+ # rc = zmq_getsockopt (insocket_, ZMQ_RCVLABEL, flag_ptr, &flagsz)
+ # if flag_3:
+ # flags |= ZMQ_SNDLABEL
+ # assert (rc == 0)
+
+ rc = zmq_msg_copy(&side_msg, &msg)
+ if rc < 0: return rc
+ if flags:
+ rc = zmq_msg_send(&side_msg, outsocket_, flags)
+ if rc < 0: return rc
+ # only SNDMORE for side-socket
+ rc = zmq_msg_send(&msg, sidesocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ else:
+ rc = zmq_msg_send(&side_msg, outsocket_, 0)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&msg, sidesocket_, 0)
+ if rc < 0: return rc
+ break
+ return rc
+
+# the MonitoredQueue C function, adapted from zmq::queue.cpp :
+cdef inline int c_monitored_queue (void *insocket_, void *outsocket_,
+ void *sidesocket_, zmq_msg_t *in_msg_ptr,
+ zmq_msg_t *out_msg_ptr, int swap_ids) nogil:
+ """The actual C function for a monitored queue device.
+
+ See ``monitored_queue()`` for details.
+ """
+
+ cdef zmq_msg_t msg
+ cdef int rc = zmq_msg_init (&msg)
+ cdef zmq_msg_t id_msg
+ rc = zmq_msg_init (&id_msg)
+ if rc < 0: return rc
+ cdef zmq_msg_t side_msg
+ rc = zmq_msg_init (&side_msg)
+ if rc < 0: return rc
+
+ cdef zmq_pollitem_t items [2]
+ items [0].socket = insocket_
+ items [0].fd = 0
+ items [0].events = ZMQ_POLLIN
+ items [0].revents = 0
+ items [1].socket = outsocket_
+ items [1].fd = 0
+ items [1].events = ZMQ_POLLIN
+ items [1].revents = 0
+ # I don't think sidesocket should be polled?
+ # items [2].socket = sidesocket_
+ # items [2].fd = 0
+ # items [2].events = ZMQ_POLLIN
+ # items [2].revents = 0
+
+ while (True):
+
+ # // Wait while there are either requests or replies to process.
+ rc = zmq_poll (&items [0], 2, -1)
+ if rc < 0: return rc
+ # // The algorithm below asumes ratio of request and replies processed
+ # // under full load to be 1:1. Although processing requests replies
+ # // first is tempting it is suspectible to DoS attacks (overloading
+ # // the system with unsolicited replies).
+ #
+ # // Process a request.
+ if (items [0].revents & ZMQ_POLLIN):
+ # send in_prefix to side socket
+ rc = zmq_msg_copy(&side_msg, in_msg_ptr)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&side_msg, sidesocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ # relay the rest of the message
+ rc = _relay(insocket_, outsocket_, sidesocket_, msg, side_msg, id_msg, swap_ids)
+ if rc < 0: return rc
+ if (items [1].revents & ZMQ_POLLIN):
+ # send out_prefix to side socket
+ rc = zmq_msg_copy(&side_msg, out_msg_ptr)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&side_msg, sidesocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ # relay the rest of the message
+ rc = _relay(outsocket_, insocket_, sidesocket_, msg, side_msg, id_msg, swap_ids)
+ if rc < 0: return rc
+ return rc
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/monitoredqueue.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/monitoredqueue.py
new file mode 100644
index 00000000..c6d91429
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/monitoredqueue.py
@@ -0,0 +1,37 @@
+"""pure Python monitored_queue function
+
+For use when Cython extension is unavailable (PyPy).
+
+Authors
+-------
+* MinRK
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import zmq
+
+def _relay(ins, outs, sides, prefix, swap_ids):
+ msg = ins.recv_multipart()
+ if swap_ids:
+ msg[:2] = msg[:2][::-1]
+ outs.send_multipart(msg)
+ sides.send_multipart([prefix] + msg)
+
+def monitored_queue(in_socket, out_socket, mon_socket,
+ in_prefix=b'in', out_prefix=b'out'):
+
+ swap_ids = in_socket.type == zmq.ROUTER and out_socket.type == zmq.ROUTER
+
+ poller = zmq.Poller()
+ poller.register(in_socket, zmq.POLLIN)
+ poller.register(out_socket, zmq.POLLIN)
+ while True:
+ events = dict(poller.poll())
+ if in_socket in events:
+ _relay(in_socket, out_socket, mon_socket, in_prefix, swap_ids)
+ if out_socket in events:
+ _relay(out_socket, in_socket, mon_socket, out_prefix, swap_ids)
+
+__all__ = ['monitored_queue']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/monitoredqueuedevice.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/monitoredqueuedevice.py
new file mode 100644
index 00000000..9723f866
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/monitoredqueuedevice.py
@@ -0,0 +1,66 @@
+"""MonitoredQueue classes and functions."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from zmq import ZMQError, PUB
+from zmq.devices.proxydevice import ProxyBase, Proxy, ThreadProxy, ProcessProxy
+from zmq.devices.monitoredqueue import monitored_queue
+
+
+class MonitoredQueueBase(ProxyBase):
+ """Base class for overriding methods."""
+
+ _in_prefix = b''
+ _out_prefix = b''
+
+ def __init__(self, in_type, out_type, mon_type=PUB, in_prefix=b'in', out_prefix=b'out'):
+
+ ProxyBase.__init__(self, in_type=in_type, out_type=out_type, mon_type=mon_type)
+
+ self._in_prefix = in_prefix
+ self._out_prefix = out_prefix
+
+ def run_device(self):
+ ins,outs,mons = self._setup_sockets()
+ monitored_queue(ins, outs, mons, self._in_prefix, self._out_prefix)
+
+
+class MonitoredQueue(MonitoredQueueBase, Proxy):
+ """Class for running monitored_queue in the background.
+
+ See zmq.devices.Device for most of the spec. MonitoredQueue differs from Proxy,
+ only in that it adds a ``prefix`` to messages sent on the monitor socket,
+ with a different prefix for each direction.
+
+ MQ also supports ROUTER on both sides, which zmq.proxy does not.
+
+ If a message arrives on `in_sock`, it will be prefixed with `in_prefix` on the monitor socket.
+ If it arrives on out_sock, it will be prefixed with `out_prefix`.
+
+ A PUB socket is the most logical choice for the mon_socket, but it is not required.
+ """
+ pass
+
+
+class ThreadMonitoredQueue(MonitoredQueueBase, ThreadProxy):
+ """Run zmq.monitored_queue in a background thread.
+
+ See MonitoredQueue and Proxy for details.
+ """
+ pass
+
+
+class ProcessMonitoredQueue(MonitoredQueueBase, ProcessProxy):
+ """Run zmq.monitored_queue in a background thread.
+
+ See MonitoredQueue and Proxy for details.
+ """
+
+
+__all__ = [
+ 'MonitoredQueue',
+ 'ThreadMonitoredQueue',
+ 'ProcessMonitoredQueue'
+]
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/proxydevice.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/proxydevice.py
new file mode 100644
index 00000000..68be3f15
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/devices/proxydevice.py
@@ -0,0 +1,90 @@
+"""Proxy classes and functions."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import zmq
+from zmq.devices.basedevice import Device, ThreadDevice, ProcessDevice
+
+
+class ProxyBase(object):
+ """Base class for overriding methods."""
+
+ def __init__(self, in_type, out_type, mon_type=zmq.PUB):
+
+ Device.__init__(self, in_type=in_type, out_type=out_type)
+ self.mon_type = mon_type
+ self._mon_binds = []
+ self._mon_connects = []
+ self._mon_sockopts = []
+
+ def bind_mon(self, addr):
+ """Enqueue ZMQ address for binding on mon_socket.
+
+ See zmq.Socket.bind for details.
+ """
+ self._mon_binds.append(addr)
+
+ def connect_mon(self, addr):
+ """Enqueue ZMQ address for connecting on mon_socket.
+
+ See zmq.Socket.bind for details.
+ """
+ self._mon_connects.append(addr)
+
+ def setsockopt_mon(self, opt, value):
+ """Enqueue setsockopt(opt, value) for mon_socket
+
+ See zmq.Socket.setsockopt for details.
+ """
+ self._mon_sockopts.append((opt, value))
+
+ def _setup_sockets(self):
+ ins,outs = Device._setup_sockets(self)
+ ctx = self._context
+ mons = ctx.socket(self.mon_type)
+
+ # set sockopts (must be done first, in case of zmq.IDENTITY)
+ for opt,value in self._mon_sockopts:
+ mons.setsockopt(opt, value)
+
+ for iface in self._mon_binds:
+ mons.bind(iface)
+
+ for iface in self._mon_connects:
+ mons.connect(iface)
+
+ return ins,outs,mons
+
+ def run_device(self):
+ ins,outs,mons = self._setup_sockets()
+ zmq.proxy(ins, outs, mons)
+
+class Proxy(ProxyBase, Device):
+ """Threadsafe Proxy object.
+
+ See zmq.devices.Device for most of the spec. This subclass adds a
+ <method>_mon version of each <method>_{in|out} method, for configuring the
+ monitor socket.
+
+ A Proxy is a 3-socket ZMQ Device that functions just like a
+ QUEUE, except each message is also sent out on the monitor socket.
+
+ A PUB socket is the most logical choice for the mon_socket, but it is not required.
+ """
+ pass
+
+class ThreadProxy(ProxyBase, ThreadDevice):
+ """Proxy in a Thread. See Proxy for more."""
+ pass
+
+class ProcessProxy(ProxyBase, ProcessDevice):
+ """Proxy in a Process. See Proxy for more."""
+ pass
+
+
+__all__ = [
+ 'Proxy',
+ 'ThreadProxy',
+ 'ProcessProxy',
+]
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/error.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/error.py
new file mode 100644
index 00000000..48cdaafa
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/error.py
@@ -0,0 +1,164 @@
+"""0MQ Error classes and functions."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+class ZMQBaseError(Exception):
+ """Base exception class for 0MQ errors in Python."""
+ pass
+
+class ZMQError(ZMQBaseError):
+ """Wrap an errno style error.
+
+ Parameters
+ ----------
+ errno : int
+ The ZMQ errno or None. If None, then ``zmq_errno()`` is called and
+ used.
+ msg : string
+ Description of the error or None.
+ """
+ errno = None
+
+ def __init__(self, errno=None, msg=None):
+ """Wrap an errno style error.
+
+ Parameters
+ ----------
+ errno : int
+ The ZMQ errno or None. If None, then ``zmq_errno()`` is called and
+ used.
+ msg : string
+ Description of the error or None.
+ """
+ from zmq.backend import strerror, zmq_errno
+ if errno is None:
+ errno = zmq_errno()
+ if isinstance(errno, int):
+ self.errno = errno
+ if msg is None:
+ self.strerror = strerror(errno)
+ else:
+ self.strerror = msg
+ else:
+ if msg is None:
+ self.strerror = str(errno)
+ else:
+ self.strerror = msg
+ # flush signals, because there could be a SIGINT
+ # waiting to pounce, resulting in uncaught exceptions.
+ # Doing this here means getting SIGINT during a blocking
+ # libzmq call will raise a *catchable* KeyboardInterrupt
+ # PyErr_CheckSignals()
+
+ def __str__(self):
+ return self.strerror
+
+ def __repr__(self):
+ return "ZMQError('%s')"%self.strerror
+
+
+class ZMQBindError(ZMQBaseError):
+ """An error for ``Socket.bind_to_random_port()``.
+
+ See Also
+ --------
+ .Socket.bind_to_random_port
+ """
+ pass
+
+
+class NotDone(ZMQBaseError):
+ """Raised when timeout is reached while waiting for 0MQ to finish with a Message
+
+ See Also
+ --------
+ .MessageTracker.wait : object for tracking when ZeroMQ is done
+ """
+ pass
+
+
+class ContextTerminated(ZMQError):
+ """Wrapper for zmq.ETERM
+
+ .. versionadded:: 13.0
+ """
+ pass
+
+
+class Again(ZMQError):
+ """Wrapper for zmq.EAGAIN
+
+ .. versionadded:: 13.0
+ """
+ pass
+
+
+def _check_rc(rc, errno=None):
+ """internal utility for checking zmq return condition
+
+ and raising the appropriate Exception class
+ """
+ if rc < 0:
+ from zmq.backend import zmq_errno
+ if errno is None:
+ errno = zmq_errno()
+ from zmq import EAGAIN, ETERM
+ if errno == EAGAIN:
+ raise Again(errno)
+ elif errno == ETERM:
+ raise ContextTerminated(errno)
+ else:
+ raise ZMQError(errno)
+
+_zmq_version_info = None
+_zmq_version = None
+
+class ZMQVersionError(NotImplementedError):
+ """Raised when a feature is not provided by the linked version of libzmq.
+
+ .. versionadded:: 14.2
+ """
+ min_version = None
+ def __init__(self, min_version, msg='Feature'):
+ global _zmq_version
+ if _zmq_version is None:
+ from zmq import zmq_version
+ _zmq_version = zmq_version()
+ self.msg = msg
+ self.min_version = min_version
+ self.version = _zmq_version
+
+ def __repr__(self):
+ return "ZMQVersionError('%s')" % str(self)
+
+ def __str__(self):
+ return "%s requires libzmq >= %s, have %s" % (self.msg, self.min_version, self.version)
+
+
+def _check_version(min_version_info, msg='Feature'):
+ """Check for libzmq
+
+ raises ZMQVersionError if current zmq version is not at least min_version
+
+ min_version_info is a tuple of integers, and will be compared against zmq.zmq_version_info().
+ """
+ global _zmq_version_info
+ if _zmq_version_info is None:
+ from zmq import zmq_version_info
+ _zmq_version_info = zmq_version_info()
+ if _zmq_version_info < min_version_info:
+ min_version = '.'.join(str(v) for v in min_version_info)
+ raise ZMQVersionError(min_version, msg)
+
+
+__all__ = [
+ 'ZMQBaseError',
+ 'ZMQBindError',
+ 'ZMQError',
+ 'NotDone',
+ 'ContextTerminated',
+ 'Again',
+ 'ZMQVersionError',
+]
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/__init__.py
new file mode 100644
index 00000000..568e8e8d
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/__init__.py
@@ -0,0 +1,5 @@
+"""A Tornado based event loop for PyZMQ."""
+
+from zmq.eventloop.ioloop import IOLoop
+
+__all__ = ['IOLoop'] \ No newline at end of file
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/ioloop.py
new file mode 100644
index 00000000..35f4c418
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/ioloop.py
@@ -0,0 +1,193 @@
+# coding: utf-8
+"""tornado IOLoop API with zmq compatibility
+
+If you have tornado ≥ 3.0, this is a subclass of tornado's IOLoop,
+otherwise we ship a minimal subset of tornado in zmq.eventloop.minitornado.
+
+The minimal shipped version of tornado's IOLoop does not include
+support for concurrent futures - this will only be available if you
+have tornado ≥ 3.0.
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import absolute_import, division, with_statement
+
+import os
+import time
+import warnings
+
+from zmq import (
+ Poller,
+ POLLIN, POLLOUT, POLLERR,
+ ZMQError, ETERM,
+)
+
+try:
+ import tornado
+ tornado_version = tornado.version_info
+except (ImportError, AttributeError):
+ tornado_version = ()
+
+try:
+ # tornado ≥ 3
+ from tornado.ioloop import PollIOLoop, PeriodicCallback
+ from tornado.log import gen_log
+except ImportError:
+ from .minitornado.ioloop import PollIOLoop, PeriodicCallback
+ from .minitornado.log import gen_log
+
+
+class DelayedCallback(PeriodicCallback):
+ """Schedules the given callback to be called once.
+
+ The callback is called once, after callback_time milliseconds.
+
+ `start` must be called after the DelayedCallback is created.
+
+ The timeout is calculated from when `start` is called.
+ """
+ def __init__(self, callback, callback_time, io_loop=None):
+        # PeriodicCallback requires callback_time to be positive
+ warnings.warn("""DelayedCallback is deprecated.
+ Use loop.add_timeout instead.""", DeprecationWarning)
+ callback_time = max(callback_time, 1e-3)
+ super(DelayedCallback, self).__init__(callback, callback_time, io_loop)
+
+ def start(self):
+ """Starts the timer."""
+ self._running = True
+ self._firstrun = True
+ self._next_timeout = time.time() + self.callback_time / 1000.0
+ self.io_loop.add_timeout(self._next_timeout, self._run)
+
+ def _run(self):
+ if not self._running: return
+ self._running = False
+ try:
+ self.callback()
+ except Exception:
+ gen_log.error("Error in delayed callback", exc_info=True)
+
+
+class ZMQPoller(object):
+ """A poller that can be used in the tornado IOLoop.
+
+ This simply wraps a regular zmq.Poller, scaling the timeout
+ by 1000, so that it is in seconds rather than milliseconds.
+ """
+
+ def __init__(self):
+ self._poller = Poller()
+
+ @staticmethod
+ def _map_events(events):
+ """translate IOLoop.READ/WRITE/ERROR event masks into zmq.POLLIN/OUT/ERR"""
+ z_events = 0
+ if events & IOLoop.READ:
+ z_events |= POLLIN
+ if events & IOLoop.WRITE:
+ z_events |= POLLOUT
+ if events & IOLoop.ERROR:
+ z_events |= POLLERR
+ return z_events
+
+ @staticmethod
+ def _remap_events(z_events):
+ """translate zmq.POLLIN/OUT/ERR event masks into IOLoop.READ/WRITE/ERROR"""
+ events = 0
+ if z_events & POLLIN:
+ events |= IOLoop.READ
+ if z_events & POLLOUT:
+ events |= IOLoop.WRITE
+ if z_events & POLLERR:
+ events |= IOLoop.ERROR
+ return events
+
+ def register(self, fd, events):
+ return self._poller.register(fd, self._map_events(events))
+
+ def modify(self, fd, events):
+ return self._poller.modify(fd, self._map_events(events))
+
+ def unregister(self, fd):
+ return self._poller.unregister(fd)
+
+ def poll(self, timeout):
+ """poll in seconds rather than milliseconds.
+
+ Event masks will be IOLoop.READ/WRITE/ERROR
+ """
+ z_events = self._poller.poll(1000*timeout)
+ return [ (fd,self._remap_events(evt)) for (fd,evt) in z_events ]
+
+ def close(self):
+ pass
+
+
+class ZMQIOLoop(PollIOLoop):
+ """ZMQ subclass of tornado's IOLoop"""
+ def initialize(self, impl=None, **kwargs):
+ impl = ZMQPoller() if impl is None else impl
+ super(ZMQIOLoop, self).initialize(impl=impl, **kwargs)
+
+ @staticmethod
+ def instance():
+ """Returns a global `IOLoop` instance.
+
+ Most applications have a single, global `IOLoop` running on the
+ main thread. Use this method to get this instance from
+ another thread. To get the current thread's `IOLoop`, use `current()`.
+ """
+ # install ZMQIOLoop as the active IOLoop implementation
+ # when using tornado 3
+ if tornado_version >= (3,):
+ PollIOLoop.configure(ZMQIOLoop)
+ return PollIOLoop.instance()
+
+ def start(self):
+ try:
+ super(ZMQIOLoop, self).start()
+ except ZMQError as e:
+ if e.errno == ETERM:
+ # quietly return on ETERM
+ pass
+ else:
+ raise e
+
+
+if tornado_version >= (3,0) and tornado_version < (3,1):
+ def backport_close(self, all_fds=False):
+ """backport IOLoop.close to 3.0 from 3.1 (supports fd.close() method)"""
+ from zmq.eventloop.minitornado.ioloop import PollIOLoop as mini_loop
+ return mini_loop.close.__get__(self)(all_fds)
+ ZMQIOLoop.close = backport_close
+
+
+# public API name
+IOLoop = ZMQIOLoop
+
+
+def install():
+ """set the tornado IOLoop instance with the pyzmq IOLoop.
+
+ After calling this function, tornado's IOLoop.instance() and pyzmq's
+ IOLoop.instance() will return the same object.
+
+ An assertion error will be raised if tornado's IOLoop has been initialized
+ prior to calling this function.
+ """
+ from tornado import ioloop
+ # check if tornado's IOLoop is already initialized to something other
+ # than the pyzmq IOLoop instance:
+ assert (not ioloop.IOLoop.initialized()) or \
+ ioloop.IOLoop.instance() is IOLoop.instance(), "tornado IOLoop already initialized"
+
+ if tornado_version >= (3,):
+ # tornado 3 has an official API for registering new defaults, yay!
+ ioloop.IOLoop.configure(ZMQIOLoop)
+ else:
+ # we have to set the global instance explicitly
+ ioloop.IOLoop._instance = IOLoop.instance()
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/__init__.py
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/concurrent.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/concurrent.py
new file mode 100644
index 00000000..519b23d5
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/concurrent.py
@@ -0,0 +1,11 @@
+"""pyzmq does not ship tornado's futures,
+this just raises informative NotImplementedErrors to avoid having to change too much code.
+"""
+
+class NotImplementedFuture(object):
+ def __init__(self, *args, **kwargs):
+ raise NotImplementedError("pyzmq does not ship tornado's Futures, "
+ "install tornado >= 3.0 for future support."
+ )
+
+Future = TracebackFuture = NotImplementedFuture
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/ioloop.py
new file mode 100644
index 00000000..710a3ecb
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/ioloop.py
@@ -0,0 +1,829 @@
+#!/usr/bin/env python
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""An I/O event loop for non-blocking sockets.
+
+Typical applications will use a single `IOLoop` object, in the
+`IOLoop.instance` singleton. The `IOLoop.start` method should usually
+be called at the end of the ``main()`` function. Atypical applications may
+use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
+case.
+
+In addition to I/O events, the `IOLoop` can also schedule time-based events.
+`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
+"""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+import datetime
+import errno
+import functools
+import heapq
+import logging
+import numbers
+import os
+import select
+import sys
+import threading
+import time
+import traceback
+
+from .concurrent import Future, TracebackFuture
+from .log import app_log, gen_log
+from . import stack_context
+from .util import Configurable
+
+try:
+ import signal
+except ImportError:
+ signal = None
+
+try:
+ import thread # py2
+except ImportError:
+ import _thread as thread # py3
+
+from .platform.auto import set_close_exec, Waker
+
+
+class TimeoutError(Exception):
+ pass
+
+
+class IOLoop(Configurable):
+ """A level-triggered I/O loop.
+
+ We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
+ are available, or else we fall back on select(). If you are
+ implementing a system that needs to handle thousands of
+ simultaneous connections, you should use a system that supports
+ either ``epoll`` or ``kqueue``.
+
+ Example usage for a simple TCP server::
+
+ import errno
+ import functools
+ import ioloop
+ import socket
+
+ def connection_ready(sock, fd, events):
+ while True:
+ try:
+ connection, address = sock.accept()
+ except socket.error, e:
+ if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
+ raise
+ return
+ connection.setblocking(0)
+ handle_connection(connection, address)
+
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.setblocking(0)
+ sock.bind(("", port))
+ sock.listen(128)
+
+ io_loop = ioloop.IOLoop.instance()
+ callback = functools.partial(connection_ready, sock)
+ io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
+ io_loop.start()
+
+ """
+ # Constants from the epoll module
+ _EPOLLIN = 0x001
+ _EPOLLPRI = 0x002
+ _EPOLLOUT = 0x004
+ _EPOLLERR = 0x008
+ _EPOLLHUP = 0x010
+ _EPOLLRDHUP = 0x2000
+ _EPOLLONESHOT = (1 << 30)
+ _EPOLLET = (1 << 31)
+
+ # Our events map exactly to the epoll events
+ NONE = 0
+ READ = _EPOLLIN
+ WRITE = _EPOLLOUT
+ ERROR = _EPOLLERR | _EPOLLHUP
+
+ # Global lock for creating global IOLoop instance
+ _instance_lock = threading.Lock()
+
+ _current = threading.local()
+
+ @staticmethod
+ def instance():
+ """Returns a global `IOLoop` instance.
+
+ Most applications have a single, global `IOLoop` running on the
+ main thread. Use this method to get this instance from
+ another thread. To get the current thread's `IOLoop`, use `current()`.
+ """
+ if not hasattr(IOLoop, "_instance"):
+ with IOLoop._instance_lock:
+ if not hasattr(IOLoop, "_instance"):
+ # New instance after double check
+ IOLoop._instance = IOLoop()
+ return IOLoop._instance
+
+ @staticmethod
+ def initialized():
+ """Returns true if the singleton instance has been created."""
+ return hasattr(IOLoop, "_instance")
+
+ def install(self):
+ """Installs this `IOLoop` object as the singleton instance.
+
+ This is normally not necessary as `instance()` will create
+ an `IOLoop` on demand, but you may want to call `install` to use
+ a custom subclass of `IOLoop`.
+ """
+ assert not IOLoop.initialized()
+ IOLoop._instance = self
+
+ @staticmethod
+ def current():
+ """Returns the current thread's `IOLoop`.
+
+ If an `IOLoop` is currently running or has been marked as current
+ by `make_current`, returns that instance. Otherwise returns
+ `IOLoop.instance()`, i.e. the main thread's `IOLoop`.
+
+ A common pattern for classes that depend on ``IOLoops`` is to use
+ a default argument to enable programs with multiple ``IOLoops``
+ but not require the argument for simpler applications::
+
+ class MyClass(object):
+ def __init__(self, io_loop=None):
+ self.io_loop = io_loop or IOLoop.current()
+
+ In general you should use `IOLoop.current` as the default when
+ constructing an asynchronous object, and use `IOLoop.instance`
+ when you mean to communicate to the main thread from a different
+ one.
+ """
+ current = getattr(IOLoop._current, "instance", None)
+ if current is None:
+ return IOLoop.instance()
+ return current
+
+ def make_current(self):
+ """Makes this the `IOLoop` for the current thread.
+
+ An `IOLoop` automatically becomes current for its thread
+ when it is started, but it is sometimes useful to call
+        `make_current` explicitly before starting the `IOLoop`,
+ so that code run at startup time can find the right
+ instance.
+ """
+ IOLoop._current.instance = self
+
+ @staticmethod
+ def clear_current():
+ IOLoop._current.instance = None
+
+ @classmethod
+ def configurable_base(cls):
+ return IOLoop
+
+ @classmethod
+ def configurable_default(cls):
+ # this is the only patch to IOLoop:
+ from zmq.eventloop.ioloop import ZMQIOLoop
+ return ZMQIOLoop
+ # the remainder of this method is unused,
+ # but left for preservation reasons
+ if hasattr(select, "epoll"):
+ from tornado.platform.epoll import EPollIOLoop
+ return EPollIOLoop
+ if hasattr(select, "kqueue"):
+ # Python 2.6+ on BSD or Mac
+ from tornado.platform.kqueue import KQueueIOLoop
+ return KQueueIOLoop
+ from tornado.platform.select import SelectIOLoop
+ return SelectIOLoop
+
+ def initialize(self):
+ pass
+
+ def close(self, all_fds=False):
+ """Closes the `IOLoop`, freeing any resources used.
+
+ If ``all_fds`` is true, all file descriptors registered on the
+ IOLoop will be closed (not just the ones created by the
+ `IOLoop` itself).
+
+ Many applications will only use a single `IOLoop` that runs for the
+ entire lifetime of the process. In that case closing the `IOLoop`
+ is not necessary since everything will be cleaned up when the
+ process exits. `IOLoop.close` is provided mainly for scenarios
+ such as unit tests, which create and destroy a large number of
+ ``IOLoops``.
+
+ An `IOLoop` must be completely stopped before it can be closed. This
+ means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
+ be allowed to return before attempting to call `IOLoop.close()`.
+ Therefore the call to `close` will usually appear just after
+ the call to `start` rather than near the call to `stop`.
+
+ .. versionchanged:: 3.1
+ If the `IOLoop` implementation supports non-integer objects
+ for "file descriptors", those objects will have their
+            ``close`` method called when ``all_fds`` is true.
+ """
+ raise NotImplementedError()
+
+ def add_handler(self, fd, handler, events):
+ """Registers the given handler to receive the given events for fd.
+
+ The ``events`` argument is a bitwise or of the constants
+ ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
+
+ When an event occurs, ``handler(fd, events)`` will be run.
+ """
+ raise NotImplementedError()
+
+ def update_handler(self, fd, events):
+ """Changes the events we listen for fd."""
+ raise NotImplementedError()
+
+ def remove_handler(self, fd):
+ """Stop listening for events on fd."""
+ raise NotImplementedError()
+
+ def set_blocking_signal_threshold(self, seconds, action):
+ """Sends a signal if the `IOLoop` is blocked for more than
+ ``s`` seconds.
+
+ Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
+ platform.
+
+ The action parameter is a Python signal handler. Read the
+ documentation for the `signal` module for more information.
+ If ``action`` is None, the process will be killed if it is
+ blocked for too long.
+ """
+ raise NotImplementedError()
+
+ def set_blocking_log_threshold(self, seconds):
+ """Logs a stack trace if the `IOLoop` is blocked for more than
+ ``s`` seconds.
+
+ Equivalent to ``set_blocking_signal_threshold(seconds,
+ self.log_stack)``
+ """
+ self.set_blocking_signal_threshold(seconds, self.log_stack)
+
+ def log_stack(self, signal, frame):
+ """Signal handler to log the stack trace of the current thread.
+
+ For use with `set_blocking_signal_threshold`.
+ """
+ gen_log.warning('IOLoop blocked for %f seconds in\n%s',
+ self._blocking_signal_threshold,
+ ''.join(traceback.format_stack(frame)))
+
+ def start(self):
+ """Starts the I/O loop.
+
+ The loop will run until one of the callbacks calls `stop()`, which
+ will make the loop stop after the current event iteration completes.
+ """
+ raise NotImplementedError()
+
+ def stop(self):
+ """Stop the I/O loop.
+
+ If the event loop is not currently running, the next call to `start()`
+ will return immediately.
+
+ To use asynchronous methods from otherwise-synchronous code (such as
+ unit tests), you can start and stop the event loop like this::
+
+ ioloop = IOLoop()
+ async_method(ioloop=ioloop, callback=ioloop.stop)
+ ioloop.start()
+
+ ``ioloop.start()`` will return after ``async_method`` has run
+ its callback, whether that callback was invoked before or
+ after ``ioloop.start``.
+
+ Note that even after `stop` has been called, the `IOLoop` is not
+ completely stopped until `IOLoop.start` has also returned.
+ Some work that was scheduled before the call to `stop` may still
+ be run before the `IOLoop` shuts down.
+ """
+ raise NotImplementedError()
+
+ def run_sync(self, func, timeout=None):
+ """Starts the `IOLoop`, runs the given function, and stops the loop.
+
+ If the function returns a `.Future`, the `IOLoop` will run
+ until the future is resolved. If it raises an exception, the
+ `IOLoop` will stop and the exception will be re-raised to the
+ caller.
+
+ The keyword-only argument ``timeout`` may be used to set
+ a maximum duration for the function. If the timeout expires,
+ a `TimeoutError` is raised.
+
+ This method is useful in conjunction with `tornado.gen.coroutine`
+ to allow asynchronous calls in a ``main()`` function::
+
+ @gen.coroutine
+ def main():
+ # do stuff...
+
+ if __name__ == '__main__':
+ IOLoop.instance().run_sync(main)
+ """
+ future_cell = [None]
+
+ def run():
+ try:
+ result = func()
+ except Exception:
+ future_cell[0] = TracebackFuture()
+ future_cell[0].set_exc_info(sys.exc_info())
+ else:
+ if isinstance(result, Future):
+ future_cell[0] = result
+ else:
+ future_cell[0] = Future()
+ future_cell[0].set_result(result)
+ self.add_future(future_cell[0], lambda future: self.stop())
+ self.add_callback(run)
+ if timeout is not None:
+ timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
+ self.start()
+ if timeout is not None:
+ self.remove_timeout(timeout_handle)
+ if not future_cell[0].done():
+ raise TimeoutError('Operation timed out after %s seconds' % timeout)
+ return future_cell[0].result()
+
+ def time(self):
+ """Returns the current time according to the `IOLoop`'s clock.
+
+ The return value is a floating-point number relative to an
+ unspecified time in the past.
+
+ By default, the `IOLoop`'s time function is `time.time`. However,
+ it may be configured to use e.g. `time.monotonic` instead.
+ Calls to `add_timeout` that pass a number instead of a
+ `datetime.timedelta` should use this function to compute the
+ appropriate time, so they can work no matter what time function
+ is chosen.
+ """
+ return time.time()
+
+ def add_timeout(self, deadline, callback):
+ """Runs the ``callback`` at the time ``deadline`` from the I/O loop.
+
+ Returns an opaque handle that may be passed to
+ `remove_timeout` to cancel.
+
+ ``deadline`` may be a number denoting a time (on the same
+ scale as `IOLoop.time`, normally `time.time`), or a
+ `datetime.timedelta` object for a deadline relative to the
+ current time.
+
+ Note that it is not safe to call `add_timeout` from other threads.
+ Instead, you must use `add_callback` to transfer control to the
+ `IOLoop`'s thread, and then call `add_timeout` from there.
+ """
+ raise NotImplementedError()
+
+ def remove_timeout(self, timeout):
+ """Cancels a pending timeout.
+
+ The argument is a handle as returned by `add_timeout`. It is
+ safe to call `remove_timeout` even if the callback has already
+ been run.
+ """
+ raise NotImplementedError()
+
+ def add_callback(self, callback, *args, **kwargs):
+ """Calls the given callback on the next I/O loop iteration.
+
+ It is safe to call this method from any thread at any time,
+ except from a signal handler. Note that this is the **only**
+ method in `IOLoop` that makes this thread-safety guarantee; all
+ other interaction with the `IOLoop` must be done from that
+ `IOLoop`'s thread. `add_callback()` may be used to transfer
+ control from other threads to the `IOLoop`'s thread.
+
+ To add a callback from a signal handler, see
+ `add_callback_from_signal`.
+ """
+ raise NotImplementedError()
+
+ def add_callback_from_signal(self, callback, *args, **kwargs):
+ """Calls the given callback on the next I/O loop iteration.
+
+ Safe for use from a Python signal handler; should not be used
+ otherwise.
+
+ Callbacks added with this method will be run without any
+ `.stack_context`, to avoid picking up the context of the function
+ that was interrupted by the signal.
+ """
+ raise NotImplementedError()
+
+ def add_future(self, future, callback):
+ """Schedules a callback on the ``IOLoop`` when the given
+ `.Future` is finished.
+
+ The callback is invoked with one argument, the
+ `.Future`.
+ """
+ assert isinstance(future, Future)
+ callback = stack_context.wrap(callback)
+ future.add_done_callback(
+ lambda future: self.add_callback(callback, future))
+
+ def _run_callback(self, callback):
+ """Runs a callback with error handling.
+
+ For use in subclasses.
+ """
+ try:
+ callback()
+ except Exception:
+ self.handle_callback_exception(callback)
+
+ def handle_callback_exception(self, callback):
+ """This method is called whenever a callback run by the `IOLoop`
+ throws an exception.
+
+ By default simply logs the exception as an error. Subclasses
+ may override this method to customize reporting of exceptions.
+
+ The exception itself is not passed explicitly, but is available
+ in `sys.exc_info`.
+ """
+ app_log.error("Exception in callback %r", callback, exc_info=True)
+
+
+class PollIOLoop(IOLoop):
+ """Base class for IOLoops built around a select-like function.
+
+ For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
+ (Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
+ `tornado.platform.select.SelectIOLoop` (all platforms).
+ """
+ def initialize(self, impl, time_func=None):
+ super(PollIOLoop, self).initialize()
+ self._impl = impl
+ if hasattr(self._impl, 'fileno'):
+ set_close_exec(self._impl.fileno())
+ self.time_func = time_func or time.time
+ self._handlers = {}
+ self._events = {}
+ self._callbacks = []
+ self._callback_lock = threading.Lock()
+ self._timeouts = []
+ self._cancellations = 0
+ self._running = False
+ self._stopped = False
+ self._closing = False
+ self._thread_ident = None
+ self._blocking_signal_threshold = None
+
+ # Create a pipe that we send bogus data to when we want to wake
+ # the I/O loop when it is idle
+ self._waker = Waker()
+ self.add_handler(self._waker.fileno(),
+ lambda fd, events: self._waker.consume(),
+ self.READ)
+
+ def close(self, all_fds=False):
+ with self._callback_lock:
+ self._closing = True
+ self.remove_handler(self._waker.fileno())
+ if all_fds:
+ for fd in self._handlers.keys():
+ try:
+ close_method = getattr(fd, 'close', None)
+ if close_method is not None:
+ close_method()
+ else:
+ os.close(fd)
+ except Exception:
+ gen_log.debug("error closing fd %s", fd, exc_info=True)
+ self._waker.close()
+ self._impl.close()
+
+ def add_handler(self, fd, handler, events):
+ self._handlers[fd] = stack_context.wrap(handler)
+ self._impl.register(fd, events | self.ERROR)
+
+ def update_handler(self, fd, events):
+ self._impl.modify(fd, events | self.ERROR)
+
+ def remove_handler(self, fd):
+ self._handlers.pop(fd, None)
+ self._events.pop(fd, None)
+ try:
+ self._impl.unregister(fd)
+ except Exception:
+ gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
+
+ def set_blocking_signal_threshold(self, seconds, action):
+ if not hasattr(signal, "setitimer"):
+ gen_log.error("set_blocking_signal_threshold requires a signal module "
+ "with the setitimer method")
+ return
+ self._blocking_signal_threshold = seconds
+ if seconds is not None:
+ signal.signal(signal.SIGALRM,
+ action if action is not None else signal.SIG_DFL)
+
+ def start(self):
+ if not logging.getLogger().handlers:
+ # The IOLoop catches and logs exceptions, so it's
+ # important that log output be visible. However, python's
+ # default behavior for non-root loggers (prior to python
+ # 3.2) is to print an unhelpful "no handlers could be
+ # found" message rather than the actual log entry, so we
+ # must explicitly configure logging if we've made it this
+ # far without anything.
+ logging.basicConfig()
+ if self._stopped:
+ self._stopped = False
+ return
+ old_current = getattr(IOLoop._current, "instance", None)
+ IOLoop._current.instance = self
+ self._thread_ident = thread.get_ident()
+ self._running = True
+
+ # signal.set_wakeup_fd closes a race condition in event loops:
+ # a signal may arrive at the beginning of select/poll/etc
+ # before it goes into its interruptible sleep, so the signal
+ # will be consumed without waking the select. The solution is
+ # for the (C, synchronous) signal handler to write to a pipe,
+ # which will then be seen by select.
+ #
+ # In python's signal handling semantics, this only matters on the
+ # main thread (fortunately, set_wakeup_fd only works on the main
+ # thread and will raise a ValueError otherwise).
+ #
+ # If someone has already set a wakeup fd, we don't want to
+ # disturb it. This is an issue for twisted, which does its
+ # SIGCHILD processing in response to its own wakeup fd being
+ # written to. As long as the wakeup fd is registered on the IOLoop,
+ # the loop will still wake up and everything should work.
+ old_wakeup_fd = None
+ if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
+ # requires python 2.6+, unix. set_wakeup_fd exists but crashes
+ # the python process on windows.
+ try:
+ old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
+ if old_wakeup_fd != -1:
+ # Already set, restore previous value. This is a little racy,
+ # but there's no clean get_wakeup_fd and in real use the
+ # IOLoop is just started once at the beginning.
+ signal.set_wakeup_fd(old_wakeup_fd)
+ old_wakeup_fd = None
+ except ValueError: # non-main thread
+ pass
+
+ while True:
+ poll_timeout = 3600.0
+
+ # Prevent IO event starvation by delaying new callbacks
+ # to the next iteration of the event loop.
+ with self._callback_lock:
+ callbacks = self._callbacks
+ self._callbacks = []
+ for callback in callbacks:
+ self._run_callback(callback)
+
+ if self._timeouts:
+ now = self.time()
+ while self._timeouts:
+ if self._timeouts[0].callback is None:
+ # the timeout was cancelled
+ heapq.heappop(self._timeouts)
+ self._cancellations -= 1
+ elif self._timeouts[0].deadline <= now:
+ timeout = heapq.heappop(self._timeouts)
+ self._run_callback(timeout.callback)
+ else:
+ seconds = self._timeouts[0].deadline - now
+ poll_timeout = min(seconds, poll_timeout)
+ break
+ if (self._cancellations > 512
+ and self._cancellations > (len(self._timeouts) >> 1)):
+ # Clean up the timeout queue when it gets large and it's
+ # more than half cancellations.
+ self._cancellations = 0
+ self._timeouts = [x for x in self._timeouts
+ if x.callback is not None]
+ heapq.heapify(self._timeouts)
+
+ if self._callbacks:
+ # If any callbacks or timeouts called add_callback,
+ # we don't want to wait in poll() before we run them.
+ poll_timeout = 0.0
+
+ if not self._running:
+ break
+
+ if self._blocking_signal_threshold is not None:
+ # clear alarm so it doesn't fire while poll is waiting for
+ # events.
+ signal.setitimer(signal.ITIMER_REAL, 0, 0)
+
+ try:
+ event_pairs = self._impl.poll(poll_timeout)
+ except Exception as e:
+ # Depending on python version and IOLoop implementation,
+ # different exception types may be thrown and there are
+ # two ways EINTR might be signaled:
+ # * e.errno == errno.EINTR
+ # * e.args is like (errno.EINTR, 'Interrupted system call')
+ if (getattr(e, 'errno', None) == errno.EINTR or
+ (isinstance(getattr(e, 'args', None), tuple) and
+ len(e.args) == 2 and e.args[0] == errno.EINTR)):
+ continue
+ else:
+ raise
+
+ if self._blocking_signal_threshold is not None:
+ signal.setitimer(signal.ITIMER_REAL,
+ self._blocking_signal_threshold, 0)
+
+ # Pop one fd at a time from the set of pending fds and run
+ # its handler. Since that handler may perform actions on
+ # other file descriptors, there may be reentrant calls to
+ # this IOLoop that update self._events
+ self._events.update(event_pairs)
+ while self._events:
+ fd, events = self._events.popitem()
+ try:
+ self._handlers[fd](fd, events)
+ except (OSError, IOError) as e:
+ if e.args[0] == errno.EPIPE:
+ # Happens when the client closes the connection
+ pass
+ else:
+ app_log.error("Exception in I/O handler for fd %s",
+ fd, exc_info=True)
+ except Exception:
+ app_log.error("Exception in I/O handler for fd %s",
+ fd, exc_info=True)
+ # reset the stopped flag so another start/stop pair can be issued
+ self._stopped = False
+ if self._blocking_signal_threshold is not None:
+ signal.setitimer(signal.ITIMER_REAL, 0, 0)
+ IOLoop._current.instance = old_current
+ if old_wakeup_fd is not None:
+ signal.set_wakeup_fd(old_wakeup_fd)
+
+ def stop(self):
+ self._running = False
+ self._stopped = True
+ self._waker.wake()
+
+ def time(self):
+ return self.time_func()
+
+ def add_timeout(self, deadline, callback):
+ timeout = _Timeout(deadline, stack_context.wrap(callback), self)
+ heapq.heappush(self._timeouts, timeout)
+ return timeout
+
+ def remove_timeout(self, timeout):
+ # Removing from a heap is complicated, so just leave the defunct
+ # timeout object in the queue (see discussion in
+ # http://docs.python.org/library/heapq.html).
+ # If this turns out to be a problem, we could add a garbage
+ # collection pass whenever there are too many dead timeouts.
+ timeout.callback = None
+ self._cancellations += 1
+
+ def add_callback(self, callback, *args, **kwargs):
+ with self._callback_lock:
+ if self._closing:
+ raise RuntimeError("IOLoop is closing")
+ list_empty = not self._callbacks
+ self._callbacks.append(functools.partial(
+ stack_context.wrap(callback), *args, **kwargs))
+ if list_empty and thread.get_ident() != self._thread_ident:
+ # If we're in the IOLoop's thread, we know it's not currently
+ # polling. If we're not, and we added the first callback to an
+ # empty list, we may need to wake it up (it may wake up on its
+ # own, but an occasional extra wake is harmless). Waking
+ # up a polling IOLoop is relatively expensive, so we try to
+ # avoid it when we can.
+ self._waker.wake()
+
+ def add_callback_from_signal(self, callback, *args, **kwargs):
+ with stack_context.NullContext():
+ if thread.get_ident() != self._thread_ident:
+ # if the signal is handled on another thread, we can add
+ # it normally (modulo the NullContext)
+ self.add_callback(callback, *args, **kwargs)
+ else:
+ # If we're on the IOLoop's thread, we cannot use
+ # the regular add_callback because it may deadlock on
+ # _callback_lock. Blindly insert into self._callbacks.
+ # This is safe because the GIL makes list.append atomic.
+ # One subtlety is that if the signal interrupted the
+ # _callback_lock block in IOLoop.start, we may modify
+ # either the old or new version of self._callbacks,
+ # but either way will work.
+ self._callbacks.append(functools.partial(
+ stack_context.wrap(callback), *args, **kwargs))
+
+
+class _Timeout(object):
+ """An IOLoop timeout, a UNIX timestamp and a callback"""
+
+ # Reduce memory overhead when there are lots of pending callbacks
+ __slots__ = ['deadline', 'callback']
+
+ def __init__(self, deadline, callback, io_loop):
+ if isinstance(deadline, numbers.Real):
+ self.deadline = deadline
+ elif isinstance(deadline, datetime.timedelta):
+ self.deadline = io_loop.time() + _Timeout.timedelta_to_seconds(deadline)
+ else:
+ raise TypeError("Unsupported deadline %r" % deadline)
+ self.callback = callback
+
+ @staticmethod
+ def timedelta_to_seconds(td):
+ """Equivalent to td.total_seconds() (introduced in python 2.7)."""
+ return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
+
+ # Comparison methods to sort by deadline, with object id as a tiebreaker
+ # to guarantee a consistent ordering. The heapq module uses __le__
+ # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
+ # use __lt__).
+ def __lt__(self, other):
+ return ((self.deadline, id(self)) <
+ (other.deadline, id(other)))
+
+ def __le__(self, other):
+ return ((self.deadline, id(self)) <=
+ (other.deadline, id(other)))
+
+
+class PeriodicCallback(object):
+ """Schedules the given callback to be called periodically.
+
+ The callback is called every ``callback_time`` milliseconds.
+
+ `start` must be called after the `PeriodicCallback` is created.
+ """
+ def __init__(self, callback, callback_time, io_loop=None):
+ self.callback = callback
+ if callback_time <= 0:
+ raise ValueError("Periodic callback must have a positive callback_time")
+ self.callback_time = callback_time
+ self.io_loop = io_loop or IOLoop.current()
+ self._running = False
+ self._timeout = None
+
+ def start(self):
+ """Starts the timer."""
+ self._running = True
+ self._next_timeout = self.io_loop.time()
+ self._schedule_next()
+
+ def stop(self):
+ """Stops the timer."""
+ self._running = False
+ if self._timeout is not None:
+ self.io_loop.remove_timeout(self._timeout)
+ self._timeout = None
+
+ def _run(self):
+ if not self._running:
+ return
+ try:
+ self.callback()
+ except Exception:
+ app_log.error("Error in periodic callback", exc_info=True)
+ self._schedule_next()
+
+ def _schedule_next(self):
+ if self._running:
+ current_time = self.io_loop.time()
+ while self._next_timeout <= current_time:
+ self._next_timeout += self.callback_time / 1000.0
+ self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/log.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/log.py
new file mode 100644
index 00000000..49051e89
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/log.py
@@ -0,0 +1,6 @@
+"""minimal subset of tornado.log for zmq.eventloop.minitornado"""
+
+import logging
+
+app_log = logging.getLogger("tornado.application")
+gen_log = logging.getLogger("tornado.general")
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/__init__.py
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/auto.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/auto.py
new file mode 100644
index 00000000..b40ccd94
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/auto.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Implementation of platform-specific functionality.
+
+For each function or class described in `tornado.platform.interface`,
+the appropriate platform-specific implementation exists in this module.
+Most code that needs access to this functionality should do e.g.::
+
+ from tornado.platform.auto import set_close_exec
+"""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+import os
+
+if os.name == 'nt':
+ from .common import Waker
+ from .windows import set_close_exec
+else:
+ from .posix import set_close_exec, Waker
+
+try:
+ # monotime monkey-patches the time module to have a monotonic function
+ # in versions of python before 3.3.
+ import monotime
+except ImportError:
+ pass
+try:
+ from time import monotonic as monotonic_time
+except ImportError:
+ monotonic_time = None
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/common.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/common.py
new file mode 100644
index 00000000..2d75dc1e
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/common.py
@@ -0,0 +1,91 @@
+"""Lowest-common-denominator implementations of platform functionality."""
+from __future__ import absolute_import, division, print_function, with_statement
+
+import errno
+import socket
+
+from . import interface
+
+
+class Waker(interface.Waker):
+ """Create an OS independent asynchronous pipe.
+
+ For use on platforms that don't have os.pipe() (or where pipes cannot
+ be passed to select()), but do have sockets. This includes Windows
+ and Jython.
+ """
+ def __init__(self):
+ # Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py
+
+ self.writer = socket.socket()
+ # Disable buffering -- pulling the trigger sends 1 byte,
+ # and we want that sent immediately, to wake up ASAP.
+ self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+
+ count = 0
+ while 1:
+ count += 1
+ # Bind to a local port; for efficiency, let the OS pick
+ # a free port for us.
+ # Unfortunately, stress tests showed that we may not
+ # be able to connect to that port ("Address already in
+ # use") despite that the OS picked it. This appears
+ # to be a race bug in the Windows socket implementation.
+ # So we loop until a connect() succeeds (almost always
+ # on the first try). See the long thread at
+ # http://mail.zope.org/pipermail/zope/2005-July/160433.html
+ # for hideous details.
+ a = socket.socket()
+ a.bind(("127.0.0.1", 0))
+ a.listen(1)
+ connect_address = a.getsockname() # assigned (host, port) pair
+ try:
+ self.writer.connect(connect_address)
+ break # success
+ except socket.error as detail:
+ if (not hasattr(errno, 'WSAEADDRINUSE') or
+ detail[0] != errno.WSAEADDRINUSE):
+ # "Address already in use" is the only error
+ # I've seen on two WinXP Pro SP2 boxes, under
+ # Pythons 2.3.5 and 2.4.1.
+ raise
+ # (10048, 'Address already in use')
+ # assert count <= 2 # never triggered in Tim's tests
+ if count >= 10: # I've never seen it go above 2
+ a.close()
+ self.writer.close()
+ raise socket.error("Cannot bind trigger!")
+ # Close `a` and try again. Note: I originally put a short
+ # sleep() here, but it didn't appear to help or hurt.
+ a.close()
+
+ self.reader, addr = a.accept()
+ self.reader.setblocking(0)
+ self.writer.setblocking(0)
+ a.close()
+ self.reader_fd = self.reader.fileno()
+
+ def fileno(self):
+ return self.reader.fileno()
+
+ def write_fileno(self):
+ return self.writer.fileno()
+
+ def wake(self):
+ try:
+ self.writer.send(b"x")
+ except (IOError, socket.error):
+ pass
+
+ def consume(self):
+ try:
+ while True:
+ result = self.reader.recv(1024)
+ if not result:
+ break
+ except (IOError, socket.error):
+ pass
+
+ def close(self):
+ self.reader.close()
+ self.writer.close()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/interface.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/interface.py
new file mode 100644
index 00000000..07da6bab
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/interface.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Interfaces for platform-specific functionality.
+
+This module exists primarily for documentation purposes and as base classes
+for other tornado.platform modules. Most code should import the appropriate
+implementation from `tornado.platform.auto`.
+"""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+
+def set_close_exec(fd):
+ """Sets the close-on-exec bit (``FD_CLOEXEC``) for a file descriptor."""
+ raise NotImplementedError()
+
+
+class Waker(object):
+ """A socket-like object that can wake another thread from ``select()``.
+
+ The `~tornado.ioloop.IOLoop` will add the Waker's `fileno()` to
+ its ``select`` (or ``epoll`` or ``kqueue``) calls. When another
+ thread wants to wake up the loop, it calls `wake`. Once it has woken
+ up, it will call `consume` to do any necessary per-wake cleanup. When
+ the ``IOLoop`` is closed, it closes its waker too.
+ """
+ def fileno(self):
+ """Returns the read file descriptor for this waker.
+
+ Must be suitable for use with ``select()`` or equivalent on the
+ local platform.
+ """
+ raise NotImplementedError()
+
+ def write_fileno(self):
+ """Returns the write file descriptor for this waker."""
+ raise NotImplementedError()
+
+ def wake(self):
+ """Triggers activity on the waker's file descriptor."""
+ raise NotImplementedError()
+
+ def consume(self):
+ """Called after the listen has woken up to do any necessary cleanup."""
+ raise NotImplementedError()
+
+ def close(self):
+ """Closes the waker's file descriptor(s)."""
+ raise NotImplementedError()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/posix.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/posix.py
new file mode 100644
index 00000000..ccffbb66
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/posix.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Posix implementations of platform-specific functionality."""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+import fcntl
+import os
+
+from . import interface
+
+
+def set_close_exec(fd):
+ flags = fcntl.fcntl(fd, fcntl.F_GETFD)
+ fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
+
+
+def _set_nonblocking(fd):
+ flags = fcntl.fcntl(fd, fcntl.F_GETFL)
+ fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
+
+
+class Waker(interface.Waker):
+ def __init__(self):
+ r, w = os.pipe()
+ _set_nonblocking(r)
+ _set_nonblocking(w)
+ set_close_exec(r)
+ set_close_exec(w)
+ self.reader = os.fdopen(r, "rb", 0)
+ self.writer = os.fdopen(w, "wb", 0)
+
+ def fileno(self):
+ return self.reader.fileno()
+
+ def write_fileno(self):
+ return self.writer.fileno()
+
+ def wake(self):
+ try:
+ self.writer.write(b"x")
+ except IOError:
+ pass
+
+ def consume(self):
+ try:
+ while True:
+ result = self.reader.read()
+ if not result:
+ break
+ except IOError:
+ pass
+
+ def close(self):
+ self.reader.close()
+ self.writer.close()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/windows.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/windows.py
new file mode 100644
index 00000000..817bdca1
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/platform/windows.py
@@ -0,0 +1,20 @@
+# NOTE: win32 support is currently experimental, and not recommended
+# for production use.
+
+
+from __future__ import absolute_import, division, print_function, with_statement
+import ctypes
+import ctypes.wintypes
+
+# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx
+SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
+SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)
+SetHandleInformation.restype = ctypes.wintypes.BOOL
+
+HANDLE_FLAG_INHERIT = 0x00000001
+
+
+def set_close_exec(fd):
+ success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 0)
+ if not success:
+ raise ctypes.GetLastError()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/stack_context.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/stack_context.py
new file mode 100644
index 00000000..226d8042
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/stack_context.py
@@ -0,0 +1,376 @@
+#!/usr/bin/env python
+#
+# Copyright 2010 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""`StackContext` allows applications to maintain threadlocal-like state
+that follows execution as it moves to other execution contexts.
+
+The motivating examples are to eliminate the need for explicit
+``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to
+allow some additional context to be kept for logging.
+
+This is slightly magic, but it's an extension of the idea that an
+exception handler is a kind of stack-local state and when that stack
+is suspended and resumed in a new context that state needs to be
+preserved. `StackContext` shifts the burden of restoring that state
+from each call site (e.g. wrapping each `.AsyncHTTPClient` callback
+in ``async_callback``) to the mechanisms that transfer control from
+one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`,
+thread pools, etc).
+
+Example usage::
+
+ @contextlib.contextmanager
+ def die_on_error():
+ try:
+ yield
+ except Exception:
+ logging.error("exception in asynchronous operation",exc_info=True)
+ sys.exit(1)
+
+ with StackContext(die_on_error):
+ # Any exception thrown here *or in callback and its descendants*
+ # will cause the process to exit instead of spinning endlessly
+ # in the ioloop.
+ http_client.fetch(url, callback)
+ ioloop.start()
+
+Most applications shouldn't have to work with `StackContext` directly.
+Here are a few rules of thumb for when it's necessary:
+
+* If you're writing an asynchronous library that doesn't rely on a
+ stack_context-aware library like `tornado.ioloop` or `tornado.iostream`
+ (for example, if you're writing a thread pool), use
+ `.stack_context.wrap()` before any asynchronous operations to capture the
+ stack context from where the operation was started.
+
+* If you're writing an asynchronous library that has some shared
+ resources (such as a connection pool), create those shared resources
+ within a ``with stack_context.NullContext():`` block. This will prevent
+ ``StackContexts`` from leaking from one request to another.
+
+* If you want to write something like an exception handler that will
+ persist across asynchronous calls, create a new `StackContext` (or
+ `ExceptionStackContext`), and make your asynchronous calls in a ``with``
+ block that references your `StackContext`.
+"""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+import sys
+import threading
+
+from .util import raise_exc_info
+
+
+class StackContextInconsistentError(Exception):
+ pass
+
+
+class _State(threading.local):
+ def __init__(self):
+ self.contexts = (tuple(), None)
+_state = _State()
+
+
+class StackContext(object):
+ """Establishes the given context as a StackContext that will be transferred.
+
+ Note that the parameter is a callable that returns a context
+ manager, not the context itself. That is, where for a
+ non-transferable context manager you would say::
+
+ with my_context():
+
+ StackContext takes the function itself rather than its result::
+
+ with StackContext(my_context):
+
+ The result of ``with StackContext() as cb:`` is a deactivation
+ callback. Run this callback when the StackContext is no longer
+ needed to ensure that it is not propagated any further (note that
+ deactivating a context does not affect any instances of that
+ context that are currently pending). This is an advanced feature
+ and not necessary in most applications.
+ """
+ def __init__(self, context_factory):
+ self.context_factory = context_factory
+ self.contexts = []
+ self.active = True
+
+ def _deactivate(self):
+ self.active = False
+
+ # StackContext protocol
+ def enter(self):
+ context = self.context_factory()
+ self.contexts.append(context)
+ context.__enter__()
+
+ def exit(self, type, value, traceback):
+ context = self.contexts.pop()
+ context.__exit__(type, value, traceback)
+
+ # Note that some of this code is duplicated in ExceptionStackContext
+ # below. ExceptionStackContext is more common and doesn't need
+ # the full generality of this class.
+ def __enter__(self):
+ self.old_contexts = _state.contexts
+ self.new_contexts = (self.old_contexts[0] + (self,), self)
+ _state.contexts = self.new_contexts
+
+ try:
+ self.enter()
+ except:
+ _state.contexts = self.old_contexts
+ raise
+
+ return self._deactivate
+
+ def __exit__(self, type, value, traceback):
+ try:
+ self.exit(type, value, traceback)
+ finally:
+ final_contexts = _state.contexts
+ _state.contexts = self.old_contexts
+
+ # Generator coroutines and with-statements with non-local
+ # effects interact badly. Check here for signs of
+ # the stack getting out of sync.
+ # Note that this check comes after restoring _state.context
+ # so that if it fails things are left in a (relatively)
+ # consistent state.
+ if final_contexts is not self.new_contexts:
+ raise StackContextInconsistentError(
+ 'stack_context inconsistency (may be caused by yield '
+ 'within a "with StackContext" block)')
+
+ # Break up a reference to itself to allow for faster GC on CPython.
+ self.new_contexts = None
+
+
+class ExceptionStackContext(object):
+ """Specialization of StackContext for exception handling.
+
+ The supplied ``exception_handler`` function will be called in the
+ event of an uncaught exception in this context. The semantics are
+ similar to a try/finally clause, and intended use cases are to log
+ an error, close a socket, or similar cleanup actions. The
+ ``exc_info`` triple ``(type, value, traceback)`` will be passed to the
+ exception_handler function.
+
+ If the exception handler returns true, the exception will be
+ consumed and will not be propagated to other exception handlers.
+ """
+ def __init__(self, exception_handler):
+ self.exception_handler = exception_handler
+ self.active = True
+
+ def _deactivate(self):
+ self.active = False
+
+ def exit(self, type, value, traceback):
+ if type is not None:
+ return self.exception_handler(type, value, traceback)
+
+ def __enter__(self):
+ self.old_contexts = _state.contexts
+ self.new_contexts = (self.old_contexts[0], self)
+ _state.contexts = self.new_contexts
+
+ return self._deactivate
+
+ def __exit__(self, type, value, traceback):
+ try:
+ if type is not None:
+ return self.exception_handler(type, value, traceback)
+ finally:
+ final_contexts = _state.contexts
+ _state.contexts = self.old_contexts
+
+ if final_contexts is not self.new_contexts:
+ raise StackContextInconsistentError(
+ 'stack_context inconsistency (may be caused by yield '
+ 'within a "with StackContext" block)')
+
+ # Break up a reference to itself to allow for faster GC on CPython.
+ self.new_contexts = None
+
+
+class NullContext(object):
+ """Resets the `StackContext`.
+
+ Useful when creating a shared resource on demand (e.g. an
+ `.AsyncHTTPClient`) where the stack that caused the creation is
+ not relevant to future operations.
+ """
+ def __enter__(self):
+ self.old_contexts = _state.contexts
+ _state.contexts = (tuple(), None)
+
+ def __exit__(self, type, value, traceback):
+ _state.contexts = self.old_contexts
+
+
+def _remove_deactivated(contexts):
+ """Remove deactivated handlers from the chain"""
+ # Clean ctx handlers
+ stack_contexts = tuple([h for h in contexts[0] if h.active])
+
+ # Find new head
+ head = contexts[1]
+ while head is not None and not head.active:
+ head = head.old_contexts[1]
+
+ # Process chain
+ ctx = head
+ while ctx is not None:
+ parent = ctx.old_contexts[1]
+
+ while parent is not None:
+ if parent.active:
+ break
+ ctx.old_contexts = parent.old_contexts
+ parent = parent.old_contexts[1]
+
+ ctx = parent
+
+ return (stack_contexts, head)
+
+
+def wrap(fn):
+ """Returns a callable object that will restore the current `StackContext`
+ when executed.
+
+ Use this whenever saving a callback to be executed later in a
+ different execution context (either in a different thread or
+ asynchronously in the same thread).
+ """
+ # Check if function is already wrapped
+ if fn is None or hasattr(fn, '_wrapped'):
+ return fn
+
+ # Capture current stack head
+ # TODO: Any other better way to store contexts and update them in wrapped function?
+ cap_contexts = [_state.contexts]
+
+ def wrapped(*args, **kwargs):
+ ret = None
+ try:
+ # Capture old state
+ current_state = _state.contexts
+
+ # Remove deactivated items
+ cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0])
+
+ # Force new state
+ _state.contexts = contexts
+
+ # Current exception
+ exc = (None, None, None)
+ top = None
+
+ # Apply stack contexts
+ last_ctx = 0
+ stack = contexts[0]
+
+ # Apply state
+ for n in stack:
+ try:
+ n.enter()
+ last_ctx += 1
+ except:
+ # Exception happened. Record exception info and store top-most handler
+ exc = sys.exc_info()
+ top = n.old_contexts[1]
+
+ # Execute callback if no exception happened while restoring state
+ if top is None:
+ try:
+ ret = fn(*args, **kwargs)
+ except:
+ exc = sys.exc_info()
+ top = contexts[1]
+
+ # If there was exception, try to handle it by going through the exception chain
+ if top is not None:
+ exc = _handle_exception(top, exc)
+ else:
+ # Otherwise take shorter path and run stack contexts in reverse order
+ while last_ctx > 0:
+ last_ctx -= 1
+ c = stack[last_ctx]
+
+ try:
+ c.exit(*exc)
+ except:
+ exc = sys.exc_info()
+ top = c.old_contexts[1]
+ break
+ else:
+ top = None
+
+ # If an exception happened while unrolling, take longer exception handler path
+ if top is not None:
+ exc = _handle_exception(top, exc)
+
+ # If exception was not handled, raise it
+ if exc != (None, None, None):
+ raise_exc_info(exc)
+ finally:
+ _state.contexts = current_state
+ return ret
+
+ wrapped._wrapped = True
+ return wrapped
+
+
+def _handle_exception(tail, exc):
+ while tail is not None:
+ try:
+ if tail.exit(*exc):
+ exc = (None, None, None)
+ except:
+ exc = sys.exc_info()
+
+ tail = tail.old_contexts[1]
+
+ return exc
+
+
+def run_with_stack_context(context, func):
+ """Run a coroutine ``func`` in the given `StackContext`.
+
+ It is not safe to have a ``yield`` statement within a ``with StackContext``
+ block, so it is difficult to use stack context with `.gen.coroutine`.
+ This helper function runs the function in the correct context while
+ keeping the ``yield`` and ``with`` statements syntactically separate.
+
+ Example::
+
+ @gen.coroutine
+ def incorrect():
+ with StackContext(ctx):
+ # ERROR: this will raise StackContextInconsistentError
+ yield other_coroutine()
+
+ @gen.coroutine
+ def correct():
+ yield run_with_stack_context(StackContext(ctx), other_coroutine)
+
+ .. versionadded:: 3.1
+ """
+ with context:
+ return func()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/util.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/util.py
new file mode 100644
index 00000000..c1e2eb95
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/util.py
@@ -0,0 +1,184 @@
+"""Miscellaneous utility functions and classes.
+
+This module is used internally by Tornado. It is not necessarily expected
+that the functions and classes defined here will be useful to other
+applications, but they are documented here in case they are.
+
+The one public-facing part of this module is the `Configurable` class
+and its `~Configurable.configure` method, which becomes a part of the
+interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
+and `.Resolver`.
+"""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+import sys
+
+
+def import_object(name):
+ """Imports an object by name.
+
+ import_object('x') is equivalent to 'import x'.
+ import_object('x.y.z') is equivalent to 'from x.y import z'.
+
+ >>> import tornado.escape
+ >>> import_object('tornado.escape') is tornado.escape
+ True
+ >>> import_object('tornado.escape.utf8') is tornado.escape.utf8
+ True
+ >>> import_object('tornado') is tornado
+ True
+ >>> import_object('tornado.missing_module')
+ Traceback (most recent call last):
+ ...
+ ImportError: No module named missing_module
+ """
+ if name.count('.') == 0:
+ return __import__(name, None, None)
+
+ parts = name.split('.')
+ obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
+ try:
+ return getattr(obj, parts[-1])
+ except AttributeError:
+ raise ImportError("No module named %s" % parts[-1])
+
+
+# Fake unicode literal support: Python 3.2 doesn't have the u'' marker for
+# literal strings, and alternative solutions like "from __future__ import
+# unicode_literals" have other problems (see PEP 414). u() can be applied
+# to ascii strings that include \u escapes (but they must not contain
+# literal non-ascii characters).
+if type('') is not type(b''):
+ def u(s):
+ return s
+ bytes_type = bytes
+ unicode_type = str
+ basestring_type = str
+else:
+ def u(s):
+ return s.decode('unicode_escape')
+ bytes_type = str
+ unicode_type = unicode
+ basestring_type = basestring
+
+
+if sys.version_info > (3,):
+ exec("""
+def raise_exc_info(exc_info):
+ raise exc_info[1].with_traceback(exc_info[2])
+
+def exec_in(code, glob, loc=None):
+ if isinstance(code, str):
+ code = compile(code, '<string>', 'exec', dont_inherit=True)
+ exec(code, glob, loc)
+""")
+else:
+ exec("""
+def raise_exc_info(exc_info):
+ raise exc_info[0], exc_info[1], exc_info[2]
+
+def exec_in(code, glob, loc=None):
+ if isinstance(code, basestring):
+ # exec(string) inherits the caller's future imports; compile
+ # the string first to prevent that.
+ code = compile(code, '<string>', 'exec', dont_inherit=True)
+ exec code in glob, loc
+""")
+
+
+class Configurable(object):
+ """Base class for configurable interfaces.
+
+ A configurable interface is an (abstract) class whose constructor
+ acts as a factory function for one of its implementation subclasses.
+ The implementation subclass as well as optional keyword arguments to
+ its initializer can be set globally at runtime with `configure`.
+
+ By using the constructor as the factory method, the interface
+ looks like a normal class, `isinstance` works as usual, etc. This
+ pattern is most useful when the choice of implementation is likely
+ to be a global decision (e.g. when `~select.epoll` is available,
+ always use it instead of `~select.select`), or when a
+ previously-monolithic class has been split into specialized
+ subclasses.
+
+ Configurable subclasses must define the class methods
+ `configurable_base` and `configurable_default`, and use the instance
+ method `initialize` instead of ``__init__``.
+ """
+ __impl_class = None
+ __impl_kwargs = None
+
+ def __new__(cls, **kwargs):
+ base = cls.configurable_base()
+ args = {}
+ if cls is base:
+ impl = cls.configured_class()
+ if base.__impl_kwargs:
+ args.update(base.__impl_kwargs)
+ else:
+ impl = cls
+ args.update(kwargs)
+ instance = super(Configurable, cls).__new__(impl)
+ # initialize vs __init__ chosen for compatiblity with AsyncHTTPClient
+ # singleton magic. If we get rid of that we can switch to __init__
+ # here too.
+ instance.initialize(**args)
+ return instance
+
+ @classmethod
+ def configurable_base(cls):
+ """Returns the base class of a configurable hierarchy.
+
+ This will normally return the class in which it is defined.
+ (which is *not* necessarily the same as the cls classmethod parameter).
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def configurable_default(cls):
+ """Returns the implementation class to be used if none is configured."""
+ raise NotImplementedError()
+
+ def initialize(self):
+ """Initialize a `Configurable` subclass instance.
+
+ Configurable classes should use `initialize` instead of ``__init__``.
+ """
+
+ @classmethod
+ def configure(cls, impl, **kwargs):
+ """Sets the class to use when the base class is instantiated.
+
+ Keyword arguments will be saved and added to the arguments passed
+ to the constructor. This can be used to set global defaults for
+ some parameters.
+ """
+ base = cls.configurable_base()
+ if isinstance(impl, (unicode_type, bytes_type)):
+ impl = import_object(impl)
+ if impl is not None and not issubclass(impl, cls):
+ raise ValueError("Invalid subclass of %s" % cls)
+ base.__impl_class = impl
+ base.__impl_kwargs = kwargs
+
+ @classmethod
+ def configured_class(cls):
+ """Returns the currently configured class."""
+ base = cls.configurable_base()
+ if cls.__impl_class is None:
+ base.__impl_class = cls.configurable_default()
+ return base.__impl_class
+
+ @classmethod
+ def _save_configuration(cls):
+ base = cls.configurable_base()
+ return (base.__impl_class, base.__impl_kwargs)
+
+ @classmethod
+ def _restore_configuration(cls, saved):
+ base = cls.configurable_base()
+ base.__impl_class = saved[0]
+ base.__impl_kwargs = saved[1]
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/zmqstream.py
new file mode 100644
index 00000000..86a97e44
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/zmqstream.py
@@ -0,0 +1,529 @@
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A utility class to send to and recv from a non-blocking socket."""
+
+from __future__ import with_statement
+
+import sys
+
+import zmq
+from zmq.utils import jsonapi
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+from .ioloop import IOLoop
+
+try:
+ # gen_log will only import from >= 3.0
+ from tornado.log import gen_log
+ from tornado import stack_context
+except ImportError:
+ from .minitornado.log import gen_log
+ from .minitornado import stack_context
+
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+
+from zmq.utils.strtypes import bytes, unicode, basestring
+
+try:
+ callable
+except NameError:
+ callable = lambda obj: hasattr(obj, '__call__')
+
+
+class ZMQStream(object):
+ """A utility class to register callbacks when a zmq socket sends and receives
+
+ For use with zmq.eventloop.ioloop
+
+ There are three main methods
+
+ Methods:
+
+ * **on_recv(callback, copy=True):**
+ register a callback to be run every time the socket has something to receive
+ * **on_send(callback):**
+ register a callback to be run every time you call send
+ * **send(self, msg, flags=0, copy=False, callback=None):**
+ perform a send that will trigger the callback
+ if callback is passed, on_send is also called.
+
+ There are also send_multipart(), send_json(), send_pyobj()
+
+ Three other methods for deactivating the callbacks:
+
+ * **stop_on_recv():**
+ turn off the recv callback
+ * **stop_on_send():**
+ turn off the send callback
+
+ which simply call ``on_<evt>(None)``.
+
+ The entire socket interface, excluding direct recv methods, is also
+ provided, primarily through direct-linking the methods.
+ e.g.
+
+ >>> stream.bind is stream.socket.bind
+ True
+
+ """
+
+ socket = None
+ io_loop = None
+ poller = None
+
+ def __init__(self, socket, io_loop=None):
+ self.socket = socket
+ self.io_loop = io_loop or IOLoop.instance()
+ self.poller = zmq.Poller()
+
+ self._send_queue = Queue()
+ self._recv_callback = None
+ self._send_callback = None
+ self._close_callback = None
+ self._recv_copy = False
+ self._flushed = False
+
+ self._state = self.io_loop.ERROR
+ self._init_io_state()
+
+ # shortcircuit some socket methods
+ self.bind = self.socket.bind
+ self.bind_to_random_port = self.socket.bind_to_random_port
+ self.connect = self.socket.connect
+ self.setsockopt = self.socket.setsockopt
+ self.getsockopt = self.socket.getsockopt
+ self.setsockopt_string = self.socket.setsockopt_string
+ self.getsockopt_string = self.socket.getsockopt_string
+ self.setsockopt_unicode = self.socket.setsockopt_unicode
+ self.getsockopt_unicode = self.socket.getsockopt_unicode
+
+
+ def stop_on_recv(self):
+ """Disable callback and automatic receiving."""
+ return self.on_recv(None)
+
+ def stop_on_send(self):
+ """Disable callback on sending."""
+ return self.on_send(None)
+
+ def stop_on_err(self):
+ """DEPRECATED, does nothing"""
+ gen_log.warn("on_err does nothing, and will be removed")
+
+ def on_err(self, callback):
+ """DEPRECATED, does nothing"""
+ gen_log.warn("on_err does nothing, and will be removed")
+
+ def on_recv(self, callback, copy=True):
+ """Register a callback for when a message is ready to recv.
+
+ There can be only one callback registered at a time, so each
+ call to `on_recv` replaces previously registered callbacks.
+
+ on_recv(None) disables recv event polling.
+
+ Use on_recv_stream(callback) instead, to register a callback that will receive
+ both this ZMQStream and the message, instead of just the message.
+
+ Parameters
+ ----------
+
+ callback : callable
+ callback must take exactly one argument, which will be a
+ list, as returned by socket.recv_multipart()
+ if callback is None, recv callbacks are disabled.
+ copy : bool
+ copy is passed directly to recv, so if copy is False,
+ callback will receive Message objects. If copy is True,
+ then callback will receive bytes/str objects.
+
+ Returns : None
+ """
+
+ self._check_closed()
+ assert callback is None or callable(callback)
+ self._recv_callback = stack_context.wrap(callback)
+ self._recv_copy = copy
+ if callback is None:
+ self._drop_io_state(self.io_loop.READ)
+ else:
+ self._add_io_state(self.io_loop.READ)
+
+ def on_recv_stream(self, callback, copy=True):
+ """Same as on_recv, but callback will get this stream as first argument
+
+ callback must take exactly two arguments, as it will be called as::
+
+ callback(stream, msg)
+
+ Useful when a single callback should be used with multiple streams.
+ """
+ if callback is None:
+ self.stop_on_recv()
+ else:
+ self.on_recv(lambda msg: callback(self, msg), copy=copy)
+
+ def on_send(self, callback):
+ """Register a callback to be called on each send
+
+ There will be two arguments::
+
+ callback(msg, status)
+
+ * `msg` will be the list of sendable objects that was just sent
+ * `status` will be the return result of socket.send_multipart(msg) -
+ MessageTracker or None.
+
+ Non-copying sends return a MessageTracker object whose
+ `done` attribute will be True when the send is complete.
+ This allows users to track when an object is safe to write to
+ again.
+
+ The second argument will always be None if copy=True
+ on the send.
+
+ Use on_send_stream(callback) to register a callback that will be passed
+ this ZMQStream as the first argument, in addition to the other two.
+
+ on_send(None) disables recv event polling.
+
+ Parameters
+ ----------
+
+ callback : callable
+ callback must take exactly two arguments, which will be
+ the message being sent (always a list),
+ and the return result of socket.send_multipart(msg) -
+ MessageTracker or None.
+
+ if callback is None, send callbacks are disabled.
+ """
+
+ self._check_closed()
+ assert callback is None or callable(callback)
+ self._send_callback = stack_context.wrap(callback)
+
+
+ def on_send_stream(self, callback):
+ """Same as on_send, but callback will get this stream as first argument
+
+ Callback will be passed three arguments::
+
+ callback(stream, msg, status)
+
+ Useful when a single callback should be used with multiple streams.
+ """
+ if callback is None:
+ self.stop_on_send()
+ else:
+ self.on_send(lambda msg, status: callback(self, msg, status))
+
+
+ def send(self, msg, flags=0, copy=True, track=False, callback=None):
+ """Send a message, optionally also register a new callback for sends.
+ See zmq.socket.send for details.
+ """
+ return self.send_multipart([msg], flags=flags, copy=copy, track=track, callback=callback)
+
+ def send_multipart(self, msg, flags=0, copy=True, track=False, callback=None):
+ """Send a multipart message, optionally also register a new callback for sends.
+ See zmq.socket.send_multipart for details.
+ """
+ kwargs = dict(flags=flags, copy=copy, track=track)
+ self._send_queue.put((msg, kwargs))
+ callback = callback or self._send_callback
+ if callback is not None:
+ self.on_send(callback)
+ else:
+ # noop callback
+ self.on_send(lambda *args: None)
+ self._add_io_state(self.io_loop.WRITE)
+
+ def send_string(self, u, flags=0, encoding='utf-8', callback=None):
+ """Send a unicode message with an encoding.
+ See zmq.socket.send_unicode for details.
+ """
+ if not isinstance(u, basestring):
+ raise TypeError("unicode/str objects only")
+ return self.send(u.encode(encoding), flags=flags, callback=callback)
+
+ send_unicode = send_string
+
+ def send_json(self, obj, flags=0, callback=None):
+ """Send json-serialized version of an object.
+ See zmq.socket.send_json for details.
+ """
+ if jsonapi is None:
+ raise ImportError('jsonlib{1,2}, json or simplejson library is required.')
+ else:
+ msg = jsonapi.dumps(obj)
+ return self.send(msg, flags=flags, callback=callback)
+
+ def send_pyobj(self, obj, flags=0, protocol=-1, callback=None):
+ """Send a Python object as a message using pickle to serialize.
+
+ See zmq.socket.send_json for details.
+ """
+ msg = pickle.dumps(obj, protocol)
+ return self.send(msg, flags, callback=callback)
+
+ def _finish_flush(self):
+ """callback for unsetting _flushed flag."""
+ self._flushed = False
+
+ def flush(self, flag=zmq.POLLIN|zmq.POLLOUT, limit=None):
+ """Flush pending messages.
+
+ This method safely handles all pending incoming and/or outgoing messages,
+ bypassing the inner loop, passing them to the registered callbacks.
+
+ A limit can be specified, to prevent blocking under high load.
+
+ flush will return the first time ANY of these conditions are met:
+ * No more events matching the flag are pending.
+ * the total number of events handled reaches the limit.
+
+ Note that if ``flag|POLLIN != 0``, recv events will be flushed even if no callback
+ is registered, unlike normal IOLoop operation. This allows flush to be
+ used to remove *and ignore* incoming messages.
+
+ Parameters
+ ----------
+ flag : int, default=POLLIN|POLLOUT
+ 0MQ poll flags.
+ If flag|POLLIN, recv events will be flushed.
+ If flag|POLLOUT, send events will be flushed.
+ Both flags can be set at once, which is the default.
+ limit : None or int, optional
+ The maximum number of messages to send or receive.
+ Both send and recv count against this limit.
+
+ Returns
+ -------
+ int : count of events handled (both send and recv)
+ """
+ self._check_closed()
+ # unset self._flushed, so callbacks will execute, in case flush has
+ # already been called this iteration
+ already_flushed = self._flushed
+ self._flushed = False
+ # initialize counters
+ count = 0
+ def update_flag():
+ """Update the poll flag, to prevent registering POLLOUT events
+ if we don't have pending sends."""
+ return flag & zmq.POLLIN | (self.sending() and flag & zmq.POLLOUT)
+ flag = update_flag()
+ if not flag:
+ # nothing to do
+ return 0
+ self.poller.register(self.socket, flag)
+ events = self.poller.poll(0)
+ while events and (not limit or count < limit):
+ s,event = events[0]
+ if event & zmq.POLLIN: # receiving
+ self._handle_recv()
+ count += 1
+ if self.socket is None:
+ # break if socket was closed during callback
+ break
+ if event & zmq.POLLOUT and self.sending():
+ self._handle_send()
+ count += 1
+ if self.socket is None:
+ # break if socket was closed during callback
+ break
+
+ flag = update_flag()
+ if flag:
+ self.poller.register(self.socket, flag)
+ events = self.poller.poll(0)
+ else:
+ events = []
+ if count: # only bypass loop if we actually flushed something
+ # skip send/recv callbacks this iteration
+ self._flushed = True
+ # reregister them at the end of the loop
+ if not already_flushed: # don't need to do it again
+ self.io_loop.add_callback(self._finish_flush)
+ elif already_flushed:
+ self._flushed = True
+
+ # update ioloop poll state, which may have changed
+ self._rebuild_io_state()
+ return count
+
+ def set_close_callback(self, callback):
+ """Call the given callback when the stream is closed."""
+ self._close_callback = stack_context.wrap(callback)
+
+ def close(self, linger=None):
+ """Close this stream."""
+ if self.socket is not None:
+ self.io_loop.remove_handler(self.socket)
+ self.socket.close(linger)
+ self.socket = None
+ if self._close_callback:
+ self._run_callback(self._close_callback)
+
+ def receiving(self):
+ """Returns True if we are currently receiving from the stream."""
+ return self._recv_callback is not None
+
+ def sending(self):
+ """Returns True if we are currently sending to the stream."""
+ return not self._send_queue.empty()
+
+ def closed(self):
+ return self.socket is None
+
+ def _run_callback(self, callback, *args, **kwargs):
+ """Wrap running callbacks in try/except to allow us to
+ close our socket."""
+ try:
+ # Use a NullContext to ensure that all StackContexts are run
+ # inside our blanket exception handler rather than outside.
+ with stack_context.NullContext():
+ callback(*args, **kwargs)
+ except:
+ gen_log.error("Uncaught exception, closing connection.",
+ exc_info=True)
+ # Close the socket on an uncaught exception from a user callback
+ # (It would eventually get closed when the socket object is
+ # gc'd, but we don't want to rely on gc happening before we
+ # run out of file descriptors)
+ self.close()
+ # Re-raise the exception so that IOLoop.handle_callback_exception
+ # can see it and log the error
+ raise
+
+ def _handle_events(self, fd, events):
+ """This method is the actual handler for IOLoop, that gets called whenever
+ an event on my socket is posted. It dispatches to _handle_recv, etc."""
+ # print "handling events"
+ if not self.socket:
+ gen_log.warning("Got events for closed stream %s", fd)
+ return
+ try:
+ # dispatch events:
+ if events & IOLoop.ERROR:
+ gen_log.error("got POLLERR event on ZMQStream, which doesn't make sense")
+ return
+ if events & IOLoop.READ:
+ self._handle_recv()
+ if not self.socket:
+ return
+ if events & IOLoop.WRITE:
+ self._handle_send()
+ if not self.socket:
+ return
+
+ # rebuild the poll state
+ self._rebuild_io_state()
+ except:
+ gen_log.error("Uncaught exception, closing connection.",
+ exc_info=True)
+ self.close()
+ raise
+
+ def _handle_recv(self):
+ """Handle a recv event."""
+ if self._flushed:
+ return
+ try:
+ msg = self.socket.recv_multipart(zmq.NOBLOCK, copy=self._recv_copy)
+ except zmq.ZMQError as e:
+ if e.errno == zmq.EAGAIN:
+ # state changed since poll event
+ pass
+ else:
+ gen_log.error("RECV Error: %s"%zmq.strerror(e.errno))
+ else:
+ if self._recv_callback:
+ callback = self._recv_callback
+ # self._recv_callback = None
+ self._run_callback(callback, msg)
+
+ # self.update_state()
+
+
+ def _handle_send(self):
+ """Handle a send event."""
+ if self._flushed:
+ return
+ if not self.sending():
+ gen_log.error("Shouldn't have handled a send event")
+ return
+
+ msg, kwargs = self._send_queue.get()
+ try:
+ status = self.socket.send_multipart(msg, **kwargs)
+ except zmq.ZMQError as e:
+ gen_log.error("SEND Error: %s", e)
+ status = e
+ if self._send_callback:
+ callback = self._send_callback
+ self._run_callback(callback, msg, status)
+
+ # self.update_state()
+
+ def _check_closed(self):
+ if not self.socket:
+ raise IOError("Stream is closed")
+
+ def _rebuild_io_state(self):
+ """rebuild io state based on self.sending() and receiving()"""
+ if self.socket is None:
+ return
+ state = self.io_loop.ERROR
+ if self.receiving():
+ state |= self.io_loop.READ
+ if self.sending():
+ state |= self.io_loop.WRITE
+ if state != self._state:
+ self._state = state
+ self._update_handler(state)
+
+ def _add_io_state(self, state):
+ """Add io_state to poller."""
+ if not self._state & state:
+ self._state = self._state | state
+ self._update_handler(self._state)
+
+ def _drop_io_state(self, state):
+ """Stop poller from watching an io_state."""
+ if self._state & state:
+ self._state = self._state & (~state)
+ self._update_handler(self._state)
+
+ def _update_handler(self, state):
+ """Update IOLoop handler with state."""
+ if self.socket is None:
+ return
+ self.io_loop.update_handler(self.socket, state)
+
+ def _init_io_state(self):
+ """initialize the ioloop event handler"""
+ with stack_context.NullContext():
+ self.io_loop.add_handler(self.socket, self._handle_events, self._state)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/__init__.py
new file mode 100644
index 00000000..ff7e5965
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/__init__.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+#-----------------------------------------------------------------------------
+# Copyright (C) 2011-2012 Travis Cline
+#
+# This file is part of pyzmq
+# It is adapted from upstream project zeromq_gevent under the New BSD License
+#
+# Distributed under the terms of the New BSD License. The full license is in
+# the file COPYING.BSD, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+"""zmq.green - gevent compatibility with zeromq.
+
+Usage
+-----
+
+Instead of importing zmq directly, do so in the following manner:
+
+..
+
+ import zmq.green as zmq
+
+
+Any calls that would have blocked the current thread will now only block the
+current green thread.
+
+This compatibility is accomplished by ensuring the nonblocking flag is set
+before any blocking operation and the ØMQ file descriptor is polled internally
+to trigger needed events.
+"""
+
+from zmq import *
+from zmq.green.core import _Context, _Socket
+from zmq.green.poll import _Poller
+Context = _Context
+Socket = _Socket
+Poller = _Poller
+
+from zmq.green.device import device
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/core.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/core.py
new file mode 100644
index 00000000..9fc73e32
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/core.py
@@ -0,0 +1,287 @@
+#-----------------------------------------------------------------------------
+# Copyright (C) 2011-2012 Travis Cline
+#
+# This file is part of pyzmq
+# It is adapted from upstream project zeromq_gevent under the New BSD License
+#
+# Distributed under the terms of the New BSD License. The full license is in
+# the file COPYING.BSD, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+"""This module wraps the :class:`Socket` and :class:`Context` found in :mod:`pyzmq <zmq>` to be non blocking
+"""
+
+from __future__ import print_function
+
+import sys
+import time
+import warnings
+
+import zmq
+
+from zmq import Context as _original_Context
+from zmq import Socket as _original_Socket
+from .poll import _Poller
+
+import gevent
+from gevent.event import AsyncResult
+from gevent.hub import get_hub
+
+if hasattr(zmq, 'RCVTIMEO'):
+ TIMEOS = (zmq.RCVTIMEO, zmq.SNDTIMEO)
+else:
+ TIMEOS = ()
+
+def _stop(evt):
+ """simple wrapper for stopping an Event, allowing for method rename in gevent 1.0"""
+ try:
+ evt.stop()
+ except AttributeError as e:
+ # gevent<1.0 compat
+ evt.cancel()
+
+class _Socket(_original_Socket):
+ """Green version of :class:`zmq.Socket`
+
+ The following methods are overridden:
+
+ * send
+ * recv
+
+ To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
+ is deferred to the hub if a ``zmq.EAGAIN`` (retry) error is raised.
+
+ The `__state_changed` method is triggered when the zmq.FD for the socket is
+ marked as readable and triggers the necessary read and write events (which
+ are waited for in the recv and send methods).
+
+ Some double underscore prefixes are used to minimize pollution of
+ :class:`zmq.Socket`'s namespace.
+ """
+ __in_send_multipart = False
+ __in_recv_multipart = False
+ __writable = None
+ __readable = None
+ _state_event = None
+ _gevent_bug_timeout = 11.6 # timeout for not trusting gevent
+ _debug_gevent = False # turn on if you think gevent is missing events
+ _poller_class = _Poller
+
+ def __init__(self, context, socket_type):
+ _original_Socket.__init__(self, context, socket_type)
+ self.__in_send_multipart = False
+ self.__in_recv_multipart = False
+ self.__setup_events()
+
+
+ def __del__(self):
+ self.close()
+
+ def close(self, linger=None):
+ super(_Socket, self).close(linger)
+ self.__cleanup_events()
+
+ def __cleanup_events(self):
+ # close the _state_event event, keeps the number of active file descriptors down
+ if getattr(self, '_state_event', None):
+ _stop(self._state_event)
+ self._state_event = None
+ # if the socket has entered a close state resume any waiting greenlets
+ self.__writable.set()
+ self.__readable.set()
+
+ def __setup_events(self):
+ self.__readable = AsyncResult()
+ self.__writable = AsyncResult()
+ self.__readable.set()
+ self.__writable.set()
+
+ try:
+ self._state_event = get_hub().loop.io(self.getsockopt(zmq.FD), 1) # read state watcher
+ self._state_event.start(self.__state_changed)
+ except AttributeError:
+ # for gevent<1.0 compatibility
+ from gevent.core import read_event
+ self._state_event = read_event(self.getsockopt(zmq.FD), self.__state_changed, persist=True)
+
+ def __state_changed(self, event=None, _evtype=None):
+ if self.closed:
+ self.__cleanup_events()
+ return
+ try:
+ # avoid triggering __state_changed from inside __state_changed
+ events = super(_Socket, self).getsockopt(zmq.EVENTS)
+ except zmq.ZMQError as exc:
+ self.__writable.set_exception(exc)
+ self.__readable.set_exception(exc)
+ else:
+ if events & zmq.POLLOUT:
+ self.__writable.set()
+ if events & zmq.POLLIN:
+ self.__readable.set()
+
+ def _wait_write(self):
+ assert self.__writable.ready(), "Only one greenlet can be waiting on this event"
+ self.__writable = AsyncResult()
+ # timeout is because libzmq cannot be trusted to properly signal a new send event:
+ # this is effectively a maximum poll interval of 1s
+ tic = time.time()
+ dt = self._gevent_bug_timeout
+ if dt:
+ timeout = gevent.Timeout(seconds=dt)
+ else:
+ timeout = None
+ try:
+ if timeout:
+ timeout.start()
+ self.__writable.get(block=True)
+ except gevent.Timeout as t:
+ if t is not timeout:
+ raise
+ toc = time.time()
+ # gevent bug: get can raise timeout even on clean return
+ # don't display zmq bug warning for gevent bug (this is getting ridiculous)
+ if self._debug_gevent and timeout and toc-tic > dt and \
+ self.getsockopt(zmq.EVENTS) & zmq.POLLOUT:
+ print("BUG: gevent may have missed a libzmq send event on %i!" % self.FD, file=sys.stderr)
+ finally:
+ if timeout:
+ timeout.cancel()
+ self.__writable.set()
+
+ def _wait_read(self):
+ assert self.__readable.ready(), "Only one greenlet can be waiting on this event"
+ self.__readable = AsyncResult()
+ # timeout is because libzmq cannot always be trusted to play nice with libevent.
+ # I can only confirm that this actually happens for send, but lets be symmetrical
+ # with our dirty hacks.
+ # this is effectively a maximum poll interval of 1s
+ tic = time.time()
+ dt = self._gevent_bug_timeout
+ if dt:
+ timeout = gevent.Timeout(seconds=dt)
+ else:
+ timeout = None
+ try:
+ if timeout:
+ timeout.start()
+ self.__readable.get(block=True)
+ except gevent.Timeout as t:
+ if t is not timeout:
+ raise
+ toc = time.time()
+ # gevent bug: get can raise timeout even on clean return
+ # don't display zmq bug warning for gevent bug (this is getting ridiculous)
+ if self._debug_gevent and timeout and toc-tic > dt and \
+ self.getsockopt(zmq.EVENTS) & zmq.POLLIN:
+ print("BUG: gevent may have missed a libzmq recv event on %i!" % self.FD, file=sys.stderr)
+ finally:
+ if timeout:
+ timeout.cancel()
+ self.__readable.set()
+
+ def send(self, data, flags=0, copy=True, track=False):
+ """send, which will only block current greenlet
+
+ state_changed always fires exactly once (success or fail) at the
+ end of this method.
+ """
+
+ # if we're given the NOBLOCK flag act as normal and let the EAGAIN get raised
+ if flags & zmq.NOBLOCK:
+ try:
+ msg = super(_Socket, self).send(data, flags, copy, track)
+ finally:
+ if not self.__in_send_multipart:
+ self.__state_changed()
+ return msg
+ # ensure the zmq.NOBLOCK flag is part of flags
+ flags |= zmq.NOBLOCK
+ while True: # Attempt to complete this operation indefinitely, blocking the current greenlet
+ try:
+ # attempt the actual call
+ msg = super(_Socket, self).send(data, flags, copy, track)
+ except zmq.ZMQError as e:
+ # if the raised ZMQError is not EAGAIN, reraise
+ if e.errno != zmq.EAGAIN:
+ if not self.__in_send_multipart:
+ self.__state_changed()
+ raise
+ else:
+ if not self.__in_send_multipart:
+ self.__state_changed()
+ return msg
+ # defer to the event loop until we're notified the socket is writable
+ self._wait_write()
+
+ def recv(self, flags=0, copy=True, track=False):
+ """recv, which will only block current greenlet
+
+ state_changed always fires exactly once (success or fail) at the
+ end of this method.
+ """
+ if flags & zmq.NOBLOCK:
+ try:
+ msg = super(_Socket, self).recv(flags, copy, track)
+ finally:
+ if not self.__in_recv_multipart:
+ self.__state_changed()
+ return msg
+
+ flags |= zmq.NOBLOCK
+ while True:
+ try:
+ msg = super(_Socket, self).recv(flags, copy, track)
+ except zmq.ZMQError as e:
+ if e.errno != zmq.EAGAIN:
+ if not self.__in_recv_multipart:
+ self.__state_changed()
+ raise
+ else:
+ if not self.__in_recv_multipart:
+ self.__state_changed()
+ return msg
+ self._wait_read()
+
+ def send_multipart(self, *args, **kwargs):
+ """wrap send_multipart to prevent state_changed on each partial send"""
+ self.__in_send_multipart = True
+ try:
+ msg = super(_Socket, self).send_multipart(*args, **kwargs)
+ finally:
+ self.__in_send_multipart = False
+ self.__state_changed()
+ return msg
+
+ def recv_multipart(self, *args, **kwargs):
+ """wrap recv_multipart to prevent state_changed on each partial recv"""
+ self.__in_recv_multipart = True
+ try:
+ msg = super(_Socket, self).recv_multipart(*args, **kwargs)
+ finally:
+ self.__in_recv_multipart = False
+ self.__state_changed()
+ return msg
+
+ def get(self, opt):
+ """trigger state_changed on getsockopt(EVENTS)"""
+ if opt in TIMEOS:
+ warnings.warn("TIMEO socket options have no effect in zmq.green", UserWarning)
+ optval = super(_Socket, self).get(opt)
+ if opt == zmq.EVENTS:
+ self.__state_changed()
+ return optval
+
+ def set(self, opt, val):
+ """set socket option"""
+ if opt in TIMEOS:
+ warnings.warn("TIMEO socket options have no effect in zmq.green", UserWarning)
+ return super(_Socket, self).set(opt, val)
+
+
+class _Context(_original_Context):
+ """Replacement for :class:`zmq.Context`
+
+ Ensures that the greened Socket above is used in calls to `socket`.
+ """
+ _socket_class = _Socket
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/device.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/device.py
new file mode 100644
index 00000000..4b070237
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/device.py
@@ -0,0 +1,32 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import zmq
+from zmq.green import Poller
+
+def device(device_type, isocket, osocket):
+ """Start a zeromq device (gevent-compatible).
+
+ Unlike the true zmq.device, this does not release the GIL.
+
+ Parameters
+ ----------
+ device_type : (QUEUE, FORWARDER, STREAMER)
+ The type of device to start (ignored).
+ isocket : Socket
+ The Socket instance for the incoming traffic.
+ osocket : Socket
+ The Socket instance for the outbound traffic.
+ """
+ p = Poller()
+ if osocket == -1:
+ osocket = isocket
+ p.register(isocket, zmq.POLLIN)
+ p.register(osocket, zmq.POLLIN)
+
+ while True:
+ events = dict(p.poll())
+ if isocket in events:
+ osocket.send_multipart(isocket.recv_multipart())
+ if osocket in events:
+ isocket.send_multipart(osocket.recv_multipart())
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/eventloop/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/eventloop/__init__.py
new file mode 100644
index 00000000..c5150efe
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/eventloop/__init__.py
@@ -0,0 +1,3 @@
+from zmq.green.eventloop.ioloop import IOLoop
+
+__all__ = ['IOLoop'] \ No newline at end of file
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/eventloop/ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/eventloop/ioloop.py
new file mode 100644
index 00000000..e12fd5e9
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/eventloop/ioloop.py
@@ -0,0 +1,33 @@
+from zmq.eventloop.ioloop import *
+from zmq.green import Poller
+
+RealIOLoop = IOLoop
+RealZMQPoller = ZMQPoller
+
+class IOLoop(RealIOLoop):
+
+ def initialize(self, impl=None):
+ impl = _poll() if impl is None else impl
+ super(IOLoop, self).initialize(impl)
+
+ @staticmethod
+ def instance():
+ """Returns a global `IOLoop` instance.
+
+ Most applications have a single, global `IOLoop` running on the
+ main thread. Use this method to get this instance from
+ another thread. To get the current thread's `IOLoop`, use `current()`.
+ """
+ # install this class as the active IOLoop implementation
+ # when using tornado 3
+ if tornado_version >= (3,):
+ PollIOLoop.configure(IOLoop)
+ return PollIOLoop.instance()
+
+
+class ZMQPoller(RealZMQPoller):
+ """gevent-compatible version of ioloop.ZMQPoller"""
+ def __init__(self):
+ self._poller = Poller()
+
+_poll = ZMQPoller
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/eventloop/zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/eventloop/zmqstream.py
new file mode 100644
index 00000000..90fbd1f5
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/eventloop/zmqstream.py
@@ -0,0 +1,11 @@
+from zmq.eventloop.zmqstream import *
+
+from zmq.green.eventloop.ioloop import IOLoop
+
+RealZMQStream = ZMQStream
+
+class ZMQStream(RealZMQStream):
+
+ def __init__(self, socket, io_loop=None):
+ io_loop = io_loop or IOLoop.instance()
+ super(ZMQStream, self).__init__(socket, io_loop=io_loop)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/poll.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/poll.py
new file mode 100644
index 00000000..8f016129
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/green/poll.py
@@ -0,0 +1,95 @@
+import zmq
+import gevent
+from gevent import select
+
+from zmq import Poller as _original_Poller
+
+
+class _Poller(_original_Poller):
+ """Replacement for :class:`zmq.Poller`
+
+ Ensures that the greened Poller below is used in calls to
+ :meth:`zmq.Poller.poll`.
+ """
+ _gevent_bug_timeout = 1.33 # minimum poll interval, for working around gevent bug
+
+ def _get_descriptors(self):
+ """Returns three elements tuple with socket descriptors ready
+ for gevent.select.select
+ """
+ rlist = []
+ wlist = []
+ xlist = []
+
+ for socket, flags in self.sockets:
+ if isinstance(socket, zmq.Socket):
+ rlist.append(socket.getsockopt(zmq.FD))
+ continue
+ elif isinstance(socket, int):
+ fd = socket
+ elif hasattr(socket, 'fileno'):
+ try:
+ fd = int(socket.fileno())
+ except:
+ raise ValueError('fileno() must return an valid integer fd')
+ else:
+ raise TypeError('Socket must be a 0MQ socket, an integer fd '
+ 'or have a fileno() method: %r' % socket)
+
+ if flags & zmq.POLLIN:
+ rlist.append(fd)
+ if flags & zmq.POLLOUT:
+ wlist.append(fd)
+ if flags & zmq.POLLERR:
+ xlist.append(fd)
+
+ return (rlist, wlist, xlist)
+
+ def poll(self, timeout=-1):
+ """Overridden method to ensure that the green version of
+ Poller is used.
+
+ Behaves the same as :meth:`zmq.core.Poller.poll`
+ """
+
+ if timeout is None:
+ timeout = -1
+
+ if timeout < 0:
+ timeout = -1
+
+ rlist = None
+ wlist = None
+ xlist = None
+
+ if timeout > 0:
+ tout = gevent.Timeout.start_new(timeout/1000.0)
+
+ try:
+ # Loop until timeout or events available
+ rlist, wlist, xlist = self._get_descriptors()
+ while True:
+ events = super(_Poller, self).poll(0)
+ if events or timeout == 0:
+ return events
+
+ # wait for activity on sockets in a green way
+ # set a minimum poll frequency,
+ # because gevent < 1.0 cannot be trusted to catch edge-triggered FD events
+ _bug_timeout = gevent.Timeout.start_new(self._gevent_bug_timeout)
+ try:
+ select.select(rlist, wlist, xlist)
+ except gevent.Timeout as t:
+ if t is not _bug_timeout:
+ raise
+ finally:
+ _bug_timeout.cancel()
+
+ except gevent.Timeout as t:
+ if t is not tout:
+ raise
+ return []
+ finally:
+ if timeout > 0:
+ tout.cancel()
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/libzmq.so.3 b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/libzmq.so.3
new file mode 100644
index 00000000..16980c27
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/libzmq.so.3
Binary files differ
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/log/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/log/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/log/__init__.py
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/log/handlers.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/log/handlers.py
new file mode 100644
index 00000000..5ff21bf3
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/log/handlers.py
@@ -0,0 +1,146 @@
+"""pyzmq logging handlers.
+
+This mainly defines the PUBHandler object for publishing logging messages over
+a zmq.PUB socket.
+
+The PUBHandler can be used with the regular logging module, as in::
+
+ >>> import logging
+ >>> handler = PUBHandler('tcp://127.0.0.1:12345')
+ >>> handler.root_topic = 'foo'
+ >>> logger = logging.getLogger('foobar')
+ >>> logger.setLevel(logging.DEBUG)
+ >>> logger.addHandler(handler)
+
+After this point, all messages logged by ``logger`` will be published on the
+PUB socket.
+
+Code adapted from StarCluster:
+
+ http://github.com/jtriley/StarCluster/blob/master/starcluster/logger.py
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import logging
+from logging import INFO, DEBUG, WARN, ERROR, FATAL
+
+import zmq
+from zmq.utils.strtypes import bytes, unicode, cast_bytes
+
+
+TOPIC_DELIM="::" # delimiter for splitting topics on the receiving end.
+
+
+class PUBHandler(logging.Handler):
+ """A basic logging handler that emits log messages through a PUB socket.
+
+ Takes a PUB socket already bound to interfaces or an interface to bind to.
+
+ Example::
+
+ sock = context.socket(zmq.PUB)
+ sock.bind('inproc://log')
+ handler = PUBHandler(sock)
+
+ Or::
+
+ handler = PUBHandler('inproc://loc')
+
+ These are equivalent.
+
+ Log messages handled by this handler are broadcast with ZMQ topics
+ ``this.root_topic`` comes first, followed by the log level
+ (DEBUG,INFO,etc.), followed by any additional subtopics specified in the
+ message by: log.debug("subtopic.subsub::the real message")
+ """
+ root_topic=""
+ socket = None
+
+ formatters = {
+ logging.DEBUG: logging.Formatter(
+ "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"),
+ logging.INFO: logging.Formatter("%(message)s\n"),
+ logging.WARN: logging.Formatter(
+ "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"),
+ logging.ERROR: logging.Formatter(
+ "%(levelname)s %(filename)s:%(lineno)d - %(message)s - %(exc_info)s\n"),
+ logging.CRITICAL: logging.Formatter(
+ "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n")}
+
+ def __init__(self, interface_or_socket, context=None):
+ logging.Handler.__init__(self)
+ if isinstance(interface_or_socket, zmq.Socket):
+ self.socket = interface_or_socket
+ self.ctx = self.socket.context
+ else:
+ self.ctx = context or zmq.Context()
+ self.socket = self.ctx.socket(zmq.PUB)
+ self.socket.bind(interface_or_socket)
+
+ def format(self,record):
+ """Format a record."""
+ return self.formatters[record.levelno].format(record)
+
+ def emit(self, record):
+ """Emit a log message on my socket."""
+ try:
+ topic, record.msg = record.msg.split(TOPIC_DELIM,1)
+ except Exception:
+ topic = ""
+ try:
+ bmsg = cast_bytes(self.format(record))
+ except Exception:
+ self.handleError(record)
+ return
+
+ topic_list = []
+
+ if self.root_topic:
+ topic_list.append(self.root_topic)
+
+ topic_list.append(record.levelname)
+
+ if topic:
+ topic_list.append(topic)
+
+ btopic = b'.'.join(cast_bytes(t) for t in topic_list)
+
+ self.socket.send_multipart([btopic, bmsg])
+
+
+class TopicLogger(logging.Logger):
+ """A simple wrapper that takes an additional argument to log methods.
+
+ All the regular methods exist, but instead of one msg argument, two
+ arguments: topic, msg are passed.
+
+ That is::
+
+ logger.debug('msg')
+
+ Would become::
+
+ logger.debug('topic.sub', 'msg')
+ """
+ def log(self, level, topic, msg, *args, **kwargs):
+ """Log 'msg % args' with level and topic.
+
+ To pass exception information, use the keyword argument exc_info
+ with a True value::
+
+ logger.log(level, "zmq.fun", "We have a %s",
+ "mysterious problem", exc_info=1)
+ """
+ logging.Logger.log(self, level, '%s::%s'%(topic,msg), *args, **kwargs)
+
+# Generate the methods of TopicLogger, since they are just adding a
+# topic prefix to a message.
+for name in "debug warn warning error critical fatal".split():
+ meth = getattr(logging.Logger,name)
+ setattr(TopicLogger, name,
+ lambda self, level, topic, msg, *args, **kwargs:
+ meth(self, level, topic+TOPIC_DELIM+msg,*args, **kwargs))
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/ssh/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/ssh/__init__.py
new file mode 100644
index 00000000..57f09568
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/ssh/__init__.py
@@ -0,0 +1 @@
+from zmq.ssh.tunnel import *
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/ssh/forward.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/ssh/forward.py
new file mode 100644
index 00000000..2d619462
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/ssh/forward.py
@@ -0,0 +1,91 @@
+#
+# This file is adapted from a paramiko demo, and thus licensed under LGPL 2.1.
+# Original Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+# Edits Copyright (C) 2010 The IPython Team
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distrubuted in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA.
+
+"""
+Sample script showing how to do local port forwarding over paramiko.
+
+This script connects to the requested SSH server and sets up local port
+forwarding (the openssh -L option) from a local port through a tunneled
+connection to a destination reachable from the SSH server machine.
+"""
+
+from __future__ import print_function
+
+import logging
+import select
+try: # Python 3
+ import socketserver
+except ImportError: # Python 2
+ import SocketServer as socketserver
+
+logger = logging.getLogger('ssh')
+
+class ForwardServer (socketserver.ThreadingTCPServer):
+ daemon_threads = True
+ allow_reuse_address = True
+
+
+class Handler (socketserver.BaseRequestHandler):
+
+ def handle(self):
+ try:
+ chan = self.ssh_transport.open_channel('direct-tcpip',
+ (self.chain_host, self.chain_port),
+ self.request.getpeername())
+ except Exception as e:
+ logger.debug('Incoming request to %s:%d failed: %s' % (self.chain_host,
+ self.chain_port,
+ repr(e)))
+ return
+ if chan is None:
+ logger.debug('Incoming request to %s:%d was rejected by the SSH server.' %
+ (self.chain_host, self.chain_port))
+ return
+
+ logger.debug('Connected! Tunnel open %r -> %r -> %r' % (self.request.getpeername(),
+ chan.getpeername(), (self.chain_host, self.chain_port)))
+ while True:
+ r, w, x = select.select([self.request, chan], [], [])
+ if self.request in r:
+ data = self.request.recv(1024)
+ if len(data) == 0:
+ break
+ chan.send(data)
+ if chan in r:
+ data = chan.recv(1024)
+ if len(data) == 0:
+ break
+ self.request.send(data)
+ chan.close()
+ self.request.close()
+ logger.debug('Tunnel closed ')
+
+
+def forward_tunnel(local_port, remote_host, remote_port, transport):
+ # this is a little convoluted, but lets me configure things for the Handler
+ # object. (SocketServer doesn't give Handlers any way to access the outer
+ # server normally.)
+ class SubHander (Handler):
+ chain_host = remote_host
+ chain_port = remote_port
+ ssh_transport = transport
+ ForwardServer(('127.0.0.1', local_port), SubHander).serve_forever()
+
+
+__all__ = ['forward_tunnel']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/ssh/tunnel.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/ssh/tunnel.py
new file mode 100644
index 00000000..5a0c5433
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/ssh/tunnel.py
@@ -0,0 +1,376 @@
+"""Basic ssh tunnel utilities, and convenience functions for tunneling
+zeromq connections.
+"""
+
+# Copyright (C) 2010-2011 IPython Development Team
+# Copyright (C) 2011- PyZMQ Developers
+#
+# Redistributed from IPython under the terms of the BSD License.
+
+
+from __future__ import print_function
+
+import atexit
+import os
+import signal
+import socket
+import sys
+import warnings
+from getpass import getpass, getuser
+from multiprocessing import Process
+
+try:
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore', DeprecationWarning)
+ import paramiko
+ SSHException = paramiko.ssh_exception.SSHException
+except ImportError:
+ paramiko = None
+ class SSHException(Exception):
+ pass
+else:
+ from .forward import forward_tunnel
+
+try:
+ import pexpect
+except ImportError:
+ pexpect = None
+
+
+_random_ports = set()
+
+def select_random_ports(n):
+ """Selects and return n random ports that are available."""
+ ports = []
+ for i in range(n):
+ sock = socket.socket()
+ sock.bind(('', 0))
+ while sock.getsockname()[1] in _random_ports:
+ sock.close()
+ sock = socket.socket()
+ sock.bind(('', 0))
+ ports.append(sock)
+ for i, sock in enumerate(ports):
+ port = sock.getsockname()[1]
+ sock.close()
+ ports[i] = port
+ _random_ports.add(port)
+ return ports
+
+
+#-----------------------------------------------------------------------------
+# Check for passwordless login
+#-----------------------------------------------------------------------------
+
+def try_passwordless_ssh(server, keyfile, paramiko=None):
+ """Attempt to make an ssh connection without a password.
+ This is mainly used for requiring password input only once
+ when many tunnels may be connected to the same server.
+
+ If paramiko is None, the default for the platform is chosen.
+ """
+ if paramiko is None:
+ paramiko = sys.platform == 'win32'
+ if not paramiko:
+ f = _try_passwordless_openssh
+ else:
+ f = _try_passwordless_paramiko
+ return f(server, keyfile)
+
+def _try_passwordless_openssh(server, keyfile):
+ """Try passwordless login with shell ssh command."""
+ if pexpect is None:
+ raise ImportError("pexpect unavailable, use paramiko")
+ cmd = 'ssh -f '+ server
+ if keyfile:
+ cmd += ' -i ' + keyfile
+ cmd += ' exit'
+
+ # pop SSH_ASKPASS from env
+ env = os.environ.copy()
+ env.pop('SSH_ASKPASS', None)
+
+ ssh_newkey = 'Are you sure you want to continue connecting'
+ p = pexpect.spawn(cmd, env=env)
+ while True:
+ try:
+ i = p.expect([ssh_newkey, '[Pp]assword:'], timeout=.1)
+ if i==0:
+ raise SSHException('The authenticity of the host can\'t be established.')
+ except pexpect.TIMEOUT:
+ continue
+ except pexpect.EOF:
+ return True
+ else:
+ return False
+
+def _try_passwordless_paramiko(server, keyfile):
+ """Try passwordless login with paramiko."""
+ if paramiko is None:
+ msg = "Paramiko unavaliable, "
+ if sys.platform == 'win32':
+ msg += "Paramiko is required for ssh tunneled connections on Windows."
+ else:
+ msg += "use OpenSSH."
+ raise ImportError(msg)
+ username, server, port = _split_server(server)
+ client = paramiko.SSHClient()
+ client.load_system_host_keys()
+ client.set_missing_host_key_policy(paramiko.WarningPolicy())
+ try:
+ client.connect(server, port, username=username, key_filename=keyfile,
+ look_for_keys=True)
+ except paramiko.AuthenticationException:
+ return False
+ else:
+ client.close()
+ return True
+
+
+def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
+ """Connect a socket to an address via an ssh tunnel.
+
+ This is a wrapper for socket.connect(addr), when addr is not accessible
+ from the local machine. It simply creates an ssh tunnel using the remaining args,
+ and calls socket.connect('tcp://localhost:lport') where lport is the randomly
+ selected local port of the tunnel.
+
+ """
+ new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout)
+ socket.connect(new_url)
+ return tunnel
+
+
+def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
+ """Open a tunneled connection from a 0MQ url.
+
+ For use inside tunnel_connection.
+
+ Returns
+ -------
+
+ (url, tunnel) : (str, object)
+ The 0MQ url that has been forwarded, and the tunnel object
+ """
+
+ lport = select_random_ports(1)[0]
+ transport, addr = addr.split('://')
+ ip,rport = addr.split(':')
+ rport = int(rport)
+ if paramiko is None:
+ paramiko = sys.platform == 'win32'
+ if paramiko:
+ tunnelf = paramiko_tunnel
+ else:
+ tunnelf = openssh_tunnel
+
+ tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout)
+ return 'tcp://127.0.0.1:%i'%lport, tunnel
+
+def openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
+ """Create an ssh tunnel using command-line ssh that connects port lport
+ on this machine to localhost:rport on server. The tunnel
+ will automatically close when not in use, remaining open
+ for a minimum of timeout seconds for an initial connection.
+
+ This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
+ as seen from `server`.
+
+ keyfile and password may be specified, but ssh config is checked for defaults.
+
+ Parameters
+ ----------
+
+ lport : int
+ local port for connecting to the tunnel from this machine.
+ rport : int
+ port on the remote machine to connect to.
+ server : str
+ The ssh server to connect to. The full ssh server string will be parsed.
+ user@server:port
+ remoteip : str [Default: 127.0.0.1]
+ The remote ip, specifying the destination of the tunnel.
+ Default is localhost, which means that the tunnel would redirect
+ localhost:lport on this machine to localhost:rport on the *server*.
+
+ keyfile : str; path to public key file
+ This specifies a key to be used in ssh login, default None.
+ Regular default ssh keys will be used without specifying this argument.
+ password : str;
+ Your ssh password to the ssh server. Note that if this is left None,
+ you will be prompted for it if passwordless key based login is unavailable.
+ timeout : int [default: 60]
+ The time (in seconds) after which no activity will result in the tunnel
+ closing. This prevents orphaned tunnels from running forever.
+ """
+ if pexpect is None:
+ raise ImportError("pexpect unavailable, use paramiko_tunnel")
+ ssh="ssh "
+ if keyfile:
+ ssh += "-i " + keyfile
+
+ if ':' in server:
+ server, port = server.split(':')
+ ssh += " -p %s" % port
+
+ cmd = "%s -O check %s" % (ssh, server)
+ (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
+ if not exitstatus:
+ pid = int(output[output.find("(pid=")+5:output.find(")")])
+ cmd = "%s -O forward -L 127.0.0.1:%i:%s:%i %s" % (
+ ssh, lport, remoteip, rport, server)
+ (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
+ if not exitstatus:
+ atexit.register(_stop_tunnel, cmd.replace("-O forward", "-O cancel", 1))
+ return pid
+ cmd = "%s -f -S none -L 127.0.0.1:%i:%s:%i %s sleep %i" % (
+ ssh, lport, remoteip, rport, server, timeout)
+
+ # pop SSH_ASKPASS from env
+ env = os.environ.copy()
+ env.pop('SSH_ASKPASS', None)
+
+ ssh_newkey = 'Are you sure you want to continue connecting'
+ tunnel = pexpect.spawn(cmd, env=env)
+ failed = False
+ while True:
+ try:
+ i = tunnel.expect([ssh_newkey, '[Pp]assword:'], timeout=.1)
+ if i==0:
+ raise SSHException('The authenticity of the host can\'t be established.')
+ except pexpect.TIMEOUT:
+ continue
+ except pexpect.EOF:
+ if tunnel.exitstatus:
+ print(tunnel.exitstatus)
+ print(tunnel.before)
+ print(tunnel.after)
+ raise RuntimeError("tunnel '%s' failed to start"%(cmd))
+ else:
+ return tunnel.pid
+ else:
+ if failed:
+ print("Password rejected, try again")
+ password=None
+ if password is None:
+ password = getpass("%s's password: "%(server))
+ tunnel.sendline(password)
+ failed = True
+
+def _stop_tunnel(cmd):
+ pexpect.run(cmd)
+
+def _split_server(server):
+ if '@' in server:
+ username,server = server.split('@', 1)
+ else:
+ username = getuser()
+ if ':' in server:
+ server, port = server.split(':')
+ port = int(port)
+ else:
+ port = 22
+ return username, server, port
+
+def paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
+ """launch a tunner with paramiko in a subprocess. This should only be used
+ when shell ssh is unavailable (e.g. Windows).
+
+ This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
+ as seen from `server`.
+
+ If you are familiar with ssh tunnels, this creates the tunnel:
+
+ ssh server -L localhost:lport:remoteip:rport
+
+ keyfile and password may be specified, but ssh config is checked for defaults.
+
+
+ Parameters
+ ----------
+
+ lport : int
+ local port for connecting to the tunnel from this machine.
+ rport : int
+ port on the remote machine to connect to.
+ server : str
+ The ssh server to connect to. The full ssh server string will be parsed.
+ user@server:port
+ remoteip : str [Default: 127.0.0.1]
+ The remote ip, specifying the destination of the tunnel.
+ Default is localhost, which means that the tunnel would redirect
+ localhost:lport on this machine to localhost:rport on the *server*.
+
+ keyfile : str; path to public key file
+ This specifies a key to be used in ssh login, default None.
+ Regular default ssh keys will be used without specifying this argument.
+ password : str;
+ Your ssh password to the ssh server. Note that if this is left None,
+ you will be prompted for it if passwordless key based login is unavailable.
+ timeout : int [default: 60]
+ The time (in seconds) after which no activity will result in the tunnel
+ closing. This prevents orphaned tunnels from running forever.
+
+ """
+ if paramiko is None:
+ raise ImportError("Paramiko not available")
+
+ if password is None:
+ if not _try_passwordless_paramiko(server, keyfile):
+ password = getpass("%s's password: "%(server))
+
+ p = Process(target=_paramiko_tunnel,
+ args=(lport, rport, server, remoteip),
+ kwargs=dict(keyfile=keyfile, password=password))
+ p.daemon=False
+ p.start()
+ atexit.register(_shutdown_process, p)
+ return p
+
+def _shutdown_process(p):
+ if p.is_alive():
+ p.terminate()
+
+def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):
+ """Function for actually starting a paramiko tunnel, to be passed
+ to multiprocessing.Process(target=this), and not called directly.
+ """
+ username, server, port = _split_server(server)
+ client = paramiko.SSHClient()
+ client.load_system_host_keys()
+ client.set_missing_host_key_policy(paramiko.WarningPolicy())
+
+ try:
+ client.connect(server, port, username=username, key_filename=keyfile,
+ look_for_keys=True, password=password)
+# except paramiko.AuthenticationException:
+# if password is None:
+# password = getpass("%s@%s's password: "%(username, server))
+# client.connect(server, port, username=username, password=password)
+# else:
+# raise
+ except Exception as e:
+ print('*** Failed to connect to %s:%d: %r' % (server, port, e))
+ sys.exit(1)
+
+ # Don't let SIGINT kill the tunnel subprocess
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+ try:
+ forward_tunnel(lport, remoteip, rport, client.get_transport())
+ except KeyboardInterrupt:
+ print('SIGINT: Port forwarding stopped cleanly')
+ sys.exit(0)
+ except Exception as e:
+ print("Port forwarding stopped uncleanly: %s"%e)
+ sys.exit(255)
+
+if sys.platform == 'win32':
+ ssh_tunnel = paramiko_tunnel
+else:
+ ssh_tunnel = openssh_tunnel
+
+
+__all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh']
+
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/__init__.py
new file mode 100644
index 00000000..d0510a44
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/__init__.py
@@ -0,0 +1,27 @@
+"""pure-Python sugar wrappers for core 0MQ objects."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from zmq.sugar import (
+ constants, context, frame, poll, socket, tracker, version
+)
+from zmq import error
+
+__all__ = ['constants']
+for submod in (
+ constants, context, error, frame, poll, socket, tracker, version
+):
+ __all__.extend(submod.__all__)
+
+from zmq.error import *
+from zmq.sugar.context import *
+from zmq.sugar.tracker import *
+from zmq.sugar.socket import *
+from zmq.sugar.constants import *
+from zmq.sugar.frame import *
+from zmq.sugar.poll import *
+# from zmq.sugar.stopwatch import *
+# from zmq.sugar._device import *
+from zmq.sugar.version import *
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/attrsettr.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/attrsettr.py
new file mode 100644
index 00000000..4bbd36d6
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/attrsettr.py
@@ -0,0 +1,52 @@
+# coding: utf-8
+"""Mixin for mapping set/getattr to self.set/get"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from . import constants
+
+class AttributeSetter(object):
+
+ def __setattr__(self, key, value):
+ """set zmq options by attribute"""
+
+ # regular setattr only allowed for class-defined attributes
+ for obj in [self] + self.__class__.mro():
+ if key in obj.__dict__:
+ object.__setattr__(self, key, value)
+ return
+
+ upper_key = key.upper()
+ try:
+ opt = getattr(constants, upper_key)
+ except AttributeError:
+ raise AttributeError("%s has no such option: %s" % (
+ self.__class__.__name__, upper_key)
+ )
+ else:
+ self._set_attr_opt(upper_key, opt, value)
+
+ def _set_attr_opt(self, name, opt, value):
+ """override if setattr should do something other than call self.set"""
+ self.set(opt, value)
+
+ def __getattr__(self, key):
+ """get zmq options by attribute"""
+ upper_key = key.upper()
+ try:
+ opt = getattr(constants, upper_key)
+ except AttributeError:
+ raise AttributeError("%s has no such option: %s" % (
+ self.__class__.__name__, upper_key)
+ )
+ else:
+ return self._get_attr_opt(upper_key, opt)
+
+ def _get_attr_opt(self, name, opt):
+ """override if getattr should do something other than call self.get"""
+ return self.get(opt)
+
+
+__all__ = ['AttributeSetter']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/constants.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/constants.py
new file mode 100644
index 00000000..88281176
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/constants.py
@@ -0,0 +1,98 @@
+"""0MQ Constants."""
+
+# Copyright (c) PyZMQ Developers.
+# Distributed under the terms of the Modified BSD License.
+
+from zmq.backend import constants
+from zmq.utils.constant_names import (
+ base_names,
+ switched_sockopt_names,
+ int_sockopt_names,
+ int64_sockopt_names,
+ bytes_sockopt_names,
+ fd_sockopt_names,
+ ctx_opt_names,
+ msg_opt_names,
+)
+
+#-----------------------------------------------------------------------------
+# Python module level constants
+#-----------------------------------------------------------------------------
+
+__all__ = [
+ 'int_sockopts',
+ 'int64_sockopts',
+ 'bytes_sockopts',
+ 'ctx_opts',
+ 'ctx_opt_names',
+ ]
+
+int_sockopts = set()
+int64_sockopts = set()
+bytes_sockopts = set()
+fd_sockopts = set()
+ctx_opts = set()
+msg_opts = set()
+
+
+if constants.VERSION < 30000:
+ int64_sockopt_names.extend(switched_sockopt_names)
+else:
+ int_sockopt_names.extend(switched_sockopt_names)
+
+_UNDEFINED = -9999
+
+def _add_constant(name, container=None):
+ """add a constant to be defined
+
+ optionally add it to one of the sets for use in get/setopt checkers
+ """
+ c = getattr(constants, name, _UNDEFINED)
+ if c == _UNDEFINED:
+ return
+ globals()[name] = c
+ __all__.append(name)
+ if container is not None:
+ container.add(c)
+ return c
+
+for name in base_names:
+ _add_constant(name)
+
+for name in int_sockopt_names:
+ _add_constant(name, int_sockopts)
+
+for name in int64_sockopt_names:
+ _add_constant(name, int64_sockopts)
+
+for name in bytes_sockopt_names:
+ _add_constant(name, bytes_sockopts)
+
+for name in fd_sockopt_names:
+ _add_constant(name, fd_sockopts)
+
+for name in ctx_opt_names:
+ _add_constant(name, ctx_opts)
+
+for name in msg_opt_names:
+ _add_constant(name, msg_opts)
+
+# ensure some aliases are always defined
+aliases = [
+ ('DONTWAIT', 'NOBLOCK'),
+ ('XREQ', 'DEALER'),
+ ('XREP', 'ROUTER'),
+]
+for group in aliases:
+ undefined = set()
+ found = None
+ for name in group:
+ value = getattr(constants, name, -1)
+ if value != -1:
+ found = value
+ else:
+ undefined.add(name)
+ if found is not None:
+ for name in undefined:
+ globals()[name] = found
+ __all__.append(name)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/context.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/context.py
new file mode 100644
index 00000000..86a9c5dc
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/context.py
@@ -0,0 +1,192 @@
+# coding: utf-8
+"""Python bindings for 0MQ."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import atexit
+import weakref
+
+from zmq.backend import Context as ContextBase
+from . import constants
+from .attrsettr import AttributeSetter
+from .constants import ENOTSUP, ctx_opt_names
+from .socket import Socket
+from zmq.error import ZMQError
+
+from zmq.utils.interop import cast_int_addr
+
+
class Context(ContextBase, AttributeSetter):
    """Create a zmq Context

    A zmq Context creates sockets via its ``ctx.socket`` method.
    """
    # Default socket options applied to every socket created by this context;
    # the per-instance dict is assigned in __init__.
    sockopts = None
    # Cached singleton returned by Context.instance().
    _instance = None
    # True when wrapping a foreign libzmq context we do not own (see shadow()).
    _shadow = False
    # Set from an atexit hook so __del__ can skip term() during interpreter
    # shutdown, when terminating is not safe.
    _exiting = False

    def __init__(self, io_threads=1, **kwargs):
        super(Context, self).__init__(io_threads=io_threads, **kwargs)
        if kwargs.get('shadow', False):
            self._shadow = True
        else:
            self._shadow = False
        self.sockopts = {}

        self._exiting = False
        if not self._shadow:
            # Use a weakref so the atexit hook does not keep the context alive.
            ctx_ref = weakref.ref(self)
            def _notify_atexit():
                ctx = ctx_ref()
                if ctx is not None:
                    ctx._exiting = True
            atexit.register(_notify_atexit)

    def __del__(self):
        """deleting a Context should terminate it, without trying non-threadsafe destroy"""
        # Shadowed contexts are not ours to terminate; during interpreter exit
        # (_exiting) calling term() is unsafe, so both cases are skipped.
        if not self._shadow and not self._exiting:
            self.term()

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.term()

    @classmethod
    def shadow(cls, address):
        """Shadow an existing libzmq context

        address is the integer address of the libzmq context
        or an FFI pointer to it.

        .. versionadded:: 14.1
        """
        address = cast_int_addr(address)
        return cls(shadow=address)

    @classmethod
    def shadow_pyczmq(cls, ctx):
        """Shadow an existing pyczmq context

        ctx is the FFI `zctx_t *` pointer

        .. versionadded:: 14.1
        """
        from pyczmq import zctx

        underlying = zctx.underlying(ctx)
        address = cast_int_addr(underlying)
        return cls(shadow=address)

    # static method copied from tornado IOLoop.instance
    @classmethod
    def instance(cls, io_threads=1):
        """Returns a global Context instance.

        Most single-threaded applications have a single, global Context.
        Use this method instead of passing around Context instances
        throughout your code.

        A common pattern for classes that depend on Contexts is to use
        a default argument to enable programs with multiple Contexts
        but not require the argument for simpler applications:

            class MyClass(object):
                def __init__(self, context=None):
                    self.context = context or Context.instance()
        """
        # A closed singleton is replaced rather than reused.
        if cls._instance is None or cls._instance.closed:
            cls._instance = cls(io_threads=io_threads)
        return cls._instance

    #-------------------------------------------------------------------------
    # Hooks for ctxopt completion
    #-------------------------------------------------------------------------

    def __dir__(self):
        # Advertise context-option names for tab completion.
        keys = dir(self.__class__)

        for collection in (
            ctx_opt_names,
        ):
            keys.extend(collection)
        return keys

    #-------------------------------------------------------------------------
    # Creating Sockets
    #-------------------------------------------------------------------------

    @property
    def _socket_class(self):
        # Subclasses (e.g. green wrappers) override this to change the
        # socket type created by Context.socket().
        return Socket

    def socket(self, socket_type):
        """Create a Socket associated with this Context.

        Parameters
        ----------
        socket_type : int
            The socket type, which can be any of the 0MQ socket types:
            REQ, REP, PUB, SUB, PAIR, DEALER, ROUTER, PULL, PUSH, etc.
        """
        if self.closed:
            raise ZMQError(ENOTSUP)
        s = self._socket_class(self, socket_type)
        # Apply the defaults recorded via setsockopt()/attribute assignment.
        for opt, value in self.sockopts.items():
            try:
                s.setsockopt(opt, value)
            except ZMQError:
                # ignore ZMQErrors, which are likely for socket options
                # that do not apply to a particular socket type, e.g.
                # SUBSCRIBE for non-SUB sockets.
                pass
        return s

    def setsockopt(self, opt, value):
        """set default socket options for new sockets created by this Context

        .. versionadded:: 13.0
        """
        self.sockopts[opt] = value

    def getsockopt(self, opt):
        """get default socket options for new sockets created by this Context

        .. versionadded:: 13.0
        """
        # NOTE(review): raises KeyError if `opt` was never set as a default.
        return self.sockopts[opt]

    def _set_attr_opt(self, name, opt, value):
        """set default sockopts as attributes"""
        # Context options are set on the context itself; anything else is
        # stored as a default for future sockets.
        if name in constants.ctx_opt_names:
            return self.set(opt, value)
        else:
            self.sockopts[opt] = value

    def _get_attr_opt(self, name, opt):
        """get default sockopts as attributes"""
        if name in constants.ctx_opt_names:
            return self.get(opt)
        else:
            if opt not in self.sockopts:
                raise AttributeError(name)
            else:
                return self.sockopts[opt]

    def __delattr__(self, key):
        """delete default sockopts as attributes"""
        # Attribute names are lowercase versions of the constant names.
        key = key.upper()
        try:
            opt = getattr(constants, key)
        except AttributeError:
            raise AttributeError("no such socket option: %s" % key)
        else:
            if opt not in self.sockopts:
                raise AttributeError(key)
            else:
                del self.sockopts[opt]

__all__ = ['Context']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/frame.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/frame.py
new file mode 100644
index 00000000..9f556c86
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/frame.py
@@ -0,0 +1,19 @@
+# coding: utf-8
+"""0MQ Frame pure Python methods."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from .attrsettr import AttributeSetter
+from zmq.backend import Frame as FrameBase
+
+
class Frame(FrameBase, AttributeSetter):
    """Pure-Python sugar on the backend Frame type.

    Adds dict-style item access, delegating to ``self.get``.
    """
    def __getitem__(self, key):
        # map Frame['User-Id'] to Frame.get('User-Id')
        return self.get(key)

# keep deprecated alias
Message = Frame
__all__ = ['Frame', 'Message']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/poll.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/poll.py
new file mode 100644
index 00000000..c7b1d1bb
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/poll.py
@@ -0,0 +1,161 @@
+"""0MQ polling related functions and classes."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import zmq
+from zmq.backend import zmq_poll
+from .constants import POLLIN, POLLOUT, POLLERR
+
+#-----------------------------------------------------------------------------
+# Polling related methods
+#-----------------------------------------------------------------------------
+
+
class Poller(object):
    """A stateful poll interface that mirrors Python's built-in poll."""

    # Class-level defaults are immutable sentinels only; the mutable
    # containers are created per-instance in __init__.  (The original code
    # used a class-level ``_map = {}``, which is a shared mutable dict across
    # any instance that skips __init__.)
    sockets = None  # list of (socket, flags) pairs, in registration order
    _map = None     # socket -> index into self.sockets

    def __init__(self):
        self.sockets = []
        self._map = {}

    def __contains__(self, socket):
        return socket in self._map

    def register(self, socket, flags=POLLIN|POLLOUT):
        """p.register(socket, flags=POLLIN|POLLOUT)

        Register a 0MQ socket or native fd for I/O monitoring.

        register(s,0) is equivalent to unregister(s).

        Parameters
        ----------
        socket : zmq.Socket or native socket
            A zmq.Socket or any Python object having a ``fileno()``
            method that returns a valid file descriptor.
        flags : int
            The events to watch for.  Can be POLLIN, POLLOUT or POLLIN|POLLOUT.
            If `flags=0`, socket will be unregistered.
        """
        if flags:
            if socket in self._map:
                # already registered: update the flags in place
                idx = self._map[socket]
                self.sockets[idx] = (socket, flags)
            else:
                idx = len(self.sockets)
                self.sockets.append((socket, flags))
                self._map[socket] = idx
        elif socket in self._map:
            # unregister sockets registered with no events
            self.unregister(socket)
        else:
            # ignore new sockets with no events
            pass

    def modify(self, socket, flags=POLLIN|POLLOUT):
        """Modify the flags for an already registered 0MQ socket or native fd."""
        self.register(socket, flags)

    def unregister(self, socket):
        """Remove a 0MQ socket or native fd for I/O monitoring.

        Parameters
        ----------
        socket : Socket
            The socket instance to stop polling.
        """
        idx = self._map.pop(socket)
        self.sockets.pop(idx)
        # everything registered after the removed entry moved down one slot
        for socket, flags in self.sockets[idx:]:
            self._map[socket] -= 1

    def poll(self, timeout=None):
        """Poll the registered 0MQ or native fds for I/O.

        Parameters
        ----------
        timeout : float, int
            The timeout in milliseconds. If None, no `timeout` (infinite). This
            is in milliseconds to be compatible with ``select.poll()``. The
            underlying zmq_poll uses microseconds and we convert to that in
            this function.

        Returns
        -------
        events : list of tuples
            The list of events that are ready to be processed.
            This is a list of tuples of the form ``(socket, event)``, where the 0MQ Socket
            or integer fd is the first element, and the poll event mask (POLLIN, POLLOUT) is the second.
            It is common to call ``events = dict(poller.poll())``,
            which turns the list of tuples into a mapping of ``socket : event``.
        """
        if timeout is None or timeout < 0:
            timeout = -1
        elif isinstance(timeout, float):
            # zmq_poll takes an integral millisecond timeout
            timeout = int(timeout)
        return zmq_poll(self.sockets, timeout=timeout)
+
+
def select(rlist, wlist, xlist, timeout=None):
    """select(rlist, wlist, xlist, timeout=None) -> (rlist, wlist, xlist)

    Return the result of poll as a lists of sockets ready for r/w/exception.

    This has the same interface as Python's built-in ``select.select()`` function.

    Parameters
    ----------
    timeout : float, int, optional
        The timeout in seconds. If None, no timeout (infinite). This is in seconds to be
        compatible with ``select.select()``. The underlying zmq_poll uses milliseconds
        and we convert to that in this function.
    rlist : list of sockets/FDs
        sockets/FDs to be polled for read events
    wlist : list of sockets/FDs
        sockets/FDs to be polled for write events
    xlist : list of sockets/FDs
        sockets/FDs to be polled for error events

    Returns
    -------
    (rlist, wlist, xlist) : tuple of lists of sockets (length 3)
        Lists correspond to sockets available for read/write/error events respectively.
    """
    if timeout is None:
        timeout = -1
    # Convert seconds -> milliseconds for zmq_poll; any negative value
    # means "wait forever".
    timeout = int(timeout * 1000.0)
    if timeout < 0:
        timeout = -1
    # Merge the three lists into a single socket -> event-mask mapping so
    # each socket is registered once with the union of its requested events.
    wanted = {}
    for seq, flag in ((rlist, POLLIN), (wlist, POLLOUT), (xlist, POLLERR)):
        for s in seq:
            wanted[s] = wanted.get(s, 0) | flag
    ready = zmq_poll(list(wanted.items()), timeout)
    readable, writable, errored = [], [], []
    for s, flags in ready:
        if flags & POLLIN:
            readable.append(s)
        if flags & POLLOUT:
            writable.append(s)
        if flags & POLLERR:
            errored.append(s)
    return readable, writable, errored

#-----------------------------------------------------------------------------
# Symbols to export
#-----------------------------------------------------------------------------

__all__ = ['Poller', 'select']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/socket.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/socket.py
new file mode 100644
index 00000000..c91589d7
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/socket.py
@@ -0,0 +1,495 @@
+# coding: utf-8
+"""0MQ Socket pure Python methods."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import codecs
+import random
+import warnings
+
+import zmq
+from zmq.backend import Socket as SocketBase
+from .poll import Poller
+from . import constants
+from .attrsettr import AttributeSetter
+from zmq.error import ZMQError, ZMQBindError
+from zmq.utils import jsonapi
+from zmq.utils.strtypes import bytes,unicode,basestring
+from zmq.utils.interop import cast_int_addr
+
+from .constants import (
+ SNDMORE, ENOTSUP, POLLIN,
+ int64_sockopt_names,
+ int_sockopt_names,
+ bytes_sockopt_names,
+ fd_sockopt_names,
+)
+try:
+ import cPickle
+ pickle = cPickle
+except:
+ cPickle = None
+ import pickle
+
+try:
+ DEFAULT_PROTOCOL = pickle.DEFAULT_PROTOCOL
+except AttributeError:
+ DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL
+
+
class Socket(SocketBase, AttributeSetter):
    """The ZMQ socket object

    To create a Socket, first create a Context::

        ctx = zmq.Context.instance()

    then call ``ctx.socket(socket_type)``::

        s = ctx.socket(zmq.ROUTER)

    """
    # True when wrapping a foreign libzmq socket we do not own (see shadow()).
    _shadow = False

    def __del__(self):
        # Only close sockets we own; shadowed sockets belong to someone else.
        if not self._shadow:
            self.close()

    # socket as context manager:
    def __enter__(self):
        """Sockets are context managers

        .. versionadded:: 14.4
        """
        return self

    def __exit__(self, *args, **kwargs):
        self.close()

    #-------------------------------------------------------------------------
    # Socket creation
    #-------------------------------------------------------------------------

    @classmethod
    def shadow(cls, address):
        """Shadow an existing libzmq socket

        address is the integer address of the libzmq socket
        or an FFI pointer to it.

        .. versionadded:: 14.1
        """
        address = cast_int_addr(address)
        return cls(shadow=address)

    #-------------------------------------------------------------------------
    # Deprecated aliases
    #-------------------------------------------------------------------------

    @property
    def socket_type(self):
        warnings.warn("Socket.socket_type is deprecated, use Socket.type",
            DeprecationWarning
        )
        return self.type

    #-------------------------------------------------------------------------
    # Hooks for sockopt completion
    #-------------------------------------------------------------------------

    def __dir__(self):
        # Advertise sockopt names for tab completion.
        keys = dir(self.__class__)
        for collection in (
            bytes_sockopt_names,
            int_sockopt_names,
            int64_sockopt_names,
            fd_sockopt_names,
        ):
            keys.extend(collection)
        return keys

    #-------------------------------------------------------------------------
    # Getting/Setting options
    #-------------------------------------------------------------------------
    setsockopt = SocketBase.set
    getsockopt = SocketBase.get

    def set_string(self, option, optval, encoding='utf-8'):
        """set socket options with a unicode object

        This is simply a wrapper for setsockopt to protect from encoding ambiguity.

        See the 0MQ documentation for details on specific options.

        Parameters
        ----------
        option : int
            The name of the option to set. Can be any of: SUBSCRIBE,
            UNSUBSCRIBE, IDENTITY
        optval : unicode string (unicode on py2, str on py3)
            The value of the option to set.
        encoding : str
            The encoding to be used, default is utf8
        """
        if not isinstance(optval, unicode):
            raise TypeError("unicode strings only")
        return self.set(option, optval.encode(encoding))

    setsockopt_unicode = setsockopt_string = set_string

    def get_string(self, option, encoding='utf-8'):
        """get the value of a socket option

        See the 0MQ documentation for details on specific options.

        Parameters
        ----------
        option : int
            The option to retrieve.

        Returns
        -------
        optval : unicode string (unicode on py2, str on py3)
            The value of the option as a unicode string.
        """
        # Only bytes-valued options can be decoded to text.
        if option not in constants.bytes_sockopts:
            raise TypeError("option %i will not return a string to be decoded"%option)
        return self.getsockopt(option).decode(encoding)

    getsockopt_unicode = getsockopt_string = get_string

    def bind_to_random_port(self, addr, min_port=49152, max_port=65536, max_tries=100):
        """bind this socket to a random port in a range

        Parameters
        ----------
        addr : str
            The address string without the port to pass to ``Socket.bind()``.
        min_port : int, optional
            The minimum port in the range of ports to try (inclusive).
        max_port : int, optional
            The maximum port in the range of ports to try (exclusive).
        max_tries : int, optional
            The maximum number of bind attempts to make.

        Returns
        -------
        port : int
            The port the socket was bound to.

        Raises
        ------
        ZMQBindError
            if `max_tries` reached before successful bind
        """
        for i in range(max_tries):
            try:
                port = random.randrange(min_port, max_port)
                self.bind('%s:%s' % (addr, port))
            except ZMQError as exception:
                # only "address in use" warrants a retry; anything else is fatal
                if not exception.errno == zmq.EADDRINUSE:
                    raise
            else:
                return port
        raise ZMQBindError("Could not bind socket to random port.")

    def get_hwm(self):
        """get the High Water Mark

        On libzmq >= 3, this gets SNDHWM if available, otherwise RCVHWM
        """
        major = zmq.zmq_version_info()[0]
        if major >= 3:
            # return sndhwm, fallback on rcvhwm
            try:
                return self.getsockopt(zmq.SNDHWM)
            except zmq.ZMQError:
                pass

            return self.getsockopt(zmq.RCVHWM)
        else:
            return self.getsockopt(zmq.HWM)

    def set_hwm(self, value):
        """set the High Water Mark

        On libzmq >= 3, this sets both SNDHWM and RCVHWM
        """
        major = zmq.zmq_version_info()[0]
        if major >= 3:
            raised = None
            try:
                self.sndhwm = value
            except Exception as e:
                raised = e
            # BUGFIX: the second handler previously read `e` without binding
            # it (`except Exception:` then `raised = e`), which raises
            # NameError on Python 3 because the `as e` name from the first
            # handler is deleted when its block ends.
            try:
                self.rcvhwm = value
            except Exception as e:
                raised = e

            if raised:
                raise raised
        else:
            return self.setsockopt(zmq.HWM, value)

    hwm = property(get_hwm, set_hwm,
        """property for High Water Mark

        Setting hwm sets both SNDHWM and RCVHWM as appropriate.
        It gets SNDHWM if available, otherwise RCVHWM.
        """
    )

    #-------------------------------------------------------------------------
    # Sending and receiving messages
    #-------------------------------------------------------------------------

    def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
        """send a sequence of buffers as a multipart message

        The zmq.SNDMORE flag is added to all msg parts before the last.

        Parameters
        ----------
        msg_parts : iterable
            A sequence of objects to send as a multipart message. Each element
            can be any sendable object (Frame, bytes, buffer-providers)
        flags : int, optional
            SNDMORE is handled automatically for frames before the last.
        copy : bool, optional
            Should the frame(s) be sent in a copying or non-copying manner.
        track : bool, optional
            Should the frame(s) be tracked for notification that ZMQ has
            finished with it (ignored if copy=True).

        Returns
        -------
        None : if copy or not track
        MessageTracker : if track and not copy
            a MessageTracker object, whose `pending` property will
            be True until the last send is completed.
        """
        for msg in msg_parts[:-1]:
            self.send(msg, SNDMORE|flags, copy=copy, track=track)
        # Send the last part without the extra SNDMORE flag.
        return self.send(msg_parts[-1], flags, copy=copy, track=track)

    def recv_multipart(self, flags=0, copy=True, track=False):
        """receive a multipart message as a list of bytes or Frame objects

        Parameters
        ----------
        flags : int, optional
            Any supported flag: NOBLOCK. If NOBLOCK is set, this method
            will raise a ZMQError with EAGAIN if a message is not ready.
            If NOBLOCK is not set, then this method will block until a
            message arrives.
        copy : bool, optional
            Should the message frame(s) be received in a copying or non-copying manner?
            If False a Frame object is returned for each part, if True a copy of
            the bytes is made for each frame.
        track : bool, optional
            Should the message frame(s) be tracked for notification that ZMQ has
            finished with it? (ignored if copy=True)

        Returns
        -------
        msg_parts : list
            A list of frames in the multipart message; either Frames or bytes,
            depending on `copy`.

        """
        parts = [self.recv(flags, copy=copy, track=track)]
        # have first part already, only loop while more to receive
        while self.getsockopt(zmq.RCVMORE):
            part = self.recv(flags, copy=copy, track=track)
            parts.append(part)

        return parts

    def send_string(self, u, flags=0, copy=True, encoding='utf-8'):
        """send a Python unicode string as a message with an encoding

        0MQ communicates with raw bytes, so you must encode/decode
        text (unicode on py2, str on py3) around 0MQ.

        Parameters
        ----------
        u : Python unicode string (unicode on py2, str on py3)
            The unicode string to send.
        flags : int, optional
            Any valid send flag.
        encoding : str [default: 'utf-8']
            The encoding to be used
        """
        if not isinstance(u, basestring):
            raise TypeError("unicode/str objects only")
        return self.send(u.encode(encoding), flags=flags, copy=copy)

    send_unicode = send_string

    def recv_string(self, flags=0, encoding='utf-8'):
        """receive a unicode string, as sent by send_string

        Parameters
        ----------
        flags : int
            Any valid recv flag.
        encoding : str [default: 'utf-8']
            The encoding to be used

        Returns
        -------
        s : unicode string (unicode on py2, str on py3)
            The Python unicode string that arrives as encoded bytes.
        """
        b = self.recv(flags=flags)
        return b.decode(encoding)

    recv_unicode = recv_string

    def send_pyobj(self, obj, flags=0, protocol=DEFAULT_PROTOCOL):
        """send a Python object as a message using pickle to serialize

        Parameters
        ----------
        obj : Python object
            The Python object to send.
        flags : int
            Any valid send flag.
        protocol : int
            The pickle protocol number to use. The default is pickle.DEFAULT_PROTOCOL
            where defined, and pickle.HIGHEST_PROTOCOL elsewhere.
        """
        msg = pickle.dumps(obj, protocol)
        return self.send(msg, flags)

    def recv_pyobj(self, flags=0):
        """receive a Python object as a message using pickle to serialize

        Parameters
        ----------
        flags : int
            Any valid recv flag.

        Returns
        -------
        obj : Python object
            The Python object that arrives as a message.
        """
        s = self.recv(flags)
        return pickle.loads(s)

    def send_json(self, obj, flags=0, **kwargs):
        """send a Python object as a message using json to serialize

        Keyword arguments are passed on to json.dumps

        Parameters
        ----------
        obj : Python object
            The Python object to send
        flags : int
            Any valid send flag
        """
        msg = jsonapi.dumps(obj, **kwargs)
        return self.send(msg, flags)

    def recv_json(self, flags=0, **kwargs):
        """receive a Python object as a message using json to serialize

        Keyword arguments are passed on to json.loads

        Parameters
        ----------
        flags : int
            Any valid recv flag.

        Returns
        -------
        obj : Python object
            The Python object that arrives as a message.
        """
        msg = self.recv(flags)
        return jsonapi.loads(msg, **kwargs)

    _poller_class = Poller

    def poll(self, timeout=None, flags=POLLIN):
        """poll the socket for events

        The default is to poll forever for incoming
        events. Timeout is in milliseconds, if specified.

        Parameters
        ----------
        timeout : int [default: None]
            The timeout (in milliseconds) to wait for an event. If unspecified
            (or specified None), will wait forever for an event.
        flags : bitfield (int) [default: POLLIN]
            The event flags to poll for (any combination of POLLIN|POLLOUT).
            The default is to check for incoming events (POLLIN).

        Returns
        -------
        events : bitfield (int)
            The events that are ready and waiting. Will be 0 if no events were ready
            by the time timeout was reached.
        """

        if self.closed:
            raise ZMQError(ENOTSUP)

        p = self._poller_class()
        p.register(self, flags)
        evts = dict(p.poll(timeout))
        # return 0 if no events, otherwise return event bitfield
        return evts.get(self, 0)

    def get_monitor_socket(self, events=None, addr=None):
        """Return a connected PAIR socket ready to receive the event notifications.

        .. versionadded:: libzmq-4.0
        .. versionadded:: 14.0

        Parameters
        ----------
        events : bitfield (int) [default: ZMQ_EVENTS_ALL]
            The bitmask defining which events are wanted.
        addr : string [default: None]
            The optional endpoint for the monitoring sockets.

        Returns
        -------
        socket : (PAIR)
            The socket is already connected and ready to receive messages.
        """
        # safe-guard, method only available on libzmq >= 4
        if zmq.zmq_version_info() < (4,):
            raise NotImplementedError("get_monitor_socket requires libzmq >= 4, have %s" % zmq.zmq_version())
        if addr is None:
            # create endpoint name from internal fd
            addr = "inproc://monitor.s-%d" % self.FD
        if events is None:
            # use all events
            events = zmq.EVENT_ALL
        # attach monitoring socket
        self.monitor(addr, events)
        # create new PAIR socket and connect it
        ret = self.context.socket(zmq.PAIR)
        ret.connect(addr)
        return ret

    def disable_monitor(self):
        """Shutdown the PAIR socket (created using get_monitor_socket)
        that is serving socket events.

        .. versionadded:: 14.4
        """
        self.monitor(None, 0)


__all__ = ['Socket']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/tracker.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/tracker.py
new file mode 100644
index 00000000..fb8c007f
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/tracker.py
@@ -0,0 +1,120 @@
+"""Tracker for zero-copy messages with 0MQ."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import time
+
+try:
+ # below 3.3
+ from threading import _Event as Event
+except (ImportError, AttributeError):
+ # python throws ImportError, cython throws AttributeError
+ from threading import Event
+
+from zmq.error import NotDone
+from zmq.backend import Frame
+
class MessageTracker(object):
    """MessageTracker(*towatch)

    A class for tracking if 0MQ is done using one or more messages.

    When you send a 0MQ message, it is not sent immediately. The 0MQ IO thread
    sends the message at some later time. Often you want to know when 0MQ has
    actually sent the message though. This is complicated by the fact that
    a single 0MQ message can be sent multiple times using different sockets.
    This class allows you to track all of the 0MQ usages of a message.

    Parameters
    ----------
    *towatch : tuple of Event, MessageTracker, Message instances.
        This list of objects to track. This class can track the low-level
        Events used by the Message class, other MessageTrackers or
        actual Messages.
    """
    events = None  # set of Event objects to wait on
    peers = None   # set of other MessageTrackers this tracker depends on

    def __init__(self, *towatch):
        """MessageTracker(*towatch)

        Create a message tracker to track a set of messages.

        Parameters
        ----------
        *towatch : tuple of Event, MessageTracker, Message instances.
            This list of objects to track. This class can track the low-level
            Events used by the Message class, other MessageTrackers or
            actual Messages.
        """
        self.events = set()
        self.peers = set()
        for obj in towatch:
            if isinstance(obj, Event):
                self.events.add(obj)
            elif isinstance(obj, MessageTracker):
                self.peers.add(obj)
            elif isinstance(obj, Frame):
                # frames are tracked indirectly through their own tracker
                if not obj.tracker:
                    raise ValueError("Not a tracked message")
                self.peers.add(obj.tracker)
            else:
                raise TypeError("Require Events or Message Frames, not %s"%type(obj))

    @property
    def done(self):
        """Is 0MQ completely done with the message(s) being tracked?"""
        for evt in self.events:
            if not evt.is_set():
                return False
        for pm in self.peers:
            if not pm.done:
                return False
        return True

    def wait(self, timeout=-1):
        """mt.wait(timeout=-1)

        Wait for 0MQ to be done with the message or until `timeout`.

        Parameters
        ----------
        timeout : float [default: -1, wait forever]
            Maximum time in (s) to wait before raising NotDone.

        Returns
        -------
        None
            if done before `timeout`

        Raises
        ------
        NotDone
            if `timeout` reached before I am done.
        """
        tic = time.time()
        # negative (or False) timeout means "wait forever", bounded at a week
        if timeout is False or timeout < 0:
            remaining = 3600*24*7 # a week
        else:
            remaining = timeout
        # NOTE: the original had a dead `done = False` assignment here; removed.
        for evt in self.events:
            if remaining < 0:
                raise NotDone
            evt.wait(timeout=remaining)
            if not evt.is_set():
                raise NotDone
            # charge the elapsed time against the remaining budget
            toc = time.time()
            remaining -= (toc-tic)
            tic = toc

        for peer in self.peers:
            if remaining < 0:
                raise NotDone
            peer.wait(timeout=remaining)
            toc = time.time()
            remaining -= (toc-tic)
            tic = toc

__all__ = ['MessageTracker']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/version.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/version.py
new file mode 100644
index 00000000..ea8fbbc4
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/version.py
@@ -0,0 +1,48 @@
+"""PyZMQ and 0MQ version functions."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from zmq.backend import zmq_version_info
+
+
VERSION_MAJOR = 14
VERSION_MINOR = 5
VERSION_PATCH = 0
VERSION_EXTRA = ""
__version__ = '{0}.{1}.{2}'.format(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)

if VERSION_EXTRA:
    # development/pre-release builds carry an extra tag and an `inf` marker
    __version__ = '-'.join([__version__, VERSION_EXTRA])
    version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH, float('inf'))
else:
    version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)

__revision__ = ''

def pyzmq_version():
    """return the version of pyzmq as a string"""
    if not __revision__:
        return __version__
    # append the abbreviated revision hash when one is recorded
    return '@'.join([__version__, __revision__[:6]])

def pyzmq_version_info():
    """return the pyzmq version as a tuple of at least three numbers

    If pyzmq is a development version, `inf` will be appended after the third integer.
    """
    return version_info
+
+
def zmq_version():
    """return the version of libzmq as a string"""
    # zmq_version_info() comes from the compiled backend; the %i formatting
    # implies it returns a 3-tuple of ints (major, minor, patch).
    return "%i.%i.%i" % zmq_version_info()


__all__ = ['zmq_version', 'zmq_version_info',
    'pyzmq_version','pyzmq_version_info',
    '__version__', '__revision__'
]
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/__init__.py
new file mode 100644
index 00000000..325a3f19
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/__init__.py
@@ -0,0 +1,211 @@
+# Copyright (c) PyZMQ Developers.
+# Distributed under the terms of the Modified BSD License.
+
+import functools
+import sys
+import time
+from threading import Thread
+
+from unittest import TestCase
+
+import zmq
+from zmq.utils import jsonapi
+
+try:
+ import gevent
+ from zmq import green as gzmq
+ have_gevent = True
+except ImportError:
+ have_gevent = False
+
+try:
+ from unittest import SkipTest
+except ImportError:
+ try:
+ from nose import SkipTest
+ except ImportError:
+ class SkipTest(Exception):
+ pass
+
+PYPY = 'PyPy' in sys.version
+
+#-----------------------------------------------------------------------------
+# skip decorators (directly from unittest)
+#-----------------------------------------------------------------------------
+
+_id = lambda x: x
+
+def skip(reason):
+ """
+ Unconditionally skip a test.
+ """
+ def decorator(test_item):
+ if not (isinstance(test_item, type) and issubclass(test_item, TestCase)):
+ @functools.wraps(test_item)
+ def skip_wrapper(*args, **kwargs):
+ raise SkipTest(reason)
+ test_item = skip_wrapper
+
+ test_item.__unittest_skip__ = True
+ test_item.__unittest_skip_why__ = reason
+ return test_item
+ return decorator
+
+def skip_if(condition, reason="Skipped"):
+ """
+ Skip a test if the condition is true.
+ """
+ if condition:
+ return skip(reason)
+ return _id
+
+skip_pypy = skip_if(PYPY, "Doesn't work on PyPy")
+
+#-----------------------------------------------------------------------------
+# Base test class
+#-----------------------------------------------------------------------------
+
+class BaseZMQTestCase(TestCase):
+ green = False
+
+ @property
+ def Context(self):
+ if self.green:
+ return gzmq.Context
+ else:
+ return zmq.Context
+
+ def socket(self, socket_type):
+ s = self.context.socket(socket_type)
+ self.sockets.append(s)
+ return s
+
+ def setUp(self):
+ if self.green and not have_gevent:
+ raise SkipTest("requires gevent")
+ self.context = self.Context.instance()
+ self.sockets = []
+
+ def tearDown(self):
+ contexts = set([self.context])
+ while self.sockets:
+ sock = self.sockets.pop()
+ contexts.add(sock.context) # in case additional contexts are created
+ sock.close(0)
+ for ctx in contexts:
+ t = Thread(target=ctx.term)
+ t.daemon = True
+ t.start()
+ t.join(timeout=2)
+ if t.is_alive():
+ # reset Context.instance, so the failure to term doesn't corrupt subsequent tests
+ zmq.sugar.context.Context._instance = None
+ raise RuntimeError("context could not terminate, open sockets likely remain in test")
+
+ def create_bound_pair(self, type1=zmq.PAIR, type2=zmq.PAIR, interface='tcp://127.0.0.1'):
+ """Create a bound socket pair using a random port."""
+ s1 = self.context.socket(type1)
+ s1.setsockopt(zmq.LINGER, 0)
+ port = s1.bind_to_random_port(interface)
+ s2 = self.context.socket(type2)
+ s2.setsockopt(zmq.LINGER, 0)
+ s2.connect('%s:%s' % (interface, port))
+ self.sockets.extend([s1,s2])
+ return s1, s2
+
+ def ping_pong(self, s1, s2, msg):
+ s1.send(msg)
+ msg2 = s2.recv()
+ s2.send(msg2)
+ msg3 = s1.recv()
+ return msg3
+
+ def ping_pong_json(self, s1, s2, o):
+ if jsonapi.jsonmod is None:
+ raise SkipTest("No json library")
+ s1.send_json(o)
+ o2 = s2.recv_json()
+ s2.send_json(o2)
+ o3 = s1.recv_json()
+ return o3
+
+ def ping_pong_pyobj(self, s1, s2, o):
+ s1.send_pyobj(o)
+ o2 = s2.recv_pyobj()
+ s2.send_pyobj(o2)
+ o3 = s1.recv_pyobj()
+ return o3
+
+ def assertRaisesErrno(self, errno, func, *args, **kwargs):
+ try:
+ func(*args, **kwargs)
+ except zmq.ZMQError as e:
+ self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \
+got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno)))
+ else:
+ self.fail("Function did not raise any error")
+
+ def _select_recv(self, multipart, socket, **kwargs):
+ """call recv[_multipart] in a way that raises if there is nothing to receive"""
+ if zmq.zmq_version_info() >= (3,1,0):
+ # zmq 3.1 has a bug, where poll can return false positives,
+ # so we wait a little bit just in case
+ # See LIBZMQ-280 on JIRA
+ time.sleep(0.1)
+
+ r,w,x = zmq.select([socket], [], [], timeout=5)
+ assert len(r) > 0, "Should have received a message"
+ kwargs['flags'] = zmq.DONTWAIT | kwargs.get('flags', 0)
+
+ recv = socket.recv_multipart if multipart else socket.recv
+ return recv(**kwargs)
+
+ def recv(self, socket, **kwargs):
+ """call recv in a way that raises if there is nothing to receive"""
+ return self._select_recv(False, socket, **kwargs)
+
+ def recv_multipart(self, socket, **kwargs):
+ """call recv_multipart in a way that raises if there is nothing to receive"""
+ return self._select_recv(True, socket, **kwargs)
+
+
+class PollZMQTestCase(BaseZMQTestCase):
+ pass
+
+class GreenTest:
+ """Mixin for making green versions of test classes"""
+ green = True
+
+ def assertRaisesErrno(self, errno, func, *args, **kwargs):
+ if errno == zmq.EAGAIN:
+ raise SkipTest("Skipping because we're green.")
+ try:
+ func(*args, **kwargs)
+ except zmq.ZMQError:
+ e = sys.exc_info()[1]
+ self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \
+got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno)))
+ else:
+ self.fail("Function did not raise any error")
+
+ def tearDown(self):
+ contexts = set([self.context])
+ while self.sockets:
+ sock = self.sockets.pop()
+ contexts.add(sock.context) # in case additional contexts are created
+ sock.close()
+ try:
+ gevent.joinall([gevent.spawn(ctx.term) for ctx in contexts], timeout=2, raise_error=True)
+ except gevent.Timeout:
+ raise RuntimeError("context could not terminate, open sockets likely remain in test")
+
+ def skip_green(self):
+ raise SkipTest("Skipping because we are green")
+
+def skip_green(f):
+ def skipping_test(self, *args, **kwargs):
+ if self.green:
+ raise SkipTest("Skipping because we are green")
+ else:
+ return f(self, *args, **kwargs)
+ return skipping_test
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_auth.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_auth.py
new file mode 100644
index 00000000..d350f61f
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_auth.py
@@ -0,0 +1,431 @@
+# -*- coding: utf8 -*-
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import logging
+import os
+import shutil
+import sys
+import tempfile
+
+import zmq.auth
+from zmq.auth.ioloop import IOLoopAuthenticator
+from zmq.auth.thread import ThreadAuthenticator
+
+from zmq.eventloop import ioloop, zmqstream
+from zmq.tests import (BaseZMQTestCase, SkipTest)
+
+class BaseAuthTestCase(BaseZMQTestCase):
+ def setUp(self):
+ if zmq.zmq_version_info() < (4,0):
+ raise SkipTest("security is new in libzmq 4.0")
+ try:
+ zmq.curve_keypair()
+ except zmq.ZMQError:
+ raise SkipTest("security requires libzmq to be linked against libsodium")
+ super(BaseAuthTestCase, self).setUp()
+ # enable debug logging while we run tests
+ logging.getLogger('zmq.auth').setLevel(logging.DEBUG)
+ self.auth = self.make_auth()
+ self.auth.start()
+ self.base_dir, self.public_keys_dir, self.secret_keys_dir = self.create_certs()
+
+ def make_auth(self):
+ raise NotImplementedError()
+
+ def tearDown(self):
+ if self.auth:
+ self.auth.stop()
+ self.auth = None
+ self.remove_certs(self.base_dir)
+ super(BaseAuthTestCase, self).tearDown()
+
+ def create_certs(self):
+ """Create CURVE certificates for a test"""
+
+ # Create temporary CURVE keypairs for this test run. We create all keys in a
+ # temp directory and then move them into the appropriate private or public
+ # directory.
+
+ base_dir = tempfile.mkdtemp()
+ keys_dir = os.path.join(base_dir, 'certificates')
+ public_keys_dir = os.path.join(base_dir, 'public_keys')
+ secret_keys_dir = os.path.join(base_dir, 'private_keys')
+
+ os.mkdir(keys_dir)
+ os.mkdir(public_keys_dir)
+ os.mkdir(secret_keys_dir)
+
+ server_public_file, server_secret_file = zmq.auth.create_certificates(keys_dir, "server")
+ client_public_file, client_secret_file = zmq.auth.create_certificates(keys_dir, "client")
+
+ for key_file in os.listdir(keys_dir):
+ if key_file.endswith(".key"):
+ shutil.move(os.path.join(keys_dir, key_file),
+ os.path.join(public_keys_dir, '.'))
+
+ for key_file in os.listdir(keys_dir):
+ if key_file.endswith(".key_secret"):
+ shutil.move(os.path.join(keys_dir, key_file),
+ os.path.join(secret_keys_dir, '.'))
+
+ return (base_dir, public_keys_dir, secret_keys_dir)
+
+ def remove_certs(self, base_dir):
+ """Remove certificates for a test"""
+ shutil.rmtree(base_dir)
+
+ def load_certs(self, secret_keys_dir):
+ """Return server and client certificate keys"""
+ server_secret_file = os.path.join(secret_keys_dir, "server.key_secret")
+ client_secret_file = os.path.join(secret_keys_dir, "client.key_secret")
+
+ server_public, server_secret = zmq.auth.load_certificate(server_secret_file)
+ client_public, client_secret = zmq.auth.load_certificate(client_secret_file)
+
+ return server_public, server_secret, client_public, client_secret
+
+
+class TestThreadAuthentication(BaseAuthTestCase):
+ """Test authentication running in a thread"""
+
+ def make_auth(self):
+ return ThreadAuthenticator(self.context)
+
+ def can_connect(self, server, client):
+ """Check if client can connect to server using tcp transport"""
+ result = False
+ iface = 'tcp://127.0.0.1'
+ port = server.bind_to_random_port(iface)
+ client.connect("%s:%i" % (iface, port))
+ msg = [b"Hello World"]
+ server.send_multipart(msg)
+ if client.poll(1000):
+ rcvd_msg = client.recv_multipart()
+ self.assertEqual(rcvd_msg, msg)
+ result = True
+ return result
+
+ def test_null(self):
+ """threaded auth - NULL"""
+ # A default NULL connection should always succeed, and not
+ # go through our authentication infrastructure at all.
+ self.auth.stop()
+ self.auth = None
+
+ server = self.socket(zmq.PUSH)
+ client = self.socket(zmq.PULL)
+ self.assertTrue(self.can_connect(server, client))
+
+ # By setting a domain we switch on authentication for NULL sockets,
+ # though no policies are configured yet. The client connection
+ # should still be allowed.
+ server = self.socket(zmq.PUSH)
+ server.zap_domain = b'global'
+ client = self.socket(zmq.PULL)
+ self.assertTrue(self.can_connect(server, client))
+
+ def test_blacklist(self):
+ """threaded auth - Blacklist"""
+ # Blacklist 127.0.0.1, connection should fail
+ self.auth.deny('127.0.0.1')
+ server = self.socket(zmq.PUSH)
+ # By setting a domain we switch on authentication for NULL sockets,
+ # though no policies are configured yet.
+ server.zap_domain = b'global'
+ client = self.socket(zmq.PULL)
+ self.assertFalse(self.can_connect(server, client))
+
+ def test_whitelist(self):
+ """threaded auth - Whitelist"""
+ # Whitelist 127.0.0.1, connection should pass"
+ self.auth.allow('127.0.0.1')
+ server = self.socket(zmq.PUSH)
+ # By setting a domain we switch on authentication for NULL sockets,
+ # though no policies are configured yet.
+ server.zap_domain = b'global'
+ client = self.socket(zmq.PULL)
+ self.assertTrue(self.can_connect(server, client))
+
+ def test_plain(self):
+ """threaded auth - PLAIN"""
+
+ # Try PLAIN authentication - without configuring server, connection should fail
+ server = self.socket(zmq.PUSH)
+ server.plain_server = True
+ client = self.socket(zmq.PULL)
+ client.plain_username = b'admin'
+ client.plain_password = b'Password'
+ self.assertFalse(self.can_connect(server, client))
+
+ # Try PLAIN authentication - with server configured, connection should pass
+ server = self.socket(zmq.PUSH)
+ server.plain_server = True
+ client = self.socket(zmq.PULL)
+ client.plain_username = b'admin'
+ client.plain_password = b'Password'
+ self.auth.configure_plain(domain='*', passwords={'admin': 'Password'})
+ self.assertTrue(self.can_connect(server, client))
+
+ # Try PLAIN authentication - with bogus credentials, connection should fail
+ server = self.socket(zmq.PUSH)
+ server.plain_server = True
+ client = self.socket(zmq.PULL)
+ client.plain_username = b'admin'
+ client.plain_password = b'Bogus'
+ self.assertFalse(self.can_connect(server, client))
+
+ # Remove authenticator and check that a normal connection works
+ self.auth.stop()
+ self.auth = None
+
+ server = self.socket(zmq.PUSH)
+ client = self.socket(zmq.PULL)
+ self.assertTrue(self.can_connect(server, client))
+ client.close()
+ server.close()
+
+ def test_curve(self):
+ """threaded auth - CURVE"""
+ self.auth.allow('127.0.0.1')
+ certs = self.load_certs(self.secret_keys_dir)
+ server_public, server_secret, client_public, client_secret = certs
+
+ #Try CURVE authentication - without configuring server, connection should fail
+ server = self.socket(zmq.PUSH)
+ server.curve_publickey = server_public
+ server.curve_secretkey = server_secret
+ server.curve_server = True
+ client = self.socket(zmq.PULL)
+ client.curve_publickey = client_public
+ client.curve_secretkey = client_secret
+ client.curve_serverkey = server_public
+ self.assertFalse(self.can_connect(server, client))
+
+ #Try CURVE authentication - with server configured to CURVE_ALLOW_ANY, connection should pass
+ self.auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY)
+ server = self.socket(zmq.PUSH)
+ server.curve_publickey = server_public
+ server.curve_secretkey = server_secret
+ server.curve_server = True
+ client = self.socket(zmq.PULL)
+ client.curve_publickey = client_public
+ client.curve_secretkey = client_secret
+ client.curve_serverkey = server_public
+ self.assertTrue(self.can_connect(server, client))
+
+ # Try CURVE authentication - with server configured, connection should pass
+ self.auth.configure_curve(domain='*', location=self.public_keys_dir)
+ server = self.socket(zmq.PUSH)
+ server.curve_publickey = server_public
+ server.curve_secretkey = server_secret
+ server.curve_server = True
+ client = self.socket(zmq.PULL)
+ client.curve_publickey = client_public
+ client.curve_secretkey = client_secret
+ client.curve_serverkey = server_public
+ self.assertTrue(self.can_connect(server, client))
+
+ # Remove authenticator and check that a normal connection works
+ self.auth.stop()
+ self.auth = None
+
+ # Try connecting using NULL and no authentication enabled, connection should pass
+ server = self.socket(zmq.PUSH)
+ client = self.socket(zmq.PULL)
+ self.assertTrue(self.can_connect(server, client))
+
+
+def with_ioloop(method, expect_success=True):
+ """decorator for running tests with an IOLoop"""
+ def test_method(self):
+ r = method(self)
+
+ loop = self.io_loop
+ if expect_success:
+ self.pullstream.on_recv(self.on_message_succeed)
+ else:
+ self.pullstream.on_recv(self.on_message_fail)
+
+ t = loop.time()
+ loop.add_callback(self.attempt_connection)
+ loop.add_callback(self.send_msg)
+ if expect_success:
+ loop.add_timeout(t + 1, self.on_test_timeout_fail)
+ else:
+ loop.add_timeout(t + 1, self.on_test_timeout_succeed)
+
+ loop.start()
+ if self.fail_msg:
+ self.fail(self.fail_msg)
+
+ return r
+ return test_method
+
+def should_auth(method):
+ return with_ioloop(method, True)
+
+def should_not_auth(method):
+ return with_ioloop(method, False)
+
+class TestIOLoopAuthentication(BaseAuthTestCase):
+ """Test authentication running in ioloop"""
+
+ def setUp(self):
+ self.fail_msg = None
+ self.io_loop = ioloop.IOLoop()
+ super(TestIOLoopAuthentication, self).setUp()
+ self.server = self.socket(zmq.PUSH)
+ self.client = self.socket(zmq.PULL)
+ self.pushstream = zmqstream.ZMQStream(self.server, self.io_loop)
+ self.pullstream = zmqstream.ZMQStream(self.client, self.io_loop)
+
+ def make_auth(self):
+ return IOLoopAuthenticator(self.context, io_loop=self.io_loop)
+
+ def tearDown(self):
+ if self.auth:
+ self.auth.stop()
+ self.auth = None
+ self.io_loop.close(all_fds=True)
+ super(TestIOLoopAuthentication, self).tearDown()
+
+ def attempt_connection(self):
+ """Check if client can connect to server using tcp transport"""
+ iface = 'tcp://127.0.0.1'
+ port = self.server.bind_to_random_port(iface)
+ self.client.connect("%s:%i" % (iface, port))
+
+ def send_msg(self):
+ """Send a message from server to a client"""
+ msg = [b"Hello World"]
+ self.pushstream.send_multipart(msg)
+
+ def on_message_succeed(self, frames):
+ """A message was received, as expected."""
+ if frames != [b"Hello World"]:
+ self.fail_msg = "Unexpected message received"
+ self.io_loop.stop()
+
+ def on_message_fail(self, frames):
+ """A message was received, unexpectedly."""
+ self.fail_msg = 'Received messaged unexpectedly, security failed'
+ self.io_loop.stop()
+
+ def on_test_timeout_succeed(self):
+ """Test timer expired, indicates test success"""
+ self.io_loop.stop()
+
+ def on_test_timeout_fail(self):
+ """Test timer expired, indicates test failure"""
+ self.fail_msg = 'Test timed out'
+ self.io_loop.stop()
+
+ @should_auth
+ def test_none(self):
+ """ioloop auth - NONE"""
+ # A default NULL connection should always succeed, and not
+ # go through our authentication infrastructure at all.
+ # no auth should be running
+ self.auth.stop()
+ self.auth = None
+
+ @should_auth
+ def test_null(self):
+ """ioloop auth - NULL"""
+ # By setting a domain we switch on authentication for NULL sockets,
+ # though no policies are configured yet. The client connection
+ # should still be allowed.
+ self.server.zap_domain = b'global'
+
+ @should_not_auth
+ def test_blacklist(self):
+ """ioloop auth - Blacklist"""
+ # Blacklist 127.0.0.1, connection should fail
+ self.auth.deny('127.0.0.1')
+ self.server.zap_domain = b'global'
+
+ @should_auth
+ def test_whitelist(self):
+ """ioloop auth - Whitelist"""
+ # Whitelist 127.0.0.1, which overrides the blacklist, connection should pass"
+ self.auth.allow('127.0.0.1')
+
+ self.server.setsockopt(zmq.ZAP_DOMAIN, b'global')
+
+ @should_not_auth
+ def test_plain_unconfigured_server(self):
+ """ioloop auth - PLAIN, unconfigured server"""
+ self.client.plain_username = b'admin'
+ self.client.plain_password = b'Password'
+ # Try PLAIN authentication - without configuring server, connection should fail
+ self.server.plain_server = True
+
+ @should_auth
+ def test_plain_configured_server(self):
+ """ioloop auth - PLAIN, configured server"""
+ self.client.plain_username = b'admin'
+ self.client.plain_password = b'Password'
+ # Try PLAIN authentication - with server configured, connection should pass
+ self.server.plain_server = True
+ self.auth.configure_plain(domain='*', passwords={'admin': 'Password'})
+
+ @should_not_auth
+ def test_plain_bogus_credentials(self):
+ """ioloop auth - PLAIN, bogus credentials"""
+ self.client.plain_username = b'admin'
+ self.client.plain_password = b'Bogus'
+ self.server.plain_server = True
+
+ self.auth.configure_plain(domain='*', passwords={'admin': 'Password'})
+
+ @should_not_auth
+ def test_curve_unconfigured_server(self):
+ """ioloop auth - CURVE, unconfigured server"""
+ certs = self.load_certs(self.secret_keys_dir)
+ server_public, server_secret, client_public, client_secret = certs
+
+ self.auth.allow('127.0.0.1')
+
+ self.server.curve_publickey = server_public
+ self.server.curve_secretkey = server_secret
+ self.server.curve_server = True
+
+ self.client.curve_publickey = client_public
+ self.client.curve_secretkey = client_secret
+ self.client.curve_serverkey = server_public
+
+ @should_auth
+ def test_curve_allow_any(self):
+ """ioloop auth - CURVE, CURVE_ALLOW_ANY"""
+ certs = self.load_certs(self.secret_keys_dir)
+ server_public, server_secret, client_public, client_secret = certs
+
+ self.auth.allow('127.0.0.1')
+ self.auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY)
+
+ self.server.curve_publickey = server_public
+ self.server.curve_secretkey = server_secret
+ self.server.curve_server = True
+
+ self.client.curve_publickey = client_public
+ self.client.curve_secretkey = client_secret
+ self.client.curve_serverkey = server_public
+
+ @should_auth
+ def test_curve_configured_server(self):
+ """ioloop auth - CURVE, configured server"""
+ self.auth.allow('127.0.0.1')
+ certs = self.load_certs(self.secret_keys_dir)
+ server_public, server_secret, client_public, client_secret = certs
+
+ self.auth.configure_curve(domain='*', location=self.public_keys_dir)
+
+ self.server.curve_publickey = server_public
+ self.server.curve_secretkey = server_secret
+ self.server.curve_server = True
+
+ self.client.curve_publickey = client_public
+ self.client.curve_secretkey = client_secret
+ self.client.curve_serverkey = server_public
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_cffi_backend.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_cffi_backend.py
new file mode 100644
index 00000000..1f85eebf
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_cffi_backend.py
@@ -0,0 +1,310 @@
+# -*- coding: utf8 -*-
+
+import sys
+import time
+
+from unittest import TestCase
+
+from zmq.tests import BaseZMQTestCase, SkipTest
+
+try:
+ from zmq.backend.cffi import (
+ zmq_version_info,
+ PUSH, PULL, IDENTITY,
+ REQ, REP, POLLIN, POLLOUT,
+ )
+ from zmq.backend.cffi._cffi import ffi, C
+ have_ffi_backend = True
+except ImportError:
+ have_ffi_backend = False
+
+
+class TestCFFIBackend(TestCase):
+
+ def setUp(self):
+ if not have_ffi_backend or not 'PyPy' in sys.version:
+ raise SkipTest('PyPy Tests Only')
+
+ def test_zmq_version_info(self):
+ version = zmq_version_info()
+
+ assert version[0] in range(2,11)
+
+ def test_zmq_ctx_new_destroy(self):
+ ctx = C.zmq_ctx_new()
+
+ assert ctx != ffi.NULL
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_socket_open_close(self):
+ ctx = C.zmq_ctx_new()
+ socket = C.zmq_socket(ctx, PUSH)
+
+ assert ctx != ffi.NULL
+ assert ffi.NULL != socket
+ assert 0 == C.zmq_close(socket)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_setsockopt(self):
+ ctx = C.zmq_ctx_new()
+ socket = C.zmq_socket(ctx, PUSH)
+
+ identity = ffi.new('char[3]', 'zmq')
+ ret = C.zmq_setsockopt(socket, IDENTITY, ffi.cast('void*', identity), 3)
+
+ assert ret == 0
+ assert ctx != ffi.NULL
+ assert ffi.NULL != socket
+ assert 0 == C.zmq_close(socket)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_getsockopt(self):
+ ctx = C.zmq_ctx_new()
+ socket = C.zmq_socket(ctx, PUSH)
+
+ identity = ffi.new('char[]', 'zmq')
+ ret = C.zmq_setsockopt(socket, IDENTITY, ffi.cast('void*', identity), 3)
+ assert ret == 0
+
+ option_len = ffi.new('size_t*', 3)
+ option = ffi.new('char*')
+ ret = C.zmq_getsockopt(socket,
+ IDENTITY,
+ ffi.cast('void*', option),
+ option_len)
+
+ assert ret == 0
+ assert ffi.string(ffi.cast('char*', option))[0] == "z"
+ assert ffi.string(ffi.cast('char*', option))[1] == "m"
+ assert ffi.string(ffi.cast('char*', option))[2] == "q"
+ assert ctx != ffi.NULL
+ assert ffi.NULL != socket
+ assert 0 == C.zmq_close(socket)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_bind(self):
+ ctx = C.zmq_ctx_new()
+ socket = C.zmq_socket(ctx, 8)
+
+ assert 0 == C.zmq_bind(socket, 'tcp://*:4444')
+ assert ctx != ffi.NULL
+ assert ffi.NULL != socket
+ assert 0 == C.zmq_close(socket)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_bind_connect(self):
+ ctx = C.zmq_ctx_new()
+
+ socket1 = C.zmq_socket(ctx, PUSH)
+ socket2 = C.zmq_socket(ctx, PULL)
+
+ assert 0 == C.zmq_bind(socket1, 'tcp://*:4444')
+ assert 0 == C.zmq_connect(socket2, 'tcp://127.0.0.1:4444')
+ assert ctx != ffi.NULL
+ assert ffi.NULL != socket1
+ assert ffi.NULL != socket2
+ assert 0 == C.zmq_close(socket1)
+ assert 0 == C.zmq_close(socket2)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+
+ def test_zmq_msg_init_close(self):
+ zmq_msg = ffi.new('zmq_msg_t*')
+
+ assert ffi.NULL != zmq_msg
+ assert 0 == C.zmq_msg_init(zmq_msg)
+ assert 0 == C.zmq_msg_close(zmq_msg)
+
+ def test_zmq_msg_init_size(self):
+ zmq_msg = ffi.new('zmq_msg_t*')
+
+ assert ffi.NULL != zmq_msg
+ assert 0 == C.zmq_msg_init_size(zmq_msg, 10)
+ assert 0 == C.zmq_msg_close(zmq_msg)
+
+ def test_zmq_msg_init_data(self):
+ zmq_msg = ffi.new('zmq_msg_t*')
+ message = ffi.new('char[5]', 'Hello')
+
+ assert 0 == C.zmq_msg_init_data(zmq_msg,
+ ffi.cast('void*', message),
+ 5,
+ ffi.NULL,
+ ffi.NULL)
+
+ assert ffi.NULL != zmq_msg
+ assert 0 == C.zmq_msg_close(zmq_msg)
+
+ def test_zmq_msg_data(self):
+ zmq_msg = ffi.new('zmq_msg_t*')
+ message = ffi.new('char[]', 'Hello')
+ assert 0 == C.zmq_msg_init_data(zmq_msg,
+ ffi.cast('void*', message),
+ 5,
+ ffi.NULL,
+ ffi.NULL)
+
+ data = C.zmq_msg_data(zmq_msg)
+
+ assert ffi.NULL != zmq_msg
+ assert ffi.string(ffi.cast("char*", data)) == 'Hello'
+ assert 0 == C.zmq_msg_close(zmq_msg)
+
+
+ def test_zmq_send(self):
+ ctx = C.zmq_ctx_new()
+
+ sender = C.zmq_socket(ctx, REQ)
+ receiver = C.zmq_socket(ctx, REP)
+
+ assert 0 == C.zmq_bind(receiver, 'tcp://*:7777')
+ assert 0 == C.zmq_connect(sender, 'tcp://127.0.0.1:7777')
+
+ time.sleep(0.1)
+
+ zmq_msg = ffi.new('zmq_msg_t*')
+ message = ffi.new('char[5]', 'Hello')
+
+ C.zmq_msg_init_data(zmq_msg,
+ ffi.cast('void*', message),
+ ffi.cast('size_t', 5),
+ ffi.NULL,
+ ffi.NULL)
+
+ assert 5 == C.zmq_msg_send(zmq_msg, sender, 0)
+ assert 0 == C.zmq_msg_close(zmq_msg)
+ assert C.zmq_close(sender) == 0
+ assert C.zmq_close(receiver) == 0
+ assert C.zmq_ctx_destroy(ctx) == 0
+
+ def test_zmq_recv(self):
+ ctx = C.zmq_ctx_new()
+
+ sender = C.zmq_socket(ctx, REQ)
+ receiver = C.zmq_socket(ctx, REP)
+
+ assert 0 == C.zmq_bind(receiver, 'tcp://*:2222')
+ assert 0 == C.zmq_connect(sender, 'tcp://127.0.0.1:2222')
+
+ time.sleep(0.1)
+
+ zmq_msg = ffi.new('zmq_msg_t*')
+ message = ffi.new('char[5]', 'Hello')
+
+ C.zmq_msg_init_data(zmq_msg,
+ ffi.cast('void*', message),
+ ffi.cast('size_t', 5),
+ ffi.NULL,
+ ffi.NULL)
+
+ zmq_msg2 = ffi.new('zmq_msg_t*')
+ C.zmq_msg_init(zmq_msg2)
+
+ assert 5 == C.zmq_msg_send(zmq_msg, sender, 0)
+ assert 5 == C.zmq_msg_recv(zmq_msg2, receiver, 0)
+ assert 5 == C.zmq_msg_size(zmq_msg2)
+ assert b"Hello" == ffi.buffer(C.zmq_msg_data(zmq_msg2),
+ C.zmq_msg_size(zmq_msg2))[:]
+ assert C.zmq_close(sender) == 0
+ assert C.zmq_close(receiver) == 0
+ assert C.zmq_ctx_destroy(ctx) == 0
+
+ def test_zmq_poll(self):
+ ctx = C.zmq_ctx_new()
+
+ sender = C.zmq_socket(ctx, REQ)
+ receiver = C.zmq_socket(ctx, REP)
+
+ r1 = C.zmq_bind(receiver, 'tcp://*:3333')
+ r2 = C.zmq_connect(sender, 'tcp://127.0.0.1:3333')
+
+ zmq_msg = ffi.new('zmq_msg_t*')
+ message = ffi.new('char[5]', 'Hello')
+
+ C.zmq_msg_init_data(zmq_msg,
+ ffi.cast('void*', message),
+ ffi.cast('size_t', 5),
+ ffi.NULL,
+ ffi.NULL)
+
+ receiver_pollitem = ffi.new('zmq_pollitem_t*')
+ receiver_pollitem.socket = receiver
+ receiver_pollitem.fd = 0
+ receiver_pollitem.events = POLLIN | POLLOUT
+ receiver_pollitem.revents = 0
+
+ ret = C.zmq_poll(ffi.NULL, 0, 0)
+ assert ret == 0
+
+ ret = C.zmq_poll(receiver_pollitem, 1, 0)
+ assert ret == 0
+
+ ret = C.zmq_msg_send(zmq_msg, sender, 0)
+ print(ffi.string(C.zmq_strerror(C.zmq_errno())))
+ assert ret == 5
+
+ time.sleep(0.2)
+
+ ret = C.zmq_poll(receiver_pollitem, 1, 0)
+ assert ret == 1
+
+ assert int(receiver_pollitem.revents) & POLLIN
+ assert not int(receiver_pollitem.revents) & POLLOUT
+
+ zmq_msg2 = ffi.new('zmq_msg_t*')
+ C.zmq_msg_init(zmq_msg2)
+
+ ret_recv = C.zmq_msg_recv(zmq_msg2, receiver, 0)
+ assert ret_recv == 5
+
+ assert 5 == C.zmq_msg_size(zmq_msg2)
+ assert b"Hello" == ffi.buffer(C.zmq_msg_data(zmq_msg2),
+ C.zmq_msg_size(zmq_msg2))[:]
+
+ sender_pollitem = ffi.new('zmq_pollitem_t*')
+ sender_pollitem.socket = sender
+ sender_pollitem.fd = 0
+ sender_pollitem.events = POLLIN | POLLOUT
+ sender_pollitem.revents = 0
+
+ ret = C.zmq_poll(sender_pollitem, 1, 0)
+ assert ret == 0
+
+ zmq_msg_again = ffi.new('zmq_msg_t*')
+ message_again = ffi.new('char[11]', 'Hello Again')
+
+ C.zmq_msg_init_data(zmq_msg_again,
+ ffi.cast('void*', message_again),
+ ffi.cast('size_t', 11),
+ ffi.NULL,
+ ffi.NULL)
+
+ assert 11 == C.zmq_msg_send(zmq_msg_again, receiver, 0)
+
+ time.sleep(0.2)
+
+ assert 0 <= C.zmq_poll(sender_pollitem, 1, 0)
+ assert int(sender_pollitem.revents) & POLLIN
+ assert 11 == C.zmq_msg_recv(zmq_msg2, sender, 0)
+ assert 11 == C.zmq_msg_size(zmq_msg2)
+ assert b"Hello Again" == ffi.buffer(C.zmq_msg_data(zmq_msg2),
+ int(C.zmq_msg_size(zmq_msg2)))[:]
+ assert 0 == C.zmq_close(sender)
+ assert 0 == C.zmq_close(receiver)
+ assert 0 == C.zmq_ctx_destroy(ctx)
+ assert 0 == C.zmq_msg_close(zmq_msg)
+ assert 0 == C.zmq_msg_close(zmq_msg2)
+ assert 0 == C.zmq_msg_close(zmq_msg_again)
+
+ def test_zmq_stopwatch_functions(self):
+ stopwatch = C.zmq_stopwatch_start()
+ ret = C.zmq_stopwatch_stop(stopwatch)
+
+ assert ffi.NULL != stopwatch
+ assert 0 < int(ret)
+
+ def test_zmq_sleep(self):
+ try:
+ C.zmq_sleep(1)
+ except Exception as e:
+ raise AssertionError("Error executing zmq_sleep(int)")
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_constants.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_constants.py
new file mode 100644
index 00000000..d32b2b48
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_constants.py
@@ -0,0 +1,104 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import json
+from unittest import TestCase
+
+import zmq
+
+from zmq.utils import constant_names
+from zmq.sugar import constants as sugar_constants
+from zmq.backend import constants as backend_constants
+
+all_set = set(constant_names.all_names)
+
+class TestConstants(TestCase):
+
+ def _duplicate_test(self, namelist, listname):
+ """test that a given list has no duplicates"""
+ dupes = {}
+ for name in set(namelist):
+ cnt = namelist.count(name)
+ if cnt > 1:
+ dupes[name] = cnt
+ if dupes:
+ self.fail("The following names occur more than once in %s: %s" % (listname, json.dumps(dupes, indent=2)))
+
+ def test_duplicate_all(self):
+ return self._duplicate_test(constant_names.all_names, "all_names")
+
+ def _change_key(self, change, version):
+ """return changed-in key"""
+ return "%s-in %d.%d.%d" % tuple([change] + list(version))
+
+ def test_duplicate_changed(self):
+ all_changed = []
+ for change in ("new", "removed"):
+ d = getattr(constant_names, change + "_in")
+ for version, namelist in d.items():
+ all_changed.extend(namelist)
+ self._duplicate_test(namelist, self._change_key(change, version))
+
+ self._duplicate_test(all_changed, "all-changed")
+
+ def test_changed_in_all(self):
+ missing = {}
+ for change in ("new", "removed"):
+ d = getattr(constant_names, change + "_in")
+ for version, namelist in d.items():
+ key = self._change_key(change, version)
+ for name in namelist:
+ if name not in all_set:
+ if key not in missing:
+ missing[key] = []
+ missing[key].append(name)
+
+ if missing:
+ self.fail(
+ "The following names are missing in `all_names`: %s" % json.dumps(missing, indent=2)
+ )
+
+ def test_no_negative_constants(self):
+ for name in sugar_constants.__all__:
+ self.assertNotEqual(getattr(zmq, name), sugar_constants._UNDEFINED)
+
+ def test_undefined_constants(self):
+ all_aliases = []
+ for alias_group in sugar_constants.aliases:
+ all_aliases.extend(alias_group)
+
+ for name in all_set.difference(all_aliases):
+ raw = getattr(backend_constants, name)
+ if raw == sugar_constants._UNDEFINED:
+ self.assertRaises(AttributeError, getattr, zmq, name)
+ else:
+ self.assertEqual(getattr(zmq, name), raw)
+
+ def test_new(self):
+ zmq_version = zmq.zmq_version_info()
+ for version, new_names in constant_names.new_in.items():
+ should_have = zmq_version >= version
+ for name in new_names:
+ try:
+ value = getattr(zmq, name)
+ except AttributeError:
+ if should_have:
+ self.fail("AttributeError: zmq.%s" % name)
+ else:
+ if not should_have:
+ self.fail("Shouldn't have: zmq.%s=%s" % (name, value))
+
+ def test_removed(self):
+ zmq_version = zmq.zmq_version_info()
+ for version, new_names in constant_names.removed_in.items():
+ should_have = zmq_version < version
+ for name in new_names:
+ try:
+ value = getattr(zmq, name)
+ except AttributeError:
+ if should_have:
+ self.fail("AttributeError: zmq.%s" % name)
+ else:
+ if not should_have:
+ self.fail("Shouldn't have: zmq.%s=%s" % (name, value))
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_context.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_context.py
new file mode 100644
index 00000000..e3280778
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_context.py
@@ -0,0 +1,257 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import gc
+import sys
+import time
+from threading import Thread, Event
+
+import zmq
+from zmq.tests import (
+ BaseZMQTestCase, have_gevent, GreenTest, skip_green, PYPY, SkipTest,
+)
+
+
+class TestContext(BaseZMQTestCase):
+
+ def test_init(self):
+ c1 = self.Context()
+ self.assert_(isinstance(c1, self.Context))
+ del c1
+ c2 = self.Context()
+ self.assert_(isinstance(c2, self.Context))
+ del c2
+ c3 = self.Context()
+ self.assert_(isinstance(c3, self.Context))
+ del c3
+
+ def test_dir(self):
+ ctx = self.Context()
+ self.assertTrue('socket' in dir(ctx))
+ if zmq.zmq_version_info() > (3,):
+ self.assertTrue('IO_THREADS' in dir(ctx))
+ ctx.term()
+
+ def test_term(self):
+ c = self.Context()
+ c.term()
+ self.assert_(c.closed)
+
+ def test_context_manager(self):
+ with self.Context() as c:
+ pass
+ self.assert_(c.closed)
+
+ def test_fail_init(self):
+ self.assertRaisesErrno(zmq.EINVAL, self.Context, -1)
+
+ def test_term_hang(self):
+ rep,req = self.create_bound_pair(zmq.ROUTER, zmq.DEALER)
+ req.setsockopt(zmq.LINGER, 0)
+ req.send(b'hello', copy=False)
+ req.close()
+ rep.close()
+ self.context.term()
+
+ def test_instance(self):
+ ctx = self.Context.instance()
+ c2 = self.Context.instance(io_threads=2)
+ self.assertTrue(c2 is ctx)
+ c2.term()
+ c3 = self.Context.instance()
+ c4 = self.Context.instance()
+ self.assertFalse(c3 is c2)
+ self.assertFalse(c3.closed)
+ self.assertTrue(c3 is c4)
+
+ def test_many_sockets(self):
+ """opening and closing many sockets shouldn't cause problems"""
+ ctx = self.Context()
+ for i in range(16):
+ sockets = [ ctx.socket(zmq.REP) for i in range(65) ]
+ [ s.close() for s in sockets ]
+ # give the reaper a chance
+ time.sleep(1e-2)
+ ctx.term()
+
+ def test_sockopts(self):
+ """setting socket options with ctx attributes"""
+ ctx = self.Context()
+ ctx.linger = 5
+ self.assertEqual(ctx.linger, 5)
+ s = ctx.socket(zmq.REQ)
+ self.assertEqual(s.linger, 5)
+ self.assertEqual(s.getsockopt(zmq.LINGER), 5)
+ s.close()
+ # check that subscribe doesn't get set on sockets that don't subscribe:
+ ctx.subscribe = b''
+ s = ctx.socket(zmq.REQ)
+ s.close()
+
+ ctx.term()
+
+
+ def test_destroy(self):
+ """Context.destroy should close sockets"""
+ ctx = self.Context()
+ sockets = [ ctx.socket(zmq.REP) for i in range(65) ]
+
+ # close half of the sockets
+ [ s.close() for s in sockets[::2] ]
+
+ ctx.destroy()
+ # reaper is not instantaneous
+ time.sleep(1e-2)
+ for s in sockets:
+ self.assertTrue(s.closed)
+
+ def test_destroy_linger(self):
+ """Context.destroy should set linger on closing sockets"""
+ req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)
+ req.send(b'hi')
+ time.sleep(1e-2)
+ self.context.destroy(linger=0)
+ # reaper is not instantaneous
+ time.sleep(1e-2)
+ for s in (req,rep):
+ self.assertTrue(s.closed)
+
+ def test_term_noclose(self):
+ """Context.term won't close sockets"""
+ ctx = self.Context()
+ s = ctx.socket(zmq.REQ)
+ self.assertFalse(s.closed)
+ t = Thread(target=ctx.term)
+ t.start()
+ t.join(timeout=0.1)
+ self.assertTrue(t.is_alive(), "Context should be waiting")
+ s.close()
+ t.join(timeout=0.1)
+ self.assertFalse(t.is_alive(), "Context should have closed")
+
+ def test_gc(self):
+ """test close&term by garbage collection alone"""
+ if PYPY:
+ raise SkipTest("GC doesn't work ")
+
+ # test credit @dln (GH #137):
+ def gcf():
+ def inner():
+ ctx = self.Context()
+ s = ctx.socket(zmq.PUSH)
+ inner()
+ gc.collect()
+ t = Thread(target=gcf)
+ t.start()
+ t.join(timeout=1)
+ self.assertFalse(t.is_alive(), "Garbage collection should have cleaned up context")
+
+ def test_cyclic_destroy(self):
+ """ctx.destroy should succeed when cyclic ref prevents gc"""
+ # test credit @dln (GH #137):
+ class CyclicReference(object):
+ def __init__(self, parent=None):
+ self.parent = parent
+
+ def crash(self, sock):
+ self.sock = sock
+ self.child = CyclicReference(self)
+
+ def crash_zmq():
+ ctx = self.Context()
+ sock = ctx.socket(zmq.PULL)
+ c = CyclicReference()
+ c.crash(sock)
+ ctx.destroy()
+
+ crash_zmq()
+
+ def test_term_thread(self):
+ """ctx.term should not crash active threads (#139)"""
+ ctx = self.Context()
+ evt = Event()
+ evt.clear()
+
+ def block():
+ s = ctx.socket(zmq.REP)
+ s.bind_to_random_port('tcp://127.0.0.1')
+ evt.set()
+ try:
+ s.recv()
+ except zmq.ZMQError as e:
+ self.assertEqual(e.errno, zmq.ETERM)
+ return
+ finally:
+ s.close()
+ self.fail("recv should have been interrupted with ETERM")
+ t = Thread(target=block)
+ t.start()
+
+ evt.wait(1)
+ self.assertTrue(evt.is_set(), "sync event never fired")
+ time.sleep(0.01)
+ ctx.term()
+ t.join(timeout=1)
+ self.assertFalse(t.is_alive(), "term should have interrupted s.recv()")
+
+ def test_destroy_no_sockets(self):
+ ctx = self.Context()
+ s = ctx.socket(zmq.PUB)
+ s.bind_to_random_port('tcp://127.0.0.1')
+ s.close()
+ ctx.destroy()
+ assert s.closed
+ assert ctx.closed
+
+ def test_ctx_opts(self):
+ if zmq.zmq_version_info() < (3,):
+ raise SkipTest("context options require libzmq 3")
+ ctx = self.Context()
+ ctx.set(zmq.MAX_SOCKETS, 2)
+ self.assertEqual(ctx.get(zmq.MAX_SOCKETS), 2)
+ ctx.max_sockets = 100
+ self.assertEqual(ctx.max_sockets, 100)
+ self.assertEqual(ctx.get(zmq.MAX_SOCKETS), 100)
+
+ def test_shadow(self):
+ ctx = self.Context()
+ ctx2 = self.Context.shadow(ctx.underlying)
+ self.assertEqual(ctx.underlying, ctx2.underlying)
+ s = ctx.socket(zmq.PUB)
+ s.close()
+ del ctx2
+ self.assertFalse(ctx.closed)
+ s = ctx.socket(zmq.PUB)
+ ctx2 = self.Context.shadow(ctx.underlying)
+ s2 = ctx2.socket(zmq.PUB)
+ s.close()
+ s2.close()
+ ctx.term()
+ self.assertRaisesErrno(zmq.EFAULT, ctx2.socket, zmq.PUB)
+ del ctx2
+
+ def test_shadow_pyczmq(self):
+ try:
+ from pyczmq import zctx, zsocket, zstr
+ except Exception:
+ raise SkipTest("Requires pyczmq")
+
+ ctx = zctx.new()
+ a = zsocket.new(ctx, zmq.PUSH)
+ zsocket.bind(a, "inproc://a")
+ ctx2 = self.Context.shadow_pyczmq(ctx)
+ b = ctx2.socket(zmq.PULL)
+ b.connect("inproc://a")
+ zstr.send(a, b'hi')
+ rcvd = self.recv(b)
+ self.assertEqual(rcvd, b'hi')
+ b.close()
+
+
+if False: # disable green context tests
+ class TestContextGreen(GreenTest, TestContext):
+ """gevent subclass of context tests"""
+ # skip tests that use real threads:
+ test_gc = GreenTest.skip_green
+ test_term_thread = GreenTest.skip_green
+ test_destroy_linger = GreenTest.skip_green
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_device.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_device.py
new file mode 100644
index 00000000..f8305074
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_device.py
@@ -0,0 +1,146 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import time
+
+import zmq
+from zmq import devices
+from zmq.tests import BaseZMQTestCase, SkipTest, have_gevent, GreenTest, PYPY
+from zmq.utils.strtypes import (bytes,unicode,basestring)
+
+if PYPY:
+ # cleanup of shared Context doesn't work on PyPy
+ devices.Device.context_factory = zmq.Context
+
+class TestDevice(BaseZMQTestCase):
+
+ def test_device_types(self):
+ for devtype in (zmq.STREAMER, zmq.FORWARDER, zmq.QUEUE):
+ dev = devices.Device(devtype, zmq.PAIR, zmq.PAIR)
+ self.assertEqual(dev.device_type, devtype)
+ del dev
+
+ def test_device_attributes(self):
+ dev = devices.Device(zmq.QUEUE, zmq.SUB, zmq.PUB)
+ self.assertEqual(dev.in_type, zmq.SUB)
+ self.assertEqual(dev.out_type, zmq.PUB)
+ self.assertEqual(dev.device_type, zmq.QUEUE)
+ self.assertEqual(dev.daemon, True)
+ del dev
+
+ def test_tsdevice_attributes(self):
+ dev = devices.Device(zmq.QUEUE, zmq.SUB, zmq.PUB)
+ self.assertEqual(dev.in_type, zmq.SUB)
+ self.assertEqual(dev.out_type, zmq.PUB)
+ self.assertEqual(dev.device_type, zmq.QUEUE)
+ self.assertEqual(dev.daemon, True)
+ del dev
+
+
+ def test_single_socket_forwarder_connect(self):
+ dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1)
+ req = self.context.socket(zmq.REQ)
+ port = req.bind_to_random_port('tcp://127.0.0.1')
+ dev.connect_in('tcp://127.0.0.1:%i'%port)
+ dev.start()
+ time.sleep(.25)
+ msg = b'hello'
+ req.send(msg)
+ self.assertEqual(msg, self.recv(req))
+ del dev
+ req.close()
+ dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1)
+ req = self.context.socket(zmq.REQ)
+ port = req.bind_to_random_port('tcp://127.0.0.1')
+ dev.connect_out('tcp://127.0.0.1:%i'%port)
+ dev.start()
+ time.sleep(.25)
+ msg = b'hello again'
+ req.send(msg)
+ self.assertEqual(msg, self.recv(req))
+ del dev
+ req.close()
+
+ def test_single_socket_forwarder_bind(self):
+ dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1)
+ # select random port:
+ binder = self.context.socket(zmq.REQ)
+ port = binder.bind_to_random_port('tcp://127.0.0.1')
+ binder.close()
+ time.sleep(0.1)
+ req = self.context.socket(zmq.REQ)
+ req.connect('tcp://127.0.0.1:%i'%port)
+ dev.bind_in('tcp://127.0.0.1:%i'%port)
+ dev.start()
+ time.sleep(.25)
+ msg = b'hello'
+ req.send(msg)
+ self.assertEqual(msg, self.recv(req))
+ del dev
+ req.close()
+ dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1)
+ # select random port:
+ binder = self.context.socket(zmq.REQ)
+ port = binder.bind_to_random_port('tcp://127.0.0.1')
+ binder.close()
+ time.sleep(0.1)
+ req = self.context.socket(zmq.REQ)
+ req.connect('tcp://127.0.0.1:%i'%port)
+ dev.bind_in('tcp://127.0.0.1:%i'%port)
+ dev.start()
+ time.sleep(.25)
+ msg = b'hello again'
+ req.send(msg)
+ self.assertEqual(msg, self.recv(req))
+ del dev
+ req.close()
+
+ def test_proxy(self):
+ if zmq.zmq_version_info() < (3,2):
+ raise SkipTest("Proxies only in libzmq >= 3")
+ dev = devices.ThreadProxy(zmq.PULL, zmq.PUSH, zmq.PUSH)
+ binder = self.context.socket(zmq.REQ)
+ iface = 'tcp://127.0.0.1'
+ port = binder.bind_to_random_port(iface)
+ port2 = binder.bind_to_random_port(iface)
+ port3 = binder.bind_to_random_port(iface)
+ binder.close()
+ time.sleep(0.1)
+ dev.bind_in("%s:%i" % (iface, port))
+ dev.bind_out("%s:%i" % (iface, port2))
+ dev.bind_mon("%s:%i" % (iface, port3))
+ dev.start()
+ time.sleep(0.25)
+ msg = b'hello'
+ push = self.context.socket(zmq.PUSH)
+ push.connect("%s:%i" % (iface, port))
+ pull = self.context.socket(zmq.PULL)
+ pull.connect("%s:%i" % (iface, port2))
+ mon = self.context.socket(zmq.PULL)
+ mon.connect("%s:%i" % (iface, port3))
+ push.send(msg)
+ self.sockets.extend([push, pull, mon])
+ self.assertEqual(msg, self.recv(pull))
+ self.assertEqual(msg, self.recv(mon))
+
+if have_gevent:
+ import gevent
+ import zmq.green
+
+ class TestDeviceGreen(GreenTest, BaseZMQTestCase):
+
+ def test_green_device(self):
+ rep = self.context.socket(zmq.REP)
+ req = self.context.socket(zmq.REQ)
+ self.sockets.extend([req, rep])
+ port = rep.bind_to_random_port('tcp://127.0.0.1')
+ g = gevent.spawn(zmq.green.device, zmq.QUEUE, rep, rep)
+ req.connect('tcp://127.0.0.1:%i' % port)
+ req.send(b'hi')
+ timeout = gevent.Timeout(3)
+ timeout.start()
+ receiver = gevent.spawn(req.recv)
+ self.assertEqual(receiver.get(2), b'hi')
+ timeout.cancel()
+ g.kill(block=True)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_error.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_error.py
new file mode 100644
index 00000000..a2eee14a
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_error.py
@@ -0,0 +1,43 @@
+# -*- coding: utf8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+import time
+
+import zmq
+from zmq import ZMQError, strerror, Again, ContextTerminated
+from zmq.tests import BaseZMQTestCase
+
+if sys.version_info[0] >= 3:
+ long = int
+
+class TestZMQError(BaseZMQTestCase):
+
+ def test_strerror(self):
+ """test that strerror gets the right type."""
+ for i in range(10):
+ e = strerror(i)
+ self.assertTrue(isinstance(e, str))
+
+ def test_zmqerror(self):
+ for errno in range(10):
+ e = ZMQError(errno)
+ self.assertEqual(e.errno, errno)
+ self.assertEqual(str(e), strerror(errno))
+
+ def test_again(self):
+ s = self.context.socket(zmq.REP)
+ self.assertRaises(Again, s.recv, zmq.NOBLOCK)
+ self.assertRaisesErrno(zmq.EAGAIN, s.recv, zmq.NOBLOCK)
+ s.close()
+
+ def atest_ctxterm(self):
+ s = self.context.socket(zmq.REP)
+ t = Thread(target=self.context.term)
+ t.start()
+ self.assertRaises(ContextTerminated, s.recv, zmq.NOBLOCK)
+ self.assertRaisesErrno(zmq.TERM, s.recv, zmq.NOBLOCK)
+ s.close()
+ t.join()
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_etc.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_etc.py
new file mode 100644
index 00000000..ad224064
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_etc.py
@@ -0,0 +1,15 @@
+# Copyright (c) PyZMQ Developers.
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+
+import zmq
+
+from . import skip_if
+
+@skip_if(zmq.zmq_version_info() < (4,1), "libzmq < 4.1")
+def test_has():
+ assert not zmq.has('something weird')
+ has_ipc = zmq.has('ipc')
+ not_windows = not sys.platform.startswith('win')
+ assert has_ipc == not_windows
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_imports.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_imports.py
new file mode 100644
index 00000000..c0ddfaac
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_imports.py
@@ -0,0 +1,62 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+from unittest import TestCase
+
+class TestImports(TestCase):
+ """Test Imports - the quickest test to ensure that we haven't
+ introduced version-incompatible syntax errors."""
+
+ def test_toplevel(self):
+ """test toplevel import"""
+ import zmq
+
+ def test_core(self):
+ """test core imports"""
+ from zmq import Context
+ from zmq import Socket
+ from zmq import Poller
+ from zmq import Frame
+ from zmq import constants
+ from zmq import device, proxy
+ from zmq import Stopwatch
+ from zmq import (
+ zmq_version,
+ zmq_version_info,
+ pyzmq_version,
+ pyzmq_version_info,
+ )
+
+ def test_devices(self):
+ """test device imports"""
+ import zmq.devices
+ from zmq.devices import basedevice
+ from zmq.devices import monitoredqueue
+ from zmq.devices import monitoredqueuedevice
+
+ def test_log(self):
+ """test log imports"""
+ import zmq.log
+ from zmq.log import handlers
+
+ def test_eventloop(self):
+ """test eventloop imports"""
+ import zmq.eventloop
+ from zmq.eventloop import ioloop
+ from zmq.eventloop import zmqstream
+ from zmq.eventloop.minitornado.platform import auto
+ from zmq.eventloop.minitornado import ioloop
+
+ def test_utils(self):
+ """test util imports"""
+ import zmq.utils
+ from zmq.utils import strtypes
+ from zmq.utils import jsonapi
+
+ def test_ssh(self):
+ """test ssh imports"""
+ from zmq.ssh import tunnel
+
+
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_ioloop.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_ioloop.py
new file mode 100644
index 00000000..2a8b1153
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_ioloop.py
@@ -0,0 +1,113 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import time
+import os
+import threading
+
+import zmq
+from zmq.tests import BaseZMQTestCase
+from zmq.eventloop import ioloop
+from zmq.eventloop.minitornado.ioloop import _Timeout
+try:
+ from tornado.ioloop import PollIOLoop, IOLoop as BaseIOLoop
+except ImportError:
+ from zmq.eventloop.minitornado.ioloop import IOLoop as BaseIOLoop
+
+
+def printer():
+ os.system("say hello")
+ raise Exception
+ print (time.time())
+
+
+class Delay(threading.Thread):
+ def __init__(self, f, delay=1):
+ self.f=f
+ self.delay=delay
+ self.aborted=False
+ self.cond=threading.Condition()
+ super(Delay, self).__init__()
+
+ def run(self):
+ self.cond.acquire()
+ self.cond.wait(self.delay)
+ self.cond.release()
+ if not self.aborted:
+ self.f()
+
+ def abort(self):
+ self.aborted=True
+ self.cond.acquire()
+ self.cond.notify()
+ self.cond.release()
+
+
+class TestIOLoop(BaseZMQTestCase):
+
+ def test_simple(self):
+ """simple IOLoop creation test"""
+ loop = ioloop.IOLoop()
+ dc = ioloop.PeriodicCallback(loop.stop, 200, loop)
+ pc = ioloop.PeriodicCallback(lambda : None, 10, loop)
+ pc.start()
+ dc.start()
+ t = Delay(loop.stop,1)
+ t.start()
+ loop.start()
+ if t.isAlive():
+ t.abort()
+ else:
+ self.fail("IOLoop failed to exit")
+
+ def test_timeout_compare(self):
+ """test timeout comparisons"""
+ loop = ioloop.IOLoop()
+ t = _Timeout(1, 2, loop)
+ t2 = _Timeout(1, 3, loop)
+ self.assertEqual(t < t2, id(t) < id(t2))
+ t2 = _Timeout(2,1, loop)
+ self.assertTrue(t < t2)
+
+ def test_poller_events(self):
+ """Tornado poller implementation maps events correctly"""
+ req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)
+ poller = ioloop.ZMQPoller()
+ poller.register(req, ioloop.IOLoop.READ)
+ poller.register(rep, ioloop.IOLoop.READ)
+ events = dict(poller.poll(0))
+ self.assertEqual(events.get(rep), None)
+ self.assertEqual(events.get(req), None)
+
+ poller.register(req, ioloop.IOLoop.WRITE)
+ poller.register(rep, ioloop.IOLoop.WRITE)
+ events = dict(poller.poll(1))
+ self.assertEqual(events.get(req), ioloop.IOLoop.WRITE)
+ self.assertEqual(events.get(rep), None)
+
+ poller.register(rep, ioloop.IOLoop.READ)
+ req.send(b'hi')
+ events = dict(poller.poll(1))
+ self.assertEqual(events.get(rep), ioloop.IOLoop.READ)
+ self.assertEqual(events.get(req), None)
+
+ def test_instance(self):
+ """Test IOLoop.instance returns the right object"""
+ loop = ioloop.IOLoop.instance()
+ self.assertEqual(loop.__class__, ioloop.IOLoop)
+ loop = BaseIOLoop.instance()
+ self.assertEqual(loop.__class__, ioloop.IOLoop)
+
+ def test_close_all(self):
+ """Test close(all_fds=True)"""
+ loop = ioloop.IOLoop.instance()
+ req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)
+ loop.add_handler(req, lambda msg: msg, ioloop.IOLoop.READ)
+ loop.add_handler(rep, lambda msg: msg, ioloop.IOLoop.READ)
+ self.assertEqual(req.closed, False)
+ self.assertEqual(rep.closed, False)
+ loop.close(all_fds=True)
+ self.assertEqual(req.closed, True)
+ self.assertEqual(rep.closed, True)
+
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_log.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_log.py
new file mode 100644
index 00000000..9206f095
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_log.py
@@ -0,0 +1,116 @@
+# encoding: utf-8
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import logging
+import time
+from unittest import TestCase
+
+import zmq
+from zmq.log import handlers
+from zmq.utils.strtypes import b, u
+from zmq.tests import BaseZMQTestCase
+
+
+class TestPubLog(BaseZMQTestCase):
+
+ iface = 'inproc://zmqlog'
+ topic= 'zmq'
+
+ @property
+ def logger(self):
+ # print dir(self)
+ logger = logging.getLogger('zmqtest')
+ logger.setLevel(logging.DEBUG)
+ return logger
+
+ def connect_handler(self, topic=None):
+ topic = self.topic if topic is None else topic
+ logger = self.logger
+ pub,sub = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ handler = handlers.PUBHandler(pub)
+ handler.setLevel(logging.DEBUG)
+ handler.root_topic = topic
+ logger.addHandler(handler)
+ sub.setsockopt(zmq.SUBSCRIBE, b(topic))
+ time.sleep(0.1)
+ return logger, handler, sub
+
+ def test_init_iface(self):
+ logger = self.logger
+ ctx = self.context
+ handler = handlers.PUBHandler(self.iface)
+ self.assertFalse(handler.ctx is ctx)
+ self.sockets.append(handler.socket)
+ # handler.ctx.term()
+ handler = handlers.PUBHandler(self.iface, self.context)
+ self.sockets.append(handler.socket)
+ self.assertTrue(handler.ctx is ctx)
+ handler.setLevel(logging.DEBUG)
+ handler.root_topic = self.topic
+ logger.addHandler(handler)
+ sub = ctx.socket(zmq.SUB)
+ self.sockets.append(sub)
+ sub.setsockopt(zmq.SUBSCRIBE, b(self.topic))
+ sub.connect(self.iface)
+ import time; time.sleep(0.25)
+ msg1 = 'message'
+ logger.info(msg1)
+
+ (topic, msg2) = sub.recv_multipart()
+ self.assertEqual(topic, b'zmq.INFO')
+ self.assertEqual(msg2, b(msg1)+b'\n')
+ logger.removeHandler(handler)
+
+ def test_init_socket(self):
+ pub,sub = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ logger = self.logger
+ handler = handlers.PUBHandler(pub)
+ handler.setLevel(logging.DEBUG)
+ handler.root_topic = self.topic
+ logger.addHandler(handler)
+
+ self.assertTrue(handler.socket is pub)
+ self.assertTrue(handler.ctx is pub.context)
+ self.assertTrue(handler.ctx is self.context)
+ sub.setsockopt(zmq.SUBSCRIBE, b(self.topic))
+ import time; time.sleep(0.1)
+ msg1 = 'message'
+ logger.info(msg1)
+
+ (topic, msg2) = sub.recv_multipart()
+ self.assertEqual(topic, b'zmq.INFO')
+ self.assertEqual(msg2, b(msg1)+b'\n')
+ logger.removeHandler(handler)
+
+ def test_root_topic(self):
+ logger, handler, sub = self.connect_handler()
+ handler.socket.bind(self.iface)
+ sub2 = sub.context.socket(zmq.SUB)
+ self.sockets.append(sub2)
+ sub2.connect(self.iface)
+ sub2.setsockopt(zmq.SUBSCRIBE, b'')
+ handler.root_topic = b'twoonly'
+ msg1 = 'ignored'
+ logger.info(msg1)
+ self.assertRaisesErrno(zmq.EAGAIN, sub.recv, zmq.NOBLOCK)
+ topic,msg2 = sub2.recv_multipart()
+ self.assertEqual(topic, b'twoonly.INFO')
+ self.assertEqual(msg2, b(msg1)+b'\n')
+
+ logger.removeHandler(handler)
+
+ def test_unicode_message(self):
+ logger, handler, sub = self.connect_handler()
+ base_topic = b(self.topic + '.INFO')
+ for msg, expected in [
+ (u('hello'), [base_topic, b('hello\n')]),
+ (u('héllo'), [base_topic, b('héllo\n')]),
+ (u('tøpic::héllo'), [base_topic + b('.tøpic'), b('héllo\n')]),
+ ]:
+ logger.info(msg)
+ received = sub.recv_multipart()
+ self.assertEqual(received, expected)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_message.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_message.py
new file mode 100644
index 00000000..d8770bdf
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_message.py
@@ -0,0 +1,362 @@
+# -*- coding: utf8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import copy
+import sys
+try:
+ from sys import getrefcount as grc
+except ImportError:
+ grc = None
+
+import time
+from pprint import pprint
+from unittest import TestCase
+
+import zmq
+from zmq.tests import BaseZMQTestCase, SkipTest, skip_pypy, PYPY
+from zmq.utils.strtypes import unicode, bytes, b, u
+
+
+# some useful constants:
+
+x = b'x'
+
+try:
+ view = memoryview
+except NameError:
+ view = buffer
+
+if grc:
+ rc0 = grc(x)
+ v = view(x)
+ view_rc = grc(x) - rc0
+
+def await_gc(obj, rc):
+ """wait for refcount on an object to drop to an expected value
+
+ Necessary because of the zero-copy gc thread,
+ which can take some time to receive its DECREF message.
+ """
+ for i in range(50):
+ # rc + 2 because of the refs in this function
+ if grc(obj) <= rc + 2:
+ return
+ time.sleep(0.05)
+
+class TestFrame(BaseZMQTestCase):
+
+ @skip_pypy
+ def test_above_30(self):
+ """Message above 30 bytes are never copied by 0MQ."""
+ for i in range(5, 16): # 32, 64,..., 65536
+ s = (2**i)*x
+ self.assertEqual(grc(s), 2)
+ m = zmq.Frame(s)
+ self.assertEqual(grc(s), 4)
+ del m
+ await_gc(s, 2)
+ self.assertEqual(grc(s), 2)
+ del s
+
+ def test_str(self):
+ """Test the str representations of the Frames."""
+ for i in range(16):
+ s = (2**i)*x
+ m = zmq.Frame(s)
+ m_str = str(m)
+ m_str_b = b(m_str) # py3compat
+ self.assertEqual(s, m_str_b)
+
+ def test_bytes(self):
+ """Test the Frame.bytes property."""
+ for i in range(1,16):
+ s = (2**i)*x
+ m = zmq.Frame(s)
+ b = m.bytes
+ self.assertEqual(s, m.bytes)
+ if not PYPY:
+ # check that it copies
+ self.assert_(b is not s)
+ # check that it copies only once
+ self.assert_(b is m.bytes)
+
+ def test_unicode(self):
+ """Test the unicode representations of the Frames."""
+ s = u('asdf')
+ self.assertRaises(TypeError, zmq.Frame, s)
+ for i in range(16):
+ s = (2**i)*u('§')
+ m = zmq.Frame(s.encode('utf8'))
+ self.assertEqual(s, unicode(m.bytes,'utf8'))
+
+ def test_len(self):
+ """Test the len of the Frames."""
+ for i in range(16):
+ s = (2**i)*x
+ m = zmq.Frame(s)
+ self.assertEqual(len(s), len(m))
+
+ @skip_pypy
+ def test_lifecycle1(self):
+ """Run through a ref counting cycle with a copy."""
+ for i in range(5, 16): # 32, 64,..., 65536
+ s = (2**i)*x
+ rc = 2
+ self.assertEqual(grc(s), rc)
+ m = zmq.Frame(s)
+ rc += 2
+ self.assertEqual(grc(s), rc)
+ m2 = copy.copy(m)
+ rc += 1
+ self.assertEqual(grc(s), rc)
+ buf = m2.buffer
+
+ rc += view_rc
+ self.assertEqual(grc(s), rc)
+
+ self.assertEqual(s, b(str(m)))
+ self.assertEqual(s, bytes(m2))
+ self.assertEqual(s, m.bytes)
+ # self.assert_(s is str(m))
+ # self.assert_(s is str(m2))
+ del m2
+ rc -= 1
+ self.assertEqual(grc(s), rc)
+ rc -= view_rc
+ del buf
+ self.assertEqual(grc(s), rc)
+ del m
+ rc -= 2
+ await_gc(s, rc)
+ self.assertEqual(grc(s), rc)
+ self.assertEqual(rc, 2)
+ del s
+
+ @skip_pypy
+ def test_lifecycle2(self):
+ """Run through a different ref counting cycle with a copy."""
+ for i in range(5, 16): # 32, 64,..., 65536
+ s = (2**i)*x
+ rc = 2
+ self.assertEqual(grc(s), rc)
+ m = zmq.Frame(s)
+ rc += 2
+ self.assertEqual(grc(s), rc)
+ m2 = copy.copy(m)
+ rc += 1
+ self.assertEqual(grc(s), rc)
+ buf = m.buffer
+ rc += view_rc
+ self.assertEqual(grc(s), rc)
+ self.assertEqual(s, b(str(m)))
+ self.assertEqual(s, bytes(m2))
+ self.assertEqual(s, m2.bytes)
+ self.assertEqual(s, m.bytes)
+ # self.assert_(s is str(m))
+ # self.assert_(s is str(m2))
+ del buf
+ self.assertEqual(grc(s), rc)
+ del m
+ # m.buffer is kept until m is del'd
+ rc -= view_rc
+ rc -= 1
+ self.assertEqual(grc(s), rc)
+ del m2
+ rc -= 2
+ await_gc(s, rc)
+ self.assertEqual(grc(s), rc)
+ self.assertEqual(rc, 2)
+ del s
+
+ @skip_pypy
+ def test_tracker(self):
+ m = zmq.Frame(b'asdf', track=True)
+ self.assertFalse(m.tracker.done)
+ pm = zmq.MessageTracker(m)
+ self.assertFalse(pm.done)
+ del m
+ for i in range(10):
+ if pm.done:
+ break
+ time.sleep(0.1)
+ self.assertTrue(pm.done)
+
+ def test_no_tracker(self):
+ m = zmq.Frame(b'asdf', track=False)
+ self.assertEqual(m.tracker, None)
+ m2 = copy.copy(m)
+ self.assertEqual(m2.tracker, None)
+ self.assertRaises(ValueError, zmq.MessageTracker, m)
+
+ @skip_pypy
+ def test_multi_tracker(self):
+ m = zmq.Frame(b'asdf', track=True)
+ m2 = zmq.Frame(b'whoda', track=True)
+ mt = zmq.MessageTracker(m,m2)
+ self.assertFalse(m.tracker.done)
+ self.assertFalse(mt.done)
+ self.assertRaises(zmq.NotDone, mt.wait, 0.1)
+ del m
+ time.sleep(0.1)
+ self.assertRaises(zmq.NotDone, mt.wait, 0.1)
+ self.assertFalse(mt.done)
+ del m2
+ self.assertTrue(mt.wait() is None)
+ self.assertTrue(mt.done)
+
+
+ def test_buffer_in(self):
+ """test using a buffer as input"""
+ ins = b("§§¶•ªº˜µ¬˚…∆˙åß∂©œ∑´†≈ç√")
+ m = zmq.Frame(view(ins))
+
+ def test_bad_buffer_in(self):
+ """test using a bad object"""
+ self.assertRaises(TypeError, zmq.Frame, 5)
+ self.assertRaises(TypeError, zmq.Frame, object())
+
+ def test_buffer_out(self):
+ """receiving buffered output"""
+ ins = b("§§¶•ªº˜µ¬˚…∆˙åß∂©œ∑´†≈ç√")
+ m = zmq.Frame(ins)
+ outb = m.buffer
+ self.assertTrue(isinstance(outb, view))
+ self.assert_(outb is m.buffer)
+ self.assert_(m.buffer is m.buffer)
+
+ def test_multisend(self):
+ """ensure that a message remains intact after multiple sends"""
+ a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ s = b"message"
+ m = zmq.Frame(s)
+ self.assertEqual(s, m.bytes)
+
+ a.send(m, copy=False)
+ time.sleep(0.1)
+ self.assertEqual(s, m.bytes)
+ a.send(m, copy=False)
+ time.sleep(0.1)
+ self.assertEqual(s, m.bytes)
+ a.send(m, copy=True)
+ time.sleep(0.1)
+ self.assertEqual(s, m.bytes)
+ a.send(m, copy=True)
+ time.sleep(0.1)
+ self.assertEqual(s, m.bytes)
+ for i in range(4):
+ r = b.recv()
+ self.assertEqual(s,r)
+ self.assertEqual(s, m.bytes)
+
+ def test_buffer_numpy(self):
+ """test non-copying numpy array messages"""
+ try:
+ import numpy
+ except ImportError:
+ raise SkipTest("numpy required")
+ rand = numpy.random.randint
+ shapes = [ rand(2,16) for i in range(5) ]
+ for i in range(1,len(shapes)+1):
+ shape = shapes[:i]
+ A = numpy.random.random(shape)
+ m = zmq.Frame(A)
+ if view.__name__ == 'buffer':
+ self.assertEqual(A.data, m.buffer)
+ B = numpy.frombuffer(m.buffer,dtype=A.dtype).reshape(A.shape)
+ else:
+ self.assertEqual(memoryview(A), m.buffer)
+ B = numpy.array(m.buffer,dtype=A.dtype).reshape(A.shape)
+ self.assertEqual((A==B).all(), True)
+
+ def test_memoryview(self):
+ """test messages from memoryview"""
+ major,minor = sys.version_info[:2]
+ if not (major >= 3 or (major == 2 and minor >= 7)):
+ raise SkipTest("memoryviews only in python >= 2.7")
+
+ s = b'carrotjuice'
+ v = memoryview(s)
+ m = zmq.Frame(s)
+ buf = m.buffer
+ s2 = buf.tobytes()
+ self.assertEqual(s2,s)
+ self.assertEqual(m.bytes,s)
+
+ def test_noncopying_recv(self):
+ """check for clobbering message buffers"""
+ null = b'\0'*64
+ sa,sb = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ for i in range(32):
+ # try a few times
+ sb.send(null, copy=False)
+ m = sa.recv(copy=False)
+ mb = m.bytes
+ # buf = view(m)
+ buf = m.buffer
+ del m
+ for i in range(5):
+ ff=b'\xff'*(40 + i*10)
+ sb.send(ff, copy=False)
+ m2 = sa.recv(copy=False)
+ if view.__name__ == 'buffer':
+ b = bytes(buf)
+ else:
+ b = buf.tobytes()
+ self.assertEqual(b, null)
+ self.assertEqual(mb, null)
+ self.assertEqual(m2.bytes, ff)
+
+ @skip_pypy
+ def test_buffer_numpy(self):
+ """test non-copying numpy array messages"""
+ try:
+ import numpy
+ except ImportError:
+ raise SkipTest("requires numpy")
+ if sys.version_info < (2,7):
+ raise SkipTest("requires new-style buffer interface (py >= 2.7)")
+ rand = numpy.random.randint
+ shapes = [ rand(2,5) for i in range(5) ]
+ a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ dtypes = [int, float, '>i4', 'B']
+ for i in range(1,len(shapes)+1):
+ shape = shapes[:i]
+ for dt in dtypes:
+ A = numpy.empty(shape, dtype=dt)
+ while numpy.isnan(A).any():
+ # don't let nan sneak in
+ A = numpy.ndarray(shape, dtype=dt)
+ a.send(A, copy=False)
+ msg = b.recv(copy=False)
+
+ B = numpy.frombuffer(msg, A.dtype).reshape(A.shape)
+ self.assertEqual(A.shape, B.shape)
+ self.assertTrue((A==B).all())
+ A = numpy.empty(shape, dtype=[('a', int), ('b', float), ('c', 'a32')])
+ A['a'] = 1024
+ A['b'] = 1e9
+ A['c'] = 'hello there'
+ a.send(A, copy=False)
+ msg = b.recv(copy=False)
+
+ B = numpy.frombuffer(msg, A.dtype).reshape(A.shape)
+ self.assertEqual(A.shape, B.shape)
+ self.assertTrue((A==B).all())
+
+ def test_frame_more(self):
+ """test Frame.more attribute"""
+ frame = zmq.Frame(b"hello")
+ self.assertFalse(frame.more)
+ sa,sb = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ sa.send_multipart([b'hi', b'there'])
+ frame = self.recv(sb, copy=False)
+ self.assertTrue(frame.more)
+ if zmq.zmq_version_info()[0] >= 3 and not PYPY:
+ self.assertTrue(frame.get(zmq.MORE))
+ frame = self.recv(sb, copy=False)
+ self.assertFalse(frame.more)
+ if zmq.zmq_version_info()[0] >= 3 and not PYPY:
+ self.assertFalse(frame.get(zmq.MORE))
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_monitor.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_monitor.py
new file mode 100644
index 00000000..4f035388
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_monitor.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import sys
+import time
+import struct
+
+from unittest import TestCase
+
+import zmq
+from zmq.tests import BaseZMQTestCase, skip_if, skip_pypy
+from zmq.utils.monitor import recv_monitor_message
+
+skip_lt_4 = skip_if(zmq.zmq_version_info() < (4,), "requires zmq >= 4")
+
+class TestSocketMonitor(BaseZMQTestCase):
+
+ @skip_lt_4
+ def test_monitor(self):
+ """Test monitoring interface for sockets."""
+ s_rep = self.context.socket(zmq.REP)
+ s_req = self.context.socket(zmq.REQ)
+ self.sockets.extend([s_rep, s_req])
+ s_req.bind("tcp://127.0.0.1:6666")
+ # try monitoring the REP socket
+
+ s_rep.monitor("inproc://monitor.rep", zmq.EVENT_ALL)
+ # create listening socket for monitor
+ s_event = self.context.socket(zmq.PAIR)
+ self.sockets.append(s_event)
+ s_event.connect("inproc://monitor.rep")
+ s_event.linger = 0
+ # test receive event for connect event
+ s_rep.connect("tcp://127.0.0.1:6666")
+ m = recv_monitor_message(s_event)
+ if m['event'] == zmq.EVENT_CONNECT_DELAYED:
+ self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6666")
+ # test receive event for connected event
+ m = recv_monitor_message(s_event)
+ self.assertEqual(m['event'], zmq.EVENT_CONNECTED)
+ self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6666")
+
+ # test monitor can be disabled.
+ s_rep.disable_monitor()
+ m = recv_monitor_message(s_event)
+ self.assertEqual(m['event'], zmq.EVENT_MONITOR_STOPPED)
+
+
+ @skip_lt_4
+ def test_monitor_connected(self):
+ """Test connected monitoring socket."""
+ s_rep = self.context.socket(zmq.REP)
+ s_req = self.context.socket(zmq.REQ)
+ self.sockets.extend([s_rep, s_req])
+ s_req.bind("tcp://127.0.0.1:6667")
+ # try monitoring the REP socket
+ # create listening socket for monitor
+ s_event = s_rep.get_monitor_socket()
+ s_event.linger = 0
+ self.sockets.append(s_event)
+ # test receive event for connect event
+ s_rep.connect("tcp://127.0.0.1:6667")
+ m = recv_monitor_message(s_event)
+ if m['event'] == zmq.EVENT_CONNECT_DELAYED:
+ self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6667")
+ # test receive event for connected event
+ m = recv_monitor_message(s_event)
+ self.assertEqual(m['event'], zmq.EVENT_CONNECTED)
+ self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6667")
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_monqueue.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_monqueue.py
new file mode 100644
index 00000000..e855602e
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_monqueue.py
@@ -0,0 +1,227 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import time
+from unittest import TestCase
+
+import zmq
+from zmq import devices
+
+from zmq.tests import BaseZMQTestCase, SkipTest, PYPY
+from zmq.utils.strtypes import unicode
+
+
+if PYPY or zmq.zmq_version_info() >= (4,1):
+ # cleanup of shared Context doesn't work on PyPy
+ # there also seems to be a bug in cleanup in libzmq-4.1 (zeromq/libzmq#1052)
+ devices.Device.context_factory = zmq.Context
+
+
+class TestMonitoredQueue(BaseZMQTestCase):
+
+ sockets = []
+
+ def build_device(self, mon_sub=b"", in_prefix=b'in', out_prefix=b'out'):
+ self.device = devices.ThreadMonitoredQueue(zmq.PAIR, zmq.PAIR, zmq.PUB,
+ in_prefix, out_prefix)
+ alice = self.context.socket(zmq.PAIR)
+ bob = self.context.socket(zmq.PAIR)
+ mon = self.context.socket(zmq.SUB)
+
+ aport = alice.bind_to_random_port('tcp://127.0.0.1')
+ bport = bob.bind_to_random_port('tcp://127.0.0.1')
+ mport = mon.bind_to_random_port('tcp://127.0.0.1')
+ mon.setsockopt(zmq.SUBSCRIBE, mon_sub)
+
+ self.device.connect_in("tcp://127.0.0.1:%i"%aport)
+ self.device.connect_out("tcp://127.0.0.1:%i"%bport)
+ self.device.connect_mon("tcp://127.0.0.1:%i"%mport)
+ self.device.start()
+ time.sleep(.2)
+ try:
+ # this is currently necessary to ensure no dropped monitor messages
+ # see LIBZMQ-248 for more info
+ mon.recv_multipart(zmq.NOBLOCK)
+ except zmq.ZMQError:
+ pass
+ self.sockets.extend([alice, bob, mon])
+ return alice, bob, mon
+
+
+ def teardown_device(self):
+ for socket in self.sockets:
+ socket.close()
+ del socket
+ del self.device
+
+ def test_reply(self):
+ alice, bob, mon = self.build_device()
+ alices = b"hello bob".split()
+ alice.send_multipart(alices)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices, bobs)
+ bobs = b"hello alice".split()
+ bob.send_multipart(bobs)
+ alices = self.recv_multipart(alice)
+ self.assertEqual(alices, bobs)
+ self.teardown_device()
+
+ def test_queue(self):
+ alice, bob, mon = self.build_device()
+ alices = b"hello bob".split()
+ alice.send_multipart(alices)
+ alices2 = b"hello again".split()
+ alice.send_multipart(alices2)
+ alices3 = b"hello again and again".split()
+ alice.send_multipart(alices3)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices, bobs)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices2, bobs)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices3, bobs)
+ bobs = b"hello alice".split()
+ bob.send_multipart(bobs)
+ alices = self.recv_multipart(alice)
+ self.assertEqual(alices, bobs)
+ self.teardown_device()
+
+ def test_monitor(self):
+ alice, bob, mon = self.build_device()
+ alices = b"hello bob".split()
+ alice.send_multipart(alices)
+ alices2 = b"hello again".split()
+ alice.send_multipart(alices2)
+ alices3 = b"hello again and again".split()
+ alice.send_multipart(alices3)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices, bobs)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'in']+bobs, mons)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices2, bobs)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices3, bobs)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'in']+alices2, mons)
+ bobs = b"hello alice".split()
+ bob.send_multipart(bobs)
+ alices = self.recv_multipart(alice)
+ self.assertEqual(alices, bobs)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'in']+alices3, mons)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'out']+bobs, mons)
+ self.teardown_device()
+
+ def test_prefix(self):
+ alice, bob, mon = self.build_device(b"", b'foo', b'bar')
+ alices = b"hello bob".split()
+ alice.send_multipart(alices)
+ alices2 = b"hello again".split()
+ alice.send_multipart(alices2)
+ alices3 = b"hello again and again".split()
+ alice.send_multipart(alices3)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices, bobs)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'foo']+bobs, mons)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices2, bobs)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices3, bobs)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'foo']+alices2, mons)
+ bobs = b"hello alice".split()
+ bob.send_multipart(bobs)
+ alices = self.recv_multipart(alice)
+ self.assertEqual(alices, bobs)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'foo']+alices3, mons)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'bar']+bobs, mons)
+ self.teardown_device()
+
+ def test_monitor_subscribe(self):
+ alice, bob, mon = self.build_device(b"out")
+ alices = b"hello bob".split()
+ alice.send_multipart(alices)
+ alices2 = b"hello again".split()
+ alice.send_multipart(alices2)
+ alices3 = b"hello again and again".split()
+ alice.send_multipart(alices3)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices, bobs)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices2, bobs)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices3, bobs)
+ bobs = b"hello alice".split()
+ bob.send_multipart(bobs)
+ alices = self.recv_multipart(alice)
+ self.assertEqual(alices, bobs)
+ mons = self.recv_multipart(mon)
+ self.assertEqual([b'out']+bobs, mons)
+ self.teardown_device()
+
+ def test_router_router(self):
+ """test router-router MQ devices"""
+ dev = devices.ThreadMonitoredQueue(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'in', b'out')
+ self.device = dev
+ dev.setsockopt_in(zmq.LINGER, 0)
+ dev.setsockopt_out(zmq.LINGER, 0)
+ dev.setsockopt_mon(zmq.LINGER, 0)
+
+ binder = self.context.socket(zmq.DEALER)
+ porta = binder.bind_to_random_port('tcp://127.0.0.1')
+ portb = binder.bind_to_random_port('tcp://127.0.0.1')
+ binder.close()
+ time.sleep(0.1)
+ a = self.context.socket(zmq.DEALER)
+ a.identity = b'a'
+ b = self.context.socket(zmq.DEALER)
+ b.identity = b'b'
+ self.sockets.extend([a, b])
+
+ a.connect('tcp://127.0.0.1:%i'%porta)
+ dev.bind_in('tcp://127.0.0.1:%i'%porta)
+ b.connect('tcp://127.0.0.1:%i'%portb)
+ dev.bind_out('tcp://127.0.0.1:%i'%portb)
+ dev.start()
+ time.sleep(0.2)
+ if zmq.zmq_version_info() >= (3,1,0):
+ # flush erroneous poll state, due to LIBZMQ-280
+ ping_msg = [ b'ping', b'pong' ]
+ for s in (a,b):
+ s.send_multipart(ping_msg)
+ try:
+ s.recv(zmq.NOBLOCK)
+ except zmq.ZMQError:
+ pass
+ msg = [ b'hello', b'there' ]
+ a.send_multipart([b'b']+msg)
+ bmsg = self.recv_multipart(b)
+ self.assertEqual(bmsg, [b'a']+msg)
+ b.send_multipart(bmsg)
+ amsg = self.recv_multipart(a)
+ self.assertEqual(amsg, [b'b']+msg)
+ self.teardown_device()
+
+ def test_default_mq_args(self):
+ self.device = dev = devices.ThreadMonitoredQueue(zmq.ROUTER, zmq.DEALER, zmq.PUB)
+ dev.setsockopt_in(zmq.LINGER, 0)
+ dev.setsockopt_out(zmq.LINGER, 0)
+ dev.setsockopt_mon(zmq.LINGER, 0)
+ # this will raise if default args are wrong
+ dev.start()
+ self.teardown_device()
+
+ def test_mq_check_prefix(self):
+ ins = self.context.socket(zmq.ROUTER)
+ outs = self.context.socket(zmq.DEALER)
+ mons = self.context.socket(zmq.PUB)
+ self.sockets.extend([ins, outs, mons])
+
+ ins = unicode('in')
+ outs = unicode('out')
+ self.assertRaises(TypeError, devices.monitoredqueue, ins, outs, mons)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_multipart.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_multipart.py
new file mode 100644
index 00000000..24d41be0
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_multipart.py
@@ -0,0 +1,35 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import zmq
+
+
+from zmq.tests import BaseZMQTestCase, SkipTest, have_gevent, GreenTest
+
+
+class TestMultipart(BaseZMQTestCase):
+
+ def test_router_dealer(self):
+ router, dealer = self.create_bound_pair(zmq.ROUTER, zmq.DEALER)
+
+ msg1 = b'message1'
+ dealer.send(msg1)
+ ident = self.recv(router)
+ more = router.rcvmore
+ self.assertEqual(more, True)
+ msg2 = self.recv(router)
+ self.assertEqual(msg1, msg2)
+ more = router.rcvmore
+ self.assertEqual(more, False)
+
+ def test_basic_multipart(self):
+ a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ msg = [ b'hi', b'there', b'b']
+ a.send_multipart(msg)
+ recvd = b.recv_multipart()
+ self.assertEqual(msg, recvd)
+
+if have_gevent:
+ class TestMultipartGreen(GreenTest, TestMultipart):
+ pass
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_pair.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_pair.py
new file mode 100644
index 00000000..e88c1e8b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_pair.py
@@ -0,0 +1,53 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import zmq
+
+
+from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest
+
+
+x = b' '
+class TestPair(BaseZMQTestCase):
+
+ def test_basic(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+
+ msg1 = b'message1'
+ msg2 = self.ping_pong(s1, s2, msg1)
+ self.assertEqual(msg1, msg2)
+
+ def test_multiple(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+
+ for i in range(10):
+ msg = i*x
+ s1.send(msg)
+
+ for i in range(10):
+ msg = i*x
+ s2.send(msg)
+
+ for i in range(10):
+ msg = s1.recv()
+ self.assertEqual(msg, i*x)
+
+ for i in range(10):
+ msg = s2.recv()
+ self.assertEqual(msg, i*x)
+
+ def test_json(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ o = dict(a=10,b=list(range(10)))
+ o2 = self.ping_pong_json(s1, s2, o)
+
+ def test_pyobj(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ o = dict(a=10,b=range(10))
+ o2 = self.ping_pong_pyobj(s1, s2, o)
+
+if have_gevent:
+ class TestReqRepGreen(GreenTest, TestPair):
+ pass
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_poll.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_poll.py
new file mode 100644
index 00000000..57346c89
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_poll.py
@@ -0,0 +1,229 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import time
+from unittest import TestCase
+
+import zmq
+
+from zmq.tests import PollZMQTestCase, have_gevent, GreenTest
+
+def wait():
+ time.sleep(.25)
+
+
+class TestPoll(PollZMQTestCase):
+
+ Poller = zmq.Poller
+
+ # This test is failing due to this issue:
+ # http://github.com/sustrik/zeromq2/issues#issue/26
+ def test_pair(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+
+ # Sleep to allow sockets to connect.
+ wait()
+
+ poller = self.Poller()
+ poller.register(s1, zmq.POLLIN|zmq.POLLOUT)
+ poller.register(s2, zmq.POLLIN|zmq.POLLOUT)
+ # Poll result should contain both sockets
+ socks = dict(poller.poll())
+ # Now make sure that both are send ready.
+ self.assertEqual(socks[s1], zmq.POLLOUT)
+ self.assertEqual(socks[s2], zmq.POLLOUT)
+ # Now do a send on both, wait and test for zmq.POLLOUT|zmq.POLLIN
+ s1.send(b'msg1')
+ s2.send(b'msg2')
+ wait()
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s1], zmq.POLLOUT|zmq.POLLIN)
+ self.assertEqual(socks[s2], zmq.POLLOUT|zmq.POLLIN)
+ # Make sure that both are in POLLOUT after recv.
+ s1.recv()
+ s2.recv()
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s1], zmq.POLLOUT)
+ self.assertEqual(socks[s2], zmq.POLLOUT)
+
+ poller.unregister(s1)
+ poller.unregister(s2)
+
+ # Wait for everything to finish.
+ wait()
+
+ def test_reqrep(self):
+ s1, s2 = self.create_bound_pair(zmq.REP, zmq.REQ)
+
+ # Sleep to allow sockets to connect.
+ wait()
+
+ poller = self.Poller()
+ poller.register(s1, zmq.POLLIN|zmq.POLLOUT)
+ poller.register(s2, zmq.POLLIN|zmq.POLLOUT)
+
+ # Make sure that s1 is in state 0 and s2 is in POLLOUT
+ socks = dict(poller.poll())
+ self.assertEqual(s1 in socks, 0)
+ self.assertEqual(socks[s2], zmq.POLLOUT)
+
+ # Make sure that s2 goes immediately into state 0 after send.
+ s2.send(b'msg1')
+ socks = dict(poller.poll())
+ self.assertEqual(s2 in socks, 0)
+
+ # Make sure that s1 goes into POLLIN state after a time.sleep().
+ time.sleep(0.5)
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s1], zmq.POLLIN)
+
+ # Make sure that s1 goes into POLLOUT after recv.
+ s1.recv()
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s1], zmq.POLLOUT)
+
+ # Make sure s1 goes into state 0 after send.
+ s1.send(b'msg2')
+ socks = dict(poller.poll())
+ self.assertEqual(s1 in socks, 0)
+
+ # Wait and then see that s2 is in POLLIN.
+ time.sleep(0.5)
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s2], zmq.POLLIN)
+
+ # Make sure that s2 is in POLLOUT after recv.
+ s2.recv()
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s2], zmq.POLLOUT)
+
+ poller.unregister(s1)
+ poller.unregister(s2)
+
+ # Wait for everything to finish.
+ wait()
+
+ def test_no_events(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ poller = self.Poller()
+ poller.register(s1, zmq.POLLIN|zmq.POLLOUT)
+ poller.register(s2, 0)
+ self.assertTrue(s1 in poller)
+ self.assertFalse(s2 in poller)
+ poller.register(s1, 0)
+ self.assertFalse(s1 in poller)
+
+ def test_pubsub(self):
+ s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ s2.setsockopt(zmq.SUBSCRIBE, b'')
+
+ # Sleep to allow sockets to connect.
+ wait()
+
+ poller = self.Poller()
+ poller.register(s1, zmq.POLLIN|zmq.POLLOUT)
+ poller.register(s2, zmq.POLLIN)
+
+ # Now make sure that both are send ready.
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s1], zmq.POLLOUT)
+ self.assertEqual(s2 in socks, 0)
+ # Make sure that s1 stays in POLLOUT after a send.
+ s1.send(b'msg1')
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s1], zmq.POLLOUT)
+
+ # Make sure that s2 is POLLIN after waiting.
+ wait()
+ socks = dict(poller.poll())
+ self.assertEqual(socks[s2], zmq.POLLIN)
+
+ # Make sure that s2 goes into 0 after recv.
+ s2.recv()
+ socks = dict(poller.poll())
+ self.assertEqual(s2 in socks, 0)
+
+ poller.unregister(s1)
+ poller.unregister(s2)
+
+ # Wait for everything to finish.
+ wait()
+ def test_timeout(self):
+ """make sure Poller.poll timeout has the right units (milliseconds)."""
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ poller = self.Poller()
+ poller.register(s1, zmq.POLLIN)
+ tic = time.time()
+ evt = poller.poll(.005)
+ toc = time.time()
+ self.assertTrue(toc-tic < 0.1)
+ tic = time.time()
+ evt = poller.poll(5)
+ toc = time.time()
+ self.assertTrue(toc-tic < 0.1)
+ self.assertTrue(toc-tic > .001)
+ tic = time.time()
+ evt = poller.poll(500)
+ toc = time.time()
+ self.assertTrue(toc-tic < 1)
+ self.assertTrue(toc-tic > 0.1)
+
+class TestSelect(PollZMQTestCase):
+
+ def test_pair(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+
+ # Sleep to allow sockets to connect.
+ wait()
+
+ rlist, wlist, xlist = zmq.select([s1, s2], [s1, s2], [s1, s2])
+ self.assert_(s1 in wlist)
+ self.assert_(s2 in wlist)
+ self.assert_(s1 not in rlist)
+ self.assert_(s2 not in rlist)
+
+ def test_timeout(self):
+ """make sure select timeout has the right units (seconds)."""
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ tic = time.time()
+ r,w,x = zmq.select([s1,s2],[],[],.005)
+ toc = time.time()
+ self.assertTrue(toc-tic < 1)
+ self.assertTrue(toc-tic > 0.001)
+ tic = time.time()
+ r,w,x = zmq.select([s1,s2],[],[],.25)
+ toc = time.time()
+ self.assertTrue(toc-tic < 1)
+ self.assertTrue(toc-tic > 0.1)
+
+
+if have_gevent:
+ import gevent
+ from zmq import green as gzmq
+
+ class TestPollGreen(GreenTest, TestPoll):
+ Poller = gzmq.Poller
+
+ def test_wakeup(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ poller = self.Poller()
+ poller.register(s2, zmq.POLLIN)
+
+ tic = time.time()
+ r = gevent.spawn(lambda: poller.poll(10000))
+ s = gevent.spawn(lambda: s1.send(b'msg1'))
+ r.join()
+ toc = time.time()
+ self.assertTrue(toc-tic < 1)
+
+ def test_socket_poll(self):
+ s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+
+ tic = time.time()
+ r = gevent.spawn(lambda: s2.poll(10000))
+ s = gevent.spawn(lambda: s1.send(b'msg1'))
+ r.join()
+ toc = time.time()
+ self.assertTrue(toc-tic < 1)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_pubsub.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_pubsub.py
new file mode 100644
index 00000000..a3ee22aa
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_pubsub.py
@@ -0,0 +1,41 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import time
+from unittest import TestCase
+
+import zmq
+
+from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest
+
+
+class TestPubSub(BaseZMQTestCase):
+
+ pass
+
+ # We are disabling this test while an issue is being resolved.
+ def test_basic(self):
+ s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ s2.setsockopt(zmq.SUBSCRIBE,b'')
+ time.sleep(0.1)
+ msg1 = b'message'
+ s1.send(msg1)
+ msg2 = s2.recv() # This is blocking!
+ self.assertEqual(msg1, msg2)
+
+ def test_topic(self):
+ s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ s2.setsockopt(zmq.SUBSCRIBE, b'x')
+ time.sleep(0.1)
+ msg1 = b'message'
+ s1.send(msg1)
+ self.assertRaisesErrno(zmq.EAGAIN, s2.recv, zmq.NOBLOCK)
+ msg1 = b'xmessage'
+ s1.send(msg1)
+ msg2 = s2.recv()
+ self.assertEqual(msg1, msg2)
+
+if have_gevent:
+ class TestPubSubGreen(GreenTest, TestPubSub):
+ pass
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_reqrep.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_reqrep.py
new file mode 100644
index 00000000..de17f2b3
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_reqrep.py
@@ -0,0 +1,62 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from unittest import TestCase
+
+import zmq
+from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest
+
+
+class TestReqRep(BaseZMQTestCase):
+
+ def test_basic(self):
+ s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+
+ msg1 = b'message 1'
+ msg2 = self.ping_pong(s1, s2, msg1)
+ self.assertEqual(msg1, msg2)
+
+ def test_multiple(self):
+ s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+
+ for i in range(10):
+ msg1 = i*b' '
+ msg2 = self.ping_pong(s1, s2, msg1)
+ self.assertEqual(msg1, msg2)
+
+ def test_bad_send_recv(self):
+ s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+
+ if zmq.zmq_version() != '2.1.8':
+ # this doesn't work on 2.1.8
+ for copy in (True,False):
+ self.assertRaisesErrno(zmq.EFSM, s1.recv, copy=copy)
+ self.assertRaisesErrno(zmq.EFSM, s2.send, b'asdf', copy=copy)
+
+ # I have to have this or we die on an Abort trap.
+ msg1 = b'asdf'
+ msg2 = self.ping_pong(s1, s2, msg1)
+ self.assertEqual(msg1, msg2)
+
+ def test_json(self):
+ s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+ o = dict(a=10,b=list(range(10)))
+ o2 = self.ping_pong_json(s1, s2, o)
+
+ def test_pyobj(self):
+ s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+ o = dict(a=10,b=range(10))
+ o2 = self.ping_pong_pyobj(s1, s2, o)
+
+ def test_large_msg(self):
+ s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
+ msg1 = 10000*b'X'
+
+ for i in range(10):
+ msg2 = self.ping_pong(s1, s2, msg1)
+ self.assertEqual(msg1, msg2)
+
+if have_gevent:
+ class TestReqRepGreen(GreenTest, TestReqRep):
+ pass
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_security.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_security.py
new file mode 100644
index 00000000..687b7e0f
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_security.py
@@ -0,0 +1,212 @@
+"""Test libzmq security (libzmq >= 3.3.0)"""
+# -*- coding: utf8 -*-
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import os
+from threading import Thread
+
+import zmq
+from zmq.tests import (
+ BaseZMQTestCase, SkipTest, PYPY
+)
+from zmq.utils import z85
+
+
+USER = b"admin"
+PASS = b"password"
+
+class TestSecurity(BaseZMQTestCase):
+
+ def setUp(self):
+ if zmq.zmq_version_info() < (4,0):
+ raise SkipTest("security is new in libzmq 4.0")
+ try:
+ zmq.curve_keypair()
+ except zmq.ZMQError:
+ raise SkipTest("security requires libzmq to be linked against libsodium")
+ super(TestSecurity, self).setUp()
+
+
+ def zap_handler(self):
+ socket = self.context.socket(zmq.REP)
+ socket.bind("inproc://zeromq.zap.01")
+ try:
+ msg = self.recv_multipart(socket)
+
+ version, sequence, domain, address, identity, mechanism = msg[:6]
+ if mechanism == b'PLAIN':
+ username, password = msg[6:]
+ elif mechanism == b'CURVE':
+ key = msg[6]
+
+ self.assertEqual(version, b"1.0")
+ self.assertEqual(identity, b"IDENT")
+ reply = [version, sequence]
+ if mechanism == b'CURVE' or \
+ (mechanism == b'PLAIN' and username == USER and password == PASS) or \
+ (mechanism == b'NULL'):
+ reply.extend([
+ b"200",
+ b"OK",
+ b"anonymous",
+ b"\5Hello\0\0\0\5World",
+ ])
+ else:
+ reply.extend([
+ b"400",
+ b"Invalid username or password",
+ b"",
+ b"",
+ ])
+ socket.send_multipart(reply)
+ finally:
+ socket.close()
+
+ def start_zap(self):
+ self.zap_thread = Thread(target=self.zap_handler)
+ self.zap_thread.start()
+
+ def stop_zap(self):
+ self.zap_thread.join()
+
+ def bounce(self, server, client, test_metadata=True):
+ msg = [os.urandom(64), os.urandom(64)]
+ client.send_multipart(msg)
+ frames = self.recv_multipart(server, copy=False)
+ recvd = list(map(lambda x: x.bytes, frames))
+
+ try:
+ if test_metadata and not PYPY:
+ for frame in frames:
+ self.assertEqual(frame.get('User-Id'), 'anonymous')
+ self.assertEqual(frame.get('Hello'), 'World')
+ self.assertEqual(frame['Socket-Type'], 'DEALER')
+ except zmq.ZMQVersionError:
+ pass
+
+ self.assertEqual(recvd, msg)
+ server.send_multipart(recvd)
+ msg2 = self.recv_multipart(client)
+ self.assertEqual(msg2, msg)
+
+ def test_null(self):
+ """test NULL (default) security"""
+ server = self.socket(zmq.DEALER)
+ client = self.socket(zmq.DEALER)
+ self.assertEqual(client.MECHANISM, zmq.NULL)
+ self.assertEqual(server.mechanism, zmq.NULL)
+ self.assertEqual(client.plain_server, 0)
+ self.assertEqual(server.plain_server, 0)
+ iface = 'tcp://127.0.0.1'
+ port = server.bind_to_random_port(iface)
+ client.connect("%s:%i" % (iface, port))
+ self.bounce(server, client, False)
+
+ def test_plain(self):
+ """test PLAIN authentication"""
+ server = self.socket(zmq.DEALER)
+ server.identity = b'IDENT'
+ client = self.socket(zmq.DEALER)
+ self.assertEqual(client.plain_username, b'')
+ self.assertEqual(client.plain_password, b'')
+ client.plain_username = USER
+ client.plain_password = PASS
+ self.assertEqual(client.getsockopt(zmq.PLAIN_USERNAME), USER)
+ self.assertEqual(client.getsockopt(zmq.PLAIN_PASSWORD), PASS)
+ self.assertEqual(client.plain_server, 0)
+ self.assertEqual(server.plain_server, 0)
+ server.plain_server = True
+ self.assertEqual(server.mechanism, zmq.PLAIN)
+ self.assertEqual(client.mechanism, zmq.PLAIN)
+
+ assert not client.plain_server
+ assert server.plain_server
+
+ self.start_zap()
+
+ iface = 'tcp://127.0.0.1'
+ port = server.bind_to_random_port(iface)
+ client.connect("%s:%i" % (iface, port))
+ self.bounce(server, client)
+ self.stop_zap()
+
+ def skip_plain_inauth(self):
+ """test PLAIN failed authentication"""
+ server = self.socket(zmq.DEALER)
+ server.identity = b'IDENT'
+ client = self.socket(zmq.DEALER)
+ self.sockets.extend([server, client])
+ client.plain_username = USER
+ client.plain_password = b'incorrect'
+ server.plain_server = True
+ self.assertEqual(server.mechanism, zmq.PLAIN)
+ self.assertEqual(client.mechanism, zmq.PLAIN)
+
+ self.start_zap()
+
+ iface = 'tcp://127.0.0.1'
+ port = server.bind_to_random_port(iface)
+ client.connect("%s:%i" % (iface, port))
+ client.send(b'ping')
+ server.rcvtimeo = 250
+ self.assertRaisesErrno(zmq.EAGAIN, server.recv)
+ self.stop_zap()
+
+ def test_keypair(self):
+ """test curve_keypair"""
+ try:
+ public, secret = zmq.curve_keypair()
+ except zmq.ZMQError:
+ raise SkipTest("CURVE unsupported")
+
+ self.assertEqual(type(secret), bytes)
+ self.assertEqual(type(public), bytes)
+ self.assertEqual(len(secret), 40)
+ self.assertEqual(len(public), 40)
+
+ # verify that it is indeed Z85
+ bsecret, bpublic = [ z85.decode(key) for key in (public, secret) ]
+ self.assertEqual(type(bsecret), bytes)
+ self.assertEqual(type(bpublic), bytes)
+ self.assertEqual(len(bsecret), 32)
+ self.assertEqual(len(bpublic), 32)
+
+
+ def test_curve(self):
+ """test CURVE encryption"""
+ server = self.socket(zmq.DEALER)
+ server.identity = b'IDENT'
+ client = self.socket(zmq.DEALER)
+ self.sockets.extend([server, client])
+ try:
+ server.curve_server = True
+ except zmq.ZMQError as e:
+ # will raise EINVAL if not linked against libsodium
+ if e.errno == zmq.EINVAL:
+ raise SkipTest("CURVE unsupported")
+
+ server_public, server_secret = zmq.curve_keypair()
+ client_public, client_secret = zmq.curve_keypair()
+
+ server.curve_secretkey = server_secret
+ server.curve_publickey = server_public
+ client.curve_serverkey = server_public
+ client.curve_publickey = client_public
+ client.curve_secretkey = client_secret
+
+ self.assertEqual(server.mechanism, zmq.CURVE)
+ self.assertEqual(client.mechanism, zmq.CURVE)
+
+ self.assertEqual(server.get(zmq.CURVE_SERVER), True)
+ self.assertEqual(client.get(zmq.CURVE_SERVER), False)
+
+ self.start_zap()
+
+ iface = 'tcp://127.0.0.1'
+ port = server.bind_to_random_port(iface)
+ client.connect("%s:%i" % (iface, port))
+ self.bounce(server, client)
+ self.stop_zap()
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_socket.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_socket.py
new file mode 100644
index 00000000..5c842edc
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_socket.py
@@ -0,0 +1,450 @@
+# -*- coding: utf8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import time
+import warnings
+
+import zmq
+from zmq.tests import (
+ BaseZMQTestCase, SkipTest, have_gevent, GreenTest, skip_pypy, skip_if
+)
+from zmq.utils.strtypes import bytes, unicode
+
+
+class TestSocket(BaseZMQTestCase):
+
+ def test_create(self):
+ ctx = self.Context()
+ s = ctx.socket(zmq.PUB)
+ # Superluminal protocol not yet implemented
+ self.assertRaisesErrno(zmq.EPROTONOSUPPORT, s.bind, 'ftl://a')
+ self.assertRaisesErrno(zmq.EPROTONOSUPPORT, s.connect, 'ftl://a')
+ self.assertRaisesErrno(zmq.EINVAL, s.bind, 'tcp://')
+ s.close()
+ del ctx
+
+ def test_context_manager(self):
+ url = 'inproc://a'
+ with self.Context() as ctx:
+ with ctx.socket(zmq.PUSH) as a:
+ a.bind(url)
+ with ctx.socket(zmq.PULL) as b:
+ b.connect(url)
+ msg = b'hi'
+ a.send(msg)
+ rcvd = self.recv(b)
+ self.assertEqual(rcvd, msg)
+ self.assertEqual(b.closed, True)
+ self.assertEqual(a.closed, True)
+ self.assertEqual(ctx.closed, True)
+
+ def test_dir(self):
+ ctx = self.Context()
+ s = ctx.socket(zmq.PUB)
+ self.assertTrue('send' in dir(s))
+ self.assertTrue('IDENTITY' in dir(s))
+ self.assertTrue('AFFINITY' in dir(s))
+ self.assertTrue('FD' in dir(s))
+ s.close()
+ ctx.term()
+
+ def test_bind_unicode(self):
+ s = self.socket(zmq.PUB)
+ p = s.bind_to_random_port(unicode("tcp://*"))
+
+ def test_connect_unicode(self):
+ s = self.socket(zmq.PUB)
+ s.connect(unicode("tcp://127.0.0.1:5555"))
+
+ def test_bind_to_random_port(self):
+ # Check that bind_to_random_port does not hide useful exceptions
+ ctx = self.Context()
+ c = ctx.socket(zmq.PUB)
+ # Invalid format
+ try:
+ c.bind_to_random_port('tcp:*')
+ except zmq.ZMQError as e:
+ self.assertEqual(e.errno, zmq.EINVAL)
+ # Invalid protocol
+ try:
+ c.bind_to_random_port('rand://*')
+ except zmq.ZMQError as e:
+ self.assertEqual(e.errno, zmq.EPROTONOSUPPORT)
+
+ def test_identity(self):
+ s = self.context.socket(zmq.PULL)
+ self.sockets.append(s)
+ ident = b'identity\0\0'
+ s.identity = ident
+ self.assertEqual(s.get(zmq.IDENTITY), ident)
+
+ def test_unicode_sockopts(self):
+ """test setting/getting sockopts with unicode strings"""
+ topic = "tést"
+ if str is not unicode:
+ topic = topic.decode('utf8')
+ p,s = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ self.assertEqual(s.send_unicode, s.send_unicode)
+ self.assertEqual(p.recv_unicode, p.recv_unicode)
+ self.assertRaises(TypeError, s.setsockopt, zmq.SUBSCRIBE, topic)
+ self.assertRaises(TypeError, s.setsockopt, zmq.IDENTITY, topic)
+ s.setsockopt_unicode(zmq.IDENTITY, topic, 'utf16')
+ self.assertRaises(TypeError, s.setsockopt, zmq.AFFINITY, topic)
+ s.setsockopt_unicode(zmq.SUBSCRIBE, topic)
+ self.assertRaises(TypeError, s.getsockopt_unicode, zmq.AFFINITY)
+ self.assertRaisesErrno(zmq.EINVAL, s.getsockopt_unicode, zmq.SUBSCRIBE)
+
+ identb = s.getsockopt(zmq.IDENTITY)
+ identu = identb.decode('utf16')
+ identu2 = s.getsockopt_unicode(zmq.IDENTITY, 'utf16')
+ self.assertEqual(identu, identu2)
+ time.sleep(0.1) # wait for connection/subscription
+ p.send_unicode(topic,zmq.SNDMORE)
+ p.send_unicode(topic*2, encoding='latin-1')
+ self.assertEqual(topic, s.recv_unicode())
+ self.assertEqual(topic*2, s.recv_unicode(encoding='latin-1'))
+
+ def test_int_sockopts(self):
+ "test integer sockopts"
+ v = zmq.zmq_version_info()
+ if v < (3,0):
+ default_hwm = 0
+ else:
+ default_hwm = 1000
+ p,s = self.create_bound_pair(zmq.PUB, zmq.SUB)
+ p.setsockopt(zmq.LINGER, 0)
+ self.assertEqual(p.getsockopt(zmq.LINGER), 0)
+ p.setsockopt(zmq.LINGER, -1)
+ self.assertEqual(p.getsockopt(zmq.LINGER), -1)
+ self.assertEqual(p.hwm, default_hwm)
+ p.hwm = 11
+ self.assertEqual(p.hwm, 11)
+ # p.setsockopt(zmq.EVENTS, zmq.POLLIN)
+ self.assertEqual(p.getsockopt(zmq.EVENTS), zmq.POLLOUT)
+ self.assertRaisesErrno(zmq.EINVAL, p.setsockopt,zmq.EVENTS, 2**7-1)
+ self.assertEqual(p.getsockopt(zmq.TYPE), p.socket_type)
+ self.assertEqual(p.getsockopt(zmq.TYPE), zmq.PUB)
+ self.assertEqual(s.getsockopt(zmq.TYPE), s.socket_type)
+ self.assertEqual(s.getsockopt(zmq.TYPE), zmq.SUB)
+
+ # check for overflow / wrong type:
+ errors = []
+ backref = {}
+ constants = zmq.constants
+ for name in constants.__all__:
+ value = getattr(constants, name)
+ if isinstance(value, int):
+ backref[value] = name
+ for opt in zmq.constants.int_sockopts.union(zmq.constants.int64_sockopts):
+ sopt = backref[opt]
+ if sopt.startswith((
+ 'ROUTER', 'XPUB', 'TCP', 'FAIL',
+ 'REQ_', 'CURVE_', 'PROBE_ROUTER',
+ 'IPC_FILTER', 'GSSAPI',
+ )):
+ # some sockopts are write-only
+ continue
+ try:
+ n = p.getsockopt(opt)
+ except zmq.ZMQError as e:
+ errors.append("getsockopt(zmq.%s) raised '%s'."%(sopt, e))
+ else:
+ if n > 2**31:
+ errors.append("getsockopt(zmq.%s) returned a ridiculous value."
+ " It is probably the wrong type."%sopt)
+ if errors:
+ self.fail('\n'.join([''] + errors))
+
+ def test_bad_sockopts(self):
+ """Test that appropriate errors are raised on bad socket options"""
+ s = self.context.socket(zmq.PUB)
+ self.sockets.append(s)
+ s.setsockopt(zmq.LINGER, 0)
+ # unrecognized int sockopts pass through to libzmq, and should raise EINVAL
+ self.assertRaisesErrno(zmq.EINVAL, s.setsockopt, 9999, 5)
+ self.assertRaisesErrno(zmq.EINVAL, s.getsockopt, 9999)
+ # but only int sockopts are allowed through this way, otherwise raise a TypeError
+ self.assertRaises(TypeError, s.setsockopt, 9999, b"5")
+ # some sockopts are valid in general, but not on every socket:
+ self.assertRaisesErrno(zmq.EINVAL, s.setsockopt, zmq.SUBSCRIBE, b'hi')
+
+ def test_sockopt_roundtrip(self):
+ "test set/getsockopt roundtrip."
+ p = self.context.socket(zmq.PUB)
+ self.sockets.append(p)
+ p.setsockopt(zmq.LINGER, 11)
+ self.assertEqual(p.getsockopt(zmq.LINGER), 11)
+
+ def test_send_unicode(self):
+ "test sending unicode objects"
+ a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
+ self.sockets.extend([a,b])
+ u = "çπ§"
+ if str is not unicode:
+ u = u.decode('utf8')
+ self.assertRaises(TypeError, a.send, u,copy=False)
+ self.assertRaises(TypeError, a.send, u,copy=True)
+ a.send_unicode(u)
+ s = b.recv()
+ self.assertEqual(s,u.encode('utf8'))
+ self.assertEqual(s.decode('utf8'),u)
+ a.send_unicode(u,encoding='utf16')
+ s = b.recv_unicode(encoding='utf16')
+ self.assertEqual(s,u)
+
+ @skip_pypy
+ def test_tracker(self):
+ "test the MessageTracker object for tracking when zmq is done with a buffer"
+ addr = 'tcp://127.0.0.1'
+ a = self.context.socket(zmq.PUB)
+ port = a.bind_to_random_port(addr)
+ a.close()
+ iface = "%s:%i"%(addr,port)
+ a = self.context.socket(zmq.PAIR)
+ # a.setsockopt(zmq.IDENTITY, b"a")
+ b = self.context.socket(zmq.PAIR)
+ self.sockets.extend([a,b])
+ a.connect(iface)
+ time.sleep(0.1)
+ p1 = a.send(b'something', copy=False, track=True)
+ self.assertTrue(isinstance(p1, zmq.MessageTracker))
+ self.assertFalse(p1.done)
+ p2 = a.send_multipart([b'something', b'else'], copy=False, track=True)
+ self.assert_(isinstance(p2, zmq.MessageTracker))
+ self.assertEqual(p2.done, False)
+ self.assertEqual(p1.done, False)
+
+ b.bind(iface)
+ msg = b.recv_multipart()
+ for i in range(10):
+ if p1.done:
+ break
+ time.sleep(0.1)
+ self.assertEqual(p1.done, True)
+ self.assertEqual(msg, [b'something'])
+ msg = b.recv_multipart()
+ for i in range(10):
+ if p2.done:
+ break
+ time.sleep(0.1)
+ self.assertEqual(p2.done, True)
+ self.assertEqual(msg, [b'something', b'else'])
+ m = zmq.Frame(b"again", track=True)
+ self.assertEqual(m.tracker.done, False)
+ p1 = a.send(m, copy=False)
+ p2 = a.send(m, copy=False)
+ self.assertEqual(m.tracker.done, False)
+ self.assertEqual(p1.done, False)
+ self.assertEqual(p2.done, False)
+ msg = b.recv_multipart()
+ self.assertEqual(m.tracker.done, False)
+ self.assertEqual(msg, [b'again'])
+ msg = b.recv_multipart()
+ self.assertEqual(m.tracker.done, False)
+ self.assertEqual(msg, [b'again'])
+ self.assertEqual(p1.done, False)
+ self.assertEqual(p2.done, False)
+ pm = m.tracker
+ del m
+ for i in range(10):
+ if p1.done:
+ break
+ time.sleep(0.1)
+ self.assertEqual(p1.done, True)
+ self.assertEqual(p2.done, True)
+ m = zmq.Frame(b'something', track=False)
+ self.assertRaises(ValueError, a.send, m, copy=False, track=True)
+
+
+ def test_close(self):
+ ctx = self.Context()
+ s = ctx.socket(zmq.PUB)
+ s.close()
+ self.assertRaisesErrno(zmq.ENOTSOCK, s.bind, b'')
+ self.assertRaisesErrno(zmq.ENOTSOCK, s.connect, b'')
+ self.assertRaisesErrno(zmq.ENOTSOCK, s.setsockopt, zmq.SUBSCRIBE, b'')
+ self.assertRaisesErrno(zmq.ENOTSOCK, s.send, b'asdf')
+ self.assertRaisesErrno(zmq.ENOTSOCK, s.recv)
+ del ctx
+
+ def test_attr(self):
+ """set setting/getting sockopts as attributes"""
+ s = self.context.socket(zmq.DEALER)
+ self.sockets.append(s)
+ linger = 10
+ s.linger = linger
+ self.assertEqual(linger, s.linger)
+ self.assertEqual(linger, s.getsockopt(zmq.LINGER))
+ self.assertEqual(s.fd, s.getsockopt(zmq.FD))
+
+ def test_bad_attr(self):
+ s = self.context.socket(zmq.DEALER)
+ self.sockets.append(s)
+ try:
+ s.apple='foo'
+ except AttributeError:
+ pass
+ else:
+ self.fail("bad setattr should have raised AttributeError")
+ try:
+ s.apple
+ except AttributeError:
+ pass
+ else:
+ self.fail("bad getattr should have raised AttributeError")
+
+ def test_subclass(self):
+ """subclasses can assign attributes"""
+ class S(zmq.Socket):
+ a = None
+ def __init__(self, *a, **kw):
+ self.a=-1
+ super(S, self).__init__(*a, **kw)
+
+ s = S(self.context, zmq.REP)
+ self.sockets.append(s)
+ self.assertEqual(s.a, -1)
+ s.a=1
+ self.assertEqual(s.a, 1)
+ a=s.a
+ self.assertEqual(a, 1)
+
+ def test_recv_multipart(self):
+ a,b = self.create_bound_pair()
+ msg = b'hi'
+ for i in range(3):
+ a.send(msg)
+ time.sleep(0.1)
+ for i in range(3):
+ self.assertEqual(b.recv_multipart(), [msg])
+
+ def test_close_after_destroy(self):
+ """s.close() after ctx.destroy() should be fine"""
+ ctx = self.Context()
+ s = ctx.socket(zmq.REP)
+ ctx.destroy()
+ # reaper is not instantaneous
+ time.sleep(1e-2)
+ s.close()
+ self.assertTrue(s.closed)
+
+ def test_poll(self):
+ a,b = self.create_bound_pair()
+ tic = time.time()
+ evt = a.poll(50)
+ self.assertEqual(evt, 0)
+ evt = a.poll(50, zmq.POLLOUT)
+ self.assertEqual(evt, zmq.POLLOUT)
+ msg = b'hi'
+ a.send(msg)
+ evt = b.poll(50)
+ self.assertEqual(evt, zmq.POLLIN)
+ msg2 = self.recv(b)
+ evt = b.poll(50)
+ self.assertEqual(evt, 0)
+ self.assertEqual(msg2, msg)
+
+ def test_ipc_path_max_length(self):
+ """IPC_PATH_MAX_LEN is a sensible value"""
+ if zmq.IPC_PATH_MAX_LEN == 0:
+ raise SkipTest("IPC_PATH_MAX_LEN undefined")
+
+ msg = "Surprising value for IPC_PATH_MAX_LEN: %s" % zmq.IPC_PATH_MAX_LEN
+ self.assertTrue(zmq.IPC_PATH_MAX_LEN > 30, msg)
+ self.assertTrue(zmq.IPC_PATH_MAX_LEN < 1025, msg)
+
+ def test_ipc_path_max_length_msg(self):
+ if zmq.IPC_PATH_MAX_LEN == 0:
+ raise SkipTest("IPC_PATH_MAX_LEN undefined")
+
+ s = self.context.socket(zmq.PUB)
+ self.sockets.append(s)
+ try:
+ s.bind('ipc://{0}'.format('a' * (zmq.IPC_PATH_MAX_LEN + 1)))
+ except zmq.ZMQError as e:
+ self.assertTrue(str(zmq.IPC_PATH_MAX_LEN) in e.strerror)
+
+ def test_hwm(self):
+ zmq3 = zmq.zmq_version_info()[0] >= 3
+ for stype in (zmq.PUB, zmq.ROUTER, zmq.SUB, zmq.REQ, zmq.DEALER):
+ s = self.context.socket(stype)
+ s.hwm = 100
+ self.assertEqual(s.hwm, 100)
+ if zmq3:
+ try:
+ self.assertEqual(s.sndhwm, 100)
+ except AttributeError:
+ pass
+ try:
+ self.assertEqual(s.rcvhwm, 100)
+ except AttributeError:
+ pass
+ s.close()
+
+ def test_shadow(self):
+ p = self.socket(zmq.PUSH)
+ p.bind("tcp://127.0.0.1:5555")
+ p2 = zmq.Socket.shadow(p.underlying)
+ self.assertEqual(p.underlying, p2.underlying)
+ s = self.socket(zmq.PULL)
+ s2 = zmq.Socket.shadow(s.underlying)
+ self.assertNotEqual(s.underlying, p.underlying)
+ self.assertEqual(s.underlying, s2.underlying)
+ s2.connect("tcp://127.0.0.1:5555")
+ sent = b'hi'
+ p2.send(sent)
+ rcvd = self.recv(s2)
+ self.assertEqual(rcvd, sent)
+
+ def test_shadow_pyczmq(self):
+ try:
+ from pyczmq import zctx, zsocket
+ except Exception:
+ raise SkipTest("Requires pyczmq")
+
+ ctx = zctx.new()
+ ca = zsocket.new(ctx, zmq.PUSH)
+ cb = zsocket.new(ctx, zmq.PULL)
+ a = zmq.Socket.shadow(ca)
+ b = zmq.Socket.shadow(cb)
+ a.bind("inproc://a")
+ b.connect("inproc://a")
+ a.send(b'hi')
+ rcvd = self.recv(b)
+ self.assertEqual(rcvd, b'hi')
+
+
+if have_gevent:
+ import gevent
+
+ class TestSocketGreen(GreenTest, TestSocket):
+ test_bad_attr = GreenTest.skip_green
+ test_close_after_destroy = GreenTest.skip_green
+
+ def test_timeout(self):
+ a,b = self.create_bound_pair()
+ g = gevent.spawn_later(0.5, lambda: a.send(b'hi'))
+ timeout = gevent.Timeout(0.1)
+ timeout.start()
+ self.assertRaises(gevent.Timeout, b.recv)
+ g.kill()
+
+ @skip_if(not hasattr(zmq, 'RCVTIMEO'))
+ def test_warn_set_timeo(self):
+ s = self.context.socket(zmq.REQ)
+ with warnings.catch_warnings(record=True) as w:
+ s.rcvtimeo = 5
+ s.close()
+ self.assertEqual(len(w), 1)
+ self.assertEqual(w[0].category, UserWarning)
+
+
+ @skip_if(not hasattr(zmq, 'SNDTIMEO'))
+ def test_warn_get_timeo(self):
+ s = self.context.socket(zmq.REQ)
+ with warnings.catch_warnings(record=True) as w:
+ s.sndtimeo
+ s.close()
+ self.assertEqual(len(w), 1)
+ self.assertEqual(w[0].category, UserWarning)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_stopwatch.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_stopwatch.py
new file mode 100644
index 00000000..49fb79f2
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_stopwatch.py
@@ -0,0 +1,42 @@
+# -*- coding: utf8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import sys
+import time
+
+from unittest import TestCase
+
+from zmq import Stopwatch, ZMQError
+
+if sys.version_info[0] >= 3:
+ long = int
+
+class TestStopWatch(TestCase):
+
+ def test_stop_long(self):
+ """Ensure stop returns a long int."""
+ watch = Stopwatch()
+ watch.start()
+ us = watch.stop()
+ self.assertTrue(isinstance(us, long))
+
+ def test_stop_microseconds(self):
+ """Test that stop/sleep have right units."""
+ watch = Stopwatch()
+ watch.start()
+ tic = time.time()
+ watch.sleep(1)
+ us = watch.stop()
+ toc = time.time()
+ self.assertAlmostEqual(us/1e6,(toc-tic),places=0)
+
+ def test_double_stop(self):
+ """Test error raised on multiple calls to stop."""
+ watch = Stopwatch()
+ watch.start()
+ watch.stop()
+ self.assertRaises(ZMQError, watch.stop)
+ self.assertRaises(ZMQError, watch.stop)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_version.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_version.py
new file mode 100644
index 00000000..6ebebf30
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_version.py
@@ -0,0 +1,44 @@
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from unittest import TestCase
+import zmq
+from zmq.sugar import version
+
+
+class TestVersion(TestCase):
+
+ def test_pyzmq_version(self):
+ vs = zmq.pyzmq_version()
+ vs2 = zmq.__version__
+ self.assertTrue(isinstance(vs, str))
+ if zmq.__revision__:
+ self.assertEqual(vs, '@'.join([vs2, zmq.__revision__]))
+ else:
+ self.assertEqual(vs, vs2)
+ if version.VERSION_EXTRA:
+ self.assertTrue(version.VERSION_EXTRA in vs)
+ self.assertTrue(version.VERSION_EXTRA in vs2)
+
+ def test_pyzmq_version_info(self):
+ info = zmq.pyzmq_version_info()
+ self.assertTrue(isinstance(info, tuple))
+ for n in info[:3]:
+ self.assertTrue(isinstance(n, int))
+ if version.VERSION_EXTRA:
+ self.assertEqual(len(info), 4)
+ self.assertEqual(info[-1], float('inf'))
+ else:
+ self.assertEqual(len(info), 3)
+
+ def test_zmq_version_info(self):
+ info = zmq.zmq_version_info()
+ self.assertTrue(isinstance(info, tuple))
+ for n in info[:3]:
+ self.assertTrue(isinstance(n, int))
+
+ def test_zmq_version(self):
+ v = zmq.zmq_version()
+ self.assertTrue(isinstance(v, str))
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_win32_shim.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_win32_shim.py
new file mode 100644
index 00000000..55657bda
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_win32_shim.py
@@ -0,0 +1,56 @@
+from __future__ import print_function
+
+import os
+
+from functools import wraps
+from zmq.tests import BaseZMQTestCase
+from zmq.utils.win32 import allow_interrupt
+
+
+def count_calls(f):
+ @wraps(f)
+ def _(*args, **kwds):
+ try:
+ return f(*args, **kwds)
+ finally:
+ _.__calls__ += 1
+ _.__calls__ = 0
+ return _
+
+
+class TestWindowsConsoleControlHandler(BaseZMQTestCase):
+
+ def test_handler(self):
+ @count_calls
+ def interrupt_polling():
+ print('Caught CTRL-C!')
+
+ if os.name == 'nt':
+ from ctypes import windll
+ from ctypes.wintypes import BOOL, DWORD
+
+ kernel32 = windll.LoadLibrary('kernel32')
+
+ # <http://msdn.microsoft.com/en-us/library/ms683155.aspx>
+ GenerateConsoleCtrlEvent = kernel32.GenerateConsoleCtrlEvent
+ GenerateConsoleCtrlEvent.argtypes = (DWORD, DWORD)
+ GenerateConsoleCtrlEvent.restype = BOOL
+
+ try:
+ # Simulate CTRL-C event while handler is active.
+ with allow_interrupt(interrupt_polling):
+ result = GenerateConsoleCtrlEvent(0, 0)
+ if result == 0:
+ raise WindowsError
+ except KeyboardInterrupt:
+ pass
+ else:
+ self.fail('Expecting `KeyboardInterrupt` exception!')
+
+ # Make sure our handler was called.
+ self.assertEqual(interrupt_polling.__calls__, 1)
+ else:
+ # On non-Windows systems, this utility is just a no-op!
+ with allow_interrupt(interrupt_polling):
+ pass
+ self.assertEqual(interrupt_polling.__calls__, 0)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_z85.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_z85.py
new file mode 100644
index 00000000..8a73cb4d
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_z85.py
@@ -0,0 +1,63 @@
+# -*- coding: utf8 -*-
+"""Test Z85 encoding
+
+confirm values and roundtrip with test values from the reference implementation.
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from unittest import TestCase
+from zmq.utils import z85
+
+
+class TestZ85(TestCase):
+
+ def test_client_public(self):
+ client_public = \
+ b"\xBB\x88\x47\x1D\x65\xE2\x65\x9B" \
+ b"\x30\xC5\x5A\x53\x21\xCE\xBB\x5A" \
+ b"\xAB\x2B\x70\xA3\x98\x64\x5C\x26" \
+ b"\xDC\xA2\xB2\xFC\xB4\x3F\xC5\x18"
+ encoded = z85.encode(client_public)
+
+ self.assertEqual(encoded, b"Yne@$w-vo<fVvi]a<NY6T1ed:M$fCG*[IaLV{hID")
+ decoded = z85.decode(encoded)
+ self.assertEqual(decoded, client_public)
+
+ def test_client_secret(self):
+ client_secret = \
+ b"\x7B\xB8\x64\xB4\x89\xAF\xA3\x67" \
+ b"\x1F\xBE\x69\x10\x1F\x94\xB3\x89" \
+ b"\x72\xF2\x48\x16\xDF\xB0\x1B\x51" \
+ b"\x65\x6B\x3F\xEC\x8D\xFD\x08\x88"
+ encoded = z85.encode(client_secret)
+
+ self.assertEqual(encoded, b"D:)Q[IlAW!ahhC2ac:9*A}h:p?([4%wOTJ%JR%cs")
+ decoded = z85.decode(encoded)
+ self.assertEqual(decoded, client_secret)
+
+ def test_server_public(self):
+ server_public = \
+ b"\x54\xFC\xBA\x24\xE9\x32\x49\x96" \
+ b"\x93\x16\xFB\x61\x7C\x87\x2B\xB0" \
+ b"\xC1\xD1\xFF\x14\x80\x04\x27\xC5" \
+ b"\x94\xCB\xFA\xCF\x1B\xC2\xD6\x52"
+ encoded = z85.encode(server_public)
+
+ self.assertEqual(encoded, b"rq:rM>}U?@Lns47E1%kR.o@n%FcmmsL/@{H8]yf7")
+ decoded = z85.decode(encoded)
+ self.assertEqual(decoded, server_public)
+
+ def test_server_secret(self):
+ server_secret = \
+ b"\x8E\x0B\xDD\x69\x76\x28\xB9\x1D" \
+ b"\x8F\x24\x55\x87\xEE\x95\xC5\xB0" \
+ b"\x4D\x48\x96\x3F\x79\x25\x98\x77" \
+ b"\xB4\x9C\xD9\x06\x3A\xEA\xD3\xB7"
+ encoded = z85.encode(server_secret)
+
+ self.assertEqual(encoded, b"JTKVSB%%)wK0E.X)V>+}o?pNmC{O&4W4b!Ni{Lh6")
+ decoded = z85.decode(encoded)
+ self.assertEqual(decoded, server_secret)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_zmqstream.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_zmqstream.py
new file mode 100644
index 00000000..cdb3a171
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/tests/test_zmqstream.py
@@ -0,0 +1,34 @@
+# -*- coding: utf8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import sys
+import time
+
+from unittest import TestCase
+
+import zmq
+from zmq.eventloop import ioloop, zmqstream
+
+class TestZMQStream(TestCase):
+
+ def setUp(self):
+ self.context = zmq.Context()
+ self.socket = self.context.socket(zmq.REP)
+ self.loop = ioloop.IOLoop.instance()
+ self.stream = zmqstream.ZMQStream(self.socket)
+
+ def tearDown(self):
+ self.socket.close()
+ self.context.term()
+
+ def test_callable_check(self):
+ """Ensure callable check works (py3k)."""
+
+ self.stream.on_send(lambda *args: None)
+ self.stream.on_recv(lambda *args: None)
+ self.assertRaises(AssertionError, self.stream.on_recv, 1)
+ self.assertRaises(AssertionError, self.stream.on_send, 1)
+ self.assertRaises(AssertionError, self.stream.on_recv, zmq)
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/__init__.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/__init__.py
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/buffers.pxd b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/buffers.pxd
new file mode 100644
index 00000000..998aa551
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/buffers.pxd
@@ -0,0 +1,313 @@
+"""Python version-independent methods for C/Python buffers.
+
+This file was copied and adapted from mpi4py.
+
+Authors
+-------
+* MinRK
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2010 Lisandro Dalcin
+# All rights reserved.
+# Used under BSD License: http://www.opensource.org/licenses/bsd-license.php
+#
+# Retrieval:
+# Jul 23, 2010 18:00 PST (r539)
+# http://code.google.com/p/mpi4py/source/browse/trunk/src/MPI/asbuffer.pxi
+#
+# Modifications from original:
+# Copyright (c) 2010-2012 Brian Granger, Min Ragan-Kelley
+#
+# Distributed under the terms of the New BSD License. The full license is in
+# the file COPYING.BSD, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+
+#-----------------------------------------------------------------------------
+# Python includes.
+#-----------------------------------------------------------------------------
+
+# get version-independent aliases:
+cdef extern from "pyversion_compat.h":
+ pass
+
+# Python 3 buffer interface (PEP 3118)
+cdef extern from "Python.h":
+ int PY_MAJOR_VERSION
+ int PY_MINOR_VERSION
+ ctypedef int Py_ssize_t
+ ctypedef struct PyMemoryViewObject:
+ pass
+ ctypedef struct Py_buffer:
+ void *buf
+ Py_ssize_t len
+ int readonly
+ char *format
+ int ndim
+ Py_ssize_t *shape
+ Py_ssize_t *strides
+ Py_ssize_t *suboffsets
+ Py_ssize_t itemsize
+ void *internal
+ cdef enum:
+ PyBUF_SIMPLE
+ PyBUF_WRITABLE
+ PyBUF_FORMAT
+ PyBUF_ANY_CONTIGUOUS
+ int PyObject_CheckBuffer(object)
+ int PyObject_GetBuffer(object, Py_buffer *, int) except -1
+ void PyBuffer_Release(Py_buffer *)
+
+ int PyBuffer_FillInfo(Py_buffer *view, object obj, void *buf,
+ Py_ssize_t len, int readonly, int infoflags) except -1
+ object PyMemoryView_FromBuffer(Py_buffer *info)
+
+ object PyMemoryView_FromObject(object)
+
+# Python 2 buffer interface (legacy)
+cdef extern from "Python.h":
+ ctypedef void const_void "const void"
+ Py_ssize_t Py_END_OF_BUFFER
+ int PyObject_CheckReadBuffer(object)
+ int PyObject_AsReadBuffer (object, const_void **, Py_ssize_t *) except -1
+ int PyObject_AsWriteBuffer(object, void **, Py_ssize_t *) except -1
+
+ object PyBuffer_FromMemory(void *ptr, Py_ssize_t s)
+ object PyBuffer_FromReadWriteMemory(void *ptr, Py_ssize_t s)
+
+ object PyBuffer_FromObject(object, Py_ssize_t offset, Py_ssize_t size)
+ object PyBuffer_FromReadWriteObject(object, Py_ssize_t offset, Py_ssize_t size)
+
+
+#-----------------------------------------------------------------------------
+# asbuffer: C buffer from python object
+#-----------------------------------------------------------------------------
+
+
+cdef inline int memoryview_available():
+ return PY_MAJOR_VERSION >= 3 or (PY_MAJOR_VERSION >=2 and PY_MINOR_VERSION >= 7)
+
+cdef inline int oldstyle_available():
+ return PY_MAJOR_VERSION < 3
+
+
+cdef inline int check_buffer(object ob):
+ """Version independent check for whether an object is a buffer.
+
+ Parameters
+ ----------
+ object : object
+ Any Python object
+
+ Returns
+ -------
+ int : 0 if no buffer interface, 3 if newstyle buffer interface, 2 if oldstyle.
+ """
+ if PyObject_CheckBuffer(ob):
+ return 3
+ if oldstyle_available():
+ return PyObject_CheckReadBuffer(ob) and 2
+ return 0
+
+
+cdef inline object asbuffer(object ob, int writable, int format,
+ void **base, Py_ssize_t *size,
+ Py_ssize_t *itemsize):
+ """Turn an object into a C buffer in a Python version-independent way.
+
+ Parameters
+ ----------
+ ob : object
+ The object to be turned into a buffer.
+ Must provide a Python Buffer interface
+ writable : int
+ Whether the resulting buffer should be allowed to write
+ to the object.
+ format : int
+ The format of the buffer. See Python buffer docs.
+ base : void **
+ The pointer that will be used to store the resulting C buffer.
+ size : Py_ssize_t *
+ The size of the buffer(s).
+ itemsize : Py_ssize_t *
+ The size of an item, if the buffer is non-contiguous.
+
+ Returns
+ -------
+ An object describing the buffer format. Generally a str, such as 'B'.
+ """
+
+ cdef void *bptr = NULL
+ cdef Py_ssize_t blen = 0, bitemlen = 0
+ cdef Py_buffer view
+ cdef int flags = PyBUF_SIMPLE
+ cdef int mode = 0
+
+ bfmt = None
+
+ mode = check_buffer(ob)
+ if mode == 0:
+ raise TypeError("%r does not provide a buffer interface."%ob)
+
+ if mode == 3:
+ flags = PyBUF_ANY_CONTIGUOUS
+ if writable:
+ flags |= PyBUF_WRITABLE
+ if format:
+ flags |= PyBUF_FORMAT
+ PyObject_GetBuffer(ob, &view, flags)
+ bptr = view.buf
+ blen = view.len
+ if format:
+ if view.format != NULL:
+ bfmt = view.format
+ bitemlen = view.itemsize
+ PyBuffer_Release(&view)
+ else: # oldstyle
+ if writable:
+ PyObject_AsWriteBuffer(ob, &bptr, &blen)
+ else:
+ PyObject_AsReadBuffer(ob, <const_void **>&bptr, &blen)
+ if format:
+ try: # numpy.ndarray
+ dtype = ob.dtype
+ bfmt = dtype.char
+ bitemlen = dtype.itemsize
+ except AttributeError:
+ try: # array.array
+ bfmt = ob.typecode
+ bitemlen = ob.itemsize
+ except AttributeError:
+ if isinstance(ob, bytes):
+ bfmt = b"B"
+ bitemlen = 1
+ else:
+ # nothing found
+ bfmt = None
+ bitemlen = 0
+ if base: base[0] = <void *>bptr
+ if size: size[0] = <Py_ssize_t>blen
+ if itemsize: itemsize[0] = <Py_ssize_t>bitemlen
+
+ if PY_MAJOR_VERSION >= 3 and bfmt is not None:
+ return bfmt.decode('ascii')
+ return bfmt
+
+
+cdef inline object asbuffer_r(object ob, void **base, Py_ssize_t *size):
+ """Wrapper for standard calls to asbuffer with a readonly buffer."""
+ asbuffer(ob, 0, 0, base, size, NULL)
+ return ob
+
+
+cdef inline object asbuffer_w(object ob, void **base, Py_ssize_t *size):
+ """Wrapper for standard calls to asbuffer with a writable buffer."""
+ asbuffer(ob, 1, 0, base, size, NULL)
+ return ob
+
+#------------------------------------------------------------------------------
+# frombuffer: python buffer/view from C buffer
+#------------------------------------------------------------------------------
+
+
+cdef inline object frombuffer_3(void *ptr, Py_ssize_t s, int readonly):
+ """Python 3 version of frombuffer.
+
+ This is the Python 3 model, but will work on Python >= 2.6. Currently,
+ we use it only on >= 3.0.
+ """
+ cdef Py_buffer pybuf
+ cdef Py_ssize_t *shape = [s]
+ cdef str astr=""
+ PyBuffer_FillInfo(&pybuf, astr, ptr, s, readonly, PyBUF_SIMPLE)
+ pybuf.format = "B"
+ pybuf.shape = shape
+ return PyMemoryView_FromBuffer(&pybuf)
+
+
+cdef inline object frombuffer_2(void *ptr, Py_ssize_t s, int readonly):
+ """Python 2 version of frombuffer.
+
+ This must be used for Python <= 2.6, but we use it for all Python < 3.
+ """
+
+ if oldstyle_available():
+ if readonly:
+ return PyBuffer_FromMemory(ptr, s)
+ else:
+ return PyBuffer_FromReadWriteMemory(ptr, s)
+ else:
+ raise NotImplementedError("Old style buffers not available.")
+
+
+cdef inline object frombuffer(void *ptr, Py_ssize_t s, int readonly):
+ """Create a Python Buffer/View of a C array.
+
+ Parameters
+ ----------
+ ptr : void *
+ Pointer to the array to be copied.
+ s : size_t
+ Length of the buffer.
+ readonly : int
+ whether the resulting object should be allowed to write to the buffer.
+
+ Returns
+ -------
+ Python Buffer/View of the C buffer.
+ """
+ # oldstyle first priority for now
+ if oldstyle_available():
+ return frombuffer_2(ptr, s, readonly)
+ else:
+ return frombuffer_3(ptr, s, readonly)
+
+
+cdef inline object frombuffer_r(void *ptr, Py_ssize_t s):
+ """Wrapper for readonly view frombuffer."""
+ return frombuffer(ptr, s, 1)
+
+
+cdef inline object frombuffer_w(void *ptr, Py_ssize_t s):
+ """Wrapper for writable view frombuffer."""
+ return frombuffer(ptr, s, 0)
+
+#------------------------------------------------------------------------------
+# viewfromobject: python buffer/view from python object, refcounts intact
+# frombuffer(asbuffer(obj)) would lose track of refs
+#------------------------------------------------------------------------------
+
+cdef inline object viewfromobject(object obj, int readonly):
+ """Construct a Python Buffer/View object from another Python object.
+
+ This works in a Python version independent manner.
+
+ Parameters
+ ----------
+ obj : object
+ The input object to be cast as a buffer
+ readonly : int
+ Whether the result should be prevented from overwriting the original.
+
+ Returns
+ -------
+ Buffer/View of the original object.
+ """
+ if not memoryview_available():
+ if readonly:
+ return PyBuffer_FromObject(obj, 0, Py_END_OF_BUFFER)
+ else:
+ return PyBuffer_FromReadWriteObject(obj, 0, Py_END_OF_BUFFER)
+ else:
+ return PyMemoryView_FromObject(obj)
+
+
+cdef inline object viewfromobject_r(object obj):
+ """Wrapper for readonly viewfromobject."""
+ return viewfromobject(obj, 1)
+
+
+cdef inline object viewfromobject_w(object obj):
+ """Wrapper for writable viewfromobject."""
+ return viewfromobject(obj, 0)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/compiler.json b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/compiler.json
new file mode 100644
index 00000000..740f9d4c
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/compiler.json
@@ -0,0 +1,19 @@
+{
+ "include_dirs": [
+ "/tmp/zmq/zmq-bin/include",
+ "zmq/utils",
+ "zmq/backend/cython",
+ "zmq/devices"
+ ],
+ "extra_link_args": [],
+ "runtime_library_dirs": [
+ "/tmp/zmq/zmq-bin/lib"
+ ],
+ "libraries": [
+ "zmq"
+ ],
+ "define_macros": [],
+ "library_dirs": [
+ "/tmp/zmq/zmq-bin/lib"
+ ]
+} \ No newline at end of file
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/config.json b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/config.json
new file mode 100644
index 00000000..4161014f
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/config.json
@@ -0,0 +1,9 @@
+{
+ "have_sys_un_h": false,
+ "build_ext": {},
+ "zmq_prefix": "/tmp/zmq/zmq-bin",
+ "libzmq_extension": false,
+ "no_libzmq_extension": true,
+ "skip_check_zmq": false,
+ "bdist_egg": {}
+} \ No newline at end of file
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/constant_names.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/constant_names.py
new file mode 100644
index 00000000..47da9dc2
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/constant_names.py
@@ -0,0 +1,365 @@
+"""0MQ Constant names"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+# dictionaries of constants new or removed in particular versions
+
+new_in = {
+ (2,2,0) : [
+ 'RCVTIMEO',
+ 'SNDTIMEO',
+ ],
+ (3,2,2) : [
+ # errnos
+ 'EMSGSIZE',
+ 'EAFNOSUPPORT',
+ 'ENETUNREACH',
+ 'ECONNABORTED',
+ 'ECONNRESET',
+ 'ENOTCONN',
+ 'ETIMEDOUT',
+ 'EHOSTUNREACH',
+ 'ENETRESET',
+
+ # ctx opts
+ 'IO_THREADS',
+ 'MAX_SOCKETS',
+ 'IO_THREADS_DFLT',
+ 'MAX_SOCKETS_DFLT',
+
+ # socket opts
+ 'ROUTER_BEHAVIOR',
+ 'ROUTER_MANDATORY',
+ 'FAIL_UNROUTABLE',
+ 'TCP_KEEPALIVE',
+ 'TCP_KEEPALIVE_CNT',
+ 'TCP_KEEPALIVE_IDLE',
+ 'TCP_KEEPALIVE_INTVL',
+ 'DELAY_ATTACH_ON_CONNECT',
+ 'XPUB_VERBOSE',
+
+ # msg opts
+ 'MORE',
+
+ 'EVENT_CONNECTED',
+ 'EVENT_CONNECT_DELAYED',
+ 'EVENT_CONNECT_RETRIED',
+ 'EVENT_LISTENING',
+ 'EVENT_BIND_FAILED',
+ 'EVENT_ACCEPTED',
+ 'EVENT_ACCEPT_FAILED',
+ 'EVENT_CLOSED',
+ 'EVENT_CLOSE_FAILED',
+ 'EVENT_DISCONNECTED',
+ 'EVENT_ALL',
+ ],
+ (4,0,0) : [
+ # socket types
+ 'STREAM',
+
+ # socket opts
+ 'IMMEDIATE',
+ 'ROUTER_RAW',
+ 'IPV6',
+ 'MECHANISM',
+ 'PLAIN_SERVER',
+ 'PLAIN_USERNAME',
+ 'PLAIN_PASSWORD',
+ 'CURVE_SERVER',
+ 'CURVE_PUBLICKEY',
+ 'CURVE_SECRETKEY',
+ 'CURVE_SERVERKEY',
+ 'PROBE_ROUTER',
+ 'REQ_RELAXED',
+ 'REQ_CORRELATE',
+ 'CONFLATE',
+ 'ZAP_DOMAIN',
+
+ # security
+ 'NULL',
+ 'PLAIN',
+ 'CURVE',
+
+ # events
+ 'EVENT_MONITOR_STOPPED',
+ ],
+ (4,1,0) : [
+ # ctx opts
+ 'SOCKET_LIMIT',
+ 'THREAD_PRIORITY',
+ 'THREAD_PRIORITY_DFLT',
+ 'THREAD_SCHED_POLICY',
+ 'THREAD_SCHED_POLICY_DFLT',
+
+ # socket opts
+ 'ROUTER_HANDOVER',
+ 'TOS',
+ 'IPC_FILTER_PID',
+ 'IPC_FILTER_UID',
+ 'IPC_FILTER_GID',
+ 'CONNECT_RID',
+ 'GSSAPI_SERVER',
+ 'GSSAPI_PRINCIPAL',
+ 'GSSAPI_SERVICE_PRINCIPAL',
+ 'GSSAPI_PLAINTEXT',
+ 'HANDSHAKE_IVL',
+ 'IDENTITY_FD',
+ 'XPUB_NODROP',
+ 'SOCKS_PROXY',
+
+ # msg opts
+ 'SRCFD',
+ 'SHARED',
+
+ # security
+ 'GSSAPI',
+
+ ],
+}
+
+
+removed_in = {
+ (3,2,2) : [
+ 'UPSTREAM',
+ 'DOWNSTREAM',
+
+ 'HWM',
+ 'SWAP',
+ 'MCAST_LOOP',
+ 'RECOVERY_IVL_MSEC',
+ ]
+}
+
+# collections of zmq constant names based on their role
+# base names have no specific use
+# opt names are validated in get/set methods of various objects
+
+base_names = [
+ # base
+ 'VERSION',
+ 'VERSION_MAJOR',
+ 'VERSION_MINOR',
+ 'VERSION_PATCH',
+ 'NOBLOCK',
+ 'DONTWAIT',
+
+ 'POLLIN',
+ 'POLLOUT',
+ 'POLLERR',
+
+ 'SNDMORE',
+
+ 'STREAMER',
+ 'FORWARDER',
+ 'QUEUE',
+
+ 'IO_THREADS_DFLT',
+ 'MAX_SOCKETS_DFLT',
+ 'POLLITEMS_DFLT',
+ 'THREAD_PRIORITY_DFLT',
+ 'THREAD_SCHED_POLICY_DFLT',
+
+ # socktypes
+ 'PAIR',
+ 'PUB',
+ 'SUB',
+ 'REQ',
+ 'REP',
+ 'DEALER',
+ 'ROUTER',
+ 'XREQ',
+ 'XREP',
+ 'PULL',
+ 'PUSH',
+ 'XPUB',
+ 'XSUB',
+ 'UPSTREAM',
+ 'DOWNSTREAM',
+ 'STREAM',
+
+ # events
+ 'EVENT_CONNECTED',
+ 'EVENT_CONNECT_DELAYED',
+ 'EVENT_CONNECT_RETRIED',
+ 'EVENT_LISTENING',
+ 'EVENT_BIND_FAILED',
+ 'EVENT_ACCEPTED',
+ 'EVENT_ACCEPT_FAILED',
+ 'EVENT_CLOSED',
+ 'EVENT_CLOSE_FAILED',
+ 'EVENT_DISCONNECTED',
+ 'EVENT_ALL',
+ 'EVENT_MONITOR_STOPPED',
+
+ # security
+ 'NULL',
+ 'PLAIN',
+ 'CURVE',
+ 'GSSAPI',
+
+ ## ERRNO
+ # Often used (these are also in errno.)
+ 'EAGAIN',
+ 'EINVAL',
+ 'EFAULT',
+ 'ENOMEM',
+ 'ENODEV',
+ 'EMSGSIZE',
+ 'EAFNOSUPPORT',
+ 'ENETUNREACH',
+ 'ECONNABORTED',
+ 'ECONNRESET',
+ 'ENOTCONN',
+ 'ETIMEDOUT',
+ 'EHOSTUNREACH',
+ 'ENETRESET',
+
+ # For Windows compatibility
+ 'HAUSNUMERO',
+ 'ENOTSUP',
+ 'EPROTONOSUPPORT',
+ 'ENOBUFS',
+ 'ENETDOWN',
+ 'EADDRINUSE',
+ 'EADDRNOTAVAIL',
+ 'ECONNREFUSED',
+ 'EINPROGRESS',
+ 'ENOTSOCK',
+
+ # 0MQ Native
+ 'EFSM',
+ 'ENOCOMPATPROTO',
+ 'ETERM',
+ 'EMTHREAD',
+]
+
+int64_sockopt_names = [
+ 'AFFINITY',
+ 'MAXMSGSIZE',
+
+ # sockopts removed in 3.0.0
+ 'HWM',
+ 'SWAP',
+ 'MCAST_LOOP',
+ 'RECOVERY_IVL_MSEC',
+]
+
+bytes_sockopt_names = [
+ 'IDENTITY',
+ 'SUBSCRIBE',
+ 'UNSUBSCRIBE',
+ 'LAST_ENDPOINT',
+ 'TCP_ACCEPT_FILTER',
+
+ 'PLAIN_USERNAME',
+ 'PLAIN_PASSWORD',
+
+ 'CURVE_PUBLICKEY',
+ 'CURVE_SECRETKEY',
+ 'CURVE_SERVERKEY',
+ 'ZAP_DOMAIN',
+ 'CONNECT_RID',
+ 'GSSAPI_PRINCIPAL',
+ 'GSSAPI_SERVICE_PRINCIPAL',
+ 'SOCKS_PROXY',
+]
+
+fd_sockopt_names = [
+ 'FD',
+ 'IDENTITY_FD',
+]
+
+int_sockopt_names = [
+ # sockopts
+ 'RECONNECT_IVL_MAX',
+
+ # sockopts new in 2.2.0
+ 'SNDTIMEO',
+ 'RCVTIMEO',
+
+ # new in 3.x
+ 'SNDHWM',
+ 'RCVHWM',
+ 'MULTICAST_HOPS',
+ 'IPV4ONLY',
+
+ 'ROUTER_BEHAVIOR',
+ 'TCP_KEEPALIVE',
+ 'TCP_KEEPALIVE_CNT',
+ 'TCP_KEEPALIVE_IDLE',
+ 'TCP_KEEPALIVE_INTVL',
+ 'DELAY_ATTACH_ON_CONNECT',
+ 'XPUB_VERBOSE',
+
+ 'EVENTS',
+ 'TYPE',
+ 'LINGER',
+ 'RECONNECT_IVL',
+ 'BACKLOG',
+
+ 'ROUTER_MANDATORY',
+ 'FAIL_UNROUTABLE',
+
+ 'ROUTER_RAW',
+ 'IMMEDIATE',
+ 'IPV6',
+ 'MECHANISM',
+ 'PLAIN_SERVER',
+ 'CURVE_SERVER',
+ 'PROBE_ROUTER',
+ 'REQ_RELAXED',
+ 'REQ_CORRELATE',
+ 'CONFLATE',
+ 'ROUTER_HANDOVER',
+ 'TOS',
+ 'IPC_FILTER_PID',
+ 'IPC_FILTER_UID',
+ 'IPC_FILTER_GID',
+ 'GSSAPI_SERVER',
+ 'GSSAPI_PLAINTEXT',
+ 'HANDSHAKE_IVL',
+ 'XPUB_NODROP',
+]
+
+switched_sockopt_names = [
+ 'RATE',
+ 'RECOVERY_IVL',
+ 'SNDBUF',
+ 'RCVBUF',
+ 'RCVMORE',
+]
+
+ctx_opt_names = [
+ 'IO_THREADS',
+ 'MAX_SOCKETS',
+ 'SOCKET_LIMIT',
+ 'THREAD_PRIORITY',
+ 'THREAD_SCHED_POLICY',
+]
+
+msg_opt_names = [
+ 'MORE',
+ 'SRCFD',
+ 'SHARED',
+]
+
+from itertools import chain
+
+all_names = list(chain(
+ base_names,
+ ctx_opt_names,
+ bytes_sockopt_names,
+ fd_sockopt_names,
+ int_sockopt_names,
+ int64_sockopt_names,
+ switched_sockopt_names,
+ msg_opt_names,
+))
+
+del chain
+
+def no_prefix(name):
+ """does the given constant have a ZMQ_ prefix?"""
+ return name.startswith('E') and not name.startswith('EVENT')
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/garbage.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/garbage.py
new file mode 100644
index 00000000..80a8725a
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/garbage.py
@@ -0,0 +1,180 @@
+"""Garbage collection thread for representing zmq refcount of Python objects
+used in zero-copy sends.
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import atexit
+import struct
+
+from os import getpid
+from collections import namedtuple
+from threading import Thread, Event, Lock
+import warnings
+
+import zmq
+
+
+gcref = namedtuple('gcref', ['obj', 'event'])
+
+class GarbageCollectorThread(Thread):
+ """Thread in which garbage collection actually happens."""
+ def __init__(self, gc):
+ super(GarbageCollectorThread, self).__init__()
+ self.gc = gc
+ self.daemon = True
+ self.pid = getpid()
+ self.ready = Event()
+
+ def run(self):
+ # detect fork at begining of the thread
+ if getpid is None or getpid() != self.pid:
+ self.ready.set()
+ return
+ try:
+ s = self.gc.context.socket(zmq.PULL)
+ s.linger = 0
+ s.bind(self.gc.url)
+ finally:
+ self.ready.set()
+
+ while True:
+ # detect fork
+ if getpid is None or getpid() != self.pid:
+ return
+ msg = s.recv()
+ if msg == b'DIE':
+ break
+ fmt = 'L' if len(msg) == 4 else 'Q'
+ key = struct.unpack(fmt, msg)[0]
+ tup = self.gc.refs.pop(key, None)
+ if tup and tup.event:
+ tup.event.set()
+ del tup
+ s.close()
+
+
+class GarbageCollector(object):
+ """PyZMQ Garbage Collector
+
+ Used for representing the reference held by libzmq during zero-copy sends.
+ This object holds a dictionary, keyed by Python id,
+ of the Python objects whose memory are currently in use by zeromq.
+
+ When zeromq is done with the memory, it sends a message on an inproc PUSH socket
+ containing the packed size_t (32 or 64-bit unsigned int),
+ which is the key in the dict.
+ When the PULL socket in the gc thread receives that message,
+ the reference is popped from the dict,
+ and any tracker events that should be signaled fire.
+ """
+
+ refs = None
+ _context = None
+ _lock = None
+ url = "inproc://pyzmq.gc.01"
+
+ def __init__(self, context=None):
+ super(GarbageCollector, self).__init__()
+ self.refs = {}
+ self.pid = None
+ self.thread = None
+ self._context = context
+ self._lock = Lock()
+ self._stay_down = False
+ atexit.register(self._atexit)
+
+ @property
+ def context(self):
+ if self._context is None:
+ self._context = zmq.Context()
+ return self._context
+
+ @context.setter
+ def context(self, ctx):
+ if self.is_alive():
+ if self.refs:
+ warnings.warn("Replacing gc context while gc is running", RuntimeWarning)
+ self.stop()
+ self._context = ctx
+
+ def _atexit(self):
+ """atexit callback
+
+ sets _stay_down flag so that gc doesn't try to start up again in other atexit handlers
+ """
+ self._stay_down = True
+ self.stop()
+
+ def stop(self):
+ """stop the garbage-collection thread"""
+ if not self.is_alive():
+ return
+ self._stop()
+
+ def _stop(self):
+ push = self.context.socket(zmq.PUSH)
+ push.connect(self.url)
+ push.send(b'DIE')
+ push.close()
+ self.thread.join()
+ self.context.term()
+ self.refs.clear()
+ self.context = None
+
+ def start(self):
+ """Start a new garbage collection thread.
+
+ Creates a new zmq Context used for garbage collection.
+ Under most circumstances, this will only be called once per process.
+ """
+ if self.thread is not None and self.pid != getpid():
+ # It's re-starting, must free earlier thread's context
+ # since a fork probably broke it
+ self._stop()
+ self.pid = getpid()
+ self.refs = {}
+ self.thread = GarbageCollectorThread(self)
+ self.thread.start()
+ self.thread.ready.wait()
+
+ def is_alive(self):
+ """Is the garbage collection thread currently running?
+
+ Includes checks for process shutdown or fork.
+ """
+ if (getpid is None or
+ getpid() != self.pid or
+ self.thread is None or
+ not self.thread.is_alive()
+ ):
+ return False
+ return True
+
+ def store(self, obj, event=None):
+ """store an object and (optionally) event for zero-copy"""
+ if not self.is_alive():
+ if self._stay_down:
+ return 0
+ # safely start the gc thread
+ # use lock and double check,
+ # so we don't start multiple threads
+ with self._lock:
+ if not self.is_alive():
+ self.start()
+ tup = gcref(obj, event)
+ theid = id(tup)
+ self.refs[theid] = tup
+ return theid
+
+ def __del__(self):
+ if not self.is_alive():
+ return
+ try:
+ self.stop()
+ except Exception as e:
+ raise (e)
+
+gc = GarbageCollector()
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/getpid_compat.h b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/getpid_compat.h
new file mode 100644
index 00000000..47ce90fa
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/getpid_compat.h
@@ -0,0 +1,6 @@
+#ifdef _WIN32
+ #include <process.h>
+ #define getpid _getpid
+#else
+ #include <unistd.h>
+#endif
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/interop.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/interop.py
new file mode 100644
index 00000000..26c01969
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/interop.py
@@ -0,0 +1,33 @@
+"""Utils for interoperability with other libraries.
+
+Just CFFI pointer casting for now.
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+try:
+ long
+except NameError:
+ long = int # Python 3
+
+
+def cast_int_addr(n):
+ """Cast an address to a Python int
+
+ This could be a Python integer or a CFFI pointer
+ """
+ if isinstance(n, (int, long)):
+ return n
+ try:
+ import cffi
+ except ImportError:
+ pass
+ else:
+ # from pyzmq, this is an FFI void *
+ ffi = cffi.FFI()
+ if isinstance(n, ffi.CData):
+ return int(ffi.cast("size_t", n))
+
+ raise ValueError("Cannot cast %r to int" % n)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/ipcmaxlen.h b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/ipcmaxlen.h
new file mode 100644
index 00000000..7218db78
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/ipcmaxlen.h
@@ -0,0 +1,21 @@
+/*
+
+Platform-independent detection of IPC path max length
+
+Copyright (c) 2012 Godefroid Chapelle
+
+Distributed under the terms of the New BSD License. The full license is in
+the file COPYING.BSD, distributed as part of this software.
+ */
+
+#if defined(HAVE_SYS_UN_H)
+#include "sys/un.h"
+int get_ipc_path_max_len(void) {
+ struct sockaddr_un *dummy;
+ return sizeof(dummy->sun_path) - 1;
+}
+#else
+int get_ipc_path_max_len(void) {
+ return 0;
+}
+#endif
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/jsonapi.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/jsonapi.py
new file mode 100644
index 00000000..865ca6d5
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/jsonapi.py
@@ -0,0 +1,59 @@
+"""Priority based json library imports.
+
+Always serializes to bytes instead of unicode for zeromq compatibility
+on Python 2 and 3.
+
+Use ``jsonapi.loads()`` and ``jsonapi.dumps()`` for guaranteed symmetry.
+
+Priority: ``simplejson`` > ``jsonlib2`` > stdlib ``json``
+
+``jsonapi.loads/dumps`` provide kwarg-compatibility with stdlib json.
+
+``jsonapi.jsonmod`` will be the module of the actual underlying implementation.
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from zmq.utils.strtypes import bytes, unicode
+
+jsonmod = None
+
+priority = ['simplejson', 'jsonlib2', 'json']
+for mod in priority:
+ try:
+ jsonmod = __import__(mod)
+ except ImportError:
+ pass
+ else:
+ break
+
+def dumps(o, **kwargs):
+ """Serialize object to JSON bytes (utf-8).
+
+ See jsonapi.jsonmod.dumps for details on kwargs.
+ """
+
+ if 'separators' not in kwargs:
+ kwargs['separators'] = (',', ':')
+
+ s = jsonmod.dumps(o, **kwargs)
+
+ if isinstance(s, unicode):
+ s = s.encode('utf8')
+
+ return s
+
+def loads(s, **kwargs):
+ """Load object from JSON bytes (utf-8).
+
+ See jsonapi.jsonmod.loads for details on kwargs.
+ """
+
+ if str is unicode and isinstance(s, bytes):
+ s = s.decode('utf8')
+
+ return jsonmod.loads(s, **kwargs)
+
+__all__ = ['jsonmod', 'dumps', 'loads']
+
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/monitor.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/monitor.py
new file mode 100644
index 00000000..734d54b1
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/monitor.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+"""Module holding utility and convenience functions for zmq event monitoring."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import struct
+import zmq
+from zmq.error import _check_version
+
+def parse_monitor_message(msg):
+ """decode zmq_monitor event messages.
+
+ Parameters
+ ----------
+ msg : list(bytes)
+ zmq multipart message that has arrived on a monitor PAIR socket.
+
+ First frame is::
+
+ 16 bit event id
+ 32 bit event value
+ no padding
+
+ Second frame is the endpoint as a bytestring
+
+ Returns
+ -------
+ event : dict
+ event description as dict with the keys `event`, `value`, and `endpoint`.
+ """
+
+ if len(msg) != 2 or len(msg[0]) != 6:
+ raise RuntimeError("Invalid event message format: %s" % msg)
+ event = {}
+ event['event'], event['value'] = struct.unpack("=hi", msg[0])
+ event['endpoint'] = msg[1]
+ return event
+
+def recv_monitor_message(socket, flags=0):
+ """Receive and decode the given raw message from the monitoring socket and return a dict.
+
+ Requires libzmq ≥ 4.0
+
+ The returned dict will have the following entries:
+ event : int, the event id as described in libzmq.zmq_socket_monitor
+ value : int, the event value associated with the event, see libzmq.zmq_socket_monitor
+ endpoint : string, the affected endpoint
+
+ Parameters
+ ----------
+ socket : zmq PAIR socket
+ The PAIR socket (created by other.get_monitor_socket()) on which to recv the message
+ flags : bitfield (int)
+ standard zmq recv flags
+
+ Returns
+ -------
+ event : dict
+ event description as dict with the keys `event`, `value`, and `endpoint`.
+ """
+ _check_version((4,0), 'libzmq event API')
+ # will always return a list
+ msg = socket.recv_multipart(flags)
+ # 4.0-style event API
+ return parse_monitor_message(msg)
+
+__all__ = ['parse_monitor_message', 'recv_monitor_message']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/pyversion_compat.h b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/pyversion_compat.h
new file mode 100644
index 00000000..fac09046
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/pyversion_compat.h
@@ -0,0 +1,25 @@
+#include "Python.h"
+
+#if PY_VERSION_HEX < 0x02070000
+ #define PyMemoryView_FromBuffer(info) (PyErr_SetString(PyExc_NotImplementedError, \
+ "new buffer interface is not available"), (PyObject *)NULL)
+ #define PyMemoryView_FromObject(object) (PyErr_SetString(PyExc_NotImplementedError, \
+ "new buffer interface is not available"), (PyObject *)NULL)
+#endif
+
+#if PY_VERSION_HEX >= 0x03000000
+ // for buffers
+ #define Py_END_OF_BUFFER ((Py_ssize_t) 0)
+
+ #define PyObject_CheckReadBuffer(object) (0)
+
+ #define PyBuffer_FromMemory(ptr, s) (PyErr_SetString(PyExc_NotImplementedError, \
+ "old buffer interface is not available"), (PyObject *)NULL)
+ #define PyBuffer_FromReadWriteMemory(ptr, s) (PyErr_SetString(PyExc_NotImplementedError, \
+ "old buffer interface is not available"), (PyObject *)NULL)
+ #define PyBuffer_FromObject(object, offset, size) (PyErr_SetString(PyExc_NotImplementedError, \
+ "old buffer interface is not available"), (PyObject *)NULL)
+ #define PyBuffer_FromReadWriteObject(object, offset, size) (PyErr_SetString(PyExc_NotImplementedError, \
+ "old buffer interface is not available"), (PyObject *)NULL)
+
+#endif
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/sixcerpt.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/sixcerpt.py
new file mode 100644
index 00000000..5492fd59
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/sixcerpt.py
@@ -0,0 +1,52 @@
+"""Excerpts of six.py"""
+
+# Copyright (C) 2010-2014 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import sys
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+
+ def reraise(tp, value, tb=None):
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+
+ exec_("""def reraise(tp, value, tb=None):
+ raise tp, value, tb
+""")
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/strtypes.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/strtypes.py
new file mode 100644
index 00000000..548410dc
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/strtypes.py
@@ -0,0 +1,45 @@
+"""Declare basic string types unambiguously for various Python versions.
+
+Authors
+-------
+* MinRK
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+
+if sys.version_info[0] >= 3:
+ bytes = bytes
+ unicode = str
+ basestring = (bytes, unicode)
+else:
+ unicode = unicode
+ bytes = str
+ basestring = basestring
+
+def cast_bytes(s, encoding='utf8', errors='strict'):
+ """cast unicode or bytes to bytes"""
+ if isinstance(s, bytes):
+ return s
+ elif isinstance(s, unicode):
+ return s.encode(encoding, errors)
+ else:
+ raise TypeError("Expected unicode or bytes, got %r" % s)
+
+def cast_unicode(s, encoding='utf8', errors='strict'):
+ """cast bytes or unicode to unicode"""
+ if isinstance(s, bytes):
+ return s.decode(encoding, errors)
+ elif isinstance(s, unicode):
+ return s
+ else:
+ raise TypeError("Expected unicode or bytes, got %r" % s)
+
+# give short 'b' alias for cast_bytes, so that we can use fake b('stuff')
+# to simulate b'stuff'
+b = asbytes = cast_bytes
+u = cast_unicode
+
+__all__ = ['asbytes', 'bytes', 'unicode', 'basestring', 'b', 'u', 'cast_bytes', 'cast_unicode']
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/win32.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/win32.py
new file mode 100644
index 00000000..ea758299
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/win32.py
@@ -0,0 +1,132 @@
+"""Win32 compatibility utilities."""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+#-----------------------------------------------------------------------------
+
+import os
+
+# No-op implementation for other platforms.
+class _allow_interrupt(object):
+ """Utility for fixing CTRL-C events on Windows.
+
+ On Windows, the Python interpreter intercepts CTRL-C events in order to
+ translate them into ``KeyboardInterrupt`` exceptions. It (presumably)
+ does this by setting a flag in its "console control handler" and
+ checking it later at a convenient location in the interpreter.
+
+ However, when the Python interpreter is blocked waiting for the ZMQ
+ poll operation to complete, it must wait for ZMQ's ``select()``
+ operation to complete before translating the CTRL-C event into the
+ ``KeyboardInterrupt`` exception.
+
+ The only way to fix this seems to be to add our own "console control
+ handler" and perform some application-defined operation that will
+ unblock the ZMQ polling operation in order to force ZMQ to pass control
+ back to the Python interpreter.
+
+ This context manager performs all that Windows-y stuff, providing you
+ with a hook that is called when a CTRL-C event is intercepted. This
+ hook allows you to unblock your ZMQ poll operation immediately, which
+ will then result in the expected ``KeyboardInterrupt`` exception.
+
+ Without this context manager, your ZMQ-based application will not
+ respond normally to CTRL-C events on Windows. If a CTRL-C event occurs
+ while blocked on ZMQ socket polling, the translation to a
+ ``KeyboardInterrupt`` exception will be delayed until the I/O completes
+ and control returns to the Python interpreter (this may never happen if
+ you use an infinite timeout).
+
+ A no-op implementation is provided on non-Win32 systems to avoid the
+ application from having to conditionally use it.
+
+ Example usage:
+
+ .. sourcecode:: python
+
+ def stop_my_application():
+ # ...
+
+ with allow_interrupt(stop_my_application):
+ # main polling loop.
+
+ In a typical ZMQ application, you would use the "self pipe trick" to
+ send message to a ``PAIR`` socket in order to interrupt your blocking
+ socket polling operation.
+
+ In a Tornado event loop, you can use the ``IOLoop.stop`` method to
+ unblock your I/O loop.
+ """
+
+ def __init__(self, action=None):
+ """Translate ``action`` into a CTRL-C handler.
+
+ ``action`` is a callable that takes no arguments and returns no
+ value (returned value is ignored). It must *NEVER* raise an
+ exception.
+
+ If unspecified, a no-op will be used.
+ """
+ self._init_action(action)
+
+ def _init_action(self, action):
+ pass
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ return
+
+if os.name == 'nt':
+ from ctypes import WINFUNCTYPE, windll
+ from ctypes.wintypes import BOOL, DWORD
+
+ kernel32 = windll.LoadLibrary('kernel32')
+
+ # <http://msdn.microsoft.com/en-us/library/ms686016.aspx>
+ PHANDLER_ROUTINE = WINFUNCTYPE(BOOL, DWORD)
+ SetConsoleCtrlHandler = kernel32.SetConsoleCtrlHandler
+ SetConsoleCtrlHandler.argtypes = (PHANDLER_ROUTINE, BOOL)
+ SetConsoleCtrlHandler.restype = BOOL
+
+ class allow_interrupt(_allow_interrupt):
+ __doc__ = _allow_interrupt.__doc__
+
+ def _init_action(self, action):
+ if action is None:
+ action = lambda: None
+ self.action = action
+ @PHANDLER_ROUTINE
+ def handle(event):
+ if event == 0: # CTRL_C_EVENT
+ action()
+ # Typical C implementations would return 1 to indicate that
+ # the event was processed and other control handlers in the
+ # stack should not be executed. However, that would
+ # prevent the Python interpreter's handler from translating
+ # CTRL-C to a `KeyboardInterrupt` exception, so we pretend
+ # that we didn't handle it.
+ return 0
+ self.handle = handle
+
+ def __enter__(self):
+ """Install the custom CTRL-C handler."""
+ result = SetConsoleCtrlHandler(self.handle, 1)
+ if result == 0:
+ # Have standard library automatically call `GetLastError()` and
+ # `FormatMessage()` into a nice exception object :-)
+ raise WindowsError()
+
+ def __exit__(self, *args):
+ """Remove the custom CTRL-C handler."""
+ result = SetConsoleCtrlHandler(self.handle, 0)
+ if result == 0:
+ # Have standard library automatically call `GetLastError()` and
+ # `FormatMessage()` into a nice exception object :-)
+ raise WindowsError()
+else:
+ class allow_interrupt(_allow_interrupt):
+ __doc__ = _allow_interrupt.__doc__
+ pass
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/z85.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/z85.py
new file mode 100644
index 00000000..1bb1784e
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/z85.py
@@ -0,0 +1,56 @@
+"""Python implementation of Z85 85-bit encoding
+
+Z85 encoding is a plaintext encoding for a bytestring interpreted as 32bit integers.
+Since the chunks are 32bit, a bytestring must be a multiple of 4 bytes.
+See ZMQ RFC 32 for details.
+
+
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+import struct
+
+PY3 = sys.version_info[0] >= 3
+# Z85CHARS is the base 85 symbol table
+Z85CHARS = b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.-:+=^!/*?&<>()[]{}@%$#"
+# Z85MAP maps integers in [0,84] to the appropriate character in Z85CHARS
+Z85MAP = dict([(c, idx) for idx, c in enumerate(Z85CHARS)])
+
+_85s = [ 85**i for i in range(5) ][::-1]
+
+def encode(rawbytes):
+ """encode raw bytes into Z85"""
+ # Accepts only byte arrays bounded to 4 bytes
+ if len(rawbytes) % 4:
+ raise ValueError("length must be multiple of 4, not %i" % len(rawbytes))
+
+ nvalues = len(rawbytes) / 4
+
+ values = struct.unpack('>%dI' % nvalues, rawbytes)
+ encoded = []
+ for v in values:
+ for offset in _85s:
+ encoded.append(Z85CHARS[(v // offset) % 85])
+
+ # In Python 3, encoded is a list of integers (obviously?!)
+ if PY3:
+ return bytes(encoded)
+ else:
+ return b''.join(encoded)
+
+def decode(z85bytes):
+ """decode Z85 bytes to raw bytes"""
+ if len(z85bytes) % 5:
+ raise ValueError("Z85 length must be multiple of 5, not %i" % len(z85bytes))
+
+ nvalues = len(z85bytes) / 5
+ values = []
+ for i in range(0, len(z85bytes), 5):
+ value = 0
+ for j, offset in enumerate(_85s):
+ value += Z85MAP[z85bytes[i+j]] * offset
+ values.append(value)
+ return struct.pack('>%dI' % nvalues, *values)
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/zmq_compat.h b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/zmq_compat.h
new file mode 100644
index 00000000..81c57b69
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/zmq_compat.h
@@ -0,0 +1,80 @@
+//-----------------------------------------------------------------------------
+// Copyright (c) 2010 Brian Granger, Min Ragan-Kelley
+//
+// Distributed under the terms of the New BSD License. The full license is in
+// the file COPYING.BSD, distributed as part of this software.
+//-----------------------------------------------------------------------------
+
+#if defined(_MSC_VER)
+#define pyzmq_int64_t __int64
+#else
+#include <stdint.h>
+#define pyzmq_int64_t int64_t
+#endif
+
+
+#include "zmq.h"
+// version compatibility for constants:
+#include "zmq_constants.h"
+
+#define _missing (-1)
+
+
+// define fd type (from libzmq's fd.hpp)
+#ifdef _WIN32
+  #if defined(_MSC_VER) && _MSC_VER <= 1400  /* fix: #ifdef takes one identifier; '&&' was ignored */
+    #define ZMQ_FD_T UINT_PTR
+  #else
+    #define ZMQ_FD_T SOCKET
+  #endif
+#else
+  #define ZMQ_FD_T int
+#endif
+
+// use unambiguous aliases for zmq_send/recv functions
+
+#if ZMQ_VERSION_MAJOR >= 4
+// nothing to remove
+#else
+ #define zmq_curve_keypair(z85_public_key, z85_secret_key) _missing
+#endif
+
+#if ZMQ_VERSION_MAJOR >= 4 && ZMQ_VERSION_MINOR >= 1
+// nothing to remove
+#else
+ #define zmq_msg_gets(msg, prop) _missing
+ #define zmq_has(capability) _missing
+#endif
+
+#if ZMQ_VERSION_MAJOR >= 3
+ #define zmq_sendbuf zmq_send
+ #define zmq_recvbuf zmq_recv
+
+ // 3.x deprecations - these symbols haven't been removed,
+ // but let's protect against their planned removal
+ #define zmq_device(device_type, isocket, osocket) _missing
+ #define zmq_init(io_threads) ((void*)NULL)
+ #define zmq_term zmq_ctx_destroy
+#else
+ #define zmq_ctx_set(ctx, opt, val) _missing
+ #define zmq_ctx_get(ctx, opt) _missing
+ #define zmq_ctx_destroy zmq_term
+ #define zmq_ctx_new() ((void*)NULL)
+
+ #define zmq_proxy(a,b,c) _missing
+
+ #define zmq_disconnect(s, addr) _missing
+ #define zmq_unbind(s, addr) _missing
+
+ #define zmq_msg_more(msg) _missing
+ #define zmq_msg_get(msg, opt) _missing
+ #define zmq_msg_set(msg, opt, val) _missing
+ #define zmq_msg_send(msg, s, flags) zmq_send(s, msg, flags)
+ #define zmq_msg_recv(msg, s, flags) zmq_recv(s, msg, flags)
+
+ #define zmq_sendbuf(s, buf, len, flags) _missing
+ #define zmq_recvbuf(s, buf, len, flags) _missing
+
+ #define zmq_socket_monitor(s, addr, flags) _missing
+
+#endif
diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/zmq_constants.h b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/zmq_constants.h
new file mode 100644
index 00000000..97683022
--- /dev/null
+++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/utils/zmq_constants.h
@@ -0,0 +1,622 @@
+#ifndef _PYZMQ_CONSTANT_DEFS
+#define _PYZMQ_CONSTANT_DEFS
+
+#define _PYZMQ_UNDEFINED (-9999)
+#ifndef ZMQ_VERSION
+ #define ZMQ_VERSION (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_VERSION_MAJOR
+ #define ZMQ_VERSION_MAJOR (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_VERSION_MINOR
+ #define ZMQ_VERSION_MINOR (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_VERSION_PATCH
+ #define ZMQ_VERSION_PATCH (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_NOBLOCK
+ #define ZMQ_NOBLOCK (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_DONTWAIT
+ #define ZMQ_DONTWAIT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_POLLIN
+ #define ZMQ_POLLIN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_POLLOUT
+ #define ZMQ_POLLOUT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_POLLERR
+ #define ZMQ_POLLERR (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SNDMORE
+ #define ZMQ_SNDMORE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_STREAMER
+ #define ZMQ_STREAMER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_FORWARDER
+ #define ZMQ_FORWARDER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_QUEUE
+ #define ZMQ_QUEUE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IO_THREADS_DFLT
+ #define ZMQ_IO_THREADS_DFLT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MAX_SOCKETS_DFLT
+ #define ZMQ_MAX_SOCKETS_DFLT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_POLLITEMS_DFLT
+ #define ZMQ_POLLITEMS_DFLT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_THREAD_PRIORITY_DFLT
+ #define ZMQ_THREAD_PRIORITY_DFLT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_THREAD_SCHED_POLICY_DFLT
+ #define ZMQ_THREAD_SCHED_POLICY_DFLT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PAIR
+ #define ZMQ_PAIR (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PUB
+ #define ZMQ_PUB (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SUB
+ #define ZMQ_SUB (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_REQ
+ #define ZMQ_REQ (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_REP
+ #define ZMQ_REP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_DEALER
+ #define ZMQ_DEALER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ROUTER
+ #define ZMQ_ROUTER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XREQ
+ #define ZMQ_XREQ (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XREP
+ #define ZMQ_XREP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PULL
+ #define ZMQ_PULL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PUSH
+ #define ZMQ_PUSH (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XPUB
+ #define ZMQ_XPUB (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XSUB
+ #define ZMQ_XSUB (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_UPSTREAM
+ #define ZMQ_UPSTREAM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_DOWNSTREAM
+ #define ZMQ_DOWNSTREAM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_STREAM
+ #define ZMQ_STREAM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_CONNECTED
+ #define ZMQ_EVENT_CONNECTED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_CONNECT_DELAYED
+ #define ZMQ_EVENT_CONNECT_DELAYED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_CONNECT_RETRIED
+ #define ZMQ_EVENT_CONNECT_RETRIED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_LISTENING
+ #define ZMQ_EVENT_LISTENING (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_BIND_FAILED
+ #define ZMQ_EVENT_BIND_FAILED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_ACCEPTED
+ #define ZMQ_EVENT_ACCEPTED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_ACCEPT_FAILED
+ #define ZMQ_EVENT_ACCEPT_FAILED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_CLOSED
+ #define ZMQ_EVENT_CLOSED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_CLOSE_FAILED
+ #define ZMQ_EVENT_CLOSE_FAILED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_DISCONNECTED
+ #define ZMQ_EVENT_DISCONNECTED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_ALL
+ #define ZMQ_EVENT_ALL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENT_MONITOR_STOPPED
+ #define ZMQ_EVENT_MONITOR_STOPPED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_NULL
+ #define ZMQ_NULL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PLAIN
+ #define ZMQ_PLAIN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CURVE
+ #define ZMQ_CURVE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_GSSAPI
+ #define ZMQ_GSSAPI (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EAGAIN
+ #define EAGAIN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EINVAL
+ #define EINVAL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EFAULT
+ #define EFAULT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOMEM
+ #define ENOMEM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENODEV
+ #define ENODEV (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EMSGSIZE
+ #define EMSGSIZE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EAFNOSUPPORT
+ #define EAFNOSUPPORT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENETUNREACH
+ #define ENETUNREACH (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ECONNABORTED
+ #define ECONNABORTED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ECONNRESET
+ #define ECONNRESET (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOTCONN
+ #define ENOTCONN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ETIMEDOUT
+ #define ETIMEDOUT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EHOSTUNREACH
+ #define EHOSTUNREACH (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENETRESET
+ #define ENETRESET (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_HAUSNUMERO
+ #define ZMQ_HAUSNUMERO (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOTSUP
+ #define ENOTSUP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EPROTONOSUPPORT
+ #define EPROTONOSUPPORT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOBUFS
+ #define ENOBUFS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENETDOWN
+ #define ENETDOWN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EADDRINUSE
+ #define EADDRINUSE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EADDRNOTAVAIL
+ #define EADDRNOTAVAIL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ECONNREFUSED
+ #define ECONNREFUSED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EINPROGRESS
+ #define EINPROGRESS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOTSOCK
+ #define ENOTSOCK (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EFSM
+ #define EFSM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ENOCOMPATPROTO
+ #define ENOCOMPATPROTO (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ETERM
+ #define ETERM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef EMTHREAD
+ #define EMTHREAD (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IO_THREADS
+ #define ZMQ_IO_THREADS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MAX_SOCKETS
+ #define ZMQ_MAX_SOCKETS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SOCKET_LIMIT
+ #define ZMQ_SOCKET_LIMIT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_THREAD_PRIORITY
+ #define ZMQ_THREAD_PRIORITY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_THREAD_SCHED_POLICY
+ #define ZMQ_THREAD_SCHED_POLICY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IDENTITY
+ #define ZMQ_IDENTITY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SUBSCRIBE
+ #define ZMQ_SUBSCRIBE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_UNSUBSCRIBE
+ #define ZMQ_UNSUBSCRIBE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_LAST_ENDPOINT
+ #define ZMQ_LAST_ENDPOINT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TCP_ACCEPT_FILTER
+ #define ZMQ_TCP_ACCEPT_FILTER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PLAIN_USERNAME
+ #define ZMQ_PLAIN_USERNAME (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PLAIN_PASSWORD
+ #define ZMQ_PLAIN_PASSWORD (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CURVE_PUBLICKEY
+ #define ZMQ_CURVE_PUBLICKEY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CURVE_SECRETKEY
+ #define ZMQ_CURVE_SECRETKEY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CURVE_SERVERKEY
+ #define ZMQ_CURVE_SERVERKEY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ZAP_DOMAIN
+ #define ZMQ_ZAP_DOMAIN (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CONNECT_RID
+ #define ZMQ_CONNECT_RID (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_GSSAPI_PRINCIPAL
+ #define ZMQ_GSSAPI_PRINCIPAL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_GSSAPI_SERVICE_PRINCIPAL
+ #define ZMQ_GSSAPI_SERVICE_PRINCIPAL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SOCKS_PROXY
+ #define ZMQ_SOCKS_PROXY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_FD
+ #define ZMQ_FD (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IDENTITY_FD
+ #define ZMQ_IDENTITY_FD (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RECONNECT_IVL_MAX
+ #define ZMQ_RECONNECT_IVL_MAX (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SNDTIMEO
+ #define ZMQ_SNDTIMEO (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RCVTIMEO
+ #define ZMQ_RCVTIMEO (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SNDHWM
+ #define ZMQ_SNDHWM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RCVHWM
+ #define ZMQ_RCVHWM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MULTICAST_HOPS
+ #define ZMQ_MULTICAST_HOPS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IPV4ONLY
+ #define ZMQ_IPV4ONLY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ROUTER_BEHAVIOR
+ #define ZMQ_ROUTER_BEHAVIOR (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TCP_KEEPALIVE
+ #define ZMQ_TCP_KEEPALIVE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TCP_KEEPALIVE_CNT
+ #define ZMQ_TCP_KEEPALIVE_CNT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TCP_KEEPALIVE_IDLE
+ #define ZMQ_TCP_KEEPALIVE_IDLE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TCP_KEEPALIVE_INTVL
+ #define ZMQ_TCP_KEEPALIVE_INTVL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_DELAY_ATTACH_ON_CONNECT
+ #define ZMQ_DELAY_ATTACH_ON_CONNECT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XPUB_VERBOSE
+ #define ZMQ_XPUB_VERBOSE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_EVENTS
+ #define ZMQ_EVENTS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TYPE
+ #define ZMQ_TYPE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_LINGER
+ #define ZMQ_LINGER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RECONNECT_IVL
+ #define ZMQ_RECONNECT_IVL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_BACKLOG
+ #define ZMQ_BACKLOG (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ROUTER_MANDATORY
+ #define ZMQ_ROUTER_MANDATORY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_FAIL_UNROUTABLE
+ #define ZMQ_FAIL_UNROUTABLE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ROUTER_RAW
+ #define ZMQ_ROUTER_RAW (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IMMEDIATE
+ #define ZMQ_IMMEDIATE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IPV6
+ #define ZMQ_IPV6 (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MECHANISM
+ #define ZMQ_MECHANISM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PLAIN_SERVER
+ #define ZMQ_PLAIN_SERVER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CURVE_SERVER
+ #define ZMQ_CURVE_SERVER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_PROBE_ROUTER
+ #define ZMQ_PROBE_ROUTER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_REQ_RELAXED
+ #define ZMQ_REQ_RELAXED (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_REQ_CORRELATE
+ #define ZMQ_REQ_CORRELATE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_CONFLATE
+ #define ZMQ_CONFLATE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_ROUTER_HANDOVER
+ #define ZMQ_ROUTER_HANDOVER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_TOS
+ #define ZMQ_TOS (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IPC_FILTER_PID
+ #define ZMQ_IPC_FILTER_PID (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IPC_FILTER_UID
+ #define ZMQ_IPC_FILTER_UID (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_IPC_FILTER_GID
+ #define ZMQ_IPC_FILTER_GID (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_GSSAPI_SERVER
+ #define ZMQ_GSSAPI_SERVER (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_GSSAPI_PLAINTEXT
+ #define ZMQ_GSSAPI_PLAINTEXT (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_HANDSHAKE_IVL
+ #define ZMQ_HANDSHAKE_IVL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_XPUB_NODROP
+ #define ZMQ_XPUB_NODROP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_AFFINITY
+ #define ZMQ_AFFINITY (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MAXMSGSIZE
+ #define ZMQ_MAXMSGSIZE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_HWM
+ #define ZMQ_HWM (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SWAP
+ #define ZMQ_SWAP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MCAST_LOOP
+ #define ZMQ_MCAST_LOOP (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RECOVERY_IVL_MSEC
+ #define ZMQ_RECOVERY_IVL_MSEC (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RATE
+ #define ZMQ_RATE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RECOVERY_IVL
+ #define ZMQ_RECOVERY_IVL (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SNDBUF
+ #define ZMQ_SNDBUF (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RCVBUF
+ #define ZMQ_RCVBUF (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_RCVMORE
+ #define ZMQ_RCVMORE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_MORE
+ #define ZMQ_MORE (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SRCFD
+ #define ZMQ_SRCFD (_PYZMQ_UNDEFINED)
+#endif
+
+#ifndef ZMQ_SHARED
+ #define ZMQ_SHARED (_PYZMQ_UNDEFINED)
+#endif
+
+
+#endif // ifndef _PYZMQ_CONSTANT_DEFS
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/__init__.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/__init__.py
index 443b3675..443b3675 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/__init__.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/__init__.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/all.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/all.py
index 5b160df4..5b160df4 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/all.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/all.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/ansmachine.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/ansmachine.py
index 2a90adcc..2a90adcc 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/ansmachine.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/ansmachine.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/arch/__init__.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/arch/__init__.py
index 95f95ecf..95f95ecf 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/arch/__init__.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/arch/__init__.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/arch/bsd.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/arch/bsd.py
index 1be7bd73..1be7bd73 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/arch/bsd.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/arch/bsd.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/arch/linux.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/arch/linux.py
index 32f0a2d1..32f0a2d1 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/arch/linux.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/arch/linux.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/arch/pcapdnet.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/arch/pcapdnet.py
index 94673d7d..94673d7d 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/arch/pcapdnet.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/arch/pcapdnet.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/arch/solaris.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/arch/solaris.py
index 3117076a..3117076a 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/arch/solaris.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/arch/solaris.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/arch/unix.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/arch/unix.py
index 151a08f5..151a08f5 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/arch/unix.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/arch/unix.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/arch/windows/__init__.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/arch/windows/__init__.py
index e604266b..e604266b 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/arch/windows/__init__.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/arch/windows/__init__.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/as_resolvers.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/as_resolvers.py
index 24573c20..24573c20 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/as_resolvers.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/as_resolvers.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/asn1/__init__.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/asn1/__init__.py
index 4827a588..4827a588 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/asn1/__init__.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/asn1/__init__.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/asn1/asn1.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/asn1/asn1.py
index bad7b2cf..bad7b2cf 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/asn1/asn1.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/asn1/asn1.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/asn1/ber.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/asn1/ber.py
index 2312e025..2312e025 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/asn1/ber.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/asn1/ber.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/asn1/mib.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/asn1/mib.py
index 8531fcf2..8531fcf2 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/asn1/mib.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/asn1/mib.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/asn1fields.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/asn1fields.py
index 1a59bd50..1a59bd50 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/asn1fields.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/asn1fields.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/asn1packet.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/asn1packet.py
index 4c476d2a..4c476d2a 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/asn1packet.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/asn1packet.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/automaton.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/automaton.py
index 7502ac51..7502ac51 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/automaton.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/automaton.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/autorun.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/autorun.py
index a4534949..a4534949 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/autorun.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/autorun.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/base_classes.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/base_classes.py
index e54428a2..e54428a2 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/base_classes.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/base_classes.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/config.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/config.py
index 816e8a18..816e8a18 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/config.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/config.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/__init__.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/__init__.py
index 99654377..99654377 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/__init__.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/__init__.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/avs.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/avs.py
index 461b94b8..461b94b8 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/avs.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/avs.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/bgp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/bgp.py
index 525dac5f..525dac5f 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/bgp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/bgp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/carp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/carp.py
index e785adef..e785adef 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/carp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/carp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/cdp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/cdp.py
index 12794c7b..12794c7b 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/cdp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/cdp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/chdlc.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/chdlc.py
index 6e483762..6e483762 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/chdlc.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/chdlc.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/dtp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/dtp.py
index 294350bc..294350bc 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/dtp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/dtp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/eigrp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/eigrp.py
index 73b4ce02..73b4ce02 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/eigrp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/eigrp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/etherip.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/etherip.py
index e331c146..e331c146 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/etherip.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/etherip.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/gsm_um.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/gsm_um.py
index cd6b9b05..cd6b9b05 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/gsm_um.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/gsm_um.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/igmp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/igmp.py
index cc3dadc8..cc3dadc8 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/igmp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/igmp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/igmpv3.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/igmpv3.py
index 1ab1bae4..1ab1bae4 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/igmpv3.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/igmpv3.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/ikev2.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ikev2.py
index fd38b80c..fd38b80c 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/ikev2.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ikev2.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/ldp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ldp.py
index bc2464ab..bc2464ab 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/ldp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ldp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/mpls.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/mpls.py
index 037278c5..037278c5 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/mpls.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/mpls.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/ospf.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ospf.py
index a6422bd8..a6422bd8 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/ospf.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ospf.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/ppi.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ppi.py
index f4364096..f4364096 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/ppi.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ppi.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/ppi_cace.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ppi_cace.py
index ba2c4abf..ba2c4abf 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/ppi_cace.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ppi_cace.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/ppi_geotag.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ppi_geotag.py
index 19371512..19371512 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/ppi_geotag.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ppi_geotag.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/ripng.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ripng.py
index 47e17bc4..47e17bc4 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/ripng.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ripng.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/rsvp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/rsvp.py
index c9d4ebee..c9d4ebee 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/rsvp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/rsvp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/skinny.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/skinny.py
index 8b686822..8b686822 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/skinny.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/skinny.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/ubberlogger.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ubberlogger.py
index 1c01db2f..1c01db2f 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/ubberlogger.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/ubberlogger.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/vqp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/vqp.py
index 9328cea4..9328cea4 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/vqp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/vqp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/vtp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/vtp.py
index af5c2823..af5c2823 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/vtp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/vtp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/contrib/wpa_eapol.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/wpa_eapol.py
index 084eedd8..084eedd8 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/contrib/wpa_eapol.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/contrib/wpa_eapol.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/crypto/__init__.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/crypto/__init__.py
index b441863e..b441863e 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/crypto/__init__.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/crypto/__init__.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/crypto/cert.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/crypto/cert.py
index e6c00496..e6c00496 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/crypto/cert.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/crypto/cert.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/dadict.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/dadict.py
index dcfd2a77..dcfd2a77 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/dadict.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/dadict.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/data.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/data.py
index de01cfc9..de01cfc9 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/data.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/data.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/error.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/error.py
index 29ebc1ba..29ebc1ba 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/error.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/error.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/fields.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/fields.py
index 8bb8c970..8bb8c970 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/fields.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/fields.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/__init__.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/__init__.py
index a3f2afb9..a3f2afb9 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/__init__.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/__init__.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/all.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/all.py
index e92c22c5..e92c22c5 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/all.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/all.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/bluetooth.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/bluetooth.py
index 662bad3b..662bad3b 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/bluetooth.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/bluetooth.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/dhcp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/dhcp.py
index 18f83f00..18f83f00 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/dhcp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/dhcp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/dhcp6.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/dhcp6.py
index 2bd215d0..2bd215d0 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/dhcp6.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/dhcp6.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/dns.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/dns.py
index 533db6c2..533db6c2 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/dns.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/dns.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/dot11.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/dot11.py
index b340dd85..b340dd85 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/dot11.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/dot11.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/gprs.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/gprs.py
index 31a931fe..31a931fe 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/gprs.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/gprs.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/hsrp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/hsrp.py
index 7193b97e..7193b97e 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/hsrp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/hsrp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/inet.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/inet.py
index 34b5e7be..34b5e7be 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/inet.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/inet.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/inet6.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/inet6.py
index 46cd85e5..46cd85e5 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/inet6.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/inet6.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/ipsec.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/ipsec.py
index 692a6e18..692a6e18 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/ipsec.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/ipsec.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/ir.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/ir.py
index fc738c55..fc738c55 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/ir.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/ir.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/isakmp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/isakmp.py
index 9c54bed4..9c54bed4 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/isakmp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/isakmp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/l2.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/l2.py
index 3f80ed7d..3f80ed7d 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/l2.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/l2.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/l2tp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/l2tp.py
index 0b56db21..0b56db21 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/l2tp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/l2tp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/llmnr.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/llmnr.py
index 65ecad41..65ecad41 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/llmnr.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/llmnr.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/mgcp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/mgcp.py
index 5d8a064e..5d8a064e 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/mgcp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/mgcp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/mobileip.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/mobileip.py
index bbaa8ce7..bbaa8ce7 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/mobileip.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/mobileip.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/netbios.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/netbios.py
index 605e06f1..605e06f1 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/netbios.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/netbios.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/netflow.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/netflow.py
index 44567737..44567737 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/netflow.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/netflow.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/ntp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/ntp.py
index 6d11966c..6d11966c 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/ntp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/ntp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/pflog.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/pflog.py
index a8fc9fe0..a8fc9fe0 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/pflog.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/pflog.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/ppp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/ppp.py
index 50c68465..50c68465 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/ppp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/ppp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/radius.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/radius.py
index 13239603..13239603 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/radius.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/radius.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/rip.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/rip.py
index 1507fe5c..1507fe5c 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/rip.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/rip.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/rtp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/rtp.py
index 629dccdd..629dccdd 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/rtp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/rtp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/sctp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/sctp.py
index 632becb1..632becb1 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/sctp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/sctp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/sebek.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/sebek.py
index c54e6728..c54e6728 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/sebek.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/sebek.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/skinny.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/skinny.py
index 9fb6ac06..9fb6ac06 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/skinny.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/skinny.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/smb.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/smb.py
index 73ebe5b1..73ebe5b1 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/smb.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/smb.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/snmp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/snmp.py
index 2c588250..2c588250 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/snmp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/snmp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/tftp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/tftp.py
index 1535e99c..1535e99c 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/tftp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/tftp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/vrrp.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/vrrp.py
index f874b352..f874b352 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/vrrp.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/vrrp.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/layers/x509.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/x509.py
index 18aaa5e3..18aaa5e3 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/layers/x509.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/x509.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/main.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/main.py
index 0fce8c3e..0fce8c3e 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/main.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/main.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/modules/__init__.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/modules/__init__.py
index 6303dad0..6303dad0 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/modules/__init__.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/modules/__init__.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/modules/geoip.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/modules/geoip.py
index 8b308a4c..8b308a4c 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/modules/geoip.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/modules/geoip.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/modules/nmap.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/modules/nmap.py
index ef064643..ef064643 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/modules/nmap.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/modules/nmap.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/modules/p0f.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/modules/p0f.py
index d051779d..d051779d 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/modules/p0f.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/modules/p0f.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/modules/queso.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/modules/queso.py
index ebc5486e..ebc5486e 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/modules/queso.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/modules/queso.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/modules/voip.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/modules/voip.py
index 70000a54..70000a54 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/modules/voip.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/modules/voip.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/packet.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/packet.py
index 711d7032..711d7032 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/packet.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/packet.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/pipetool.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/pipetool.py
index 7b85dd78..7b85dd78 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/pipetool.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/pipetool.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/plist.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/plist.py
index 92d7c3eb..92d7c3eb 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/plist.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/plist.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/pton_ntop.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/pton_ntop.py
index 1bd88920..1bd88920 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/pton_ntop.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/pton_ntop.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/route.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/route.py
index d7e7ce4b..d7e7ce4b 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/route.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/route.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/route6.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/route6.py
index 1bc5d491..1bc5d491 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/route6.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/route6.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/scapypipes.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/scapypipes.py
index 29b370bc..29b370bc 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/scapypipes.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/scapypipes.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/sendrecv.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/sendrecv.py
index c4024eef..c4024eef 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/sendrecv.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/sendrecv.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/supersocket.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/supersocket.py
index a5fe5e40..a5fe5e40 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/supersocket.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/supersocket.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/themes.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/themes.py
index 188fd547..188fd547 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/themes.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/themes.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/tools/UTscapy.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/tools/UTscapy.py
index d4310eb5..d4310eb5 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/tools/UTscapy.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/tools/UTscapy.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/tools/__init__.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/tools/__init__.py
index af6eec74..af6eec74 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/tools/__init__.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/tools/__init__.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/tools/check_asdis.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/tools/check_asdis.py
index 2c1efa4c..2c1efa4c 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/tools/check_asdis.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/tools/check_asdis.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/utils.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/utils.py
index c5ac2520..c5ac2520 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/utils.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/utils.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/utils6.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/utils6.py
index b1b7ee73..b1b7ee73 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/utils6.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/utils6.py
diff --git a/scripts/external_libs/scapy-2.3.1/scapy/volatile.py b/scripts/external_libs/scapy-2.3.1/python2/scapy/volatile.py
index 5d3e2adc..5d3e2adc 100644
--- a/scripts/external_libs/scapy-2.3.1/scapy/volatile.py
+++ b/scripts/external_libs/scapy-2.3.1/python2/scapy/volatile.py
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/__init__.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/__init__.py
new file mode 100644
index 00000000..443b3675
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/__init__.py
@@ -0,0 +1,15 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Scapy: create, send, sniff, dissect and manipulate network packets.
+
+Usable either from an interactive console or as a Python library.
+http://www.secdev.org/projects/scapy
+"""
+
+if __name__ == "__main__":
+ from scapy.main import interact
+ interact()
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/abc.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/abc.py
new file mode 100644
index 00000000..3d5f06e2
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/abc.py
@@ -0,0 +1 @@
+from config import conf
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/all.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/all.py
new file mode 100644
index 00000000..46030c66
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/all.py
@@ -0,0 +1,49 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Aggregate top level objects from all Scapy modules.
+"""
+
+from .config import *
+from .base_classes import *
+from .dadict import *
+from .data import *
+from .error import *
+#from .themes import *
+from .arch import *
+
+from .plist import *
+from .fields import *
+from .packet import *
+#from .asn1fields import *
+#from .asn1packet import *
+
+from .utils import *
+#from .route import *
+#if conf.ipv6_enabled:
+# from .utils6 import *
+# from .route6 import *
+#from .sendrecv import *
+#from .supersocket import *
+#from .volatile import *
+#from .as_resolvers import *
+
+#from .ansmachine import *
+#from .automaton import *
+#from .autorun import *
+
+from .main import *
+
+from .layers.all import *
+
+#from .asn1.asn1 import *
+#from .asn1.ber import *
+#from .asn1.mib import *
+
+#from .crypto import *
+
+#from .pipetool import *
+#from .scapypipes import *
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/ansmachine.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/ansmachine.py
new file mode 100644
index 00000000..b087ac44
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/ansmachine.py
@@ -0,0 +1,130 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Answering machines.
+"""
+
+########################
+## Answering machines ##
+########################
+
+from .sendrecv import send,sendp,sniff
+from .config import conf
+from .error import log_interactive
+
+class ReferenceAM(type):
+ def __new__(cls, name, bases, dct):
+ o = super(ReferenceAM, cls).__new__(cls, name, bases, dct)
+ if o.function_name:
+ globals()[o.function_name] = lambda o=o,*args,**kargs: o(*args,**kargs)()
+ return o
+
+
+class AnsweringMachine(object):
+ __metaclass__ = ReferenceAM
+ function_name = ""
+ filter = None
+ sniff_options = { "store":0 }
+ sniff_options_list = [ "store", "iface", "count", "promisc", "filter", "type", "prn", "stop_filter" ]
+ send_options = { "verbose":0 }
+ send_options_list = ["iface", "inter", "loop", "verbose"]
+ send_function = staticmethod(send)
+
+
+ def __init__(self, **kargs):
+ self.mode = 0
+ if self.filter:
+ kargs.setdefault("filter",self.filter)
+ kargs.setdefault("prn", self.reply)
+ self.optam1 = {}
+ self.optam2 = {}
+ self.optam0 = {}
+ doptsend,doptsniff = self.parse_all_options(1, kargs)
+ self.defoptsend = self.send_options.copy()
+ self.defoptsend.update(doptsend)
+ self.defoptsniff = self.sniff_options.copy()
+ self.defoptsniff.update(doptsniff)
+ self.optsend,self.optsniff = [{},{}]
+
+ def __getattr__(self, attr):
+ for d in [self.optam2, self.optam1]:
+ if attr in d:
+ return d[attr]
+ raise AttributeError(attr)
+
+ def __setattr__(self, attr, val):
+ mode = self.__dict__.get("mode",0)
+ if mode == 0:
+ self.__dict__[attr] = val
+ else:
+ [self.optam1, self.optam2][mode-1][attr] = val
+
+ def parse_options(self):
+ pass
+
+ def parse_all_options(self, mode, kargs):
+ sniffopt = {}
+ sendopt = {}
+ for k in list(kargs):
+ if k in self.sniff_options_list:
+ sniffopt[k] = kargs[k]
+ if k in self.send_options_list:
+ sendopt[k] = kargs[k]
+ if k in self.sniff_options_list+self.send_options_list:
+ del(kargs[k])
+ if mode != 2 or kargs:
+ if mode == 1:
+ self.optam0 = kargs
+ elif mode == 2 and kargs:
+ k = self.optam0.copy()
+ k.update(kargs)
+ self.parse_options(**k)
+ kargs = k
+ omode = self.__dict__.get("mode",0)
+ self.__dict__["mode"] = mode
+ self.parse_options(**kargs)
+ self.__dict__["mode"] = omode
+ return sendopt,sniffopt
+
+ def is_request(self, req):
+ return 1
+
+ def make_reply(self, req):
+ return req
+
+ def send_reply(self, reply):
+ self.send_function(reply, **self.optsend)
+
+ def print_reply(self, req, reply):
+ print("%s ==> %s" % (req.summary(),reply.summary()))
+
+ def reply(self, pkt):
+ if not self.is_request(pkt):
+ return
+ reply = self.make_reply(pkt)
+ self.send_reply(reply)
+ if conf.verb >= 0:
+ self.print_reply(pkt, reply)
+
+ def run(self, *args, **kargs):
+ log_interactive.warning("run() method deprecated. The intance is now callable")
+ self(*args,**kargs)
+
+ def __call__(self, *args, **kargs):
+ optsend,optsniff = self.parse_all_options(2,kargs)
+ self.optsend=self.defoptsend.copy()
+ self.optsend.update(optsend)
+ self.optsniff=self.defoptsniff.copy()
+ self.optsniff.update(optsniff)
+
+ try:
+ self.sniff()
+ except KeyboardInterrupt:
+ print("Interrupted by user")
+
+ def sniff(self):
+ sniff(**self.optsniff)
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/__init__.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/__init__.py
new file mode 100644
index 00000000..0066e049
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/__init__.py
@@ -0,0 +1,108 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Operating system specific functionality.
+"""
+
+
+import sys,os,socket
+from scapy.error import *
+import scapy.config
+
+try:
+ import matplotlib.pyplot as plt
+ MATPLOTLIB = True
+ if scapy.config.conf.interactive:
+ plt.ion()
+except ImportError:
+ log_loading.info("Can't import matplotlib. Not critical, but won't be able to plot.")
+ MATPLOTLIB = False
+
+try:
+ import networkx as nx
+ NETWORKX = True
+except ImportError:
+ log_loading.info("Can't import networkx. Not criticial, but won't be able to draw network graphs.")
+ NETWORKX = False
+
+try:
+ import pyx
+ PYX=1
+except ImportError:
+ log_loading.info("Can't import PyX. Won't be able to use psdump() or pdfdump().")
+ PYX=0
+
+
+def str2mac(s):
+ #return ("%02x:"*6)[:-1] % tuple(map(ord, s))
+ return ("%02x:"*6)[:-1] % tuple(s)
+
+
+
+def get_if_addr(iff):
+ return socket.inet_ntoa(get_if_raw_addr(iff))
+
+def get_if_hwaddr(iff):
+ mac = get_if_raw_hwaddr(iff)
+ return str2mac(mac)
+
+
+LINUX=sys.platform.startswith("linux")
+OPENBSD=sys.platform.startswith("openbsd")
+FREEBSD=sys.platform.startswith("freebsd")
+NETBSD = sys.platform.startswith("netbsd")
+DARWIN=sys.platform.startswith("darwin")
+SOLARIS=sys.platform.startswith("sunos")
+WINDOWS=sys.platform.startswith("win32")
+
+X86_64 = not WINDOWS and (os.uname()[4] == 'x86_64')
+
+#if WINDOWS:
+# log_loading.warning("Windows support for scapy3k is currently in testing. Sniffing/sending/receiving packets should be working with WinPcap driver and Powershell. Create issues at https://github.com/phaethon/scapy")
+
+# Next step is to import following architecture specific functions:
+# def get_if_raw_hwaddr(iff)
+# def get_if_raw_addr(iff):
+# def get_if_list():
+# def get_working_if():
+# def attach_filter(s, filter):
+# def set_promisc(s,iff,val=1):
+# def read_routes():
+# def get_if(iff,cmd):
+# def get_if_index(iff):
+
+
+
+if LINUX:
+ from .linux import *
+ if scapy.config.conf.use_winpcapy or scapy.config.conf.use_netifaces:
+ from pcapdnet import *
+elif OPENBSD or FREEBSD or NETBSD or DARWIN:
+ from .bsd import *
+elif SOLARIS:
+ from .solaris import *
+elif WINDOWS:
+ pass;
+ #from .windows import *
+
+LOOPBACK_NAME="a"
+
+if scapy.config.conf.iface is None:
+ scapy.config.conf.iface = LOOPBACK_NAME
+
+def get_if_raw_addr6(iff):
+ """
+ Returns the main global unicast address associated with provided
+ interface, in network format. If no global address is found, None
+ is returned.
+ """
+ #r = filter(lambda x: x[2] == iff and x[1] == IPV6_ADDR_GLOBAL, in6_getifaddr())
+ r = [ x for x in in6_getifaddr() if x[2] == iff and x[1] == IPV6_ADDR_GLOBAL]
+ if len(r) == 0:
+ return None
+ else:
+ r = r[0][0]
+ return inet_pton(socket.AF_INET6, r)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/bsd.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/bsd.py
new file mode 100644
index 00000000..c4220308
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/bsd.py
@@ -0,0 +1,12 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Support for BSD-like operating systems such as FreeBSD, OpenBSD and Mac OS X.
+"""
+
+LOOPBACK_NAME="lo0"
+
+from .unix import *
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/cdnet.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/cdnet.py
new file mode 100644
index 00000000..98ebf084
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/cdnet.py
@@ -0,0 +1,229 @@
+from ctypes import *
+from ctypes.util import find_library
+import sys
+
+WIN=False
+
+if sys.platform.startswith('win'):
+ WIN=True
+
+if WIN:
+ SOCKET = c_uint
+ _lib=CDLL('dnet')
+else:
+ SOCKET = c_int
+ _lib_name = find_library('dnet')
+ if not _lib_name:
+ raise OSError("Cannot find libdnet.so")
+ _lib=CDLL(_lib_name)
+
+ETH_ADDR_LEN = 6
+INTF_NAME_LEN = 16
+INTF_NAME_COUNT = 20
+INTF_ALIAS_COUNT = 20
+IP6_ADDR_LEN = 16
+
+ADDR_TYPE_NONE = 0
+ADDR_TYPE_ETH = 1
+ADDR_TYPE_IP = 2
+ADDR_TYPE_IP6 = 3
+
+INTF_TYPE_OTHER = 1
+INTF_TYPE_ETH = 6
+INTF_TYPE_TOKENRING = 9
+INTF_TYPE_FDDI = 15
+INTF_TYPE_PPP = 23
+INTF_TYPE_LOOPBACK = 24
+INTF_TYPE_SLIP = 28
+INTF_TYPE_TUN = 53
+
+
+uint8_t = c_ubyte
+uint16_t = c_ushort
+uint32_t = c_uint
+ssize_t = c_long
+dnet_ip_addr_t = uint32_t
+
+dnet_intf_name = c_char * INTF_NAME_LEN
+
+class dnet_intf_list(Structure):
+ pass
+
+dnet_intf_list._fields_ = [ ('length', c_int),
+ ('interfaces', dnet_intf_name * 20) ]
+
+class dnet_eth_addr(Structure):
+ pass
+
+dnet_eth_addr._fields_ = [ ('data', uint8_t * ETH_ADDR_LEN) ]
+dnet_eth_addr_t = dnet_eth_addr
+
+class dnet_ip6_addr(Structure):
+ pass
+
+dnet_ip6_addr._fields_ = [ ('data', uint8_t * IP6_ADDR_LEN) ]
+dnet_ip6_addr_t = dnet_ip6_addr
+
+class dnet_addr_u(Union):
+ pass
+
+dnet_addr_u._fields_ = [ ('eth', dnet_eth_addr_t),
+ ('ip', dnet_ip_addr_t),
+ ('ip6', dnet_ip6_addr_t),
+ ('data8', uint8_t * 16),
+ ('data16', uint16_t * 8),
+ ('data32', uint32_t * 4) ]
+
+class dnet_addr(Structure):
+ pass
+dnet_addr._anonymous_ = ('__addr_u', )
+dnet_addr._fields_ = [ ('addr_type', uint16_t),
+ ('addr_bits', uint16_t),
+ ('__addr_u', dnet_addr_u) ]
+
+class dnet_intf_entry(Structure):
+ pass
+
+dnet_intf_entry._fields_ = [ ('intf_len', c_uint),
+ ('intf_name', c_char * INTF_NAME_LEN),
+ ('intf_type', c_ushort),
+ ('intf_flags', c_ushort),
+ ('intf_mtu', c_uint),
+ ('intf_addr', dnet_addr),
+ ('intf_dst_addr', dnet_addr),
+ ('intf_link_addr', dnet_addr),
+ ('intf_alias_num', c_uint),
+ ('intf_alias_addrs', dnet_addr * INTF_ALIAS_COUNT) ]
+
+
+eth_t = c_void_p
+intf_t = c_void_p
+ip_t = c_void_p
+dnet_intf_handler = CFUNCTYPE(c_int, POINTER(dnet_intf_entry), POINTER(c_void_p))
+
+dnet_eth_open = _lib.eth_open
+dnet_eth_open.restype = POINTER(eth_t)
+dnet_eth_open.argtypes = [ POINTER(c_char) ]
+
+dnet_eth_get = _lib.eth_get
+dnet_eth_get.restype = c_int
+dnet_eth_get.argtypes = [ POINTER(eth_t), POINTER(dnet_eth_addr_t) ]
+
+dnet_eth_set = _lib.eth_set
+dnet_eth_set.restype = c_int
+dnet_eth_set.argtypes = [ POINTER(eth_t), POINTER(dnet_eth_addr_t) ]
+
+dnet_eth_send = _lib.eth_send
+dnet_eth_send.restype = ssize_t
+dnet_eth_send.argtypes = [ POINTER(eth_t), c_void_p, c_size_t ]
+
+dnet_eth_close = _lib.eth_close
+dnet_eth_close.restype = POINTER(eth_t)
+dnet_eth_close.argtypes = [ POINTER(eth_t) ]
+
+dnet_intf_open = _lib.intf_open
+dnet_intf_open.restype = POINTER(intf_t)
+dnet_intf_open.argtypes = [ ]
+
+dnet_intf_get = _lib.intf_get
+dnet_intf_get.restype = c_int
+dnet_intf_get.argtypes = [ POINTER(intf_t), POINTER(dnet_intf_entry) ]
+
+dnet_intf_get_src = _lib.intf_get_src
+dnet_intf_get_src.restype = c_int
+dnet_intf_get_src.argtypes = [ POINTER(intf_t), POINTER(dnet_intf_entry), POINTER(dnet_addr) ]
+
+dnet_intf_get_dst = _lib.intf_get_dst
+dnet_intf_get_dst.restype = c_int
+dnet_intf_get_dst.argtypes = [ POINTER(intf_t), POINTER(dnet_intf_entry), POINTER(dnet_addr) ]
+
+dnet_intf_set = _lib.intf_set
+dnet_intf_set.restype = c_int
+dnet_intf_set.argtypes = [ POINTER(intf_t), POINTER(dnet_intf_entry) ]
+
+dnet_intf_loop = _lib.intf_loop
+dnet_intf_loop.restype = POINTER(intf_t)
+dnet_intf_loop.argtypes = [ POINTER(intf_t), dnet_intf_handler, c_void_p ]
+
+dnet_intf_close = _lib.intf_close
+dnet_intf_close.restype = POINTER(intf_t)
+dnet_intf_close.argtypes = [ POINTER(intf_t) ]
+
+dnet_ip_open = _lib.ip_open
+dnet_ip_open.restype = POINTER(ip_t)
+dnet_ip_open.argtypes = [ ]
+
+dnet_ip_add_option = _lib.ip_add_option
+dnet_ip_add_option.restype = ssize_t
+dnet_ip_add_option.argtypes = [ POINTER(c_void_p), c_size_t, c_int, POINTER(c_void_p), c_size_t ]
+
+dnet_ip_checksum = _lib.ip_checksum
+dnet_ip_checksum.restype = None
+dnet_ip_checksum.argtypes = [ POINTER(c_void_p), c_size_t ]
+
+dnet_ip_send = _lib.ip_send
+dnet_ip_send.restype = ssize_t
+dnet_ip_send.argtypes = [ POINTER(ip_t), c_void_p, c_size_t ]
+
+dnet_ip_close = _lib.ip_close
+dnet_ip_close.restype = POINTER(ip_t)
+dnet_ip_close.argtypes = [ POINTER(ip_t) ]
+
+class dnet_eth:
+ def __init__(self, iface):
+ self.iface_b = create_string_buffer(iface.encode('ascii'))
+ self.eth = dnet_eth_open(self.iface_b)
+ def send(self, sx):
+ dnet_eth_send(self.eth, sx, len(sx))
+ def close(self):
+ return dnet_eth_close(self.eth)
+
+class dnet_ip:
+ def __init__(self):
+ self.ip = dnet_ip_open()
+ def send(self, sx):
+ dnet_ip_send(self.ip, sx, len(sx))
+ def close(self):
+ return dnet_ip_close(self.ip)
+
+def dnet_intf_name_loop(entry, intf_list):
+ l = cast(intf_list, POINTER(dnet_intf_list))
+ if l.contents.length >= INTF_NAME_COUNT:
+ return -1
+ for i in enumerate(entry.contents.intf_name):
+ l.contents.interfaces[l.contents.length][i[0]] = i[1]
+ l.contents.length += 1
+ return 0
+
+class dnet_intf:
+ def __init__(self):
+ self.intf = dnet_intf_open()
+ intf_list = dnet_intf_list()
+ intf_list.length = 0
+ dnet_intf_loop(self.intf, dnet_intf_handler(dnet_intf_name_loop), pointer(intf_list))
+ self.names = []
+ for i in range(INTF_NAME_COUNT):
+ if i >= intf_list.length:
+ break
+ self.names.append(intf_list.interfaces[i].value.decode('ascii').strip('\0'))
+
+ def close(self):
+ return dnet_intf_close(self.intf)
+
+ def get(self, iface):
+ ret = {}
+ entry = dnet_intf_entry()
+ entry.intf_name = iface.encode('ascii')
+ entry.intf_len = sizeof(entry)
+ r = dnet_intf_get(self.intf, byref(entry))
+ if r < 0:
+ return {}
+ ret['addr6'] = []
+ for i in range(entry.intf_alias_num):
+ if entry.intf_alias_addrs[i].addr_type == ADDR_TYPE_IP6:
+ ret['addr6'].append(bytes(entry.intf_alias_addrs[i].data8[:16]))
+ ret['type'] = entry.intf_type
+ ret['addr'] = bytes(entry.intf_addr.data8[:4])
+ ret['link_addr'] = bytes(entry.intf_link_addr.data8[:6])
+ return ret
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/linux.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/linux.py
new file mode 100644
index 00000000..3eab16c6
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/linux.py
@@ -0,0 +1,530 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Linux specific functions.
+"""
+
+import sys,os,struct,socket,time,ctypes
+from select import select
+from fcntl import ioctl
+import scapy.utils
+import scapy.utils6
+from scapy.config import conf
+from scapy.data import *
+from scapy.supersocket import SuperSocket
+import scapy.arch
+from scapy.error import warning, Scapy_Exception
+
+
+
+# From bits/ioctls.h
+SIOCGIFHWADDR = 0x8927 # Get hardware address
+SIOCGIFADDR = 0x8915 # get PA address
+SIOCGIFNETMASK = 0x891b # get network PA mask
+SIOCGIFNAME = 0x8910 # get iface name
+SIOCSIFLINK = 0x8911 # set iface channel
+SIOCGIFCONF = 0x8912 # get iface list
+SIOCGIFFLAGS = 0x8913 # get flags
+SIOCSIFFLAGS = 0x8914 # set flags
+SIOCGIFINDEX = 0x8933 # name -> if_index mapping
+SIOCGIFCOUNT = 0x8938 # get number of devices
+SIOCGSTAMP = 0x8906 # get packet timestamp (as a timeval)
+
+# From if.h
+IFF_UP = 0x1 # Interface is up.
+IFF_BROADCAST = 0x2 # Broadcast address valid.
+IFF_DEBUG = 0x4 # Turn on debugging.
+IFF_LOOPBACK = 0x8 # Is a loopback net.
+IFF_POINTOPOINT = 0x10 # Interface is point-to-point link.
+IFF_NOTRAILERS = 0x20 # Avoid use of trailers.
+IFF_RUNNING = 0x40 # Resources allocated.
+IFF_NOARP = 0x80 # No address resolution protocol.
+IFF_PROMISC = 0x100 # Receive all packets.
+
+# From netpacket/packet.h
+PACKET_ADD_MEMBERSHIP = 1
+PACKET_DROP_MEMBERSHIP = 2
+PACKET_RECV_OUTPUT = 3
+PACKET_RX_RING = 5
+PACKET_STATISTICS = 6
+PACKET_MR_MULTICAST = 0
+PACKET_MR_PROMISC = 1
+PACKET_MR_ALLMULTI = 2
+
+# From bits/socket.h
+SOL_PACKET = 263
+# From asm/socket.h
+SO_ATTACH_FILTER = 26
+SOL_SOCKET = 1
+
+# From net/route.h
+RTF_UP = 0x0001 # Route usable
+RTF_REJECT = 0x0200
+
+# From pcap/pcap.h
+PCAP_ERRBUF_SIZE=256
+
+
+
+LOOPBACK_NAME="lo"
+
+with os.popen("tcpdump -V 2> /dev/null") as _f:
+ if _f.close() >> 8 == 0x7f:
+ log_loading.warning("Failed to execute tcpdump. Check it is installed and in the PATH")
+ TCPDUMP=0
+ else:
+ TCPDUMP=1
+del(_f)
+
+
+def get_if_raw_hwaddr(iff):
+ return struct.unpack("16xh6s8x",get_if(iff,SIOCGIFHWADDR))[1]
+
+def get_if_raw_addr(iff):
+ try:
+ return get_if(iff, SIOCGIFADDR)[20:24]
+ except IOError:
+ return b"\0\0\0\0"
+
+
+def get_if_list():
+ try:
+ f=open("/proc/net/dev","r")
+ except IOError:
+ warning("Can't open /proc/net/dev !")
+ return []
+ lst = []
+ f.readline()
+ f.readline()
+ for l in f:
+ lst.append(l.split(":")[0].strip())
+ f.close()
+ return lst
+
+def get_working_if():
+ for i in get_if_list():
+ if i == LOOPBACK_NAME:
+ continue
+ ifflags = struct.unpack("16xH14x",get_if(i,SIOCGIFFLAGS))[0]
+ if ifflags & IFF_UP:
+ return i
+ return LOOPBACK_NAME
+def attach_filter(s, filter):
+ # XXX We generate the filter on the interface conf.iface
+ # because tcpdump open the "any" interface and ppp interfaces
+ # in cooked mode. As we use them in raw mode, the filter will not
+ # work... one solution could be to use "any" interface and translate
+ # the filter from cooked mode to raw mode
+ # mode
+ if not TCPDUMP:
+ return
+ try:
+ f = os.popen("%s -i %s -ddd -s 1600 '%s'" % (conf.prog.tcpdump,conf.iface,filter))
+ except OSError as msg:
+ log_interactive.warning("Failed to execute tcpdump: (%s)")
+ return
+ lines = f.readlines()
+ if f.close():
+ raise Scapy_Exception("Filter parse error")
+ nb = int(lines[0])
+ bpf = b""
+ for l in lines[1:]:
+ bpf += struct.pack("HBBI",*[int(x) for x in l.split()])
+
+ # XXX. Argl! We need to give the kernel a pointer on the BPF,
+ # python object header seems to be 20 bytes. 36 bytes for x86 64bits arch.
+ bpf_buf = ctypes.create_string_buffer(bpf)
+ class BpfProgram(ctypes.Structure):
+ _fields_ = [ ("bf_len", ctypes.c_int), ("bf_insn", ctypes.POINTER(type(bpf_buf))) ]
+ #if scapy.arch.X86_64:
+ # bpfh = struct.pack("HL", nb, id(bpf)+36)
+ #else:
+ # bpfh = struct.pack("HI", nb, id(bpf)+20)
+ bpfh = BpfProgram(nb, ctypes.pointer(bpf_buf))
+ s.setsockopt(SOL_SOCKET, SO_ATTACH_FILTER, bpfh)
+
+def set_promisc(s,iff,val=1):
+ mreq = struct.pack("IHH8s", get_if_index(iff), PACKET_MR_PROMISC, 0, b"")
+ if val:
+ cmd = PACKET_ADD_MEMBERSHIP
+ else:
+ cmd = PACKET_DROP_MEMBERSHIP
+ s.setsockopt(SOL_PACKET, cmd, mreq)
+
+
+
+def read_routes():
+ try:
+ f=open("/proc/net/route","rb")
+ except IOError:
+ warning("Can't open /proc/net/route !")
+ return []
+ routes = []
+ s=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ ifreq = ioctl(s, SIOCGIFADDR,struct.pack("16s16x",LOOPBACK_NAME.encode('utf-8')))
+ addrfamily = struct.unpack("h",ifreq[16:18])[0]
+ if addrfamily == socket.AF_INET:
+ ifreq2 = ioctl(s, SIOCGIFNETMASK,struct.pack("16s16x",LOOPBACK_NAME.encode('utf-8')))
+ msk = socket.ntohl(struct.unpack("I",ifreq2[20:24])[0])
+ dst = socket.ntohl(struct.unpack("I",ifreq[20:24])[0]) & msk
+ ifaddr = scapy.utils.inet_ntoa(ifreq[20:24])
+ routes.append((dst, msk, "0.0.0.0", LOOPBACK_NAME, ifaddr))
+ else:
+ warning("Interface lo: unkown address family (%i)"% addrfamily)
+
+ for l in f.readlines()[1:]:
+ iff,dst,gw,flags,x,x,x,msk,x,x,x = l.split()
+ flags = int(flags,16)
+ if flags & RTF_UP == 0:
+ continue
+ if flags & RTF_REJECT:
+ continue
+ try:
+ ifreq = ioctl(s, SIOCGIFADDR,struct.pack("16s16x",iff))
+ except IOError: # interface is present in routing tables but does not have any assigned IP
+ ifaddr="0.0.0.0"
+ else:
+ addrfamily = struct.unpack("h",ifreq[16:18])[0]
+ if addrfamily == socket.AF_INET:
+ ifaddr = scapy.utils.inet_ntoa(ifreq[20:24])
+ else:
+ warning("Interface %s: unkown address family (%i)"%(iff, addrfamily))
+ continue
+ routes.append((socket.htonl(int(dst,16))&0xffffffff,
+ socket.htonl(int(msk,16))&0xffffffff,
+ scapy.utils.inet_ntoa(struct.pack("I",int(gw,16))),
+ iff.decode('utf-8'), ifaddr))
+
+ f.close()
+ return routes
+
+############
+### IPv6 ###
+############
+
+def in6_getifaddr():
+ """
+ Returns a list of 3-tuples of the form (addr, scope, iface) where
+ 'addr' is the address of scope 'scope' associated to the interface
+    'iface'.
+
+ This is the list of all addresses of all interfaces available on
+ the system.
+ """
+ ret = []
+ try:
+ f = open("/proc/net/if_inet6","rb")
+ except IOError as err:
+ return ret
+ l = f.readlines()
+ for i in l:
+ # addr, index, plen, scope, flags, ifname
+ tmp = i.split()
+ addr = struct.unpack('4s4s4s4s4s4s4s4s', tmp[0])
+ addr = scapy.utils6.in6_ptop(b':'.join(addr).decode('ascii'))
+ ret.append((addr, int(tmp[3], 16), tmp[5].decode('ascii'))) # (addr, scope, iface)
+ f.close()
+ return ret
+
+def read_routes6():
+ try:
+ f = open("/proc/net/ipv6_route","r")
+ except IOError as err:
+ return []
+ # 1. destination network
+ # 2. destination prefix length
+ # 3. source network displayed
+ # 4. source prefix length
+ # 5. next hop
+ # 6. metric
+ # 7. reference counter (?!?)
+ # 8. use counter (?!?)
+ # 9. flags
+ # 10. device name
+ routes = []
+ def proc2r(p):
+ ret = struct.unpack('4s4s4s4s4s4s4s4s', p.encode('ascii'))
+ ret = b':'.join(ret)
+ return scapy.utils6.in6_ptop(ret.decode('ascii'))
+
+ lifaddr = in6_getifaddr()
+ for l in f.readlines():
+ d,dp,s,sp,nh,m,rc,us,fl,dev = l.split()
+ fl = int(fl, 16)
+
+ if fl & RTF_UP == 0:
+ continue
+ if fl & RTF_REJECT:
+ continue
+
+ d = proc2r(d) ; dp = int(dp, 16)
+ s = proc2r(s) ; sp = int(sp, 16)
+ nh = proc2r(nh)
+
+ cset = [] # candidate set (possible source addresses)
+ if dev == LOOPBACK_NAME:
+ if d == '::':
+ continue
+ cset = ['::1']
+ else:
+ #devaddrs = filter(lambda x: x[2] == dev, lifaddr)
+ devaddrs = [ x for x in lifaddr if x[2] == dev ]
+ cset = scapy.utils6.construct_source_candidate_set(d, dp, devaddrs, LOOPBACK_NAME)
+
+ if len(cset) != 0:
+ routes.append((d, dp, nh, dev, cset))
+ f.close()
+ return routes
+
+
+
+
+def get_if(iff,cmd):
+ s=socket.socket()
+ ifreq = ioctl(s, cmd, struct.pack("16s16x",bytes(iff,'utf-8')))
+ s.close()
+ return ifreq
+
+
+def get_if_index(iff):
+ return int(struct.unpack("I",get_if(iff, SIOCGIFINDEX)[16:20])[0])
+
+if os.uname()[4] == 'x86_64':
+ def get_last_packet_timestamp(sock):
+ ts = ioctl(sock, SIOCGSTAMP, "1234567890123456")
+ s,us = struct.unpack("QQ",ts)
+ return s+us/1000000.0
+else:
+ def get_last_packet_timestamp(sock):
+ ts = ioctl(sock, SIOCGSTAMP, "12345678")
+ s,us = struct.unpack("II",ts)
+ return s+us/1000000.0
+
+
+def _flush_fd(fd):
+ if type(fd) is not int:
+ fd = fd.fileno()
+ while 1:
+ r,w,e = select([fd],[],[],0)
+ if r:
+ os.read(fd,MTU)
+ else:
+ break
+
+
+
+
+
+class L3PacketSocket(SuperSocket):
+ desc = "read/write packets at layer 3 using Linux PF_PACKET sockets"
+ def __init__(self, type = ETH_P_ALL, filter=None, promisc=None, iface=None, nofilter=0):
+ self.type = type
+ self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type))
+ self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 0)
+ if iface:
+ self.ins.bind((iface, type))
+ if not nofilter:
+ if conf.except_filter:
+ if filter:
+ filter = "(%s) and not (%s)" % (filter, conf.except_filter)
+ else:
+ filter = "not (%s)" % conf.except_filter
+ if filter is not None:
+ attach_filter(self.ins, filter)
+ _flush_fd(self.ins)
+ self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 2**30)
+ self.outs = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type))
+ self.outs.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 2**30)
+ if promisc is None:
+ promisc = conf.promisc
+ self.promisc = promisc
+ if self.promisc:
+ if iface is None:
+ self.iff = get_if_list()
+ else:
+ if iface.__class__ is list:
+ self.iff = iface
+ else:
+ self.iff = [iface]
+ for i in self.iff:
+ set_promisc(self.ins, i)
+ def close(self):
+ if self.closed:
+ return
+ self.closed=1
+ if self.promisc:
+ for i in self.iff:
+ set_promisc(self.ins, i, 0)
+ SuperSocket.close(self)
+ def recv(self, x=MTU):
+ pkt, sa_ll = self.ins.recvfrom(x)
+ if sa_ll[2] == socket.PACKET_OUTGOING:
+ return None
+ if sa_ll[3] in conf.l2types:
+ cls = conf.l2types[sa_ll[3]]
+ lvl = 2
+ elif sa_ll[1] in conf.l3types:
+ cls = conf.l3types[sa_ll[1]]
+ lvl = 3
+ else:
+ cls = conf.default_l2
+ warning("Unable to guess type (interface=%s protocol=%#x family=%i). Using %s" % (sa_ll[0],sa_ll[1],sa_ll[3],cls.name))
+ lvl = 2
+
+ try:
+ pkt = cls(pkt)
+ except KeyboardInterrupt:
+ raise
+ except:
+ if conf.debug_dissector:
+ raise
+ pkt = conf.raw_layer(pkt)
+ if lvl == 2:
+ pkt = pkt.payload
+
+ if pkt is not None:
+ pkt.time = get_last_packet_timestamp(self.ins)
+ return pkt
+
+ def send(self, x):
+ iff,a,gw = x.route()
+ if iff is None:
+ iff = conf.iface
+ sdto = (iff, self.type)
+ self.outs.bind(sdto)
+ sn = self.outs.getsockname()
+ ll = lambda x:x
+ if type(x) in conf.l3types:
+ sdto = (iff, conf.l3types[type(x)])
+ if sn[3] in conf.l2types:
+ ll = lambda x:conf.l2types[sn[3]]()/x
+ try:
+ sx = bytes(ll(x))
+ x.sent_time = time.time()
+ self.outs.sendto(sx, sdto)
+ except OSError as msg:
+ x.sent_time = time.time() # bad approximation
+ if conf.auto_fragment and msg.errno == 90:
+ for p in x.fragment():
+ self.outs.sendto(bytes(ll(p)), sdto)
+ else:
+ raise
+
+
+
+class L2Socket(SuperSocket):
+ desc = "read/write packets at layer 2 using Linux PF_PACKET sockets"
+ def __init__(self, iface = None, type = ETH_P_ALL, filter=None, nofilter=0):
+ if iface is None:
+ iface = conf.iface
+ self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type))
+ self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 0)
+ if not nofilter:
+ if conf.except_filter:
+ if filter:
+ filter = "(%s) and not (%s)" % (filter, conf.except_filter)
+ else:
+ filter = "not (%s)" % conf.except_filter
+ if filter is not None:
+ attach_filter(self.ins, filter)
+ self.ins.bind((iface, type))
+ _flush_fd(self.ins)
+ self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 2**30)
+ self.outs = self.ins
+ self.outs.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 2**30)
+ sa_ll = self.outs.getsockname()
+ if sa_ll[3] in conf.l2types:
+ self.LL = conf.l2types[sa_ll[3]]
+ elif sa_ll[1] in conf.l3types:
+ self.LL = conf.l3types[sa_ll[1]]
+ else:
+ self.LL = conf.default_l2
+ warning("Unable to guess type (interface=%s protocol=%#x family=%i). Using %s" % (sa_ll[0],sa_ll[1],sa_ll[3],self.LL.name))
+
+ def recv(self, x=MTU):
+ pkt, sa_ll = self.ins.recvfrom(x)
+ if sa_ll[2] == socket.PACKET_OUTGOING:
+ return None
+ try:
+ q = self.LL(pkt)
+ except KeyboardInterrupt:
+ raise
+ except:
+ if conf.debug_dissector:
+ raise
+ q = conf.raw_layer(pkt)
+ q.time = get_last_packet_timestamp(self.ins)
+ return q
+
+
+class L2ListenSocket(SuperSocket):
+ desc = "read packets at layer 2 using Linux PF_PACKET sockets"
+ def __init__(self, iface = None, type = ETH_P_ALL, promisc=None, filter=None, nofilter=0):
+ self.type = type
+ self.outs = None
+ self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type))
+ self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 0)
+ if iface is not None:
+ self.ins.bind((iface, type))
+ if not nofilter:
+ if conf.except_filter:
+ if filter:
+ filter = "(%s) and not (%s)" % (filter, conf.except_filter)
+ else:
+ filter = "not (%s)" % conf.except_filter
+ if filter is not None:
+ attach_filter(self.ins, filter)
+ if promisc is None:
+ promisc = conf.sniff_promisc
+ self.promisc = promisc
+ if iface is None:
+ self.iff = get_if_list()
+ else:
+ if iface.__class__ is list:
+ self.iff = iface
+ else:
+ self.iff = [iface]
+ if self.promisc:
+ for i in self.iff:
+ set_promisc(self.ins, i)
+ _flush_fd(self.ins)
+ self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 2**30)
+ def close(self):
+ if self.promisc:
+ for i in self.iff:
+ set_promisc(self.ins, i, 0)
+ SuperSocket.close(self)
+
+ def recv(self, x=MTU):
+ pkt, sa_ll = self.ins.recvfrom(x)
+ if sa_ll[3] in conf.l2types :
+ cls = conf.l2types[sa_ll[3]]
+ elif sa_ll[1] in conf.l3types:
+ cls = conf.l3types[sa_ll[1]]
+ else:
+ cls = conf.default_l2
+ warning("Unable to guess type (interface=%s protocol=%#x family=%i). Using %s" % (sa_ll[0],sa_ll[1],sa_ll[3],cls.name))
+
+ try:
+ pkt = cls(pkt)
+ except KeyboardInterrupt:
+ raise
+ except:
+ if conf.debug_dissector:
+ raise
+ pkt = conf.raw_layer(pkt)
+ pkt.time = get_last_packet_timestamp(self.ins)
+ return pkt
+
+ def send(self, x):
+ raise Scapy_Exception("Can't send anything with L2ListenSocket")
+
+
+conf.L3socket = L3PacketSocket
+conf.L2socket = L2Socket
+conf.L2listen = L2ListenSocket
+
+conf.iface = get_working_if()
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/pcapdnet.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/pcapdnet.py
new file mode 100644
index 00000000..a2e8aa59
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/pcapdnet.py
@@ -0,0 +1,565 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Packet sending and receiving with libdnet and libpcap/WinPcap.
+"""
+
+import time,struct,sys,socket
+if not sys.platform.startswith("win"):
+ from fcntl import ioctl
+from scapy.data import *
+from scapy.config import conf
+from scapy.utils import warning
+from scapy.supersocket import SuperSocket
+from scapy.error import Scapy_Exception
+import scapy.arch
+
+if conf.use_dnet:
+ try:
+ from .cdnet import *
+ except OSError as e:
+ if conf.interactive:
+ log_loading.error("Unable to import libdnet library: %s" % e)
+ conf.use_dnet = False
+ else:
+ raise
+
+if conf.use_winpcapy:
+ try:
+ from .winpcapy import *
+ def winpcapy_get_if_list():
+ err = create_string_buffer(PCAP_ERRBUF_SIZE)
+ devs = POINTER(pcap_if_t)()
+ ret = []
+ if pcap_findalldevs(byref(devs), err) < 0:
+ return ret
+ try:
+ p = devs
+ while p:
+ ret.append(p.contents.name.decode('ascii'))
+ p = p.contents.next
+ return ret
+ finally:
+ pcap_freealldevs(devs)
+
+ except OSError as e:
+ if conf.interactive:
+ log_loading.error("Unable to import libpcap library: %s" % e)
+ conf.use_winpcapy = False
+ else:
+ raise
+
+ # From BSD net/bpf.h
+ #BIOCIMMEDIATE=0x80044270
+ BIOCIMMEDIATE=-2147204496
+
+ class PcapTimeoutElapsed(Scapy_Exception):
+ pass
+
+if conf.use_netifaces:
+ try:
+ import netifaces
+ except ImportError as e:
+ log_loading.warning("Could not load module netifaces: %s" % e)
+ conf.use_netifaces = False
+
+if conf.use_netifaces:
+ def get_if_raw_hwaddr(iff):
+ if iff == scapy.arch.LOOPBACK_NAME:
+ return (772, '\x00'*6)
+ try:
+ s = netifaces.ifaddresses(iff)[netifaces.AF_LINK][0]['addr']
+ return struct.pack('BBBBBB', *[ int(i, 16) for i in s.split(':') ])
+ except:
+ raise Scapy_Exception("Error in attempting to get hw address for interface [%s]" % iff)
+ return l
+ def get_if_raw_addr(ifname):
+ try:
+ s = netifaces.ifaddresses(ifname)[netifaces.AF_INET][0]['addr']
+ return socket.inet_aton(s)
+ except Exception as e:
+ return None
+ def get_if_list():
+ #return [ i[1] for i in socket.if_nameindex() ]
+ return netifaces.interfaces()
+ def in6_getifaddr():
+ """
+ Returns a list of 3-tuples of the form (addr, scope, iface) where
+ 'addr' is the address of scope 'scope' associated to the interface
+        'iface'.
+
+ This is the list of all addresses of all interfaces available on
+ the system.
+ """
+
+ ret = []
+ interfaces = get_if_list()
+ for i in interfaces:
+ addrs = netifaces.ifaddresses(i)
+ if netifaces.AF_INET6 not in addrs:
+ continue
+ for a in addrs[netifaces.AF_INET6]:
+ addr = a['addr'].split('%')[0]
+ scope = scapy.utils6.in6_getscope(addr)
+ ret.append((addr, scope, i))
+ return ret
+elif conf.use_winpcapy:
+ def get_if_raw_hwaddr(iff):
+ err = create_string_buffer(PCAP_ERRBUF_SIZE)
+ devs = POINTER(pcap_if_t)()
+ ret = b"\0\0\0\0\0\0"
+ if pcap_findalldevs(byref(devs), err) < 0:
+ return ret
+ try:
+ p = devs
+ while p:
+ if p.contents.name.endswith(iff.encode('ascii')):
+ a = p.contents.addresses
+ while a:
+ if hasattr(socket, 'AF_LINK') and a.contents.addr.contents.sa_family == socket.AF_LINK:
+ ap = a.contents.addr
+ val = cast(ap, POINTER(sockaddr_dl))
+ ret = bytes(val.contents.sdl_data[ val.contents.sdl_nlen : val.contents.sdl_nlen + val.contents.sdl_alen ])
+ a = a.contents.next
+ break
+ p = p.contents.next
+ return ret
+ finally:
+ pcap_freealldevs(devs)
+ def get_if_raw_addr(iff):
+ err = create_string_buffer(PCAP_ERRBUF_SIZE)
+ devs = POINTER(pcap_if_t)()
+ ret = b"\0\0\0\0"
+ if pcap_findalldevs(byref(devs), err) < 0:
+ return ret
+ try:
+ p = devs
+ while p:
+ if p.contents.name.endswith(iff.encode('ascii')):
+ a = p.contents.addresses
+ while a:
+ if a.contents.addr.contents.sa_family == socket.AF_INET:
+ ap = a.contents.addr
+ val = cast(ap, POINTER(sockaddr_in))
+ ret = bytes(val.contents.sin_addr[:4])
+ a = a.contents.next
+ break
+ p = p.contents.next
+ return ret
+ finally:
+ pcap_freealldevs(devs)
+ get_if_list = winpcapy_get_if_list
+ def in6_getifaddr():
+ err = create_string_buffer(PCAP_ERRBUF_SIZE)
+ devs = POINTER(pcap_if_t)()
+ ret = []
+ if pcap_findalldevs(byref(devs), err) < 0:
+ return ret
+ try:
+ p = devs
+ ret = []
+ while p:
+ a = p.contents.addresses
+ while a:
+ if a.contents.addr.contents.sa_family == socket.AF_INET6:
+ ap = a.contents.addr
+ val = cast(ap, POINTER(sockaddr_in6))
+ addr = socket.inet_ntop(socket.AF_INET6, bytes(val.contents.sin6_addr[:]))
+ scope = scapy.utils6.in6_getscope(addr)
+ ret.append((addr, scope, p.contents.name.decode('ascii')))
+ a = a.contents.next
+ p = p.contents.next
+ return ret
+ finally:
+ pcap_freealldevs(devs)
+
+elif conf.use_dnet:
+ intf = dnet_intf()
+ def get_if_raw_hwaddr(iff):
+ return intf.get(iff)['link_addr']
+ def get_if_raw_addr(iff):
+ return intf.get(iff)['addr']
+ def get_if_list():
+ return intf.names
+ def in6_getifaddr():
+ ret = []
+ for i in get_if_list():
+ for a in intf.get(i)['addr6']:
+ addr = socket.inet_ntop(socket.AF_INET6, a)
+ scope = scapy.utils6.in6_getscope(addr)
+ ret.append((addr, scope, i))
+ return ret
+
+else:
+ log_loading.warning("No known method to get ip and hw address for interfaces")
+ def get_if_raw_hwaddr(iff):
+ "dummy"
+ return b"\0\0\0\0\0\0"
+ def get_if_raw_addr(iff):
+ "dummy"
+ return b"\0\0\0\0"
+ def get_if_list():
+ "dummy"
+ return []
+ def in6_getifaddr():
+ return []
+
+if conf.use_winpcapy:
+ from ctypes import POINTER, byref, create_string_buffer
+ class _PcapWrapper_pypcap:
+ def __init__(self, device, snaplen, promisc, to_ms):
+ self.errbuf = create_string_buffer(PCAP_ERRBUF_SIZE)
+ self.iface = create_string_buffer(device.encode('ascii'))
+ self.pcap = pcap_open_live(self.iface, snaplen, promisc, to_ms, self.errbuf)
+ self.header = POINTER(pcap_pkthdr)()
+ self.pkt_data = POINTER(c_ubyte)()
+ self.bpf_program = bpf_program()
+ def next(self):
+ c = pcap_next_ex(self.pcap, byref(self.header), byref(self.pkt_data))
+ if not c > 0:
+ return
+ ts = self.header.contents.ts.tv_sec
+ #pkt = "".join([ chr(i) for i in self.pkt_data[:self.header.contents.len] ])
+ pkt = bytes(self.pkt_data[:self.header.contents.len])
+ return ts, pkt
+ def datalink(self):
+ return pcap_datalink(self.pcap)
+ def fileno(self):
+ if sys.platform.startswith("win"):
+ error("Cannot get selectable PCAP fd on Windows")
+ return 0
+ return pcap_get_selectable_fd(self.pcap)
+ def setfilter(self, f):
+ filter_exp = create_string_buffer(f.encode('ascii'))
+ if pcap_compile(self.pcap, byref(self.bpf_program), filter_exp, 0, -1) == -1:
+ error("Could not compile filter expression %s" % f)
+ return False
+ else:
+ if pcap_setfilter(self.pcap, byref(self.bpf_program)) == -1:
+ error("Could not install filter %s" % f)
+ return False
+ return True
+ def setnonblock(self, i):
+ pcap_setnonblock(self.pcap, i, self.errbuf)
+ def send(self, x):
+ pcap_sendpacket(self.pcap, x, len(x))
+ def close(self):
+ pcap_close(self.pcap)
+ open_pcap = lambda *args,**kargs: _PcapWrapper_pypcap(*args,**kargs)
+ class PcapTimeoutElapsed(Scapy_Exception):
+ pass
+
+ class L2pcapListenSocket(SuperSocket):
+ desc = "read packets at layer 2 using libpcap"
+ def __init__(self, iface = None, type = ETH_P_ALL, promisc=None, filter=None):
+ self.type = type
+ self.outs = None
+ self.iface = iface
+ if iface is None:
+ iface = conf.iface
+ if promisc is None:
+ promisc = conf.sniff_promisc
+ self.promisc = promisc
+ self.ins = open_pcap(iface, 1600, self.promisc, 100)
+ try:
+ ioctl(self.ins.fileno(),BIOCIMMEDIATE,struct.pack("I",1))
+ except:
+ pass
+ if type == ETH_P_ALL: # Do not apply any filter if Ethernet type is given
+ if conf.except_filter:
+ if filter:
+ filter = "(%s) and not (%s)" % (filter, conf.except_filter)
+ else:
+ filter = "not (%s)" % conf.except_filter
+ if filter:
+ self.ins.setfilter(filter)
+
+ def close(self):
+ self.ins.close()
+
+ def recv(self, x=MTU):
+ ll = self.ins.datalink()
+ if ll in conf.l2types:
+ cls = conf.l2types[ll]
+ else:
+ cls = conf.default_l2
+ warning("Unable to guess datalink type (interface=%s linktype=%i). Using %s" % (self.iface, ll, cls.name))
+
+ pkt = None
+ while pkt is None:
+ pkt = self.ins.next()
+ if pkt is not None:
+ ts,pkt = pkt
+ if scapy.arch.WINDOWS and pkt is None:
+ raise PcapTimeoutElapsed
+
+ try:
+ pkt = cls(pkt)
+ except KeyboardInterrupt:
+ raise
+ except:
+ if conf.debug_dissector:
+ raise
+ pkt = conf.raw_layer(pkt)
+ pkt.time = ts
+ return pkt
+
+ def send(self, x):
+ raise Scapy_Exception("Can't send anything with L2pcapListenSocket")
+
+
+ conf.L2listen = L2pcapListenSocket
+ class L2pcapSocket(SuperSocket):
+ desc = "read/write packets at layer 2 using only libpcap"
+ def __init__(self, iface = None, type = ETH_P_ALL, filter=None, nofilter=0):
+ if iface is None:
+ iface = conf.iface
+ self.iface = iface
+ self.ins = open_pcap(iface, 1600, 0, 100)
+ try:
+ ioctl(self.ins.fileno(),BIOCIMMEDIATE,struct.pack("I",1))
+ except:
+ pass
+ if nofilter:
+ if type != ETH_P_ALL: # PF_PACKET stuff. Need to emulate this for pcap
+ filter = "ether proto %i" % type
+ else:
+ filter = None
+ else:
+ if conf.except_filter:
+ if filter:
+ filter = "(%s) and not (%s)" % (filter, conf.except_filter)
+ else:
+ filter = "not (%s)" % conf.except_filter
+ if type != ETH_P_ALL: # PF_PACKET stuff. Need to emulate this for pcap
+ if filter:
+ filter = "(ether proto %i) and (%s)" % (type,filter)
+ else:
+ filter = "ether proto %i" % type
+ if filter:
+ self.ins.setfilter(filter)
+ def send(self, x):
+ sx = bytes(x)
+ if hasattr(x, "sent_time"):
+ x.sent_time = time.time()
+ return self.ins.send(sx)
+
+ def recv(self,x=MTU):
+ ll = self.ins.datalink()
+ if ll in conf.l2types:
+ cls = conf.l2types[ll]
+ else:
+ cls = conf.default_l2
+ warning("Unable to guess datalink type (interface=%s linktype=%i). Using %s" % (self.iface, ll, cls.name))
+
+ pkt = self.ins.next()
+ if pkt is not None:
+ ts,pkt = pkt
+ if pkt is None:
+ return
+
+ try:
+ pkt = cls(pkt)
+ except KeyboardInterrupt:
+ raise
+ except:
+ if conf.debug_dissector:
+ raise
+ pkt = conf.raw_layer(pkt)
+ pkt.time = ts
+ return pkt
+
+ def nonblock_recv(self):
+ self.ins.setnonblock(1)
+ p = self.recv(MTU)
+ self.ins.setnonblock(0)
+ return p
+
+ def close(self):
+ if hasattr(self, "ins"):
+ self.ins.close()
+ if hasattr(self, "outs"):
+ self.outs.close()
+
+ class L3pcapSocket(L2pcapSocket):
+ #def __init__(self, iface = None, type = ETH_P_ALL, filter=None, nofilter=0):
+ # L2pcapSocket.__init__(self, iface, type, filter, nofilter)
+ def recv(self, x = MTU):
+ r = L2pcapSocket.recv(self, x)
+ if r:
+ return r.payload
+ else:
+ return
+ def send(self, x):
+ cls = conf.l2types[1]
+ sx = bytes(cls()/x)
+ if hasattr(x, "sent_time"):
+ x.sent_time = time.time()
+ return self.ins.send(sx)
+ conf.L2socket=L2pcapSocket
+ conf.L3socket=L3pcapSocket
+
+if conf.use_winpcapy and conf.use_dnet:
+ class L3dnetSocket(SuperSocket):
+ desc = "read/write packets at layer 3 using libdnet and libpcap"
+ def __init__(self, type = ETH_P_ALL, filter=None, promisc=None, iface=None, nofilter=0):
+ self.iflist = {}
+ self.intf = dnet_intf()
+ if iface is None:
+ iface = conf.iface
+ self.iface = iface
+ self.ins = open_pcap(iface, 1600, 0, 100)
+ try:
+ ioctl(self.ins.fileno(),BIOCIMMEDIATE,struct.pack("I",1))
+ except:
+ pass
+ if nofilter:
+ if type != ETH_P_ALL: # PF_PACKET stuff. Need to emulate this for pcap
+ filter = "ether proto %i" % type
+ else:
+ filter = None
+ else:
+ if conf.except_filter:
+ if filter:
+ filter = "(%s) and not (%s)" % (filter, conf.except_filter)
+ else:
+ filter = "not (%s)" % conf.except_filter
+ if type != ETH_P_ALL: # PF_PACKET stuff. Need to emulate this for pcap
+ if filter:
+ filter = "(ether proto %i) and (%s)" % (type,filter)
+ else:
+ filter = "ether proto %i" % type
+ if filter:
+ self.ins.setfilter(filter)
+ def send(self, x):
+ iff,a,gw = x.route()
+ if iff is None:
+ iff = conf.iface
+ ifs,cls = self.iflist.get(iff,(None,None))
+ if ifs is None:
+ iftype = self.intf.get(iff)["type"]
+ if iftype == INTF_TYPE_ETH:
+ try:
+ cls = conf.l2types[1]
+ except KeyError:
+ warning("Unable to find Ethernet class. Using nothing")
+ ifs = dnet_eth(iff)
+ else:
+ ifs = dnet_ip()
+ self.iflist[iff] = ifs,cls
+ if cls is None:
+ #sx = str(x)
+ sx = bytes(x)
+ else:
+ sx = bytes(cls()/x)
+ x.sent_time = time.time()
+ ifs.send(sx)
+ def recv(self,x=MTU):
+ ll = self.ins.datalink()
+ if ll in conf.l2types:
+ cls = conf.l2types[ll]
+ else:
+ cls = conf.default_l2
+ warning("Unable to guess datalink type (interface=%s linktype=%i). Using %s" % (self.iface, ll, cls.name))
+
+ pkt = self.ins.next()
+ if pkt is not None:
+ ts,pkt = pkt
+ if pkt is None:
+ return
+
+ try:
+ pkt = cls(pkt)
+ except KeyboardInterrupt:
+ raise
+ except:
+ if conf.debug_dissector:
+ raise
+ pkt = conf.raw_layer(pkt)
+ pkt.time = ts
+ return pkt.payload
+
+ def nonblock_recv(self):
+ self.ins.setnonblock(1)
+ p = self.recv()
+ self.ins.setnonblock(0)
+ return p
+
+ def close(self):
+ if hasattr(self, "ins"):
+ self.ins.close()
+ if hasattr(self, "outs"):
+ self.outs.close()
+
+ class L2dnetSocket(SuperSocket):
+ desc = "read/write packets at layer 2 using libdnet and libpcap"
+ def __init__(self, iface = None, type = ETH_P_ALL, filter=None, nofilter=0):
+ if iface is None:
+ iface = conf.iface
+ self.iface = iface
+ self.ins = open_pcap(iface, 1600, 0, 100)
+ try:
+ ioctl(self.ins.fileno(),BIOCIMMEDIATE,struct.pack("I",1))
+ except:
+ pass
+ if nofilter:
+ if type != ETH_P_ALL: # PF_PACKET stuff. Need to emulate this for pcap
+ filter = "ether proto %i" % type
+ else:
+ filter = None
+ else:
+ if conf.except_filter:
+ if filter:
+ filter = "(%s) and not (%s)" % (filter, conf.except_filter)
+ else:
+ filter = "not (%s)" % conf.except_filter
+ if type != ETH_P_ALL: # PF_PACKET stuff. Need to emulate this for pcap
+ if filter:
+ filter = "(ether proto %i) and (%s)" % (type,filter)
+ else:
+ filter = "ether proto %i" % type
+ if filter:
+ self.ins.setfilter(filter)
+ self.outs = dnet_eth(iface)
+ def recv(self,x=MTU):
+ ll = self.ins.datalink()
+ if ll in conf.l2types:
+ cls = conf.l2types[ll]
+ else:
+ cls = conf.default_l2
+ warning("Unable to guess datalink type (interface=%s linktype=%i). Using %s" % (self.iface, ll, cls.name))
+
+ pkt = self.ins.next()
+ if pkt is not None:
+ ts,pkt = pkt
+ if pkt is None:
+ return
+
+ try:
+ pkt = cls(pkt)
+ except KeyboardInterrupt:
+ raise
+ except:
+ if conf.debug_dissector:
+ raise
+ pkt = conf.raw_layer(pkt)
+ pkt.time = ts
+ return pkt
+
+ def nonblock_recv(self):
+ self.ins.setnonblock(1)
+ p = self.recv(MTU)
+ self.ins.setnonblock(0)
+ return p
+
+ def close(self):
+ if hasattr(self, "ins"):
+ self.ins.close()
+ if hasattr(self, "outs"):
+ self.outs.close()
+
+ conf.L3socket=L3dnetSocket
+ conf.L2socket=L2dnetSocket
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/solaris.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/solaris.py
new file mode 100644
index 00000000..3117076a
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/solaris.py
@@ -0,0 +1,16 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Customization for the Solaris operation system.
+"""
+
+# IPPROTO_GRE is missing on Solaris
+import socket
+socket.IPPROTO_GRE = 47
+
+LOOPBACK_NAME="lo0"
+
+from unix import *
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/unix.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/unix.py
new file mode 100644
index 00000000..43e694b5
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/unix.py
@@ -0,0 +1,168 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more information
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Common customizations for all Unix-like operating systems other than Linux
+"""
+
+import sys,os,struct,socket,time
+from subprocess import check_output
+from fcntl import ioctl
+from scapy.error import warning
+import scapy.config
+import scapy.utils
+import scapy.utils6
+import scapy.arch
+
+scapy.config.conf.use_winpcapy = True
+scapy.config.conf.use_netifaces = True
+scapy.config.conf.use_dnet = True
+from .pcapdnet import *
+
+
+
+##################
+## Routes stuff ##
+##################
+
+
+def read_routes():
+ if scapy.arch.SOLARIS:
+ f=check_output(["netstat", "-rvn"], universal_newlines = True) # -f inet
+ elif scapy.arch.FREEBSD:
+ f=check_output(["netstat", "-rnW"], universal_newlines = True) # -W to handle long interface names
+ else:
+ f=check_output(["netstat", "-rn"], universal_newlines = True) # -f inet
+ ok = False
+ routes = []
+ pending_if = []
+ for l in f.split('\n'):
+ l = l.strip()
+ if l.find("----") >= 0: # a separation line
+ continue
+ if not ok:
+ if_index = [ l.split().index(i) for i in ['Iface', 'Netif', 'Interface', 'Device'] if i in l.split()]
+ if if_index:
+ ok = True
+ if_index = if_index[0]
+ continue
+ if not l:
+ break
+ if scapy.arch.SOLARIS:
+ lspl = l.split()
+ if len(lspl) == 10:
+ dest,mask,gw,netif,mxfrg,rtt,ref,flg = lspl[:8]
+ else: # missing interface
+ dest,mask,gw,mxfrg,rtt,ref,flg = lspl[:7]
+ netif=None
+ else:
+ rt = l.split()
+ dest,gw,flg = rt[:3]
+ netif = rt[if_index]
+ if flg.find("Lc") >= 0:
+ continue
+ if dest == "default":
+ dest = 0
+ netmask = 0
+ else:
+ if scapy.arch.SOLARIS:
+ netmask = scapy.utils.atol(mask)
+ elif "/" in dest:
+ dest,netmask = dest.split("/")
+ netmask = scapy.utils.itom(int(netmask))
+ else:
+ netmask = scapy.utils.itom((dest.count(".") + 1) * 8)
+ dest += ".0"*(3-dest.count("."))
+ dest = scapy.utils.atol(dest)
+ if not "G" in flg:
+ gw = '0.0.0.0'
+ if netif is not None:
+ ifaddr = scapy.arch.get_if_addr(netif)
+ routes.append((dest,netmask,gw,netif,ifaddr))
+ else:
+ pending_if.append((dest,netmask,gw))
+
+ # On Solaris, netstat does not provide output interfaces for some routes
+ # We need to parse completely the routing table to route their gw and
+ # know their output interface
+ for dest,netmask,gw in pending_if:
+ gw_l = scapy.utils.atol(gw)
+ max_rtmask,gw_if,gw_if_addr, = 0,None,None
+ for rtdst,rtmask,_,rtif,rtaddr in routes[:]:
+ if gw_l & rtmask == rtdst:
+ if rtmask >= max_rtmask:
+ max_rtmask = rtmask
+ gw_if = rtif
+ gw_if_addr = rtaddr
+ if gw_if:
+ routes.append((dest,netmask,gw,gw_if,gw_if_addr))
+ else:
+ warning("Did not find output interface to reach gateway %s" % gw)
+
+ return routes
+
+############
+### IPv6 ###
+############
+
+def read_routes6():
+ f = os.popen("netstat -rn -f inet6")
+ ok = False
+ mtu_present = False
+ prio_present = False
+ routes = []
+ lifaddr = in6_getifaddr()
+ for l in f.readlines():
+ if not l:
+ break
+ l = l.strip()
+ if not ok:
+ if l.find("Destination") >= 0:
+ ok = 1
+ mtu_present = l.find("Mtu") >= 0
+ prio_present = l.find("Prio") >= 0
+ continue
+ # gv 12/12/06: under debugging
+ if scapy.arch.NETBSD or scapy.arch.OPENBSD:
+ lspl = l.split()
+ d,nh,fl = lspl[:3]
+ dev = lspl[5+mtu_present+prio_present]
+ expire = None
+ else: # FREEBSD or DARWIN
+ d,nh,fl,dev = l.split()[:4]
+ if [ x for x in lifaddr if x[2] == dev] == []:
+ continue
+ if 'L' in fl: # drop MAC addresses
+ continue
+
+ if 'link' in nh:
+ nh = '::'
+
+ cset = [] # candidate set (possible source addresses)
+ dp = 128
+ if d == 'default':
+ d = '::'
+ dp = 0
+ if '/' in d:
+ d,dp = d.split("/")
+ dp = int(dp)
+ if '%' in d:
+ d,dev = d.split('%')
+ if '%' in nh:
+ nh,dev = nh.split('%')
+ if scapy.arch.LOOPBACK_NAME in dev:
+ if d == '::' and dp == 96: #Do not use ::/96 deprecated IPV4 mapping address
+ continue
+ cset = ['::1']
+ nh = '::'
+ else:
+ devaddrs = [ x for x in lifaddr if x[2] == dev ]
+ cset = scapy.utils6.construct_source_candidate_set(d, dp, devaddrs, scapy.arch.LOOPBACK_NAME)
+
+ if len(cset) != 0:
+ routes.append((d, dp, nh, dev, cset))
+
+ f.close()
+ return routes
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/windows/__init__.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/windows/__init__.py
new file mode 100644
index 00000000..3bd1ade3
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/windows/__init__.py
@@ -0,0 +1,501 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more information
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Customizations needed to support Microsoft Windows.
+"""
+
+import os,re,sys,socket,time, itertools
+import subprocess as sp
+from glob import glob
+from scapy.config import conf,ConfClass
+from scapy.error import Scapy_Exception,log_loading,log_runtime
+from scapy.utils import atol, itom, inet_aton, inet_ntoa, PcapReader
+from scapy.base_classes import Gen, Net, SetGen
+import scapy.plist as plist
+from scapy.sendrecv import debug, srp1
+from scapy.layers.l2 import Ether, ARP
+from scapy.data import MTU, ETHER_BROADCAST, ETH_P_ARP
+
+conf.use_winpcapy = True
+from scapy.arch import pcapdnet
+from scapy.arch.pcapdnet import *
+
+LOOPBACK_NAME="lo0"
+WINDOWS = True
+
+
+def _where(filename, dirs=[], env="PATH"):
+ """Find file in current dir or system path"""
+ if not isinstance(dirs, list):
+ dirs = [dirs]
+ if glob(filename):
+ return filename
+ paths = [os.curdir] + os.environ[env].split(os.path.pathsep) + dirs
+ for path in paths:
+ for match in glob(os.path.join(path, filename)):
+ if match:
+ return os.path.normpath(match)
+ raise IOError("File not found: %s" % filename)
+
+def win_find_exe(filename, installsubdir=None, env="ProgramFiles"):
+ """Find executable in current dir, system path or given ProgramFiles subdir"""
+ for fn in [filename, filename+".exe"]:
+ try:
+ if installsubdir is None:
+ path = _where(fn)
+ else:
+ path = _where(fn, dirs=[os.path.join(os.environ[env], installsubdir)])
+ except IOError:
+ path = filename
+ else:
+ break
+ return path
+
+
+class WinProgPath(ConfClass):
+ _default = "<System default>"
+ # We try some magic to find the appropriate executables
+ pdfreader = win_find_exe("AcroRd32")
+ psreader = win_find_exe("gsview32.exe", "Ghostgum/gsview")
+ dot = win_find_exe("dot", "ATT/Graphviz/bin")
+ tcpdump = win_find_exe("windump")
+ tcpreplay = win_find_exe("tcpreplay")
+ display = _default
+ hexedit = win_find_exe("hexer")
+ wireshark = win_find_exe("wireshark", "wireshark")
+
+conf.prog = WinProgPath()
+
+class PcapNameNotFoundError(Scapy_Exception):
+ pass
+
+def get_windows_if_list():
+ ps = sp.Popen(['powershell', 'Get-NetAdapter', '|', 'select Name, InterfaceIndex, InterfaceDescription, InterfaceGuid, MacAddress', '|', 'fl'], stdout = sp.PIPE)
+ stdout, stdin = ps.communicate(timeout = 10)
+ current_interface = None
+ interface_list = []
+ for i in stdout.split(b'\r\n'):
+ if not i.strip():
+ continue
+ if i.find(b':')<0:
+ continue
+ name, value = [ j.strip() for j in i.split(b':') ]
+ if name == b'Name':
+ if current_interface:
+ interface_list.append(current_interface)
+ current_interface = {}
+ current_interface['name'] = value.decode('ascii')
+ elif name == b'InterfaceIndex':
+ current_interface['win_index'] = int(value)
+ elif name == b'InterfaceDescription':
+ current_interface['description'] = value.decode('ascii')
+ elif name == b'InterfaceGuid':
+ current_interface['guid'] = value.decode('ascii')
+ elif name == b'MacAddress':
+ current_interface['mac'] = ':'.join([ j.decode('ascii') for j in value.split(b'-')])
+ if current_interface:
+ interface_list.append(current_interface)
+ return interface_list
+
+class NetworkInterface(object):
+ """A network interface of your local host"""
+
+ def __init__(self, data=None):
+ self.name = None
+ self.ip = None
+ self.mac = None
+ self.pcap_name = None
+ self.description = None
+ self.data = data
+ if data is not None:
+ self.update(data)
+
+ def update(self, data):
+ """Update info about network interface according to given dnet dictionary"""
+ self.name = data["name"]
+ self.description = data['description']
+ self.win_index = data['win_index']
+ # Other attributes are optional
+ self._update_pcapdata()
+ try:
+ self.ip = socket.inet_ntoa(get_if_raw_addr(data['guid']))
+ except (KeyError, AttributeError, NameError):
+ pass
+ try:
+ self.mac = data['mac']
+ except KeyError:
+ pass
+
+ def _update_pcapdata(self):
+ for i in winpcapy_get_if_list():
+ if i.endswith(self.data['guid']):
+ self.pcap_name = i
+ return
+
+ raise PcapNameNotFoundError
+
+ def __repr__(self):
+ return "<%s: %s %s %s pcap_name=%s description=%s>" % (self.__class__.__name__,
+ self.name, self.ip, self.mac, self.pcap_name, self.description)
+
+from collections import UserDict
+
+class NetworkInterfaceDict(UserDict):
+ """Store information about network interfaces and convert between names"""
+ def load_from_powershell(self):
+ for i in get_windows_if_list():
+ try:
+ interface = NetworkInterface(i)
+ self.data[interface.name] = interface
+ except (KeyError, PcapNameNotFoundError):
+ pass
+ if len(self.data) == 0:
+ log_loading.warning("No match between your pcap and windows network interfaces found. "
+ "You probably won't be able to send packets. "
+ "Deactivating unneeded interfaces and restarting Scapy might help."
+ "Check your winpcap and powershell installation, and access rights.")
+
+ def pcap_name(self, devname):
+ """Return pcap device name for given Windows device name."""
+
+ try:
+ pcap_name = self.data[devname].pcap_name
+ except KeyError:
+ raise ValueError("Unknown network interface %r" % devname)
+ else:
+ return pcap_name
+
+ def devname(self, pcap_name):
+ """Return Windows device name for given pcap device name."""
+
+ for devname, iface in self.items():
+ if iface.pcap_name == pcap_name:
+ return iface.name
+ raise ValueError("Unknown pypcap network interface %r" % pcap_name)
+
+ def devname_from_index(self, if_index):
+ """Return interface name from interface index"""
+ for devname, iface in self.items():
+ if iface.win_index == if_index:
+ return iface.name
+ raise ValueError("Unknown network interface index %r" % if_index)
+
+ def show(self, resolve_mac=True):
+ """Print list of available network interfaces in human readable form"""
+
+ print("%s %s %s %s" % ("INDEX".ljust(5), "IFACE".ljust(35), "IP".ljust(15), "MAC"))
+ for iface_name in sorted(self.data.keys()):
+ dev = self.data[iface_name]
+ mac = dev.mac
+ if resolve_mac:
+ mac = conf.manufdb._resolve_MAC(mac)
+ print("%s %s %s %s" % (str(dev.win_index).ljust(5), str(dev.name).ljust(35), str(dev.ip).ljust(15), mac) )
+
+ifaces = NetworkInterfaceDict()
+ifaces.load_from_powershell()
+
+def pcap_name(devname):
+ """Return pypcap device name for given libdnet/Scapy device name"""
+ try:
+ pcap_name = ifaces.pcap_name(devname)
+ except ValueError:
+ # pcap.pcap() will choose a sensible default for sniffing if iface=None
+ pcap_name = None
+ return pcap_name
+
+def devname(pcap_name):
+ """Return libdnet/Scapy device name for given pypcap device name"""
+ return ifaces.devname(pcap_name)
+
+def devname_from_index(if_index):
+ """Return Windows adapter name for given Windows interface index"""
+ return ifaces.devname_from_index(if_index)
+
+def show_interfaces(resolve_mac=True):
+ """Print list of available network interfaces"""
+ return ifaces.show(resolve_mac)
+
+_orig_open_pcap = pcapdnet.open_pcap
+pcapdnet.open_pcap = lambda iface,*args,**kargs: _orig_open_pcap(pcap_name(iface),*args,**kargs)
+
+_orig_get_if_raw_hwaddr = pcapdnet.get_if_raw_hwaddr
+pcapdnet.get_if_raw_hwaddr = lambda iface,*args,**kargs: [ int(i, 16) for i in ifaces[iface].mac.split(':') ]
+get_if_raw_hwaddr = pcapdnet.get_if_raw_hwaddr
+
+def read_routes():
+ routes = []
+ if_index = '(\d+)'
+ dest = '(\d+\.\d+\.\d+\.\d+)/(\d+)'
+ next_hop = '(\d+\.\d+\.\d+\.\d+)'
+ metric_pattern = "(\d+)"
+ delim = "\s+" # The columns are separated by whitespace
+ netstat_line = delim.join([if_index, dest, next_hop, metric_pattern])
+ pattern = re.compile(netstat_line)
+ ps = sp.Popen(['powershell', 'Get-NetRoute', '-AddressFamily IPV4', '|', 'select ifIndex, DestinationPrefix, NextHop, RouteMetric'], stdout = sp.PIPE)
+ stdout, stdin = ps.communicate(timeout = 10)
+ for l in stdout.split(b'\r\n'):
+ match = re.search(pattern,l.decode('utf-8'))
+ if match:
+ try:
+ iface = devname_from_index(int(match.group(1)))
+ addr = ifaces[iface].ip
+ except:
+ continue
+ dest = atol(match.group(2))
+ mask = itom(int(match.group(3)))
+ gw = match.group(4)
+ # try:
+ # intf = pcapdnet.dnet.intf().get_dst(pcapdnet.dnet.addr(type=2, addrtxt=dest))
+ # except OSError:
+ # log_loading.warning("Building Scapy's routing table: Couldn't get outgoing interface for destination %s" % dest)
+ # continue
+ routes.append((dest, mask, gw, iface, addr))
+ return routes
+
+def read_routes6():
+ return []
+
+try:
+ __IPYTHON__
+except NameError:
+ try:
+ import readline
+ console = readline.GetOutputFile()
+ except (ImportError, AttributeError):
+ log_loading.info("Could not get readline console. Will not interpret ANSI color codes.")
+ else:
+ conf.readfunc = readline.rl.readline
+ orig_stdout = sys.stdout
+ sys.stdout = console
+
+def sndrcv(pks, pkt, timeout = 2, inter = 0, verbose=None, chainCC=0, retry=0, multi=0):
+ if not isinstance(pkt, Gen):
+ pkt = SetGen(pkt)
+
+ if verbose is None:
+ verbose = conf.verb
+ debug.recv = plist.PacketList([],"Unanswered")
+ debug.sent = plist.PacketList([],"Sent")
+ debug.match = plist.SndRcvList([])
+ nbrecv=0
+ ans = []
+ # do it here to fix random fields, so that parent and child have the same
+ all_stimuli = tobesent = [p for p in pkt]
+ notans = len(tobesent)
+
+ hsent={}
+ for i in tobesent:
+ h = i.hashret()
+ if h in hsent:
+ hsent[h].append(i)
+ else:
+ hsent[h] = [i]
+ if retry < 0:
+ retry = -retry
+ autostop=retry
+ else:
+ autostop=0
+
+
+ while retry >= 0:
+ found=0
+
+ if timeout < 0:
+ timeout = None
+
+ pid=1
+ try:
+ if WINDOWS or pid == 0:
+ try:
+ try:
+ i = 0
+ if verbose:
+ print("Begin emission:")
+ for p in tobesent:
+ pks.send(p)
+ i += 1
+ time.sleep(inter)
+ if verbose:
+ print("Finished to send %i packets." % i)
+ except SystemExit:
+ pass
+ except KeyboardInterrupt:
+ pass
+ except:
+ log_runtime.exception("--- Error sending packets")
+ log_runtime.info("--- Error sending packets")
+ finally:
+ try:
+ sent_times = [p.sent_time for p in all_stimuli if p.sent_time]
+ except:
+ pass
+ if WINDOWS or pid > 0:
+ # Timeout starts after last packet is sent (as in Unix version)
+ if timeout:
+ stoptime = time.time()+timeout
+ else:
+ stoptime = 0
+ remaintime = None
+ # inmask = [pks.ins.fd]
+ try:
+ try:
+ while 1:
+ if stoptime:
+ remaintime = stoptime-time.time()
+ if remaintime <= 0:
+ break
+ r = pks.recv(MTU)
+ if r is None:
+ continue
+ ok = 0
+ h = r.hashret()
+ if h in hsent:
+ hlst = hsent[h]
+ for i in range(len(hlst)):
+ if r.answers(hlst[i]):
+ ans.append((hlst[i],r))
+ if verbose > 1:
+ os.write(1, b"*")
+ ok = 1
+ if not multi:
+ del(hlst[i])
+ notans -= 1;
+ else:
+ if not hasattr(hlst[i], '_answered'):
+ notans -= 1;
+ hlst[i]._answered = 1;
+ break
+ if notans == 0 and not multi:
+ break
+ if not ok:
+ if verbose > 1:
+ os.write(1, b".")
+ nbrecv += 1
+ if conf.debug_match:
+ debug.recv.append(r)
+ except KeyboardInterrupt:
+ if chainCC:
+ raise
+ finally:
+ if WINDOWS:
+ for p,t in zip(all_stimuli, sent_times):
+ p.sent_time = t
+ finally:
+ pass
+
+ # remain = reduce(list.__add__, hsent.values(), [])
+ remain = list(itertools.chain(*[ i for i in hsent.values() ]))
+
+ if multi:
+ #remain = filter(lambda p: not hasattr(p, '_answered'), remain);
+ remain = [ p for p in remain if not hasattr(p, '_answered')]
+
+ if autostop and len(remain) > 0 and len(remain) != len(tobesent):
+ retry = autostop
+
+ tobesent = remain
+ if len(tobesent) == 0:
+ break
+ retry -= 1
+
+ if conf.debug_match:
+ debug.sent=plist.PacketList(remain[:],"Sent")
+ debug.match=plist.SndRcvList(ans[:])
+
+ #clean the ans list to delete the field _answered
+ if (multi):
+ for s,r in ans:
+ if hasattr(s, '_answered'):
+ del(s._answered)
+
+ if verbose:
+ print("\nReceived %i packets, got %i answers, remaining %i packets" % (nbrecv+len(ans), len(ans), notans))
+ return plist.SndRcvList(ans),plist.PacketList(remain,"Unanswered")
+
+
+import scapy.sendrecv
+scapy.sendrecv.sndrcv = sndrcv
+
+def sniff(count=0, store=1, offline=None, prn = None, lfilter=None, L2socket=None, timeout=None, *arg, **karg):
+ """Sniff packets
+sniff([count=0,] [prn=None,] [store=1,] [offline=None,] [lfilter=None,] + L2ListenSocket args) -> list of packets
+Select interface to sniff by setting conf.iface. Use show_interfaces() to see interface names.
+ count: number of packets to capture. 0 means infinity
+ store: whether to store sniffed packets or discard them
+ prn: function to apply to each packet. If something is returned,
+ it is displayed. Ex:
+ ex: prn = lambda x: x.summary()
+lfilter: python function applied to each packet to determine
+ if further action may be done
+ ex: lfilter = lambda x: x.haslayer(Padding)
+offline: pcap file to read packets from, instead of sniffing them
+timeout: stop sniffing after a given time (default: None)
+L2socket: use the provided L2socket
+ """
+ c = 0
+
+ if offline is None:
+ log_runtime.info('Sniffing on %s' % conf.iface)
+ if L2socket is None:
+ L2socket = conf.L2listen
+ s = L2socket(type=ETH_P_ALL, *arg, **karg)
+ else:
+ s = PcapReader(offline)
+
+ lst = []
+ if timeout is not None:
+ stoptime = time.time()+timeout
+ remain = None
+ while 1:
+ try:
+ if timeout is not None:
+ remain = stoptime-time.time()
+ if remain <= 0:
+ break
+
+ try:
+ p = s.recv(MTU)
+ except PcapTimeoutElapsed:
+ continue
+ if p is None:
+ break
+ if lfilter and not lfilter(p):
+ continue
+ if store:
+ lst.append(p)
+ c += 1
+ if prn:
+ r = prn(p)
+ if r is not None:
+ print(r)
+ if count > 0 and c >= count:
+ break
+ except KeyboardInterrupt:
+ break
+ s.close()
+ return plist.PacketList(lst,"Sniffed")
+
+import scapy.sendrecv
+scapy.sendrecv.sniff = sniff
+
+# def get_if_list():
+# print('windows if_list')
+# return sorted(ifaces.keys())
+
+def get_working_if():
+ try:
+ if 'Ethernet' in ifaces and ifaces['Ethernet'].ip != '0.0.0.0':
+ return 'Ethernet'
+ elif 'Wi-Fi' in ifaces and ifaces['Wi-Fi'].ip != '0.0.0.0':
+ return 'Wi-Fi'
+ elif len(ifaces) > 0:
+ return ifaces[list(ifaces.keys())[0]]
+ else:
+ return LOOPBACK_NAME
+ except:
+ return LOOPBACK_NAME
+
+conf.iface = get_working_if()
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/winpcapy.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/winpcapy.py
new file mode 100644
index 00000000..fc452a02
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/arch/winpcapy.py
@@ -0,0 +1,739 @@
+#-------------------------------------------------------------------------------
+# Name: winpcapy.py
+#
+# Author: Massimo Ciani
+#
+# Created: 01/09/2009
+# Copyright: (c) Massimo Ciani 2009
+#
+#-------------------------------------------------------------------------------
+
+
+from ctypes import *
+from ctypes.util import find_library
+import sys
+
+WIN32=False
+HAVE_REMOTE=False
+
+
+if sys.platform.startswith('win'):
+ WIN32=True
+ HAVE_REMOTE=True
+
+if WIN32:
+ SOCKET = c_uint
+ _lib=CDLL('wpcap.dll')
+else:
+ SOCKET = c_int
+ _lib_name = find_library('pcap')
+ if not _lib_name:
+ raise OSError("Cannot fine libpcap.so library")
+ _lib=CDLL(_lib_name)
+
+
+
+##
+## misc
+##
+u_short = c_ushort
+bpf_int32 = c_int
+u_int = c_int
+bpf_u_int32 = u_int
+pcap = c_void_p
+pcap_dumper = c_void_p
+u_char = c_ubyte
+FILE = c_void_p
+STRING = c_char_p
+
+class bpf_insn(Structure):
+ _fields_=[("code",c_ushort),
+ ("jt",c_ubyte),
+ ("jf",c_ubyte),
+ ("k",bpf_u_int32)]
+
+class bpf_program(Structure):
+ pass
+bpf_program._fields_ = [('bf_len', u_int),
+ ('bf_insns', POINTER(bpf_insn))]
+
+class bpf_version(Structure):
+ _fields_=[("bv_major",c_ushort),
+ ("bv_minor",c_ushort)]
+
+
+class timeval(Structure):
+ pass
+timeval._fields_ = [('tv_sec', c_long),
+ ('tv_usec', c_long)]
+
+## sockaddr is used by pcap_addr.
+## For exapmle if sa_family==socket.AF_INET then we need cast
+## with sockaddr_in
+if WIN32:
+ class sockaddr(Structure):
+ _fields_ = [("sa_family", c_ushort),
+ ("sa_data",c_ubyte * 14)]
+
+ class sockaddr_in(Structure):
+ _fields_ = [("sin_family", c_ushort),
+ ("sin_port", c_uint16),
+ ("sin_addr", 4 * c_ubyte)]
+
+ class sockaddr_in6(Structure):
+ _fields_ = [("sin6_family", c_ushort),
+ ("sin6_port", c_uint16),
+ ("sin6_flowinfo", c_uint32),
+ ("sin6_addr", 16 * c_ubyte),
+ ("sin6_scope", c_uint32)]
+else:
+ class sockaddr(Structure):
+ _fields_ = [("sa_len", c_ubyte),
+ ("sa_family",c_ubyte),
+ ("sa_data",c_ubyte * 14)]
+
+ class sockaddr_in(Structure):
+ _fields_ = [("sin_len", c_ubyte),
+ ("sin_family", c_ubyte),
+ ("sin_port", c_uint16),
+ ("sin_addr", 4 * c_ubyte),
+ ("sin_zero", 8 * c_char)]
+
+ class sockaddr_in6(Structure):
+ _fields_ = [("sin6_len", c_ubyte),
+ ("sin6_family", c_ubyte),
+ ("sin6_port", c_uint16),
+ ("sin6_flowinfo", c_uint32),
+ ("sin6_addr", 16 * c_ubyte),
+ ("sin6_scope", c_uint32)]
+
+ class sockaddr_dl(Structure):
+ _fields_ = [("sdl_len", c_ubyte),
+ ("sdl_family", c_ubyte),
+ ("sdl_index", c_ushort),
+ ("sdl_type", c_ubyte),
+ ("sdl_nlen", c_ubyte),
+ ("sdl_alen", c_ubyte),
+ ("sdl_slen", c_ubyte),
+ ("sdl_data", 46 * c_ubyte)]
+##
+## END misc
+##
+
+##
+## Data Structures
+##
+
+## struct pcap_file_header
+## Header of a libpcap dump file.
+class pcap_file_header(Structure):
+ _fields_ = [('magic', bpf_u_int32),
+ ('version_major', u_short),
+ ('version_minor', u_short),
+ ('thiszone', bpf_int32),
+ ('sigfigs', bpf_u_int32),
+ ('snaplen', bpf_u_int32),
+ ('linktype', bpf_u_int32)]
+
+## struct pcap_pkthdr
+## Header of a packet in the dump file.
+class pcap_pkthdr(Structure):
+ _fields_ = [('ts', timeval),
+ ('caplen', bpf_u_int32),
+ ('len', bpf_u_int32)]
+
+## struct pcap_stat
+## Structure that keeps statistical values on an interface.
+class pcap_stat(Structure):
+ pass
+### _fields_ list in Structure is final.
+### We need a temp list
+_tmpList=[]
+_tmpList.append(("ps_recv",c_uint))
+_tmpList.append(("ps_drop",c_uint))
+_tmpList.append(("ps_ifdrop",c_uint))
+if HAVE_REMOTE:
+ _tmpList.append(("ps_capt",c_uint))
+ _tmpList.append(("ps_sent",c_uint))
+ _tmpList.append(("ps_netdrop",c_uint))
+pcap_stat._fields_=_tmpList
+
+## struct pcap_addr
+## Representation of an interface address, used by pcap_findalldevs().
+class pcap_addr(Structure):
+ pass
+pcap_addr._fields_ = [('next', POINTER(pcap_addr)),
+ ('addr', POINTER(sockaddr)),
+ ('netmask', POINTER(sockaddr)),
+ ('broadaddr', POINTER(sockaddr)),
+ ('dstaddr', POINTER(sockaddr))]
+
+## struct pcap_if
+## Item in a list of interfaces, used by pcap_findalldevs().
+class pcap_if(Structure):
+ pass
+pcap_if._fields_ = [('next', POINTER(pcap_if)),
+ ('name', STRING),
+ ('description', STRING),
+ ('addresses', POINTER(pcap_addr)),
+ ('flags', bpf_u_int32)]
+
+##
+## END Data Structures
+##
+
+##
+## Defines
+##
+
+##define PCAP_VERSION_MAJOR 2
+# Major libpcap dump file version.
+PCAP_VERSION_MAJOR = 2
+##define PCAP_VERSION_MINOR 4
+# Minor libpcap dump file version.
+PCAP_VERSION_MINOR = 4
+##define PCAP_ERRBUF_SIZE 256
+# Size to use when allocating the buffer that contains the libpcap errors.
+PCAP_ERRBUF_SIZE = 256
+##define PCAP_IF_LOOPBACK 0x00000001
+# interface is loopback
+PCAP_IF_LOOPBACK = 1
+##define MODE_CAPT 0
+# Capture mode, to be used when calling pcap_setmode().
+MODE_CAPT = 0
+##define MODE_STAT 1
+# Statistical mode, to be used when calling pcap_setmode().
+MODE_STAT = 1
+
+##
+## END Defines
+##
+
+##
+## Typedefs
+##
+
+#typedef int bpf_int32 (already defined)
+# 32-bit integer
+#typedef u_int bpf_u_int32 (already defined)
+# 32-bit unsigned integer
+#typedef struct pcap pcap_t
+# Descriptor of an open capture instance. This structure is opaque to the user, that handles its content through the functions provided by wpcap.dll.
+pcap_t = pcap
+#typedef struct pcap_dumper pcap_dumper_t
+# libpcap savefile descriptor.
+pcap_dumper_t = pcap_dumper
+#typedef struct pcap_if pcap_if_t
+# Item in a list of interfaces, see pcap_if.
+pcap_if_t = pcap_if
+#typedef struct pcap_addr pcap_addr_t
+# Representation of an interface address, see pcap_addr.
+pcap_addr_t = pcap_addr
+
+##
+## END Typedefs
+##
+
+
+
+
+
+# values for enumeration 'pcap_direction_t'
+#pcap_direction_t = c_int # enum
+
+##
+## Unix-compatible Functions
+## These functions are part of the libpcap library, and therefore work both on Windows and on Linux.
+##
+
+#typedef void(* pcap_handler )(u_char *user, const struct pcap_pkthdr *pkt_header, const u_char *pkt_data)
+# Prototype of the callback function that receives the packets.
+## This one is defined from programmer
+pcap_handler=CFUNCTYPE(None,POINTER(c_ubyte),POINTER(pcap_pkthdr),POINTER(c_ubyte))
+
+#pcap_t * pcap_open_live (const char *device, int snaplen, int promisc, int to_ms, char *ebuf)
+# Open a live capture from the network.
+pcap_open_live = _lib.pcap_open_live
+pcap_open_live.restype = POINTER(pcap_t)
+pcap_open_live.argtypes = [STRING, c_int, c_int, c_int, STRING]
+
+#pcap_t * pcap_open_dead (int linktype, int snaplen)
+# Create a pcap_t structure without starting a capture.
+pcap_open_dead = _lib.pcap_open_dead
+pcap_open_dead.restype = POINTER(pcap_t)
+pcap_open_dead.argtypes = [c_int, c_int]
+
+#pcap_t * pcap_open_offline (const char *fname, char *errbuf)
+# Open a savefile in the tcpdump/libpcap format to read packets.
+pcap_open_offline = _lib.pcap_open_offline
+pcap_open_offline.restype = POINTER(pcap_t)
+pcap_open_offline.argtypes = [STRING, STRING]
+
+#pcap_dumper_t * pcap_dump_open (pcap_t *p, const char *fname)
+# Open a file to write packets.
+pcap_dump_open = _lib.pcap_dump_open
+pcap_dump_open.restype = POINTER(pcap_dumper_t)
+pcap_dump_open.argtypes = [POINTER(pcap_t), STRING]
+
+#int pcap_setnonblock (pcap_t *p, int nonblock, char *errbuf)
+# Switch between blocking and nonblocking mode.
+pcap_setnonblock = _lib.pcap_setnonblock
+pcap_setnonblock.restype = c_int
+pcap_setnonblock.argtypes = [POINTER(pcap_t), c_int, STRING]
+
+#int pcap_getnonblock (pcap_t *p, char *errbuf)
+# Get the "non-blocking" state of an interface.
+pcap_getnonblock = _lib.pcap_getnonblock
+pcap_getnonblock.restype = c_int
+pcap_getnonblock.argtypes = [POINTER(pcap_t), STRING]
+
+#int pcap_findalldevs (pcap_if_t **alldevsp, char *errbuf)
+# Construct a list of network devices that can be opened with pcap_open_live().
+pcap_findalldevs = _lib.pcap_findalldevs
+pcap_findalldevs.restype = c_int
+pcap_findalldevs.argtypes = [POINTER(POINTER(pcap_if_t)), STRING]
+
+#void pcap_freealldevs (pcap_if_t *alldevsp)
+# Free an interface list returned by pcap_findalldevs().
+pcap_freealldevs = _lib.pcap_freealldevs
+pcap_freealldevs.restype = None
+pcap_freealldevs.argtypes = [POINTER(pcap_if_t)]
+
+#char * pcap_lookupdev (char *errbuf)
+# Return the first valid device in the system.
+pcap_lookupdev = _lib.pcap_lookupdev
+pcap_lookupdev.restype = STRING
+pcap_lookupdev.argtypes = [STRING]
+
+#int pcap_lookupnet (const char *device, bpf_u_int32 *netp, bpf_u_int32 *maskp, char *errbuf)
+# Return the subnet and netmask of an interface.
+pcap_lookupnet = _lib.pcap_lookupnet
+pcap_lookupnet.restype = c_int
+pcap_lookupnet.argtypes = [STRING, POINTER(bpf_u_int32), POINTER(bpf_u_int32), STRING]
+
+#int pcap_dispatch (pcap_t *p, int cnt, pcap_handler callback, u_char *user)
+# Collect a group of packets.
+pcap_dispatch = _lib.pcap_dispatch
+pcap_dispatch.restype = c_int
+pcap_dispatch.argtypes = [POINTER(pcap_t), c_int, pcap_handler, POINTER(u_char)]
+
+#int pcap_loop (pcap_t *p, int cnt, pcap_handler callback, u_char *user)
+# Collect a group of packets.
+pcap_loop = _lib.pcap_loop
+pcap_loop.restype = c_int
+pcap_loop.argtypes = [POINTER(pcap_t), c_int, pcap_handler, POINTER(u_char)]
+
+#u_char * pcap_next (pcap_t *p, struct pcap_pkthdr *h)
+# Return the next available packet.
+pcap_next = _lib.pcap_next
+pcap_next.restype = POINTER(u_char)
+pcap_next.argtypes = [POINTER(pcap_t), POINTER(pcap_pkthdr)]
+
+#int pcap_next_ex (pcap_t *p, struct pcap_pkthdr **pkt_header, const u_char **pkt_data)
+# Read a packet from an interface or from an offline capture.
+pcap_next_ex = _lib.pcap_next_ex
+pcap_next_ex.restype = c_int
+pcap_next_ex.argtypes = [POINTER(pcap_t), POINTER(POINTER(pcap_pkthdr)), POINTER(POINTER(u_char))]
+
+#void pcap_breakloop (pcap_t *)
+# set a flag that will force pcap_dispatch() or pcap_loop() to return rather than looping.
+pcap_breakloop = _lib.pcap_breakloop
+pcap_breakloop.restype = None
+pcap_breakloop.argtypes = [POINTER(pcap_t)]
+
+#int pcap_sendpacket (pcap_t *p, u_char *buf, int size)
+# Send a raw packet.
+pcap_sendpacket = _lib.pcap_sendpacket
+pcap_sendpacket.restype = c_int
+#pcap_sendpacket.argtypes = [POINTER(pcap_t), POINTER(u_char), c_int]
+pcap_sendpacket.argtypes = [POINTER(pcap_t), c_void_p, c_int]
+
+#void pcap_dump (u_char *user, const struct pcap_pkthdr *h, const u_char *sp)
+# Save a packet to disk.
+pcap_dump = _lib.pcap_dump
+pcap_dump.restype = None
+pcap_dump.argtypes = [POINTER(pcap_dumper_t), POINTER(pcap_pkthdr), POINTER(u_char)]
+
+#long pcap_dump_ftell (pcap_dumper_t *)
+# Return the file position for a "savefile".
+pcap_dump_ftell = _lib.pcap_dump_ftell
+pcap_dump_ftell.restype = c_long
+pcap_dump_ftell.argtypes = [POINTER(pcap_dumper_t)]
+
+#int pcap_compile (pcap_t *p, struct bpf_program *fp, char *str, int optimize, bpf_u_int32 netmask)
+# Compile a packet filter, converting an high level filtering expression (see Filtering expression syntax) in a program that can be interpreted by the kernel-level filtering engine.
+pcap_compile = _lib.pcap_compile
+pcap_compile.restype = c_int
+pcap_compile.argtypes = [POINTER(pcap_t), POINTER(bpf_program), STRING, c_int, bpf_u_int32]
+
+#int pcap_compile_nopcap (int snaplen_arg, int linktype_arg, struct bpf_program *program, char *buf, int optimize, bpf_u_int32 mask)
+# Compile a packet filter without the need of opening an adapter. This function converts an high level filtering expression (see Filtering expression syntax) in a program that can be interpreted by the kernel-level filtering engine.
+pcap_compile_nopcap = _lib.pcap_compile_nopcap
+pcap_compile_nopcap.restype = c_int
+pcap_compile_nopcap.argtypes = [c_int, c_int, POINTER(bpf_program), STRING, c_int, bpf_u_int32]
+
+#int pcap_setfilter (pcap_t *p, struct bpf_program *fp)
+# Associate a filter to a capture.
+pcap_setfilter = _lib.pcap_setfilter
+pcap_setfilter.restype = c_int
+pcap_setfilter.argtypes = [POINTER(pcap_t), POINTER(bpf_program)]
+
+#void pcap_freecode (struct bpf_program *fp)
+# Free a filter.
+pcap_freecode = _lib.pcap_freecode
+pcap_freecode.restype = None
+pcap_freecode.argtypes = [POINTER(bpf_program)]
+
+#int pcap_datalink (pcap_t *p)
+# Return the link layer of an adapter.
+pcap_datalink = _lib.pcap_datalink
+pcap_datalink.restype = c_int
+pcap_datalink.argtypes = [POINTER(pcap_t)]
+
+#int pcap_list_datalinks (pcap_t *p, int **dlt_buf)
+# list datalinks
+pcap_list_datalinks = _lib.pcap_list_datalinks
+pcap_list_datalinks.restype = c_int
+#pcap_list_datalinks.argtypes = [POINTER(pcap_t), POINTER(POINTER(c_int))]
+
+#int pcap_set_datalink (pcap_t *p, int dlt)
+# Set the current data link type of the pcap descriptor to the type specified by dlt. -1 is returned on failure.
+pcap_set_datalink = _lib.pcap_set_datalink
+pcap_set_datalink.restype = c_int
+pcap_set_datalink.argtypes = [POINTER(pcap_t), c_int]
+
+#int pcap_datalink_name_to_val (const char *name)
+# Translates a data link type name, which is a DLT_ name with the DLT_ removed, to the corresponding data link type value. The translation is case-insensitive. -1 is returned on failure.
+pcap_datalink_name_to_val = _lib.pcap_datalink_name_to_val
+pcap_datalink_name_to_val.restype = c_int
+pcap_datalink_name_to_val.argtypes = [STRING]
+
+#const char * pcap_datalink_val_to_name (int dlt)
+# Translates a data link type value to the corresponding data link type name. NULL is returned on failure.
+pcap_datalink_val_to_name = _lib.pcap_datalink_val_to_name
+pcap_datalink_val_to_name.restype = STRING
+pcap_datalink_val_to_name.argtypes = [c_int]
+
+#const char * pcap_datalink_val_to_description (int dlt)
+# Translates a data link type value to a short description of that data link type. NULL is returned on failure.
+pcap_datalink_val_to_description = _lib.pcap_datalink_val_to_description
+pcap_datalink_val_to_description.restype = STRING
+pcap_datalink_val_to_description.argtypes = [c_int]
+
+#int pcap_snapshot (pcap_t *p)
+# Return the dimension of the packet portion (in bytes) that is delivered to the application.
+pcap_snapshot = _lib.pcap_snapshot
+pcap_snapshot.restype = c_int
+pcap_snapshot.argtypes = [POINTER(pcap_t)]
+
+#int pcap_is_swapped (pcap_t *p)
+# returns true if the current savefile uses a different byte order than the current system.
+pcap_is_swapped = _lib.pcap_is_swapped
+pcap_is_swapped.restype = c_int
+pcap_is_swapped.argtypes = [POINTER(pcap_t)]
+
+#int pcap_major_version (pcap_t *p)
+# return the major version number of the pcap library used to write the savefile.
+pcap_major_version = _lib.pcap_major_version
+pcap_major_version.restype = c_int
+pcap_major_version.argtypes = [POINTER(pcap_t)]
+
+#int pcap_minor_version (pcap_t *p)
+# return the minor version number of the pcap library used to write the savefile.
+pcap_minor_version = _lib.pcap_minor_version
+pcap_minor_version.restype = c_int
+pcap_minor_version.argtypes = [POINTER(pcap_t)]
+
+#FILE * pcap_file (pcap_t *p)
+# Return the standard stream of an offline capture.
+pcap_file=_lib.pcap_file
+pcap_file.restype = FILE
+pcap_file.argtypes = [POINTER(pcap_t)]
+
+#int pcap_stats (pcap_t *p, struct pcap_stat *ps)
+# Return statistics on current capture.
+pcap_stats = _lib.pcap_stats
+pcap_stats.restype = c_int
+pcap_stats.argtypes = [POINTER(pcap_t), POINTER(pcap_stat)]
+
+#void pcap_perror (pcap_t *p, char *prefix)
+# print the text of the last pcap library error on stderr, prefixed by prefix.
+pcap_perror = _lib.pcap_perror
+pcap_perror.restype = None
+pcap_perror.argtypes = [POINTER(pcap_t), STRING]
+
+#char * pcap_geterr (pcap_t *p)
+# return the error text pertaining to the last pcap library error.
+pcap_geterr = _lib.pcap_geterr
+pcap_geterr.restype = STRING
+pcap_geterr.argtypes = [POINTER(pcap_t)]
+
+#char * pcap_strerror (int error)
+# Provided in case strerror() isn't available.
+pcap_strerror = _lib.pcap_strerror
+pcap_strerror.restype = STRING
+pcap_strerror.argtypes = [c_int]
+
+#const char * pcap_lib_version (void)
+# Returns a pointer to a string giving information about the version of the libpcap library being used; note that it contains more information than just a version number.
+pcap_lib_version = _lib.pcap_lib_version
+pcap_lib_version.restype = STRING
+pcap_lib_version.argtypes = []
+
+#void pcap_close (pcap_t *p)
+# close the files associated with p and deallocates resources.
+pcap_close = _lib.pcap_close
+pcap_close.restype = None
+pcap_close.argtypes = [POINTER(pcap_t)]
+
+#FILE * pcap_dump_file (pcap_dumper_t *p)
+# return the standard I/O stream of the 'savefile' opened by pcap_dump_open().
+pcap_dump_file=_lib.pcap_dump_file
+pcap_dump_file.restype=FILE
+pcap_dump_file.argtypes= [POINTER(pcap_dumper_t)]
+
+#int pcap_dump_flush (pcap_dumper_t *p)
+# Flushes the output buffer to the ``savefile,'' so that any packets written with pcap_dump() but not yet written to the ``savefile'' will be written. -1 is returned on error, 0 on success.
+pcap_dump_flush = _lib.pcap_dump_flush
+pcap_dump_flush.restype = c_int
+pcap_dump_flush.argtypes = [POINTER(pcap_dumper_t)]
+
+#void pcap_dump_close (pcap_dumper_t *p)
+# Closes a savefile.
+pcap_dump_close = _lib.pcap_dump_close
+pcap_dump_close.restype = None
+pcap_dump_close.argtypes = [POINTER(pcap_dumper_t)]
+
+if not WIN32:
+
+ pcap_get_selectable_fd = _lib.pcap_get_selectable_fd
+ pcap_get_selectable_fd.restype = c_int
+ pcap_get_selectable_fd.argtypes = [POINTER(pcap_t)]
+
+###########################################
+## Windows-specific Extensions
+## The functions in this section extend libpcap to offer advanced functionalities
+## (like remote packet capture, packet buffer size variation or high-precision packet injection).
+## However, at the moment they can be used only in Windows.
+###########################################
+if WIN32:
+ HANDLE = c_void_p
+
+ ##############
+ ## Identifiers related to the new source syntax
+ ##############
+ #define PCAP_SRC_FILE 2
+ #define PCAP_SRC_IFLOCAL 3
+ #define PCAP_SRC_IFREMOTE 4
+ #Internal representation of the type of source in use (file, remote/local interface).
+ PCAP_SRC_FILE = 2
+ PCAP_SRC_IFLOCAL = 3
+ PCAP_SRC_IFREMOTE = 4
+
+ ##############
+ ## Strings related to the new source syntax
+ ##############
+ #define PCAP_SRC_FILE_STRING "file://"
+ #define PCAP_SRC_IF_STRING "rpcap://"
+ #String that will be used to determine the type of source in use (file, remote/local interface).
+ PCAP_SRC_FILE_STRING="file://"
+ PCAP_SRC_IF_STRING="rpcap://"
+
+ ##############
+ ## Flags defined in the pcap_open() function
+ ##############
+ # define PCAP_OPENFLAG_PROMISCUOUS 1
+ # Defines if the adapter has to go in promiscuous mode.
+ PCAP_OPENFLAG_PROMISCUOUS=1
+ # define PCAP_OPENFLAG_DATATX_UDP 2
+    # Defines if the data transfer (in case of a remote capture) has to be done with UDP protocol.
+ PCAP_OPENFLAG_DATATX_UDP=2
+ # define PCAP_OPENFLAG_NOCAPTURE_RPCAP 4
+ PCAP_OPENFLAG_NOCAPTURE_RPCAP=4
+ # Defines if the remote probe will capture its own generated traffic.
+ # define PCAP_OPENFLAG_NOCAPTURE_LOCAL 8
+ PCAP_OPENFLAG_NOCAPTURE_LOCAL = 8
+ # define PCAP_OPENFLAG_MAX_RESPONSIVENESS 16
+ # This flag configures the adapter for maximum responsiveness.
+ PCAP_OPENFLAG_MAX_RESPONSIVENESS=16
+
+ ##############
+ ## Sampling methods defined in the pcap_setsampling() function
+ ##############
+ # define PCAP_SAMP_NOSAMP 0
+ # No sampling has to be done on the current capture.
+ PCAP_SAMP_NOSAMP=0
+ # define PCAP_SAMP_1_EVERY_N 1
+ # It defines that only 1 out of N packets must be returned to the user.
+ PCAP_SAMP_1_EVERY_N=1
+ #define PCAP_SAMP_FIRST_AFTER_N_MS 2
+ # It defines that we have to return 1 packet every N milliseconds.
+ PCAP_SAMP_FIRST_AFTER_N_MS=2
+
+ ##############
+ ## Authentication methods supported by the RPCAP protocol
+ ##############
+ # define RPCAP_RMTAUTH_NULL 0
+ # It defines the NULL authentication.
+ RPCAP_RMTAUTH_NULL=0
+ # define RPCAP_RMTAUTH_PWD 1
+ # It defines the username/password authentication.
+ RPCAP_RMTAUTH_PWD=1
+
+
+ ##############
+ ## Remote struct and defines
+ ##############
+ # define PCAP_BUF_SIZE 1024
+ # Defines the maximum buffer size in which address, port, interface names are kept.
+ PCAP_BUF_SIZE = 1024
+ # define RPCAP_HOSTLIST_SIZE 1024
+    # Maximum length of a host name (needed for the RPCAP active mode).
+ RPCAP_HOSTLIST_SIZE = 1024
+
+ class pcap_send_queue(Structure):
+ _fields_=[("maxlen",c_uint),
+ ("len",c_uint),
+ ("buffer",c_char_p)]
+
+ ## struct pcap_rmtauth
+    ## This structure keeps the information needed to authenticate the user on a remote machine
+ class pcap_rmtauth(Structure):
+ _fields_=[("type",c_int),
+ ("username",c_char_p),
+ ("password",c_char_p)]
+
+ ## struct pcap_samp
+ ## This structure defines the information related to sampling
+ class pcap_samp(Structure):
+ _fields_=[("method",c_int),
+ ("value",c_int)]
+
+ #PAirpcapHandle pcap_get_airpcap_handle (pcap_t *p)
+ # Returns the AirPcap handler associated with an adapter. This handler can be used to change the wireless-related settings of the CACE Technologies AirPcap wireless capture adapters.
+
+ #bool pcap_offline_filter (struct bpf_program *prog, const struct pcap_pkthdr *header, const u_char *pkt_data)
+ # Returns if a given filter applies to an offline packet.
+ pcap_offline_filter = _lib.pcap_offline_filter
+ pcap_offline_filter.restype = c_bool
+ pcap_offline_filter.argtypes = [POINTER(bpf_program),POINTER(pcap_pkthdr),POINTER(u_char)]
+
+ #int pcap_live_dump (pcap_t *p, char *filename, int maxsize, int maxpacks)
+ # Save a capture to file.
+ pcap_live_dump = _lib.pcap_live_dump
+ pcap_live_dump.restype = c_int
+ pcap_live_dump.argtypes = [POINTER(pcap_t), POINTER(c_char), c_int,c_int]
+
+ #int pcap_live_dump_ended (pcap_t *p, int sync)
+ # Return the status of the kernel dump process, i.e. tells if one of the limits defined with pcap_live_dump() has been reached.
+ pcap_live_dump_ended = _lib.pcap_live_dump_ended
+ pcap_live_dump_ended.restype = c_int
+ pcap_live_dump_ended.argtypes = [POINTER(pcap_t), c_int]
+
+ #struct pcap_stat * pcap_stats_ex (pcap_t *p, int *pcap_stat_size)
+ # Return statistics on current capture.
+ pcap_stats_ex = _lib.pcap_stats_ex
+ pcap_stats_ex.restype = POINTER(pcap_stat)
+ pcap_stats_ex.argtypes = [POINTER(pcap_t), POINTER(c_int)]
+
+ #int pcap_setbuff (pcap_t *p, int dim)
+ # Set the size of the kernel buffer associated with an adapter.
+ pcap_setbuff = _lib.pcap_setbuff
+ pcap_setbuff.restype = c_int
+ pcap_setbuff.argtypes = [POINTER(pcap_t), c_int]
+
+ #int pcap_setmode (pcap_t *p, int mode)
+ # Set the working mode of the interface p to mode.
+ pcap_setmode = _lib.pcap_setmode
+ pcap_setmode.restype = c_int
+ pcap_setmode.argtypes = [POINTER(pcap_t), c_int]
+
+ #int pcap_setmintocopy (pcap_t *p, int size)
+    # Set the minimum amount of data received by the kernel in a single call.
+ pcap_setmintocopy = _lib.pcap_setmintocopy
+ pcap_setmintocopy.restype = c_int
+    pcap_setmintocopy.argtypes = [POINTER(pcap_t), c_int]
+
+ #HANDLE pcap_getevent (pcap_t *p)
+ # Return the handle of the event associated with the interface p.
+ pcap_getevent = _lib.pcap_getevent
+ pcap_getevent.restype = HANDLE
+ pcap_getevent.argtypes = [POINTER(pcap_t)]
+
+ #pcap_send_queue * pcap_sendqueue_alloc (u_int memsize)
+ # Allocate a send queue.
+ pcap_sendqueue_alloc = _lib.pcap_sendqueue_alloc
+ pcap_sendqueue_alloc.restype = POINTER(pcap_send_queue)
+ pcap_sendqueue_alloc.argtypes = [c_uint]
+
+ #void pcap_sendqueue_destroy (pcap_send_queue *queue)
+ # Destroy a send queue.
+ pcap_sendqueue_destroy = _lib.pcap_sendqueue_destroy
+ pcap_sendqueue_destroy.restype = None
+ pcap_sendqueue_destroy.argtypes = [POINTER(pcap_send_queue)]
+
+ #int pcap_sendqueue_queue (pcap_send_queue *queue, const struct pcap_pkthdr *pkt_header, const u_char *pkt_data)
+ # Add a packet to a send queue.
+ pcap_sendqueue_queue = _lib.pcap_sendqueue_queue
+ pcap_sendqueue_queue.restype = c_int
+ pcap_sendqueue_queue.argtypes = [POINTER(pcap_send_queue), POINTER(pcap_pkthdr), POINTER(u_char)]
+
+ #u_int pcap_sendqueue_transmit (pcap_t *p, pcap_send_queue *queue, int sync)
+ # Send a queue of raw packets to the network.
+ pcap_sendqueue_transmit = _lib.pcap_sendqueue_transmit
+    pcap_sendqueue_transmit.restype = u_int
+ pcap_sendqueue_transmit.argtypes = [POINTER(pcap_t), POINTER(pcap_send_queue), c_int]
+
+ #int pcap_findalldevs_ex (char *source, struct pcap_rmtauth *auth, pcap_if_t **alldevs, char *errbuf)
+ # Create a list of network devices that can be opened with pcap_open().
+ pcap_findalldevs_ex = _lib.pcap_findalldevs_ex
+    pcap_findalldevs_ex.restype = c_int
+ pcap_findalldevs_ex.argtypes = [STRING, POINTER(pcap_rmtauth), POINTER(POINTER(pcap_if_t)), STRING]
+
+ #int pcap_createsrcstr (char *source, int type, const char *host, const char *port, const char *name, char *errbuf)
+ # Accept a set of strings (host name, port, ...), and it returns the complete source string according to the new format (e.g. 'rpcap://1.2.3.4/eth0').
+ pcap_createsrcstr = _lib.pcap_createsrcstr
+ pcap_createsrcstr.restype = c_int
+ pcap_createsrcstr.argtypes = [STRING, c_int, STRING, STRING, STRING, STRING]
+
+ #int pcap_parsesrcstr (const char *source, int *type, char *host, char *port, char *name, char *errbuf)
+ # Parse the source string and returns the pieces in which the source can be split.
+ pcap_parsesrcstr = _lib.pcap_parsesrcstr
+    pcap_parsesrcstr.restype = c_int
+ pcap_parsesrcstr.argtypes = [STRING, POINTER(c_int), STRING, STRING, STRING, STRING]
+
+ #pcap_t * pcap_open (const char *source, int snaplen, int flags, int read_timeout, struct pcap_rmtauth *auth, char *errbuf)
+ # Open a generic source in order to capture / send (WinPcap only) traffic.
+ pcap_open = _lib.pcap_open
+ pcap_open.restype = POINTER(pcap_t)
+ pcap_open.argtypes = [STRING, c_int, c_int, c_int, POINTER(pcap_rmtauth), STRING]
+
+ #struct pcap_samp * pcap_setsampling (pcap_t *p)
+ # Define a sampling method for packet capture.
+ pcap_setsampling = _lib.pcap_setsampling
+ pcap_setsampling.restype = POINTER(pcap_samp)
+ pcap_setsampling.argtypes = [POINTER(pcap_t)]
+
+ #SOCKET pcap_remoteact_accept (const char *address, const char *port, const char *hostlist, char *connectinghost, struct pcap_rmtauth *auth, char *errbuf)
+ # Block until a network connection is accepted (active mode only).
+ pcap_remoteact_accept = _lib.pcap_remoteact_accept
+ pcap_remoteact_accept.restype = SOCKET
+ pcap_remoteact_accept.argtypes = [STRING, STRING, STRING, STRING, POINTER(pcap_rmtauth), STRING]
+
+ #int pcap_remoteact_close (const char *host, char *errbuf)
+ # Drop an active connection (active mode only).
+ pcap_remoteact_close = _lib.pcap_remoteact_close
+    pcap_remoteact_close.restype = c_int
+ pcap_remoteact_close.argtypes = [STRING, STRING]
+
+ #void pcap_remoteact_cleanup ()
+ # Clean the socket that is currently used in waiting active connections.
+ pcap_remoteact_cleanup = _lib.pcap_remoteact_cleanup
+    pcap_remoteact_cleanup.restype = None
+ pcap_remoteact_cleanup.argtypes = []
+
+ #int pcap_remoteact_list (char *hostlist, char sep, int size, char *errbuf)
+ # Return the hostname of the host that have an active connection with us (active mode only).
+ pcap_remoteact_list = _lib.pcap_remoteact_list
+ pcap_remoteact_list.restype = c_int
+ pcap_remoteact_list.argtypes = [STRING, c_char, c_int, STRING]
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/as_resolvers.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/as_resolvers.py
new file mode 100644
index 00000000..f04322b8
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/as_resolvers.py
@@ -0,0 +1,115 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Resolve Autonomous Systems (AS).
+"""
+
+
+import socket
+from .config import conf
+
+class AS_resolver:
+ server = None
+ options = "-k"
+ def __init__(self, server=None, port=43, options=None):
+ if server is not None:
+ self.server = server
+ self.port = port
+ if options is not None:
+ self.options = options
+
+ def _start(self):
+ self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.s.connect((self.server,self.port))
+ if self.options:
+ self.s.send(self.options+b"\n")
+ self.s.recv(8192)
+ def _stop(self):
+ self.s.close()
+
+ def _parse_whois(self, txt):
+ asn,desc = None,b""
+ for l in txt.splitlines():
+ if not asn and l.startswith(b"origin:"):
+ asn = l[7:].strip().decode('utf-8')
+ if l.startswith(b"descr:"):
+ if desc:
+ desc += br"\n"
+ desc += l[6:].strip()
+ if asn is not None and desc.strip():
+ desc = desc.strip().decode('utf-8')
+ break
+ return asn, desc
+
+ def _resolve_one(self, ip):
+ self.s.send(b"".join([ip.encode('ascii')])+b"\n")
+ x = b""
+ while not (b"%" in x or b"source" in x):
+ x += self.s.recv(8192)
+ asn, desc = self._parse_whois(x)
+ return ip,asn,desc
+ def resolve(self, *ips):
+ self._start()
+ ret = []
+ for ip in ips:
+ ip,asn,desc = self._resolve_one(ip)
+ if asn is not None:
+ ret.append((ip,asn,desc))
+ self._stop()
+ return ret
+
+class AS_resolver_riswhois(AS_resolver):
+ server = "riswhois.ripe.net"
+ options = b"-k -M -1"
+
+
+class AS_resolver_radb(AS_resolver):
+ server = "whois.ra.net"
+ options = b"-k -M"
+
+
+class AS_resolver_cymru(AS_resolver):
+ server = "whois.cymru.com"
+ options = None
+ def resolve(self, *ips):
+ ASNlist = []
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.connect((self.server,self.port))
+ s.send(b"begin\r\n"+b"\r\n".join([ i.encode('ascii') for i in ips])+b"\r\nend\r\n")
+ r = b""
+ while 1:
+ l = s.recv(8192)
+ if l == b"":
+ break
+ r += l
+ s.close()
+ for l in r.splitlines()[1:]:
+ if b"|" not in l:
+ continue
+ asn,ip,desc = [ i.decode('ascii') for i in map(bytes.strip, l.split(b"|")) ]
+ if asn == "NA":
+ continue
+ asn = int(asn)
+ ASNlist.append((ip,asn,desc))
+ return ASNlist
+
+class AS_resolver_multi(AS_resolver):
+ resolvers_list = ( AS_resolver_cymru(),AS_resolver_riswhois(),AS_resolver_radb() )
+ def __init__(self, *reslist):
+ if reslist:
+ self.resolvers_list = reslist
+ def resolve(self, *ips):
+ todo = ips
+ ret = []
+ for ASres in self.resolvers_list:
+ res = ASres.resolve(*todo)
+ resolved = [ ip for ip,asn,desc in res ]
+ todo = [ ip for ip in todo if ip not in resolved ]
+ ret += res
+ return ret
+
+
+conf.AS_resolver = AS_resolver_multi()
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1/__init__.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1/__init__.py
new file mode 100644
index 00000000..4827a588
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1/__init__.py
@@ -0,0 +1,12 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Package holding ASN.1 related modules.
+"""
+
+# We do not import mib.py because it is more bound to scapy and
+# less prone to be used in a standalone fashion
+__all__ = ["asn1","ber"]
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1/asn1.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1/asn1.py
new file mode 100644
index 00000000..c94d0b12
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1/asn1.py
@@ -0,0 +1,321 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+ASN.1 (Abstract Syntax Notation One)
+"""
+
+import random
+from scapy.config import conf
+from scapy.error import Scapy_Exception,warning
+from scapy.volatile import RandField, RandIP
+from scapy.utils import Enum_metaclass, EnumElement
+
+class RandASN1Object(RandField):
+ def __init__(self, objlist=None):
+ if objlist is None:
+ objlist = [ x._asn1_obj for x in
+ [ x for x in ASN1_Class_UNIVERSAL.__rdict__.values() if hasattr(x,"_asn1_obj") ]]
+# objlist = map(lambda x:x._asn1_obj,
+# [ x for x in ASN1_Class_UNIVERSAL.__rdict__.values() if hasattr(x,"_asn1_obj") ])
+ self.objlist = objlist
+ self.chars = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+ def _fix(self, n=0):
+ o = random.choice(self.objlist)
+ if issubclass(o, ASN1_INTEGER):
+ return o(int(random.gauss(0,1000)))
+ elif issubclass(o, ASN1_IPADDRESS):
+ z = RandIP()._fix()
+ return o(z)
+ elif issubclass(o, ASN1_STRING):
+ z = int(random.expovariate(0.05)+1)
+ return o(bytes([random.choice(self.chars) for i in range(z)]))
+ elif issubclass(o, ASN1_SEQUENCE) and (n < 10):
+ z = int(random.expovariate(0.08)+1)
+# return o(map(lambda x:x._fix(n+1), [self.__class__(objlist=self.objlist)]*z))
+ return o([ x._fix(n+1) for x in [self.__class__(objlist=self.objlist)]*z])
+ return ASN1_INTEGER(int(random.gauss(0,1000)))
+
+
+##############
+#### ASN1 ####
+##############
+
+class ASN1_Error(Scapy_Exception):
+ pass
+
+class ASN1_Encoding_Error(ASN1_Error):
+ pass
+
+class ASN1_Decoding_Error(ASN1_Error):
+ pass
+
+class ASN1_BadTag_Decoding_Error(ASN1_Decoding_Error):
+ pass
+
+
+
+class ASN1Codec(EnumElement):
+ def register_stem(cls, stem):
+ cls._stem = stem
+ def dec(cls, s, context=None):
+ return cls._stem.dec(s, context=context)
+ def safedec(cls, s, context=None):
+ return cls._stem.safedec(s, context=context)
+ def get_stem(cls):
+        return cls._stem
+
+
+class ASN1_Codecs_metaclass(Enum_metaclass):
+ element_class = ASN1Codec
+
+class ASN1_Codecs(metaclass = ASN1_Codecs_metaclass):
+ #__metaclass__ = ASN1_Codecs_metaclass
+ BER = 1
+ DER = 2
+ PER = 3
+ CER = 4
+ LWER = 5
+ BACnet = 6
+ OER = 7
+ SER = 8
+ XER = 9
+
+class ASN1Tag(EnumElement):
+ def __init__(self, key, value, context=None, codec=None):
+ EnumElement.__init__(self, key, value)
+ self._context = context
+ if codec == None:
+ codec = {}
+ self._codec = codec
+ def clone(self): # /!\ not a real deep copy. self.codec is shared
+ return self.__class__(self._key, self._value, self._context, self._codec)
+ def register_asn1_object(self, asn1obj):
+ self._asn1_obj = asn1obj
+ def asn1_object(self, val):
+ if hasattr(self,"_asn1_obj"):
+ return self._asn1_obj(val)
+ raise ASN1_Error("%r does not have any assigned ASN1 object" % self)
+ def register(self, codecnum, codec):
+ self._codec[codecnum] = codec
+ def get_codec(self, codec):
+ try:
+ c = self._codec[codec]
+ except KeyError as msg:
+ raise ASN1_Error("Codec %r not found for tag %r" % (codec, self))
+ return c
+
+class ASN1_Class_metaclass(Enum_metaclass):
+ element_class = ASN1Tag
+ def __new__(cls, name, bases, dct): # XXX factorise a bit with Enum_metaclass.__new__()
+ for b in bases:
+ for k,v in b.__dict__.items():
+ if k not in dct and isinstance(v,ASN1Tag):
+ dct[k] = v.clone()
+
+ rdict = {}
+ for k,v in dct.items():
+ if type(v) is int:
+ v = ASN1Tag(k,v)
+ dct[k] = v
+ rdict[v] = v
+ elif isinstance(v, ASN1Tag):
+ rdict[v] = v
+ dct["__rdict__"] = rdict
+
+ cls = type.__new__(cls, name, bases, dct)
+ for v in cls.__dict__.values():
+ if isinstance(v, ASN1Tag):
+ v.context = cls # overwrite ASN1Tag contexts, even cloned ones
+ return cls
+
+
+class ASN1_Class(metaclass = ASN1_Class_metaclass):
+ pass
+
+class ASN1_Class_UNIVERSAL(ASN1_Class):
+ name = "UNIVERSAL"
+ ERROR = -3
+ RAW = -2
+ NONE = -1
+ ANY = 0
+ BOOLEAN = 1
+ INTEGER = 2
+ BIT_STRING = 3
+ STRING = 4
+ NULL = 5
+ OID = 6
+ OBJECT_DESCRIPTOR = 7
+ EXTERNAL = 8
+ REAL = 9
+ ENUMERATED = 10
+ EMBEDDED_PDF = 11
+ UTF8_STRING = 12
+ RELATIVE_OID = 13
+ SEQUENCE = 0x30#XXX 16 ??
+ SET = 0x31 #XXX 17 ??
+ NUMERIC_STRING = 18
+ PRINTABLE_STRING = 19
+ T61_STRING = 20
+ VIDEOTEX_STRING = 21
+ IA5_STRING = 22
+ UTC_TIME = 23
+ GENERALIZED_TIME = 24
+ GRAPHIC_STRING = 25
+ ISO646_STRING = 26
+ GENERAL_STRING = 27
+ UNIVERSAL_STRING = 28
+ CHAR_STRING = 29
+ BMP_STRING = 30
+ IPADDRESS = 0x40
+ COUNTER32 = 0x41
+ GAUGE32 = 0x42
+ TIME_TICKS = 0x43
+ SEP = 0x80
+
+class ASN1_Object_metaclass(type):
+ def __new__(cls, name, bases, dct):
+ c = super(ASN1_Object_metaclass, cls).__new__(cls, name, bases, dct)
+ try:
+ c.tag.register_asn1_object(c)
+ except:
+ warning("Error registering %r for %r" % (c.tag, c.codec))
+ return c
+
+
+class ASN1_Object(metaclass = ASN1_Object_metaclass):
+ tag = ASN1_Class_UNIVERSAL.ANY
+ def __init__(self, val):
+ self.val = val
+ def enc(self, codec):
+ return self.tag.get_codec(codec).enc(self.val)
+ def __repr__(self):
+ return "<%s[%r]>" % (self.__dict__.get("name", self.__class__.__name__), self.val)
+ def __str__(self):
+ raise Exception("Should not get here")
+ #return self.enc(conf.ASN1_default_codec)
+ def __bytes__(self):
+ return self.enc(conf.ASN1_default_codec)
+ def strshow(self, lvl=0):
+ return (" "*lvl)+repr(self)+"\n"
+ def show(self, lvl=0):
+ print(self.strshow(lvl))
+ def __eq__(self, other):
+ return self.val == other
+ def __hash__(self):
+ return self.val
+ def __cmp__(self, other):
+ return cmp(self.val, other)
+
+class ASN1_DECODING_ERROR(ASN1_Object):
+ tag = ASN1_Class_UNIVERSAL.ERROR
+ def __init__(self, val, exc=None):
+ ASN1_Object.__init__(self, val)
+ self.exc = exc
+ def __repr__(self):
+ return "<%s[%r]{{%s}}>" % (self.__dict__.get("name", self.__class__.__name__),
+ self.val, self.exc.args[0])
+ def enc(self, codec):
+ if isinstance(self.val, ASN1_Object):
+ return self.val.enc(codec)
+ return self.val
+
+class ASN1_force(ASN1_Object):
+ tag = ASN1_Class_UNIVERSAL.RAW
+ def enc(self, codec):
+ if isinstance(self.val, ASN1_Object):
+ return self.val.enc(codec)
+ return self.val
+
+class ASN1_BADTAG(ASN1_force):
+ pass
+
+class ASN1_INTEGER(ASN1_Object):
+ tag = ASN1_Class_UNIVERSAL.INTEGER
+
+class ASN1_STRING(ASN1_Object):
+ tag = ASN1_Class_UNIVERSAL.STRING
+ def __init__(self, val):
+ if type(val) is str:
+ self.val = val.encode('ascii')
+ elif type(val) is bytes:
+ self.val = val
+ else:
+ raise Exception("Unknown value type for ASN1_STRING")
+
+class ASN1_BIT_STRING(ASN1_STRING):
+ tag = ASN1_Class_UNIVERSAL.BIT_STRING
+
+class ASN1_PRINTABLE_STRING(ASN1_STRING):
+ tag = ASN1_Class_UNIVERSAL.PRINTABLE_STRING
+
+class ASN1_T61_STRING(ASN1_STRING):
+ tag = ASN1_Class_UNIVERSAL.T61_STRING
+
+class ASN1_IA5_STRING(ASN1_STRING):
+ tag = ASN1_Class_UNIVERSAL.IA5_STRING
+
+class ASN1_NUMERIC_STRING(ASN1_STRING):
+ tag = ASN1_Class_UNIVERSAL.NUMERIC_STRING
+
+class ASN1_VIDEOTEX_STRING(ASN1_STRING):
+ tag = ASN1_Class_UNIVERSAL.VIDEOTEX_STRING
+
+class ASN1_IPADDRESS(ASN1_STRING):
+ tag = ASN1_Class_UNIVERSAL.IPADDRESS
+
+class ASN1_UTC_TIME(ASN1_STRING):
+ tag = ASN1_Class_UNIVERSAL.UTC_TIME
+
+class ASN1_GENERALIZED_TIME(ASN1_STRING):
+ tag = ASN1_Class_UNIVERSAL.GENERALIZED_TIME
+
+class ASN1_TIME_TICKS(ASN1_INTEGER):
+ tag = ASN1_Class_UNIVERSAL.TIME_TICKS
+
+class ASN1_BOOLEAN(ASN1_INTEGER):
+ tag = ASN1_Class_UNIVERSAL.BOOLEAN
+
+class ASN1_ENUMERATED(ASN1_INTEGER):
+ tag = ASN1_Class_UNIVERSAL.ENUMERATED
+
+class ASN1_NULL(ASN1_INTEGER):
+ tag = ASN1_Class_UNIVERSAL.NULL
+
+class ASN1_SEP(ASN1_NULL):
+ tag = ASN1_Class_UNIVERSAL.SEP
+
+class ASN1_GAUGE32(ASN1_INTEGER):
+ tag = ASN1_Class_UNIVERSAL.GAUGE32
+
+class ASN1_COUNTER32(ASN1_INTEGER):
+ tag = ASN1_Class_UNIVERSAL.COUNTER32
+
+class ASN1_SEQUENCE(ASN1_Object):
+ tag = ASN1_Class_UNIVERSAL.SEQUENCE
+ def strshow(self, lvl=0):
+ s = (" "*lvl)+("# %s:" % self.__class__.__name__)+"\n"
+ for o in self.val:
+ s += o.strshow(lvl=lvl+1)
+ return s
+
+class ASN1_SET(ASN1_SEQUENCE):
+ tag = ASN1_Class_UNIVERSAL.SET
+
+class ASN1_OID(ASN1_Object):
+ tag = ASN1_Class_UNIVERSAL.OID
+ def __init__(self, val):
+ if type(val) is str:
+ val = val.encode('ascii')
+ val = conf.mib._oid(val)
+ ASN1_Object.__init__(self, val)
+ def __repr__(self):
+ return "<%s[%r]>" % (self.__dict__.get("name", self.__class__.__name__), conf.mib._oidname(self.val))
+ def __oidname__(self):
+ return '%s'%conf.mib._oidname(self.val)
+
+
+
+conf.ASN1_default_codec = ASN1_Codecs.BER
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1/ber.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1/ber.py
new file mode 100644
index 00000000..48cb1c2d
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1/ber.py
@@ -0,0 +1,370 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Basic Encoding Rules (BER) for ASN.1
+"""
+
+from scapy.error import warning
+from scapy.utils import inet_aton,inet_ntoa
+from scapy.asn1.asn1 import ASN1_Decoding_Error,ASN1_Encoding_Error,ASN1_BadTag_Decoding_Error,ASN1_Codecs,ASN1_Class_UNIVERSAL,ASN1_Error,ASN1_DECODING_ERROR,ASN1_BADTAG
+
+##################
+## BER encoding ##
+##################
+
+
+
+#####[ BER tools ]#####
+
+
+class BER_Exception(Exception):
+ pass
+
+class BER_Encoding_Error(ASN1_Encoding_Error):
+ def __init__(self, msg, encoded=None, remaining=None):
+ Exception.__init__(self, msg)
+ self.remaining = remaining
+ self.encoded = encoded
+ def __str__(self):
+ s = Exception.__str__(self)
+ if isinstance(self.encoded, BERcodec_Object):
+ s+="\n### Already encoded ###\n%s" % self.encoded.strshow()
+ else:
+ s+="\n### Already encoded ###\n%r" % self.encoded
+ s+="\n### Remaining ###\n%r" % self.remaining
+ return s
+
+class BER_Decoding_Error(ASN1_Decoding_Error):
+ def __init__(self, msg, decoded=None, remaining=None):
+ Exception.__init__(self, msg)
+ self.remaining = remaining
+ self.decoded = decoded
+ def __str__(self):
+ s = Exception.__str__(self)
+ if isinstance(self.decoded, BERcodec_Object):
+ s+="\n### Already decoded ###\n%s" % self.decoded.strshow()
+ else:
+ s+="\n### Already decoded ###\n%r" % self.decoded
+ s+="\n### Remaining ###\n%r" % self.remaining
+ return s
+
+class BER_BadTag_Decoding_Error(BER_Decoding_Error, ASN1_BadTag_Decoding_Error):
+ pass
+
+def BER_len_enc(l, size=0):
+ if l <= 127 and size==0:
+ return bytes([l])
+ s = b""
+ while l or size>0:
+ s = bytes([l&0xff])+s
+ l >>= 8
+ size -= 1
+ if len(s) > 127:
+ raise BER_Exception("BER_len_enc: Length too long (%i) to be encoded [%r]" % (len(s),s))
+ return bytes([len(s)|0x80])+s
+def BER_len_dec(s):
+ l = (s[0])
+ if not l & 0x80:
+ return l,s[1:]
+ l &= 0x7f
+ if len(s) <= l:
+ raise BER_Decoding_Error("BER_len_dec: Got %i bytes while expecting %i" % (len(s)-1, l),remaining=s)
+ ll = 0
+ for c in s[1:l+1]:
+ ll <<= 8
+ ll |= (c)
+ return ll,s[l+1:]
+
+def BER_num_enc(l, size=1):
+ x=[]
+ while l or size>0:
+ x.insert(0, l & 0x7f)
+ if len(x) > 1:
+ x[0] |= 0x80
+ l >>= 7
+ size -= 1
+ return bytes([(k) for k in x])
+def BER_num_dec(s):
+ x = 0
+ for i in range(len(s)):
+ c = (s[i])
+ x <<= 7
+ x |= c&0x7f
+ if not c&0x80:
+ break
+ if c&0x80:
+ raise BER_Decoding_Error("BER_num_dec: unfinished number description", remaining=s)
+ return x, s[i+1:]
+
+#####[ BER classes ]#####
+
+class BERcodec_metaclass(type):
+ def __new__(cls, name, bases, dct):
+ c = super(BERcodec_metaclass, cls).__new__(cls, name, bases, dct)
+ try:
+ c.tag.register(c.codec, c)
+ except:
+ warning("Error registering %r for %r" % (c.tag, c.codec))
+ return c
+
+
+class BERcodec_Object( metaclass = BERcodec_metaclass):
+ codec = ASN1_Codecs.BER
+ tag = ASN1_Class_UNIVERSAL.ANY
+
+ @classmethod
+ def asn1_object(cls, val):
+ return cls.tag.asn1_object(val)
+
+ @classmethod
+ def check_string(cls, s):
+ if not s:
+ raise BER_Decoding_Error("%s: Got empty object while expecting tag %r" %
+ (cls.__name__,cls.tag), remaining=s)
+ @classmethod
+ def check_type(cls, s):
+ cls.check_string(s)
+ if hash(cls.tag) != (s[0]):
+ raise BER_BadTag_Decoding_Error("%s: Got tag [%i/%#x] while expecting %r" %
+ (cls.__name__, (s[0]), (s[0]),cls.tag), remaining=s)
+ return s[1:]
+ @classmethod
+ def check_type_get_len(cls, s):
+ s2 = cls.check_type(s)
+ if not s2:
+ raise BER_Decoding_Error("%s: No bytes while expecting a length" %
+ cls.__name__, remaining=s)
+ return BER_len_dec(s2)
+ @classmethod
+ def check_type_check_len(cls, s):
+ l,s3 = cls.check_type_get_len(s)
+ if len(s3) < l:
+ raise BER_Decoding_Error("%s: Got %i bytes while expecting %i" %
+ (cls.__name__, len(s3), l), remaining=s)
+ return l,s3[:l],s3[l:]
+
+ @classmethod
+ def do_dec(cls, s, context=None, safe=False):
+ if context is None:
+ context = cls.tag.context
+ cls.check_string(s)
+ p = (s[0])
+ if p not in context:
+ t = s
+ if len(t) > 18:
+ t = t[:15]+b"..."
+ raise BER_Decoding_Error("Unknown prefix [%02x] for [%r]" % (p,t), remaining=s)
+ codec = context[p].get_codec(ASN1_Codecs.BER)
+ return codec.dec(s,context,safe)
+
+ @classmethod
+ def dec(cls, s, context=None, safe=False):
+ if not safe:
+ return cls.do_dec(s, context, safe)
+ try:
+ return cls.do_dec(s, context, safe)
+ except BER_BadTag_Decoding_Error as e:
+ o,remain = BERcodec_Object.dec(e.remaining, context, safe)
+ return ASN1_BADTAG(o),remain
+ except BER_Decoding_Error as e:
+ return ASN1_DECODING_ERROR(s, exc=e),b""
+ except ASN1_Error as e:
+ return ASN1_DECODING_ERROR(s, exc=e),b""
+
+ @classmethod
+ def safedec(cls, s, context=None):
+ return cls.dec(s, context, safe=True)
+
+
+ @classmethod
+ def enc(cls, s):
+ if type(s) is str:
+ return BERcodec_STRING.enc(s)
+ #TODO3 wild guess
+ elif type(s) is bytes:
+ return BERcodec_STRING.enc(s)
+ else:
+ return BERcodec_INTEGER.enc(hash(s))
+
+
+
+ASN1_Codecs.BER.register_stem(BERcodec_Object)
+
+
+class BERcodec_INTEGER(BERcodec_Object):
+ tag = ASN1_Class_UNIVERSAL.INTEGER
+ @classmethod
+ def enc(cls, i):
+ s = []
+ while 1:
+ s.append(i&0xff)
+ if -127 <= i < 0:
+ break
+ if 128 <= i <= 255:
+ s.append(0)
+ i >>= 8
+ if not i:
+ break
+ #s = map(chr, s)
+ s = bytes(s) + BER_len_enc(len(s)) + bytes([hash(cls.tag)])
+ #s.reverse()
+ #return b"".join(s)
+ return s[::-1]
+ @classmethod
+ def do_dec(cls, s, context=None, safe=False):
+ l,s,t = cls.check_type_check_len(s)
+ x = 0
+ if s:
+ if (s[0])&0x80: # negative int
+ x = -1
+ for c in s:
+ x <<= 8
+ x |= (c)
+ return cls.asn1_object(x),t
+
+
+class BERcodec_BOOLEAN(BERcodec_INTEGER):
+ tag = ASN1_Class_UNIVERSAL.BOOLEAN
+
+class BERcodec_ENUMERATED(BERcodec_INTEGER):
+ tag = ASN1_Class_UNIVERSAL.ENUMERATED
+
+class BERcodec_NULL(BERcodec_INTEGER):
+ tag = ASN1_Class_UNIVERSAL.NULL
+ @classmethod
+ def enc(cls, i):
+ if i == 0:
+ return bytes([hash(cls.tag)])+b"\0"
+ else:
+ return BERcodec_INTEGER.enc(i)
+
+class BERcodec_SEP(BERcodec_NULL):
+ tag = ASN1_Class_UNIVERSAL.SEP
+
+class BERcodec_STRING(BERcodec_Object):
+ tag = ASN1_Class_UNIVERSAL.STRING
+ @classmethod
+ def enc(cls,s):
+ if type(s) is str:
+ s = s.encode('ascii')
+ return bytes([hash(cls.tag)])+BER_len_enc(len(s))+s
+ @classmethod
+ def do_dec(cls, s, context=None, safe=False):
+ l,s,t = cls.check_type_check_len(s)
+ return cls.tag.asn1_object(s),t
+
+class BERcodec_BIT_STRING(BERcodec_STRING):
+ tag = ASN1_Class_UNIVERSAL.BIT_STRING
+
+class BERcodec_PRINTABLE_STRING(BERcodec_STRING):
+ tag = ASN1_Class_UNIVERSAL.PRINTABLE_STRING
+
+class BERcodec_T61_STRING (BERcodec_STRING):
+ tag = ASN1_Class_UNIVERSAL.T61_STRING
+
+class BERcodec_IA5_STRING(BERcodec_STRING):
+ tag = ASN1_Class_UNIVERSAL.IA5_STRING
+
+class BERcodec_NUMERIC_STRING(BERcodec_STRING):
+ tag = ASN1_Class_UNIVERSAL.NUMERIC_STRING
+
+class BERcodec_VIDEOTEX_STRING(BERcodec_STRING):
+ tag = ASN1_Class_UNIVERSAL.VIDEOTEX_STRING
+
+class BERcodec_IPADDRESS(BERcodec_STRING):
+ tag = ASN1_Class_UNIVERSAL.IPADDRESS
+
+ @classmethod
+ def enc(cls, ipaddr_ascii):
+ try:
+ s = inet_aton(ipaddr_ascii)
+ except Exception:
+ raise BER_Encoding_Error("IPv4 address could not be encoded")
+ return bytes([hash(cls.tag)])+BER_len_enc(len(s))+s
+
+ @classmethod
+ def do_dec(cls, s, context=None, safe=False):
+ l,s,t = cls.check_type_check_len(s)
+ try:
+ ipaddr_ascii = inet_ntoa(s)
+ except Exception:
+ raise BER_Decoding_Error("IP address could not be decoded", remaining=s)
+ return cls.asn1_object(ipaddr_ascii), t
+
+class BERcodec_UTC_TIME(BERcodec_STRING):
+ tag = ASN1_Class_UNIVERSAL.UTC_TIME
+
+class BERcodec_GENERALIZED_TIME(BERcodec_STRING):
+ tag = ASN1_Class_UNIVERSAL.GENERALIZED_TIME
+
+class BERcodec_TIME_TICKS(BERcodec_INTEGER):
+ tag = ASN1_Class_UNIVERSAL.TIME_TICKS
+
+class BERcodec_GAUGE32(BERcodec_INTEGER):
+ tag = ASN1_Class_UNIVERSAL.GAUGE32
+
+class BERcodec_COUNTER32(BERcodec_INTEGER):
+ tag = ASN1_Class_UNIVERSAL.COUNTER32
+
+class BERcodec_SEQUENCE(BERcodec_Object):
+ tag = ASN1_Class_UNIVERSAL.SEQUENCE
+ @classmethod
+ def enc(cls, l):
+ #if type(l) is not str:
+ if type(l) is not bytes:
+ l = b"".join(map(lambda x: x.enc(cls.codec), l))
+ return bytes([hash(cls.tag)])+BER_len_enc(len(l))+l
+ @classmethod
+ def do_dec(cls, s, context=None, safe=False):
+ if context is None:
+ context = cls.tag.context
+ l,st = cls.check_type_get_len(s) # we may have len(s) < l
+ s,t = st[:l],st[l:]
+ obj = []
+ while s:
+ try:
+ o,s = BERcodec_Object.dec(s, context, safe)
+ except BER_Decoding_Error as err:
+ err.remaining += t
+ if err.decoded is not None:
+ obj.append(err.decoded)
+ err.decoded = obj
+ raise
+ obj.append(o)
+ if len(st) < l:
+ raise BER_Decoding_Error("Not enough bytes to decode sequence", decoded=obj)
+ return cls.asn1_object(obj),t
+
+class BERcodec_SET(BERcodec_SEQUENCE):
+ tag = ASN1_Class_UNIVERSAL.SET
+
+
+class BERcodec_OID(BERcodec_Object):
+ tag = ASN1_Class_UNIVERSAL.OID
+
+ @classmethod
+ def enc(cls, oid):
+ if type(oid) is str:
+ oid = oid.encode('ascii')
+ lst = [int(x) for x in oid.strip(b".").split(b".")]
+ if len(lst) >= 2:
+ lst[1] += 40*lst[0]
+ del(lst[0])
+ s = b"".join([BER_num_enc(k) for k in lst])
+ return bytes([hash(cls.tag)])+BER_len_enc(len(s))+s
+ @classmethod
+ def do_dec(cls, s, context=None, safe=False):
+ l,s,t = cls.check_type_check_len(s)
+ lst = []
+ while s:
+ l,s = BER_num_dec(s)
+ lst.append(l)
+ if (len(lst) > 0):
+ lst.insert(0,lst[0]//40)
+ lst[1] %= 40
+ return cls.asn1_object(b".".join([str(k).encode('ascii') for k in lst])), t
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1/mib.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1/mib.py
new file mode 100644
index 00000000..8f3df25c
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1/mib.py
@@ -0,0 +1,149 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Management Information Base (MIB) parsing
+"""
+
+import re
+from glob import glob
+from scapy.dadict import DADict,fixname
+from scapy.config import conf
+from scapy.utils import do_graph
+
+#################
+## MIB parsing ##
+#################
+
+_mib_re_integer = re.compile(b"^[0-9]+$")
+_mib_re_both = re.compile(b"^([a-zA-Z_][a-zA-Z0-9_-]*)\(([0-9]+)\)$")
+_mib_re_oiddecl = re.compile(b"^\s*([a-zA-Z0-9_-]+)\s+OBJECT([^:\{\}]|\{[^:]+\})+::=\s*\{([^\}]+)\}",re.M)
+_mib_re_strings = re.compile(b'"[^"]*"')
+_mib_re_comments = re.compile(b'--.*(\r|\n)')
+
+class MIBDict(DADict):
+ def _findroot(self, x):
+ if x.startswith(b"."):
+ x = x[1:]
+ if not x.endswith(b"."):
+ x += b"."
+ max=0
+ root=b"."
+ for k in self.keys():
+ if x.startswith(self[k]+b"."):
+ if max < len(self[k]):
+ max = len(self[k])
+ root = k
+ return root, x[max:-1]
+ def _oidname(self, x):
+ root,remainder = self._findroot(x)
+ return root+remainder
+ def _oid(self, x):
+ if type(x) is str:
+ x = x.encode('ascii')
+ xl = x.strip(b".").split(b".")
+ p = len(xl)-1
+ while p >= 0 and _mib_re_integer.match(xl[p]):
+ p -= 1
+ if p != 0 or xl[p] not in self:
+ return x
+ xl[p] = self[xl[p]]
+ return b".".join(xl[p:])
+ def _make_graph(self, other_keys=[], **kargs):
+ nodes = [(k,self[k]) for k in self.keys()]
+ oids = [self[k] for k in self.keys()]
+ for k in other_keys:
+ if k not in oids:
+ nodes.append((self._oidname(k), k))
+ s = 'digraph "mib" {\n\trankdir=LR;\n\n'
+ for k,o in nodes:
+ s += '\t"%s" [ label="%s" ];\n' % (o,k)
+ s += "\n"
+ for k,o in nodes:
+ parent,remainder = self._findroot(o[:-1])
+ remainder = remainder[1:]+o[-1]
+ if parent != ".":
+ parent = self[parent]
+ s += '\t"%s" -> "%s" [label="%s"];\n' % (parent, o,remainder)
+ s += "}\n"
+ do_graph(s, **kargs)
+ def __len__(self):
+ return len(self.keys())
+
+
+def mib_register(ident, value, the_mib, unresolved):
+ if ident in the_mib or ident in unresolved:
+ return ident in the_mib
+ resval = []
+ not_resolved = 0
+ for v in value:
+ if _mib_re_integer.match(v):
+ resval.append(v)
+ else:
+ v = fixname(v)
+ if v not in the_mib:
+ not_resolved = 1
+ if v in the_mib:
+ v = the_mib[v]
+ elif v in unresolved:
+ v = unresolved[v]
+ if type(v) is list:
+ resval += v
+ else:
+ resval.append(v)
+ if not_resolved:
+ unresolved[ident] = resval
+ return False
+ else:
+ the_mib[ident] = resval
+ keys = list(unresolved.keys())
+ i = 0
+ while i < len(keys):
+ k = keys[i]
+ if mib_register(k,unresolved[k], the_mib, {}):
+ del(unresolved[k])
+ del(keys[i])
+ i = 0
+ else:
+ i += 1
+
+ return True
+
+
+def load_mib(filenames):
+ the_mib = {'iso': ['1']}
+ unresolved = {}
+ for k in conf.mib.keys():
+ mib_register(k, conf.mib[k].split("."), the_mib, unresolved)
+
+ if type(filenames) is str:
+ filenames = [filenames]
+ for fnames in filenames:
+ for fname in glob(fnames):
+ f = open(fname)
+ text = f.read()
+ cleantext = " ".join(_mib_re_strings.split(" ".join(_mib_re_comments.split(text))))
+ for m in _mib_re_oiddecl.finditer(cleantext):
+ gr = m.groups()
+ ident,oid = gr[0],gr[-1]
+ ident=fixname(ident)
+ oid = oid.split()
+ for i in range(len(oid)):
+ m = _mib_re_both.match(oid[i])
+ if m:
+ oid[i] = m.groups()[1]
+ mib_register(ident, oid, the_mib, unresolved)
+
+ newmib = MIBDict(_name="MIB")
+ for k,o in the_mib.items():
+ newmib[k]=".".join(o)
+ for k,o in unresolved.items():
+ newmib[k]=".".join(o)
+
+ conf.mib=newmib
+
+
+
+conf.mib = MIBDict(_name="MIB")
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1fields.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1fields.py
new file mode 100644
index 00000000..e7165a83
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1fields.py
@@ -0,0 +1,341 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Classes that implement ASN.1 data structures.
+"""
+
+import itertools
+from scapy.asn1.asn1 import *
+from scapy.asn1.ber import *
+from scapy.volatile import *
+from scapy.base_classes import BasePacket
+
+
+#####################
+#### ASN1 Fields ####
+#####################
+
+class ASN1F_badsequence(Exception):
+ pass
+
+class ASN1F_element:
+ pass
+
+class ASN1F_optionnal(ASN1F_element):
+ def __init__(self, field):
+ self._field=field
+ def __getattr__(self, attr):
+ return getattr(self._field,attr)
+ def dissect(self,pkt,s):
+ try:
+ return self._field.dissect(pkt,s)
+ except ASN1F_badsequence:
+ self._field.set_val(pkt,None)
+ return s
+ except BER_Decoding_Error:
+ self._field.set_val(pkt,None)
+ return s
+ def build(self, pkt):
+ if self._field.is_empty(pkt):
+ return b""
+ return self._field.build(pkt)
+
+class ASN1F_field(ASN1F_element):
+ holds_packets=0
+ islist=0
+
+ ASN1_tag = ASN1_Class_UNIVERSAL.ANY
+ context=ASN1_Class_UNIVERSAL
+
+ def __init__(self, name, default, context=None):
+ if context is not None:
+ self.context = context
+ self.name = name
+ self.default = default
+
+ def i2repr(self, pkt, x):
+ return repr(x)
+ def i2h(self, pkt, x):
+ return x
+ def any2i(self, pkt, x):
+ return x
+ def m2i(self, pkt, x):
+ return self.ASN1_tag.get_codec(pkt.ASN1_codec).safedec(x, context=self.context)
+ def i2m(self, pkt, x):
+ if x is None:
+ x = 0
+ if isinstance(x, ASN1_Object):
+ if ( self.ASN1_tag == ASN1_Class_UNIVERSAL.ANY
+ or x.tag == ASN1_Class_UNIVERSAL.RAW
+ or x.tag == ASN1_Class_UNIVERSAL.ERROR
+ or self.ASN1_tag == x.tag ):
+ return x.enc(pkt.ASN1_codec)
+ else:
+ raise ASN1_Error("Encoding Error: got %r instead of an %r for field [%s]" % (x, self.ASN1_tag, self.name))
+ return self.ASN1_tag.get_codec(pkt.ASN1_codec).enc(x)
+
+ def do_copy(self, x):
+ if hasattr(x, "copy"):
+ return x.copy()
+ if type(x) is list:
+ x = x[:]
+ for i in range(len(x)):
+ if isinstance(x[i], BasePacket):
+ x[i] = x[i].copy()
+ return x
+
+ def build(self, pkt):
+ return self.i2m(pkt, getattr(pkt, self.name))
+
+ def set_val(self, pkt, val):
+ setattr(pkt, self.name, val)
+ def is_empty(self, pkt):
+ return getattr(pkt,self.name) is None
+
+ def dissect(self, pkt, s):
+ v,s = self.m2i(pkt, s)
+ self.set_val(pkt, v)
+ return s
+
+ def get_fields_list(self):
+ return [self]
+
+ def __hash__(self):
+ return hash(self.name)
+ def __str__(self):
+ return self.name
+ def __eq__(self, other):
+ return self.name == other
+ def __repr__(self):
+ return self.name
+ def randval(self):
+ return RandInt()
+
+
+class ASN1F_INTEGER(ASN1F_field):
+ ASN1_tag= ASN1_Class_UNIVERSAL.INTEGER
+ def randval(self):
+ return RandNum(-2**64, 2**64-1)
+
+class ASN1F_BOOLEAN(ASN1F_field):
+ ASN1_tag= ASN1_Class_UNIVERSAL.BOOLEAN
+ def randval(self):
+ return RandChoice(True,False)
+
+class ASN1F_NULL(ASN1F_INTEGER):
+ ASN1_tag= ASN1_Class_UNIVERSAL.NULL
+
+class ASN1F_SEP(ASN1F_NULL):
+ ASN1_tag= ASN1_Class_UNIVERSAL.SEP
+
+class ASN1F_enum_INTEGER(ASN1F_INTEGER):
+ def __init__(self, name, default, enum):
+ ASN1F_INTEGER.__init__(self, name, default)
+ i2s = self.i2s = {}
+ s2i = self.s2i = {}
+ if type(enum) is list:
+ keys = range(len(enum))
+ else:
+ keys = enum.keys()
+ #if filter(lambda x: type(x) is str, keys):
+ if list(filter(lambda x: type(x) is str, keys)):
+ i2s,s2i = s2i,i2s
+ for k in keys:
+ i2s[k] = enum[k]
+ s2i[enum[k]] = k
+ def any2i_one(self, pkt, x):
+ if type(x) is str:
+ x = self.s2i[x]
+ return x
+ def i2repr_one(self, pkt, x):
+ return self.i2s.get(x, repr(x))
+
+ def any2i(self, pkt, x):
+ if type(x) is list:
+ return map(lambda z,pkt=pkt:self.any2i_one(pkt,z), x)
+ else:
+ return self.any2i_one(pkt,x)
+ def i2repr(self, pkt, x):
+ if type(x) is list:
+ return map(lambda z,pkt=pkt:self.i2repr_one(pkt,z), x)
+ else:
+ return self.i2repr_one(pkt,x)
+
+class ASN1F_ENUMERATED(ASN1F_enum_INTEGER):
+ ASN1_tag = ASN1_Class_UNIVERSAL.ENUMERATED
+
+class ASN1F_STRING(ASN1F_field):
+ ASN1_tag = ASN1_Class_UNIVERSAL.STRING
+ def randval(self):
+ return RandString(RandNum(0, 1000))
+
+class ASN1F_PRINTABLE_STRING(ASN1F_STRING):
+ ASN1_tag = ASN1_Class_UNIVERSAL.PRINTABLE_STRING
+
+class ASN1F_BIT_STRING(ASN1F_STRING):
+ ASN1_tag = ASN1_Class_UNIVERSAL.BIT_STRING
+
+class ASN1F_IPADDRESS(ASN1F_STRING):
+ ASN1_tag = ASN1_Class_UNIVERSAL.IPADDRESS
+
+class ASN1F_TIME_TICKS(ASN1F_INTEGER):
+ ASN1_tag = ASN1_Class_UNIVERSAL.TIME_TICKS
+
+class ASN1F_UTC_TIME(ASN1F_STRING):
+ ASN1_tag = ASN1_Class_UNIVERSAL.UTC_TIME
+
+class ASN1F_GENERALIZED_TIME(ASN1F_STRING):
+ ASN1_tag = ASN1_Class_UNIVERSAL.GENERALIZED_TIME
+
+class ASN1F_OID(ASN1F_field):
+ ASN1_tag = ASN1_Class_UNIVERSAL.OID
+ def randval(self):
+ return RandOID()
+
+class ASN1F_SEQUENCE(ASN1F_field):
+ ASN1_tag = ASN1_Class_UNIVERSAL.SEQUENCE
+ def __init__(self, *seq, **kargs):
+ if "ASN1_tag" in kargs:
+ self.ASN1_tag = kargs["ASN1_tag"]
+ self.seq = seq
+ def __repr__(self):
+ return "<%s%r>" % (self.__class__.__name__,self.seq,)
+ def set_val(self, pkt, val):
+ for f in self.seq:
+ f.set_val(pkt,val)
+ def is_empty(self, pkt):
+ for f in self.seq:
+ if not f.is_empty(pkt):
+ return False
+ return True
+ def get_fields_list(self):
+ #return reduce(lambda x,y: x+y.get_fields_list(), self.seq, [])
+ return list(itertools.chain(*[ i.get_fields_list() for i in self.seq ]))
+ def build(self, pkt):
+ #s = reduce(lambda x,y: x+y.build(pkt), self.seq, b"")
+ s = b""
+ for i in self.seq:
+ s += i.build(pkt)
+ return self.i2m(pkt, s)
+ def dissect(self, pkt, s):
+ codec = self.ASN1_tag.get_codec(pkt.ASN1_codec)
+ try:
+ i,s,remain = codec.check_type_check_len(s)
+ for obj in self.seq:
+ s = obj.dissect(pkt,s)
+ if s:
+ warning("Too many bytes to decode sequence: [%r]" % s) # XXX not reversible!
+ return remain
+ except ASN1_Error as e:
+ raise ASN1F_badsequence(e)
+
+class ASN1F_SET(ASN1F_SEQUENCE):
+ ASN1_tag = ASN1_Class_UNIVERSAL.SET
+
+class ASN1F_SEQUENCE_OF(ASN1F_SEQUENCE):
+ holds_packets = 1
+ islist = 1
+ def __init__(self, name, default, asn1pkt, ASN1_tag=0x30):
+ self.asn1pkt = asn1pkt
+ self.tag = bytes([ASN1_tag])
+ self.name = name
+ self.default = default
+ def i2repr(self, pkt, i):
+ if i is None:
+ return []
+ return i
+ def get_fields_list(self):
+ return [self]
+ def set_val(self, pkt, val):
+ ASN1F_field.set_val(self, pkt, val)
+ def is_empty(self, pkt):
+ return ASN1F_field.is_empty(self, pkt)
+ def build(self, pkt):
+ val = getattr(pkt, self.name)
+ if isinstance(val, ASN1_Object) and val.tag == ASN1_Class_UNIVERSAL.RAW:
+ s = val
+ elif val is None:
+ s = b""
+ else:
+ #print(val)
+ #s = b"".join(map(str, val ))
+ s = b"".join([ bytes(i) for i in val ])
+ return self.i2m(pkt, s)
+ def dissect(self, pkt, s):
+ codec = self.ASN1_tag.get_codec(pkt.ASN1_codec)
+ i,s1,remain = codec.check_type_check_len(s)
+ lst = []
+ while s1:
+ try:
+ p = self.asn1pkt(s1)
+ except ASN1F_badsequence as e:
+ lst.append(conf.raw_layer(s1))
+ break
+ lst.append(p)
+ if conf.raw_layer in p:
+ s1 = p[conf.raw_layer].load
+ del(p[conf.raw_layer].underlayer.payload)
+ else:
+ break
+ self.set_val(pkt, lst)
+ return remain
+ def randval(self):
+ return fuzz(self.asn1pkt())
+ def __repr__(self):
+ return "<%s %s>" % (self.__class__.__name__,self.name)
+
+class ASN1F_PACKET(ASN1F_field):
+ holds_packets = 1
+ def __init__(self, name, default, cls):
+ ASN1F_field.__init__(self, name, default)
+ self.cls = cls
+ def i2m(self, pkt, x):
+ if x is None:
+ x = b""
+ return bytes(x)
+ def extract_packet(self, cls, x):
+ try:
+ c = cls(x)
+ except ASN1F_badsequence:
+ c = conf.raw_layer(x)
+ cpad = c.getlayer(conf.padding_layer)
+ x = b""
+ if cpad is not None:
+ x = cpad.load
+ del(cpad.underlayer.payload)
+ return c,x
+ def m2i(self, pkt, x):
+ return self.extract_packet(self.cls, x)
+
+
+class ASN1F_CHOICE(ASN1F_PACKET):
+ ASN1_tag = ASN1_Class_UNIVERSAL.NONE
+ def __init__(self, name, default, *args):
+ self.name=name
+ self.choice = {}
+ for p in args:
+ self.choice[p.ASN1_root.ASN1_tag] = p
+# self.context=context
+ self.default=default
+ def m2i(self, pkt, x):
+ if len(x) == 0:
+ return conf.raw_layer(),b""
+ raise ASN1_Error("ASN1F_CHOICE: got empty string")
+ #if ord(x[0]) not in self.choice:
+ if (x[0]) not in self.choice:
+ return conf.raw_layer(x),b"" # XXX return RawASN1 packet ? Raise error
+ #raise ASN1_Error("Decoding Error: choice [%i] not found in %r" % (ord(x[0]), self.choice.keys()))
+ raise ASN1_Error("Decoding Error: choice [%i] not found in %r" % ((x[0]), self.choice.keys()))
+
+ #z = ASN1F_PACKET.extract_packet(self, self.choice[ord(x[0])], x)
+ z = ASN1F_PACKET.extract_packet(self, self.choice[(x[0])], x)
+ return z
+ def randval(self):
+ return RandChoice(*map(lambda x:fuzz(x()), self.choice.values()))
+
+
+# This import must come in last to avoid problems with cyclic dependencies
+import scapy.packet
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1packet.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1packet.py
new file mode 100644
index 00000000..049e2b75
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/asn1packet.py
@@ -0,0 +1,26 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Packet holding data in Abstract Syntax Notation (ASN.1).
+"""
+
+from scapy.packet import *
+
+class ASN1_Packet(Packet):
+ ASN1_root = None
+ ASN1_codec = None
+ def init_fields(self):
+ flist = self.ASN1_root.get_fields_list()
+ self.do_init_fields(flist)
+ self.fields_desc = flist
+ def self_build(self):
+ if self.raw_packet_cache is not None:
+ return self.raw_packet_cache
+ return self.ASN1_root.build(self)
+ def do_dissect(self, x):
+ return self.ASN1_root.dissect(self, x)
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/automaton.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/automaton.py
new file mode 100644
index 00000000..cd32b5c0
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/automaton.py
@@ -0,0 +1,753 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Automata with states, transitions and actions.
+"""
+
+from __future__ import with_statement
+import types,itertools,time,os,sys,socket,functools
+from select import select
+from collections import deque
+import _thread
+from .config import conf
+from .utils import do_graph
+from .error import log_interactive
+from .plist import PacketList
+from .data import MTU
+from .supersocket import SuperSocket
+
+class ObjectPipe:
+ def __init__(self):
+ self.rd,self.wr = os.pipe()
+ self.queue = deque()
+ def fileno(self):
+ return self.rd
+ def send(self, obj):
+ self.queue.append(obj)
+ os.write(self.wr,b"X")
+ def recv(self, n=0):
+ os.read(self.rd,1)
+ return self.queue.popleft()
+
+
+class Message:
+ def __init__(self, **args):
+ self.__dict__.update(args)
+ def __repr__(self):
+ return "<Message %s>" % " ".join("%s=%r"%(k,v)
+ for (k,v) in self.__dict__.items()
+ if not k.startswith("_"))
+
+# Currently does not seem necessary
+# class _meta_instance_state(type):
+# def __init__(cls, name, bases, dct):
+# def special_gen(special_method):
+# def special_wrapper(self):
+# print("Calling %s" % special_method)
+# return getattr(getattr(self, "im_func"), special_method)
+# return special_wrapper
+
+# type.__init__(cls, name, bases, dct)
+# for i in ["__int__", "__repr__", "__str__", "__index__", "__add__", "__radd__", "__bytes__"]:
+# setattr(cls, i, property(special_gen(i)))
+
+class _instance_state():
+ def __init__(self, instance):
+ self.im_self = instance.__self__
+ self.im_func = instance.__func__
+ self.im_class = instance.__self__.__class__
+ def __getattr__(self, attr):
+ return getattr(self.im_func, attr)
+
+ def __call__(self, *args, **kargs):
+ return self.im_func(self.im_self, *args, **kargs)
+ def breaks(self):
+ return self.im_self.add_breakpoints(self.im_func)
+ def intercepts(self):
+ return self.im_self.add_interception_points(self.im_func)
+ def unbreaks(self):
+ return self.im_self.remove_breakpoints(self.im_func)
+ def unintercepts(self):
+ return self.im_self.remove_interception_points(self.im_func)
+
+
+##############
+## Automata ##
+##############
+
+class ATMT:
+ STATE = "State"
+ ACTION = "Action"
+ CONDITION = "Condition"
+ RECV = "Receive condition"
+ TIMEOUT = "Timeout condition"
+ IOEVENT = "I/O event"
+
+ class NewStateRequested(Exception):
+ def __init__(self, state_func, automaton, *args, **kargs):
+ self.func = state_func
+ self.state = state_func.atmt_state
+ self.initial = state_func.atmt_initial
+ self.error = state_func.atmt_error
+ self.final = state_func.atmt_final
+ Exception.__init__(self, "Request state [%s]" % self.state)
+ self.automaton = automaton
+ self.args = args
+ self.kargs = kargs
+ self.action_parameters() # init action parameters
+ def action_parameters(self, *args, **kargs):
+ self.action_args = args
+ self.action_kargs = kargs
+ return self
+ def run(self):
+ return self.func(self.automaton, *self.args, **self.kargs)
+ def __repr__(self):
+ return "NewStateRequested(%s)" % self.state
+
+ @staticmethod
+ def state(initial=0,final=0,error=0):
+ def deco(f,initial=initial, final=final):
+ f.atmt_type = ATMT.STATE
+ f.atmt_state = f.__name__
+ f.atmt_initial = initial
+ f.atmt_final = final
+ f.atmt_error = error
+# @functools.wraps(f) This is possible alternative to assigning __qualname__; it would save __doc__, too
+ def state_wrapper(self, *args, **kargs):
+ return ATMT.NewStateRequested(f, self, *args, **kargs)
+
+ state_wrapper.__qualname__ = "%s_wrapper" % f.__name__
+ state_wrapper.atmt_type = ATMT.STATE
+ state_wrapper.atmt_state = f.__name__
+ state_wrapper.atmt_initial = initial
+ state_wrapper.atmt_final = final
+ state_wrapper.atmt_error = error
+ state_wrapper.atmt_origfunc = f
+ return state_wrapper
+ return deco
+ @staticmethod
+ def action(cond, prio=0):
+ def deco(f,cond=cond):
+ if not hasattr(f,"atmt_type"):
+ f.atmt_cond = {}
+ f.atmt_type = ATMT.ACTION
+ f.atmt_cond[cond.atmt_condname] = prio
+ return f
+ return deco
+ @staticmethod
+ def condition(state, prio=0):
+ def deco(f, state=state):
+ f.atmt_type = ATMT.CONDITION
+ f.atmt_state = state.atmt_state
+ f.atmt_condname = f.__name__
+ f.atmt_prio = prio
+ return f
+ return deco
+ @staticmethod
+ def receive_condition(state, prio=0):
+ def deco(f, state=state):
+ f.atmt_type = ATMT.RECV
+ f.atmt_state = state.atmt_state
+ f.atmt_condname = f.__name__
+ f.atmt_prio = prio
+ return f
+ return deco
+ @staticmethod
+ def ioevent(state, name, prio=0, as_supersocket=None):
+ def deco(f, state=state):
+ f.atmt_type = ATMT.IOEVENT
+ f.atmt_state = state.atmt_state
+ f.atmt_condname = f.__name__
+ f.atmt_ioname = name
+ f.atmt_prio = prio
+ f.atmt_as_supersocket = as_supersocket
+ return f
+ return deco
+ @staticmethod
+ def timeout(state, timeout):
+ def deco(f, state=state, timeout=timeout):
+ f.atmt_type = ATMT.TIMEOUT
+ f.atmt_state = state.atmt_state
+ f.atmt_timeout = timeout
+ f.atmt_condname = f.__name__
+ return f
+ return deco
+
+class _ATMT_Command:
+ RUN = "RUN"
+ NEXT = "NEXT"
+ FREEZE = "FREEZE"
+ STOP = "STOP"
+ END = "END"
+ EXCEPTION = "EXCEPTION"
+ SINGLESTEP = "SINGLESTEP"
+ BREAKPOINT = "BREAKPOINT"
+ INTERCEPT = "INTERCEPT"
+ ACCEPT = "ACCEPT"
+ REPLACE = "REPLACE"
+ REJECT = "REJECT"
+
+class _ATMT_supersocket(SuperSocket):
+ def __init__(self, name, ioevent, automaton, proto, args, kargs):
+ self.name = name
+ self.ioevent = ioevent
+ self.proto = proto
+ self.spa,self.spb = socket.socketpair(socket.AF_UNIX, socket.SOCK_DGRAM)
+ kargs["external_fd"] = {ioevent:self.spb}
+ self.atmt = automaton(*args, **kargs)
+ self.atmt.runbg()
+ def fileno(self):
+ return self.spa.fileno()
+ def send(self, s):
+ if type(s) is str:
+ s = s.encode()
+ elif type(s) is not bytes:
+ s = bytes(s)
+ return self.spa.send(s)
+ def recv(self, n=MTU):
+ r = self.spa.recv(n)
+ if self.proto is not None:
+ r = self.proto(r)
+ return r
+ def close(self):
+ pass
+
+class _ATMT_to_supersocket:
+ def __init__(self, name, ioevent, automaton):
+ self.name = name
+ self.ioevent = ioevent
+ self.automaton = automaton
+ def __call__(self, proto, *args, **kargs):
+ return _ATMT_supersocket(self.name, self.ioevent, self.automaton, proto, args, kargs)
+
+class Automaton_metaclass(type):
+ def __new__(cls, name, bases, dct):
+ cls = super(Automaton_metaclass, cls).__new__(cls, name, bases, dct)
+ cls.states={}
+ cls.state = None
+ cls.recv_conditions={}
+ cls.conditions={}
+ cls.ioevents={}
+ cls.timeout={}
+ cls.actions={}
+ cls.initial_states=[]
+ cls.ionames = []
+ cls.iosupersockets = []
+
+ members = {}
+ classes = [cls]
+ while classes:
+ c = classes.pop(0) # order is important to avoid breaking method overloading
+ classes += list(c.__bases__)
+ for k,v in c.__dict__.items():
+ if k not in members:
+ members[k] = v
+
+ decorated = [v for v in members.values()
+ if type(v) is types.FunctionType and hasattr(v, "atmt_type")]
+
+ for m in decorated:
+ if m.atmt_type == ATMT.STATE:
+ s = m.atmt_state
+ cls.states[s] = m
+ cls.recv_conditions[s]=[]
+ cls.ioevents[s]=[]
+ cls.conditions[s]=[]
+ cls.timeout[s]=[]
+ if m.atmt_initial:
+ cls.initial_states.append(m)
+ elif m.atmt_type in [ATMT.CONDITION, ATMT.RECV, ATMT.TIMEOUT, ATMT.IOEVENT]:
+ cls.actions[m.atmt_condname] = []
+
+ for m in decorated:
+ if m.atmt_type == ATMT.CONDITION:
+ cls.conditions[m.atmt_state].append(m)
+ elif m.atmt_type == ATMT.RECV:
+ cls.recv_conditions[m.atmt_state].append(m)
+ elif m.atmt_type == ATMT.IOEVENT:
+ cls.ioevents[m.atmt_state].append(m)
+ cls.ionames.append(m.atmt_ioname)
+ if m.atmt_as_supersocket is not None:
+ cls.iosupersockets.append(m)
+ elif m.atmt_type == ATMT.TIMEOUT:
+ cls.timeout[m.atmt_state].append((m.atmt_timeout, m))
+ elif m.atmt_type == ATMT.ACTION:
+ for c in m.atmt_cond:
+ cls.actions[c].append(m)
+
+
+ for v in cls.timeout.values():
+ #v.sort(lambda (t1,f1),(t2,f2): cmp(t1,t2))
+ v.sort(key = lambda x: x[0])
+ v.append((None, None))
+ for v in itertools.chain(cls.conditions.values(),
+ cls.recv_conditions.values(),
+ cls.ioevents.values()):
+ v.sort(key = lambda x: x.atmt_prio)
+ for condname,actlst in cls.actions.items():
+ actlst.sort(key = lambda x: x.atmt_cond[condname])
+
+ for ioev in cls.iosupersockets:
+ setattr(cls, ioev.atmt_as_supersocket, _ATMT_to_supersocket(ioev.atmt_as_supersocket, ioev.atmt_ioname, cls))
+
+ return cls
+
+ def graph(self, **kargs):
+ s = 'digraph "%s" {\n' % self.__class__.__name__
+
+ se = "" # Keep initial nodes at the begining for better rendering
+ for st in self.states.values():
+ if st.atmt_initial:
+ se = ('\t"%s" [ style=filled, fillcolor=blue, shape=box, root=true];\n' % st.atmt_state)+se
+ elif st.atmt_final:
+ se += '\t"%s" [ style=filled, fillcolor=green, shape=octagon ];\n' % st.atmt_state
+ elif st.atmt_error:
+ se += '\t"%s" [ style=filled, fillcolor=red, shape=octagon ];\n' % st.atmt_state
+ s += se
+
+ for st in self.states.values():
+ for n in st.atmt_origfunc.__code__.co_names+st.atmt_origfunc.__code__.co_consts:
+ if n in self.states:
+ s += '\t"%s" -> "%s" [ color=green ];\n' % (st.atmt_state,n)
+
+
+ for c,k,v in ([("purple",k,v) for k,v in self.conditions.items()]+
+ [("red",k,v) for k,v in self.recv_conditions.items()]+
+ [("orange",k,v) for k,v in self.ioevents.items()]):
+ for f in v:
+ for n in f.__code__.co_names+f.__code__.co_consts:
+ if n in self.states:
+ l = f.atmt_condname
+ for x in self.actions[f.atmt_condname]:
+ l += "\\l>[%s]" % x.__name__
+ s += '\t"%s" -> "%s" [label="%s", color=%s];\n' % (k,n,l,c)
+ for k,v in self.timeout.items():
+ for t,f in v:
+ if f is None:
+ continue
+ for n in f.__code__.co_names+f.__code__.co_consts:
+ if n in self.states:
+ l = "%s/%.1fs" % (f.atmt_condname,t)
+ for x in self.actions[f.atmt_condname]:
+ l += "\\l>[%s]" % x.__name__
+ s += '\t"%s" -> "%s" [label="%s",color=blue];\n' % (k,n,l)
+ s += "}\n"
+ return do_graph(s, **kargs)
+
+
+
+class Automaton(metaclass = Automaton_metaclass):
+
+ ## Methods to overload
+ def parse_args(self, debug=0, store=1, **kargs):
+ self.debug_level=debug
+ self.socket_kargs = kargs
+ self.store_packets = store
+
+ def master_filter(self, pkt):
+ return True
+
+ def my_send(self, pkt):
+ self.send_sock.send(pkt)
+
+
+ ## Utility classes and exceptions
+ class _IO_fdwrapper:
+ def __init__(self,rd,wr):
+ if rd is not None and type(rd) is not int:
+ rd = rd.fileno()
+ if wr is not None and type(wr) is not int:
+ wr = wr.fileno()
+ self.rd = rd
+ self.wr = wr
+ def fileno(self):
+ return self.rd
+ def read(self, n=65535):
+ return os.read(self.rd, n)
+ def write(self, msg):
+ return os.write(self.wr,msg)
+ def recv(self, n=65535):
+ return self.read(n)
+ def send(self, msg):
+ return self.write(msg)
+
+ class _IO_mixer:
+ def __init__(self,rd,wr):
+ self.rd = rd
+ self.wr = wr
+ def fileno(self):
+ if type(self.rd) is int:
+ return self.rd
+ return self.rd.fileno()
+ def recv(self, n=None):
+ return self.rd.recv(n)
+ def read(self, n=None):
+ return self.rd.recv(n)
+ def send(self, msg):
+ return self.wr.send(msg)
+ def write(self, msg):
+ return self.wr.send(msg)
+
+
+ class AutomatonException(Exception):
+ def __init__(self, msg, state=None, result=None):
+ Exception.__init__(self, msg)
+ self.state = state
+ self.result = result
+
+ class AutomatonError(AutomatonException):
+ pass
+ class ErrorState(AutomatonException):
+ pass
+ class Stuck(AutomatonException):
+ pass
+ class AutomatonStopped(AutomatonException):
+ pass
+
+ class Breakpoint(AutomatonStopped):
+ pass
+ class Singlestep(AutomatonStopped):
+ pass
+ class InterceptionPoint(AutomatonStopped):
+ def __init__(self, msg, state=None, result=None, packet=None):
+ Automaton.AutomatonStopped.__init__(self, msg, state=state, result=result)
+ self.packet = packet
+
+ class CommandMessage(AutomatonException):
+ pass
+
+
+ ## Services
+ def debug(self, lvl, msg):
+ if self.debug_level >= lvl:
+ log_interactive.debug(msg)
+
+ def send(self, pkt):
+ if self.state.state in self.interception_points:
+ self.debug(3,"INTERCEPT: packet intercepted: %s" % pkt.summary())
+ self.intercepted_packet = pkt
+ cmd = Message(type = _ATMT_Command.INTERCEPT, state=self.state, pkt=pkt)
+ self.cmdout.send(cmd)
+ cmd = self.cmdin.recv()
+ self.intercepted_packet = None
+ if cmd.type == _ATMT_Command.REJECT:
+ self.debug(3,"INTERCEPT: packet rejected")
+ return
+ elif cmd.type == _ATMT_Command.REPLACE:
+ pkt = cmd.pkt
+ self.debug(3,"INTERCEPT: packet replaced by: %s" % pkt.summary())
+ elif cmd.type == _ATMT_Command.ACCEPT:
+ self.debug(3,"INTERCEPT: packet accepted")
+ else:
+ raise self.AutomatonError("INTERCEPT: unkown verdict: %r" % cmd.type)
+ self.my_send(pkt)
+ self.debug(3,"SENT : %s" % pkt.summary())
+ self.packets.append(pkt.copy())
+
+
+ ## Internals
+ def __init__(self, *args, **kargs):
+ external_fd = kargs.pop("external_fd",{})
+ self.send_sock_class = kargs.pop("ll", conf.L3socket)
+ self.started = _thread.allocate_lock()
+ self.threadid = None
+ self.breakpointed = None
+ self.breakpoints = set()
+ self.interception_points = set()
+ self.intercepted_packet = None
+ self.debug_level=0
+ self.init_args=args
+ self.init_kargs=kargs
+ self.io = type.__new__(type, "IOnamespace",(),{})
+ self.oi = type.__new__(type, "IOnamespace",(),{})
+ self.cmdin = ObjectPipe()
+ self.cmdout = ObjectPipe()
+ self.ioin = {}
+ self.ioout = {}
+ for n in self.ionames:
+ extfd = external_fd.get(n)
+ if type(extfd) is not tuple:
+ extfd = (extfd,extfd)
+ ioin,ioout = extfd
+ if ioin is None:
+ ioin = ObjectPipe()
+ #elif type(ioin) is not types.InstanceType:
+ else:
+ #print(type(ioin))
+ ioin = self._IO_fdwrapper(ioin,None)
+ if ioout is None:
+ ioout = ObjectPipe()
+ #elif type(ioout) is not types.InstanceType:
+ else:
+ #print(type(ioout))
+ ioout = self._IO_fdwrapper(None,ioout)
+
+ self.ioin[n] = ioin
+ self.ioout[n] = ioout
+ ioin.ioname = n
+ ioout.ioname = n
+ setattr(self.io, n, self._IO_mixer(ioout,ioin))
+ setattr(self.oi, n, self._IO_mixer(ioin,ioout))
+
+ for stname in self.states:
+ setattr(self, stname,
+ _instance_state(getattr(self, stname)))
+
+ self.parse_args(*args, **kargs)
+
+ self.start()
+
+ def __iter__(self):
+ return self
+
+ def __del__(self):
+ self.stop()
+
+ def _run_condition(self, cond, *args, **kargs):
+ try:
+ self.debug(5, "Trying %s [%s]" % (cond.atmt_type, cond.atmt_condname))
+ cond(self,*args, **kargs)
+ except ATMT.NewStateRequested as state_req:
+ self.debug(2, "%s [%s] taken to state [%s]" % (cond.atmt_type, cond.atmt_condname, state_req.state))
+ if cond.atmt_type == ATMT.RECV:
+ self.packets.append(args[0])
+ for action in self.actions[cond.atmt_condname]:
+ self.debug(2, " + Running action [%s]" % action.__name__)
+ action(self, *state_req.action_args, **state_req.action_kargs)
+ raise
+ except Exception as e:
+ self.debug(2, "%s [%s] raised exception [%s]" % (cond.atmt_type, cond.atmt_condname, e))
+ raise
+ else:
+ self.debug(2, "%s [%s] not taken" % (cond.atmt_type, cond.atmt_condname))
+
+ def _do_start(self, *args, **kargs):
+
+ _thread.start_new_thread(self._do_control, args, kargs)
+
+
+ def _do_control(self, *args, **kargs):
+ with self.started:
+ self.threadid = _thread.get_ident()
+
+ # Update default parameters
+ a = args+self.init_args[len(args):]
+ k = self.init_kargs.copy()
+ k.update(kargs)
+ self.parse_args(*a,**k)
+
+ # Start the automaton
+ self.state=self.initial_states[0](self)
+ self.send_sock = self.send_sock_class()
+ self.listen_sock = conf.L2listen(**self.socket_kargs)
+ self.packets = PacketList(name="session[%s]"%self.__class__.__name__)
+
+ singlestep = True
+ iterator = self._do_iter()
+ self.debug(3, "Starting control thread [tid=%i]" % self.threadid)
+ try:
+ while True:
+ c = self.cmdin.recv()
+ self.debug(5, "Received command %s" % c.type)
+ if c.type == _ATMT_Command.RUN:
+ singlestep = False
+ elif c.type == _ATMT_Command.NEXT:
+ singlestep = True
+ elif c.type == _ATMT_Command.FREEZE:
+ continue
+ elif c.type == _ATMT_Command.STOP:
+ break
+ while True:
+ state = next(iterator)
+ if isinstance(state, self.CommandMessage):
+ break
+ elif isinstance(state, self.Breakpoint):
+ c = Message(type=_ATMT_Command.BREAKPOINT,state=state)
+ self.cmdout.send(c)
+ break
+ if singlestep:
+ c = Message(type=_ATMT_Command.SINGLESTEP,state=state)
+ self.cmdout.send(c)
+ break
+ except StopIteration as e:
+ c = Message(type=_ATMT_Command.END, result=e.args[0])
+ self.cmdout.send(c)
+ except Exception as e:
+ self.debug(3, "Transfering exception [%s] from tid=%i"% (e,self.threadid))
+ m = Message(type = _ATMT_Command.EXCEPTION, exception=e, exc_info=sys.exc_info())
+ self.cmdout.send(m)
+ self.debug(3, "Stopping control thread (tid=%i)"%self.threadid)
+ self.threadid = None
+
+ def _do_iter(self):
+ while True:
+ try:
+ self.debug(1, "## state=[%s]" % self.state.state)
+
+ # Entering a new state. First, call new state function
+ if self.state.state in self.breakpoints and self.state.state != self.breakpointed:
+ self.breakpointed = self.state.state
+ yield self.Breakpoint("breakpoint triggered on state %s" % self.state.state,
+ state = self.state.state)
+ self.breakpointed = None
+ state_output = self.state.run()
+ if self.state.error:
+ raise self.ErrorState("Reached %s: [%r]" % (self.state.state, state_output),
+ result=state_output, state=self.state.state)
+ if self.state.final:
+ raise StopIteration(state_output)
+
+ if state_output is None:
+ state_output = ()
+ elif type(state_output) is not list:
+ state_output = state_output,
+
+ # Then check immediate conditions
+ for cond in self.conditions[self.state.state]:
+ self._run_condition(cond, *state_output)
+
+ # If still there and no conditions left, we are stuck!
+ if ( len(self.recv_conditions[self.state.state]) == 0 and
+ len(self.ioevents[self.state.state]) == 0 and
+ len(self.timeout[self.state.state]) == 1 ):
+ raise self.Stuck("stuck in [%s]" % self.state.state,
+ state=self.state.state, result=state_output)
+
+ # Finally listen and pay attention to timeouts
+ expirations = iter(self.timeout[self.state.state])
+ next_timeout,timeout_func = next(expirations)
+ t0 = time.time()
+
+ fds = [self.cmdin]
+ if len(self.recv_conditions[self.state.state]) > 0:
+ fds.append(self.listen_sock)
+ for ioev in self.ioevents[self.state.state]:
+ fds.append(self.ioin[ioev.atmt_ioname])
+ while 1:
+ t = time.time()-t0
+ if next_timeout is not None:
+ if next_timeout <= t:
+ self._run_condition(timeout_func, *state_output)
+ next_timeout,timeout_func = next(expirations)
+ if next_timeout is None:
+ remain = None
+ else:
+ remain = next_timeout-t
+
+ self.debug(5, "Select on %r" % fds)
+ r,_,_ = select(fds,[],[],remain)
+ self.debug(5, "Selected %r" % r)
+ for fd in r:
+ self.debug(5, "Looking at %r" % fd)
+ if fd == self.cmdin:
+ yield self.CommandMessage("Received command message")
+ elif fd == self.listen_sock:
+ pkt = self.listen_sock.recv(MTU)
+ if pkt is not None:
+ if self.master_filter(pkt):
+ self.debug(3, "RECVD: %s" % pkt.summary())
+ for rcvcond in self.recv_conditions[self.state.state]:
+ self._run_condition(rcvcond, pkt, *state_output)
+ else:
+ self.debug(4, "FILTR: %s" % pkt.summary())
+ else:
+ self.debug(3, "IOEVENT on %s" % fd.ioname)
+ for ioevt in self.ioevents[self.state.state]:
+ if ioevt.atmt_ioname == fd.ioname:
+ self._run_condition(ioevt, fd, *state_output)
+
+ except ATMT.NewStateRequested as state_req:
+ self.debug(2, "switching from [%s] to [%s]" % (self.state.state,state_req.state))
+ self.state = state_req
+ yield state_req
+
+ ## Public API
+ def add_interception_points(self, *ipts):
+ for ipt in ipts:
+ if hasattr(ipt,"atmt_state"):
+ ipt = ipt.atmt_state
+ self.interception_points.add(ipt)
+
+ def remove_interception_points(self, *ipts):
+ for ipt in ipts:
+ if hasattr(ipt,"atmt_state"):
+ ipt = ipt.atmt_state
+ self.interception_points.discard(ipt)
+
+ def add_breakpoints(self, *bps):
+ for bp in bps:
+ if hasattr(bp,"atmt_state"):
+ bp = bp.atmt_state
+ self.breakpoints.add(bp)
+
+ def remove_breakpoints(self, *bps):
+ for bp in bps:
+ if hasattr(bp,"atmt_state"):
+ bp = bp.atmt_state
+ self.breakpoints.discard(bp)
+
+ def start(self, *args, **kargs):
+ if not self.started.locked():
+ self._do_start(*args, **kargs)
+
+ def run(self, resume=None, wait=True):
+ if resume is None:
+ resume = Message(type = _ATMT_Command.RUN)
+ self.cmdin.send(resume)
+ if wait:
+ try:
+ c = self.cmdout.recv()
+ except KeyboardInterrupt:
+ self.cmdin.send(Message(type = _ATMT_Command.FREEZE))
+ return
+ if c.type == _ATMT_Command.END:
+ return c.result
+ elif c.type == _ATMT_Command.INTERCEPT:
+ raise self.InterceptionPoint("packet intercepted", state=c.state.state, packet=c.pkt)
+ elif c.type == _ATMT_Command.SINGLESTEP:
+ raise self.Singlestep("singlestep state=[%s]"%c.state.state, state=c.state.state)
+ elif c.type == _ATMT_Command.BREAKPOINT:
+ raise self.Breakpoint("breakpoint triggered on state [%s]"%c.state.state, state=c.state.state)
+ elif c.type == _ATMT_Command.EXCEPTION:
+ raise c.exc_info[0](c.exc_info[1])
+ #raise c.exc_info[0],c.exc_info[1],c.exc_info[2]
+
+ def runbg(self, resume=None, wait=False):
+ self.run(resume, wait)
+
+ def next(self):
+ return self.run(resume = Message(type=_ATMT_Command.NEXT))
+
+ def stop(self):
+ self.cmdin.send(Message(type=_ATMT_Command.STOP))
+ with self.started:
+ # Flush command pipes
+ while True:
+ r,_,_ = select([self.cmdin, self.cmdout],[],[],0)
+ if not r:
+ break
+ for fd in r:
+ fd.recv()
+
+ def restart(self, *args, **kargs):
+ self.stop()
+ self.start(*args, **kargs)
+
+ def accept_packet(self, pkt=None, wait=False):
+ rsm = Message()
+ if pkt is None:
+ rsm.type = _ATMT_Command.ACCEPT
+ else:
+ rsm.type = _ATMT_Command.REPLACE
+ rsm.pkt = pkt
+ return self.run(resume=rsm, wait=wait)
+
+ def reject_packet(self, wait=False):
+ rsm = Message(type = _ATMT_Command.REJECT)
+ return self.run(resume=rsm, wait=wait)
+
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/autorun.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/autorun.py
new file mode 100644
index 00000000..063d93dc
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/autorun.py
@@ -0,0 +1,142 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Run commands when the Scapy interpreter starts.
+"""
+
+import code,sys
+from .config import conf
+from .themes import *
+from .error import Scapy_Exception
+from .utils import tex_escape
+
+
+#########################
+##### Autorun stuff #####
+#########################
+
+class StopAutorun(Scapy_Exception):
+ code_run = ""
+
+class ScapyAutorunInterpreter(code.InteractiveInterpreter):
+ def __init__(self, *args, **kargs):
+ code.InteractiveInterpreter.__init__(self, *args, **kargs)
+ self.error = 0
+ def showsyntaxerror(self, *args, **kargs):
+ self.error = 1
+ return code.InteractiveInterpreter.showsyntaxerror(self, *args, **kargs)
+ def showtraceback(self, *args, **kargs):
+ self.error = 1
+ exc_type, exc_value, exc_tb = sys.exc_info()
+ if isinstance(exc_value, StopAutorun):
+ raise exc_value
+ return code.InteractiveInterpreter.showtraceback(self, *args, **kargs)
+
+
+def autorun_commands(cmds,my_globals=None,verb=0):
+ sv = conf.verb
+ import builtins
+ try:
+ try:
+ if my_globals is None:
+ my_globals = __import__("scapy.all").all.__dict__
+ conf.verb = verb
+ interp = ScapyAutorunInterpreter(my_globals)
+ cmd = ""
+ cmds = cmds.splitlines()
+ cmds.append("") # ensure we finish multiline commands
+ cmds.reverse()
+ builtins.__dict__["_"] = None
+ while 1:
+ if cmd:
+ sys.stderr.write(sys.__dict__.get("ps2","... "))
+ else:
+ sys.stderr.write(str(sys.__dict__.get("ps1",ColorPrompt())))
+
+ l = cmds.pop()
+ print(l)
+ cmd += "\n"+l
+ if interp.runsource(cmd):
+ continue
+ if interp.error:
+ return 0
+ cmd = ""
+ if len(cmds) <= 1:
+ break
+ except SystemExit:
+ pass
+ finally:
+ conf.verb = sv
+ return _
+
+def autorun_get_interactive_session(cmds, **kargs):
+ class StringWriter:
+ def __init__(self):
+ self.s = ""
+ def write(self, x):
+ self.s += x
+
+ sw = StringWriter()
+ sstdout,sstderr = sys.stdout,sys.stderr
+ try:
+ try:
+ sys.stdout = sys.stderr = sw
+ res = autorun_commands(cmds, **kargs)
+ except StopAutorun as e:
+ e.code_run = sw.s
+ raise
+ finally:
+ sys.stdout,sys.stderr = sstdout,sstderr
+ return sw.s,res
+
+def autorun_get_text_interactive_session(cmds, **kargs):
+ ct = conf.color_theme
+ try:
+ conf.color_theme = NoTheme()
+ s,res = autorun_get_interactive_session(cmds, **kargs)
+ finally:
+ conf.color_theme = ct
+ return s,res
+
+def autorun_get_ansi_interactive_session(cmds, **kargs):
+ ct = conf.color_theme
+ try:
+ conf.color_theme = DefaultTheme()
+ s,res = autorun_get_interactive_session(cmds, **kargs)
+ finally:
+ conf.color_theme = ct
+ return s,res
+
+def autorun_get_html_interactive_session(cmds, **kargs):
+ ct = conf.color_theme
+ to_html = lambda s: s.replace("<","&lt;").replace(">","&gt;").replace("#[#","<").replace("#]#",">")
+ try:
+ try:
+ conf.color_theme = HTMLTheme2()
+ s,res = autorun_get_interactive_session(cmds, **kargs)
+ except StopAutorun as e:
+ e.code_run = to_html(e.code_run)
+ raise
+ finally:
+ conf.color_theme = ct
+
+ return to_html(s),res
+
+def autorun_get_latex_interactive_session(cmds, **kargs):
+ ct = conf.color_theme
+ to_latex = lambda s: tex_escape(s).replace("@[@","{").replace("@]@","}").replace("@`@","\\")
+ try:
+ try:
+ conf.color_theme = LatexTheme2()
+ s,res = autorun_get_interactive_session(cmds, **kargs)
+ except StopAutorun as e:
+ e.code_run = to_latex(e.code_run)
+ raise
+ finally:
+ conf.color_theme = ct
+ return to_latex(s),res
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/base_classes.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/base_classes.py
new file mode 100644
index 00000000..bace90d6
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/base_classes.py
@@ -0,0 +1,237 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Generators and packet meta classes.
+"""
+
+###############
+## Generators ##
+################
+
+import re,random,socket
+from types import GeneratorType
+import scapy.config
+from . import error
+
+class Gen(object):
+ def __iter__(self):
+ return iter([])
+
+class SetGen(Gen):
+ def __init__(self, col, _iterpacket=1):
+ self._iterpacket=_iterpacket
+ if type(col) is list or isinstance(col, GeneratorType):
+ self.col = col
+ elif isinstance(col, BasePacketList):
+ self.col = list(col)
+ else:
+ self.col = [col]
+ # DEPRECATED
+ # def transf(self, element):
+ # return element
+ def __iter__(self):
+ for i in self.col:
+ if (type(i) is tuple) and (len(i) == 2) and type(i[0]) is int and type(i[1]) is int:
+ if (i[0] <= i[1]):
+ j=i[0]
+ while j <= i[1]:
+ yield j
+ j += 1
+ elif isinstance(i, Gen) and (self._iterpacket or not isinstance(i,BasePacket)):
+ for j in i:
+ yield j
+ else:
+ yield i
+ def __repr__(self):
+ return "<SetGen %s>" % self.col.__repr__()
+
+class Net(Gen):
+ """Generate a list of IPs from a network address or a name"""
+ name = "ip"
+ ipaddress = re.compile(r"^(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\.(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\.(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\.(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)(/[0-3]?[0-9])?$")
+
+ @staticmethod
+ def _parse_digit(a,netmask):
+ netmask = min(8,max(netmask,0))
+ if a == "*":
+ a = (0,256)
+ elif a.find("-") >= 0:
+ x,y = map(int,a.split("-"))
+ if x > y:
+ y = x
+ a = (x & (0xff<<netmask) , max(y, (x | (0xff>>(8-netmask))))+1)
+ else:
+ a = (int(a) & (0xff<<netmask),(int(a) | (0xff>>(8-netmask)))+1)
+ return a
+
+ @classmethod
+ def _parse_net(cls, net):
+ tmp=net.split('/')+["32"]
+ if not cls.ipaddress.match(net):
+ tmp[0]=socket.gethostbyname(tmp[0])
+ netmask = int(tmp[1])
+ #return map(lambda x,y: cls._parse_digit(x,y), tmp[0].split("."), map(lambda x,nm=netmask: x-nm, (8,16,24,32))),netmask
+ return list(map(lambda x,y: cls._parse_digit(x,y), tmp[0].split("."), [ i - netmask for i in (8,16,24,32)] )),netmask
+
+ def __init__(self, net):
+ self.repr=net
+ self.parsed,self.netmask = self._parse_net(net)
+
+ def __iter__(self):
+ for d in range(*self.parsed[3]):
+ for c in range(*self.parsed[2]):
+ for b in range(*self.parsed[1]):
+ for a in range(*self.parsed[0]):
+ yield "%i.%i.%i.%i" % (a,b,c,d)
+ def choice(self):
+ ip = []
+ for v in self.parsed:
+ ip.append(str(random.randint(v[0],v[1]-1)))
+ return ".".join(ip)
+
+ def __repr__(self):
+ return "Net(%r)" % self.repr
+ def __eq__(self, other):
+ if hasattr(other, "parsed"):
+ p2 = other.parsed
+ else:
+ p2,nm2 = self._parse_net(other)
+ return self.parsed == p2
+ def __contains__(self, other):
+ if hasattr(other, "parsed"):
+ p2 = other.parsed
+ else:
+ p2,nm2 = self._parse_net(other)
+ for (a1,b1),(a2,b2) in zip(self.parsed,p2):
+ if a1 > a2 or b1 < b2:
+ return False
+ return True
+ def __rcontains__(self, other):
+ return self in self.__class__(other)
+
+
+class OID(Gen):
+ name = "OID"
+ def __init__(self, oid):
+ self.oid = oid
+ self.cmpt = []
+ fmt = []
+ for i in oid.split("."):
+ if "-" in i:
+ fmt.append("%i")
+ self.cmpt.append(tuple(map(int, i.split("-"))))
+ else:
+ fmt.append(i)
+ self.fmt = ".".join(fmt)
+ def __repr__(self):
+ return "OID(%r)" % self.oid
+ def __iter__(self):
+ ii = [k[0] for k in self.cmpt]
+ while 1:
+ yield self.fmt % tuple(ii)
+ i = 0
+ while 1:
+ if i >= len(ii):
+ raise StopIteration
+ if ii[i] < self.cmpt[i][1]:
+ ii[i]+=1
+ break
+ else:
+ ii[i] = self.cmpt[i][0]
+ i += 1
+
+
+
+######################################
+## Packet abstract and base classes ##
+######################################
+
+class Packet_metaclass(type):
+ def __new__(cls, name, bases, dct):
+ if "fields_desc" in dct: # perform resolution of references to other packets
+ current_fld = dct["fields_desc"]
+ resolved_fld = []
+ for f in current_fld:
+ if isinstance(f, Packet_metaclass): # reference to another fields_desc
+ for f2 in f.fields_desc:
+ resolved_fld.append(f2)
+ else:
+ resolved_fld.append(f)
+ else: # look for a field_desc in parent classes
+ resolved_fld = None
+ for b in bases:
+ if hasattr(b,"fields_desc"):
+ resolved_fld = b.fields_desc
+ break
+
+ if resolved_fld: # perform default value replacements
+ final_fld = []
+ for f in resolved_fld:
+ if f.name in dct:
+ f = f.copy()
+ f.default = dct[f.name]
+ del(dct[f.name])
+ final_fld.append(f)
+
+ dct["fields_desc"] = final_fld
+
+ newcls = super(Packet_metaclass, cls).__new__(cls, name, bases, dct)
+ if hasattr(newcls,"register_variant"):
+ newcls.register_variant()
+ for f in newcls.fields_desc:
+ f.register_owner(newcls)
+ scapy.config.conf.layers.register(newcls)
+ return newcls
+
+ def __getattr__(self, attr):
+ for k in self.fields_desc:
+ if k.name == attr:
+ return k
+ raise AttributeError(attr)
+
+ def __call__(cls, *args, **kargs):
+ if "dispatch_hook" in cls.__dict__:
+ cls = cls.dispatch_hook(*args, **kargs)
+ i = cls.__new__(cls, cls.__name__, cls.__bases__, cls.__dict__)
+ i.__init__(*args, **kargs)
+ return i
+
+
+class NewDefaultValues(Packet_metaclass):
+ """NewDefaultValues is deprecated (not needed anymore)
+
+ remove this:
+ __metaclass__ = NewDefaultValues
+ and it should still work.
+ """
+ def __new__(cls, name, bases, dct):
+ from error import log_loading
+ import traceback
+ try:
+ for tb in traceback.extract_stack()+[("??",-1,None,"")]:
+ f,l,_,line = tb
+ if line.startswith("class"):
+ break
+ except:
+ f,l="??",-1
+ raise
+ log_loading.warning("Deprecated (no more needed) use of NewDefaultValues (%s l. %i)." % (f,l))
+
+ return super(NewDefaultValues, cls).__new__(cls, name, bases, dct)
+
+class BasePacket(Gen):
+ pass
+
+
+#############################
+## Packet list base classe ##
+#############################
+
+class BasePacketList:
+ pass
+
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/config.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/config.py
new file mode 100644
index 00000000..88c324f8
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/config.py
@@ -0,0 +1,394 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Implementation for of the configuration object.
+"""
+
+import os,time,socket,sys
+from .data import *
+import scapy.base_classes
+import scapy.themes
+from .error import log_scapy
+
+############
+## Config ##
+############
+
+class ConfClass(object):
+ def configure(self, cnf):
+ self.__dict__ = cnf.__dict__.copy()
+ def __repr__(self):
+ return str(self)
+ def __str__(self):
+ s=""
+ keys = self.__class__.__dict__.copy()
+ keys.update(self.__dict__)
+ keys = list(keys.keys())
+ keys.sort()
+ for i in keys:
+ if i[0] != "_":
+ r = repr(getattr(self, i))
+ r = " ".join(r.split())
+ wlen = 76-max(len(i),10)
+ if len(r) > wlen:
+ r = r[:wlen-3]+"..."
+ s += "%-10s = %s\n" % (i, r)
+ return s[:-1]
+
+class Interceptor(object):
+ def __init__(self, name, default, hook, args=None, kargs=None):
+ self.name = name
+ self.intname = "_intercepted_%s" % name
+ self.default=default
+ self.hook = hook
+ self.args = args if args is not None else []
+ self.kargs = kargs if kargs is not None else {}
+ def __get__(self, obj, typ=None):
+ if not hasattr(obj, self.intname):
+ setattr(obj, self.intname, self.default)
+ return getattr(obj, self.intname)
+ def __set__(self, obj, val):
+ setattr(obj, self.intname, val)
+ self.hook(self.name, val, *self.args, **self.kargs)
+
+
+class ProgPath(ConfClass):
+ pdfreader = "acroread"
+ psreader = "gv"
+ dot = "dot"
+ display = "display"
+ tcpdump = "tcpdump"
+ tcpreplay = "tcpreplay"
+ hexedit = "hexedit"
+ wireshark = "wireshark"
+
+
+class ConfigFieldList:
+ def __init__(self):
+ self.fields = set()
+ self.layers = set()
+ @staticmethod
+ def _is_field(f):
+ return hasattr(f, "owners")
+ def _recalc_layer_list(self):
+ self.layers = set([owner for f in self.fields for owner in f.owners])
+ def add(self, *flds):
+ self.fields |= set([f for f in flds if self._is_field(f)])
+ self._recalc_layer_list()
+ def remove(self, *flds):
+ self.fields -= set(flds)
+ self._recalc_layer_list()
+ def __contains__(self, elt):
+ if isinstance(elt, scapy.base_classes.Packet_metaclass):
+ return elt in self.layers
+ return elt in self.fields
+ def __repr__(self):
+ return "<%s [%s]>" % (self.__class__.__name__," ".join(str(x) for x in self.fields))
+
+class Emphasize(ConfigFieldList):
+ pass
+
+class Resolve(ConfigFieldList):
+ pass
+
+
+class Num2Layer:
+ def __init__(self):
+ self.num2layer = {}
+ self.layer2num = {}
+
+ def register(self, num, layer):
+ self.register_num2layer(num, layer)
+ self.register_layer2num(num, layer)
+
+ def register_num2layer(self, num, layer):
+ self.num2layer[num] = layer
+ def register_layer2num(self, num, layer):
+ self.layer2num[layer] = num
+
+ def __getitem__(self, item):
+ if isinstance(item, scapy.base_classes.Packet_metaclass):
+ return self.layer2num[item]
+ return self.num2layer[item]
+ def __contains__(self, item):
+ if isinstance(item, scapy.base_classes.Packet_metaclass):
+ return item in self.layer2num
+ return item in self.num2layer
+ def get(self, item, default=None):
+ if item in self:
+ return self[item]
+ return default
+
+ def __repr__(self):
+ lst = []
+ for num,layer in self.num2layer.items():
+ if layer in self.layer2num and self.layer2num[layer] == num:
+ dir = "<->"
+ else:
+ dir = " ->"
+ lst.append((num,"%#6x %s %-20s (%s)" % (num,dir,layer.__name__,layer.name)))
+ for layer,num in self.layer2num.items():
+ if num not in self.num2layer or self.num2layer[num] != layer:
+ lst.append((num,"%#6x <- %-20s (%s)" % (num,layer.__name__,layer.name)))
+ lst.sort()
+ return "\n".join(y for x,y in lst)
+
+
+class LayersList(list):
+ def __repr__(self):
+ s=[]
+ for l in self:
+ s.append("%-20s: %s" % (l.__name__,l.name))
+ return "\n".join(s)
+ def register(self, layer):
+ self.append(layer)
+
+class CommandsList(list):
+ def __repr__(self):
+ s=[]
+ for l in sorted(self,key=lambda x:x.__name__):
+ if l.__doc__:
+ doc = l.__doc__.split("\n")[0]
+ else:
+ doc = "--"
+ s.append("%-20s: %s" % (l.__name__,doc))
+ return "\n".join(s)
+ def register(self, cmd):
+ self.append(cmd)
+ return cmd # return cmd so that method can be used as a decorator
+
+def lsc():
+ print(repr(conf.commands))
+
+class CacheInstance(dict):
+ def __init__(self, name="noname", timeout=None):
+ self.timeout = timeout
+ self.name = name
+ self._timetable = {}
+ def __getitem__(self, item):
+ val = dict.__getitem__(self,item)
+ if self.timeout is not None:
+ t = self._timetable[item]
+ if time.time()-t > self.timeout:
+ raise KeyError(item)
+ return val
+ def get(self, item, default=None):
+ # overloading this method is needed to force the dict to go through
+ # the timetable check
+ try:
+ return self[item]
+ except KeyError:
+ return default
+ def __setitem__(self, item, v):
+ try:
+ self._timetable[item] = time.time()
+ except AttributeError:
+ pass
+ dict.__setitem__(self, item,v)
+ def update(self, other):
+ dict.update(self, other)
+ self._timetable.update(other._timetable)
+ def items(self):
+ if self.timeout is None:
+ return dict.items(self)
+ t0=time.time()
+ return ((k,v) for (k,v) in dict.items(self) if t0-self._timetable[k] < self.timeout)
+ def keys(self):
+ if self.timeout is None:
+ return dict.keys(self)
+ t0=time.time()
+ return (k for k in dict.keys(self) if t0-self._timetable[k] < self.timeout)
+ def __iter__(self):
+ return self.keys()
+ def values(self):
+ if self.timeout is None:
+ return dict.values(self)
+ t0=time.time()
+ return (v for (k,v) in dict.items(self) if t0-self._timetable[k] < self.timeout)
+ def items(self):
+ if self.timeout is None:
+ return dict.items(self)
+ t0=time.time()
+ return [(k,v) for (k,v) in dict.items(self) if t0-self._timetable[k] < self.timeout]
+ def keys(self):
+ if self.timeout is None:
+ return dict.keys(self)
+ t0=time.time()
+ return [k for k in dict.keys(self) if t0-self._timetable[k] < self.timeout]
+ def values(self):
+ if self.timeout is None:
+ return dict.values(self)
+ t0=time.time()
+ return [v for (k,v) in dict.items(self) if t0-self._timetable[k] < self.timeout]
+ def __len__(self):
+ if self.timeout is None:
+ return dict.__len__(self)
+ return len(self.keys())
+ def summary(self):
+ return "%s: %i valid items. Timeout=%rs" % (self.name, len(self), self.timeout)
+ def __repr__(self):
+ s = []
+ if self:
+ mk = max(len(k) for k in self.keys())
+ fmt = "%%-%is %%s" % (mk+1)
+ for item in self.items():
+ s.append(fmt % item)
+ return "\n".join(s)
+
+
+
+
+class NetCache:
+ def __init__(self):
+ self._caches_list = []
+
+
+ def add_cache(self, cache):
+ self._caches_list.append(cache)
+ setattr(self,cache.name,cache)
+ def new_cache(self, name, timeout=None):
+ c = CacheInstance(name=name, timeout=timeout)
+ self.add_cache(c)
+ def __delattr__(self, attr):
+ raise AttributeError("Cannot delete attributes")
+ def update(self, other):
+ for co in other._caches_list:
+ if hasattr(self, co.name):
+ getattr(self,co.name).update(co)
+ else:
+ self.add_cache(co.copy())
+ def flush(self):
+ for c in self._caches_list:
+ c.flush()
+ def __repr__(self):
+ return "\n".join(c.summary() for c in self._caches_list)
+
+
class LogLevel(object):
    # Descriptor backing Conf.logLevel: setting it mirrors the new level into
    # the scapy logger (log_scapy) while remembering the value on the owner.
    def __get__(self, obj, otype):
        return obj._logLevel
    def __set__(self,obj,val):
        # Keep the logging module in sync before recording the new level.
        log_scapy.setLevel(val)
        obj._logLevel = val
+
+
+
def _prompt_changer(attr, val):
    """Interceptor hook: recolor ``sys.ps1`` when conf.color_theme changes.

    ``val`` is the new color theme.  If it actually styles prompts, the
    prompt is wrapped in \\001/\\002 markers so readline does not count the
    invisible escape sequences when computing the visible line width.
    """
    prompt = conf.prompt
    try:
        ct = val
        if isinstance(ct, AnsiColorTheme) and ct.prompt(""):
            ## ^A and ^B delimit invisible characters for readline to count right.
            ## And we need ct.prompt() to do change something or else ^A and ^B will be
            ## displayed
            prompt = "\001%s\002" % ct.prompt("\002" + prompt + "\001")
        else:
            prompt = ct.prompt(prompt)
    except Exception:
        # Best effort: a broken theme must never break the session.  The
        # original bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt, which is undesirable.
        pass
    sys.ps1 = prompt
+
+
class Conf(ConfClass):
    """This object contains the configuration of scapy.
session : filename where the session will be saved
interactive_shell : If set to "ipython", use IPython as shell. Default: IPython.
ipython_embedded : If True use embedded ipython shell, standard ipython shell otherwise.
stealth : if 1, prevents any unwanted packet to go out (ARP, DNS, ...)
checkIPID: if 0, doesn't check that IPID matches between IP sent and ICMP IP citation received
           if 1, checks that they either are equal or byte swapped equals (bug in some IP stacks)
           if 2, strictly checks that they are equals
checkIPsrc: if 1, checks IP src in IP and ICMP IP citation match (bug in some NAT stacks)
check_TCPerror_seqack: if 1, also check that TCP seq and ack match the ones in ICMP citation
iface : selects the default output interface for srp() and sendp(). default: "eth0"
verb : level of verbosity, from 0 (almost mute) to 3 (verbose)
promisc : default mode for listening socket (to get answers if you spoof on a lan)
sniff_promisc : default mode for sniff()
filter : bpf filter added to every sniffing socket to exclude traffic from analysis
histfile : history file
padding : includes padding in disassembled packets
except_filter : BPF filter for packets to ignore
debug_match : when 1, store received packet that are not matched into debug.recv
route : holds the Scapy routing table and provides methods to manipulate it
warning_threshold : how much time between warnings from the same place
ASN1_default_codec: Codec used by default for ASN1 objects
mib : holds MIB direct access dictionary
resolve : holds list of fields for which resolution should be done
noenum : holds list of enum fields for which conversion to string should NOT be done
AS_resolver: choose the AS resolver class to use
extensions_paths: path or list of paths where extensions are to be looked for
"""
    version = "3.0.0"
    session = ""
    interactive = False
    interactive_shell = "ipython"
    ipython_embedded = True
    stealth = "not implemented"
    iface = None
    readfunc = None
    layers = LayersList()
    commands = CommandsList()
    logLevel = LogLevel()  # descriptor: assignments also update log_scapy
    checkIPID = 0
    checkIPsrc = 1
    checkIPaddr = 1
    check_TCPerror_seqack = 0
    verb = 2
    prompt = ">>> "
    promisc = 1
    sniff_promisc = 1
    raw_layer = None
    raw_summary = False
    default_l2 = None
    l2types = Num2Layer()
    l3types = Num2Layer()
    L3socket = None
    L2socket = None
    L2listen = None
    histfile = os.path.join(os.path.expanduser("~"), ".scapy_history")
    padding = 1
    except_filter = ""
    debug_match = 0
    wepkey = ""
    route = None # Filled by route.py
    route6 = None # Filled by route6.py
    auto_fragment = 1
    debug_dissector = 0
    # Interceptor: changing the theme re-renders the interactive prompt via
    # _prompt_changer above.
    color_theme = Interceptor("color_theme", scapy.themes.NoTheme(), _prompt_changer)
    warning_threshold = 5
    prog = ProgPath()
    resolve = Resolve()
    noenum = Resolve()
    emph = Emphasize()
    use_dnet = False
    use_winpcapy = False
    use_netifaces = False
    ipv6_enabled = socket.has_ipv6
    ethertypes = ETHER_TYPES
    protocols = IP_PROTOS
    services_tcp = TCP_SERVICES
    services_udp = UDP_SERVICES
    extensions_paths = "."
    manufdb = MANUFDB
    stats_classic_protocols = []
    stats_dot11_protocols = []
    temp_files = []
    netcache = NetCache()
    # Protocol modules loaded at startup (see scapy.main).
    load_layers = [ "l2", "inet", "dhcp", "dns", "dot11", "gprs", "hsrp", "inet6", "ir", "isakmp", "l2tp",
                    "mgcp", "mobileip", "netbios", "netflow", "ntp", "ppp", "radius", "rip", "rtp",
                    "sebek", "skinny", "smb", "snmp", "tftp", "x509", "bluetooth", "dhcp6", "llmnr", "sctp", "vrrp",
                    "ipsec" ]
+
# Drop the IPv6-dependent layers when this interpreter was built without
# IPv6 support, so loading them later does not fail.
if not Conf.ipv6_enabled:
    log_scapy.warning("IPv6 support disabled in Python. Cannot load scapy IPv6 layers.")
    for m in ["inet6","dhcp6"]:
        if m in Conf.load_layers:
            Conf.load_layers.remove(m)


# Module-wide configuration singleton used throughout scapy.
conf=Conf()
conf.logLevel=30 # 30=Warning
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/__init__.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/__init__.py
new file mode 100644
index 00000000..99654377
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/__init__.py
@@ -0,0 +1,8 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more information
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Package of contrib modules that have to be loaded explicitly.
+"""
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/avs.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/avs.py
new file mode 100644
index 00000000..461b94b8
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/avs.py
@@ -0,0 +1,57 @@
+#! /usr/bin/env python
+
+# http://trac.secdev.org/scapy/ticket/82
+
+# scapy.contrib.description = AVS WLAN Monitor Header
+# scapy.contrib.status = loads
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.dot11 import *
+
# Enumerations for the AVS monitor header fields below (PHY type, encoding,
# SSI measurement type and preamble).
AVSWLANPhyType = { 0 : "Unknown",
                   1 : "FHSS 802.11 '97",
                   2 : "DSSS 802.11 '97",
                   3 : "IR Baseband",
                   4 : "DSSS 802.11b",
                   5 : "PBCC 802.11b",
                   6 : "OFDM 802.11g",
                   7 : "PBCC 802.11g",
                   8 : "OFDM 802.11a" }

AVSWLANEncodingType = { 0 : "Unknown",
                        1 : "CCK",
                        2 : "PBCC",
                        3 : "OFDM"}

AVSWLANSSIType = { 0 : "None",
                   1 : "Normalized RSSI",
                   2 : "dBm",
                   3 : "Raw RSSI"}

AVSWLANPreambleType = { 0 : "Unknown",
                        1 : "Short",
                        2 : "Long" }


class AVSWLANHeader(Packet):
    """ iwpriv eth1 set_prismhdr 1 """
    name = "AVS WLAN Monitor Header"
    fields_desc = [ IntField("version",1),
                    IntField("len",64),
                    LongField("mactime",0),
                    LongField("hosttime",0),
                    IntEnumField("phytype",0, AVSWLANPhyType),
                    IntField("channel",0),
                    IntField("datarate",0),
                    IntField("antenna",0),
                    IntField("priority",0),
                    IntEnumField("ssi_type",0, AVSWLANSSIType),
                    SignedIntField("ssi_signal",0),
                    SignedIntField("ssi_noise",0),
                    IntEnumField("preamble",0, AVSWLANPreambleType),
                    IntEnumField("encoding",0, AVSWLANEncodingType),
                  ]

# An AVS monitor header is always followed by an 802.11 frame.
bind_layers(AVSWLANHeader, Dot11)
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/bgp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/bgp.py
new file mode 100644
index 00000000..525dac5f
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/bgp.py
@@ -0,0 +1,168 @@
+#! /usr/bin/env python
+
+# http://trac.secdev.org/scapy/ticket/162
+
+# scapy.contrib.description = BGP
+# scapy.contrib.status = loads
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.inet import TCP
+
+
class BGPIPField(Field):
    """Represents how BGP encodes an IP prefix: (mask length, prefix).

    Internal value is the tuple (mask, ip-string); on the wire it is one
    length byte followed by only as many address bytes as the mask covers.
    """
    def mask2iplen(self,mask):
        """turn the mask into the length in bytes of the ip field"""
        return (mask + 7) // 8
    def h2i(self, pkt, h):
        """human x.x.x.x/y to internal"""
        ip,mask = re.split( '/', h)
        return int(mask), ip
    def i2h( self, pkt, i):
        mask, ip = i
        return ip + '/' + str( mask )
    def i2repr( self, pkt, i):
        """make it look nice"""
        return self.i2h(pkt,i)
    def i2len(self, pkt, i):
        """wire length: one mask byte plus the truncated address bytes"""
        mask, ip = i
        return self.mask2iplen(mask) + 1
    def i2m(self, pkt, i):
        """internal (mask as int, ip as string) to machine bytes"""
        mask, ip = i
        ip = inet_aton( ip )
        return struct.pack(">B",mask) + ip[:self.mask2iplen(mask)]
    def addfield(self, pkt, s, val):
        return s+self.i2m(pkt, val)
    def getfield(self, pkt, s):
        # Python 3 fix: indexing bytes already yields an int, so the original
        # struct.unpack(">B", s[0]) raised TypeError on every dissection.
        l = self.mask2iplen(s[0]) + 1
        return s[l:], self.m2i(pkt,s[:l])
    def m2i(self,pkt,m):
        """machine bytes back to (mask, dotted-quad string)"""
        # Python 3 fix: slice the address bytes and zero-pad to 4 octets
        # instead of joining one-character strings (which produced str, not
        # the bytes inet_ntoa requires).
        mask = m[0]
        iplen = self.mask2iplen(mask)
        ip = m[1:1 + iplen] + b"\x00" * (4 - iplen)
        return (mask, inet_ntoa(ip))
+
class BGPHeader(Packet):
    """The first part of any BGP packet"""
    name = "BGP header"
    fields_desc = [
        # 16-byte all-ones marker (128 bits).
        XBitField("marker",0xffffffffffffffffffffffffffffffff, 0x80 ),
        ShortField("len", None),
        ByteEnumField("type", 4, {0:"none", 1:"open",2:"update",3:"notification",4:"keep_alive"}),
    ]
    def post_build(self, p, pay):
        # Patch the 2-byte total length at offset 16 once the payload size is
        # known; only done when the user left ``len`` at its default.
        if self.len is None and pay:
            l = len(p) + len(pay)
            p = p[:16]+struct.pack("!H", l)+p[18:]
        return p+pay

class BGPOptionalParameter(Packet):
    """Format of optional Parameter for BGP Open"""
    name = "BGP Optional Parameters"
    fields_desc = [
        ByteField("type", 2),
        ByteField("len", None),
        StrLenField("value", "", length_from = lambda x: x.len),
    ]
    def post_build(self,p,pay):
        # Fill in the 1-byte value length when it was left unset.
        if self.len is None:
            l = len(p) - 2 # 2 is length without value
            p = p[:1]+struct.pack("!B", l)+p[2:]
        return p+pay
    def extract_padding(self, p):
        """any thing after this packet is extracted is padding"""
        # NOTE(review): returns a str sentinel; the py3 scapy tree may expect
        # bytes (b"") here -- confirm against Packet.extract_padding usage.
        return "",p

class BGPOpen(Packet):
    """ Opens a new BGP session"""
    name = "BGP Open Header"
    fields_desc = [
        ByteField("version", 4),
        ShortField("AS", 0),
        ShortField("hold_time", 0),
        IPField("bgp_id","0.0.0.0"),
        ByteField("opt_parm_len", None),
        PacketListField("opt_parm",[], BGPOptionalParameter, length_from=lambda p:p.opt_parm_len),
    ]
    def post_build(self, p, pay):
        # Fill in the optional-parameters length byte (offset 9).
        if self.opt_parm_len is None:
            l = len(p) - 10 # 10 is regular length with no additional options
            p = p[:9] + struct.pack("!B",l) +p[10:]
        return p+pay
+
class BGPAuthenticationData(Packet):
    # NOTE(review): "Algorithm" is a FieldLenField with no length_of/count_of
    # target; a plain field may have been intended -- confirm.
    name = "BGP Authentication Data"
    fields_desc = [
        ByteField("AuthenticationCode", 0),
        ByteField("FormMeaning", 0),
        FieldLenField("Algorithm", 0),
    ]

class BGPPathAttribute(Packet):
    "the attribute of total path"
    name = "BGP Attribute fields"
    fields_desc = [
        FlagsField("flags", 0x40, 8, ["NA0","NA1","NA2","NA3","Extended-Length","Partial","Transitive","Optional"]), # Extended-Length attributes may not work
        ByteEnumField("type", 1, {1:"ORIGIN", 2:"AS_PATH", 3:"NEXT_HOP", 4:"MULTI_EXIT_DISC", 5:"LOCAL_PREF", 6:"ATOMIC_AGGREGATE", 7:"AGGREGATOR"}),
        ByteField("attr_len", None),
        StrLenField("value", "", length_from = lambda p: p.attr_len),
    ]
    def post_build(self, p, pay):
        # Fill the 1-byte attribute length (offset 2) from the built size.
        if self.attr_len is None:
            l = len(p) - 3 # 3 is regular length with no additional options
            p = p[:2] + struct.pack("!B",l) +p[3:]
        return p+pay
    def extract_padding(self, p):
        """any thing after this packet is extracted is padding"""
        return "",p

class BGPUpdate(Packet):
    """Update the routes WithdrawnRoutes = UnfeasiableRoutes"""
    name = "BGP Update fields"
    fields_desc = [
        ShortField("withdrawn_len", None),
        FieldListField("withdrawn",[], BGPIPField("","0.0.0.0/0"), length_from=lambda p:p.withdrawn_len),
        ShortField("tp_len", None),
        PacketListField("total_path", [], BGPPathAttribute, length_from = lambda p: p.tp_len),
        FieldListField("nlri",[], BGPIPField("","0.0.0.0/0"), length_from=lambda p:p.underlayer.len - 23 - p.tp_len - p.withdrawn_len), # len should be BGPHeader.len
    ]
    def post_build(self,p,pay):
        # Recompute withdrawn_len (offset 0) and tp_len (offset 2+wl) when
        # left unset, from the sizes of the corresponding lists.
        # NOTE(review): str(p) is a Python 2 serialization idiom; under this
        # python3 tree bytes(p) is likely required -- confirm.
        wl = self.withdrawn_len
        subpacklen = lambda p: len ( str( p ))
        subfieldlen = lambda p: BGPIPField("", "0.0.0.0/0").i2len(self, p )
        if wl is None:
            wl = sum ( map ( subfieldlen , self.withdrawn))
            p = p[:0]+struct.pack("!H", wl)+p[2:]
        if self.tp_len is None:
            l = sum ( map ( subpacklen , self.total_path))
            p = p[:2+wl]+struct.pack("!H", l)+p[4+wl:]
        return p+pay

class BGPNotification(Packet):
    # Error report: code, subcode and opaque diagnostic data.
    name = "BGP Notification fields"
    fields_desc = [
        ByteEnumField("ErrorCode",0,{1:"Message Header Error",2:"OPEN Message Error",3:"UPDATE Messsage Error",4:"Hold Timer Expired",5:"Finite State Machine",6:"Cease"}),
        ByteEnumField("ErrorSubCode",0,{1:"MessageHeader",2:"OPENMessage",3:"UPDATEMessage"}),
        LongField("Data", 0),
    ]
+
class BGPErrorSubcodes(Packet):
    """Reference table of BGP error subcodes per error class.

    Bug fix: the attribute was spelled ``Fields_desc`` (capital F), which
    scapy silently ignores -- the field list never took effect.  It must be
    ``fields_desc``.
    """
    name = "BGP Error Subcodes"
    fields_desc = [
        ByteEnumField("MessageHeader",0,{1:"Connection Not Synchronized",2:"Bad Message Length",3:"Bad Messsage Type"}),
        ByteEnumField("OPENMessage",0,{1:"Unsupported Version Number",2:"Bad Peer AS",3:"Bad BGP Identifier",4:"Unsupported Optional Parameter",5:"Authentication Failure",6:"Unacceptable Hold Time"}),
        ByteEnumField("UPDATEMessage",0,{1:"Malformed Attribute List",2:"Unrecognized Well-Known Attribute",3:"Missing Well-Known Attribute",4:"Attribute Flags Error",5:"Attribute Length Error",6:"Invalid ORIGIN Attribute",7:"AS Routing Loop",8:"Invalid NEXT_HOP Attribute",9:"Optional Attribute Error",10:"Invalid Network Field",11:"Malformed AS_PATH"}),
    ]
+
# BGP rides over TCP port 179 in either direction; the header's ``type``
# field selects the payload class.
bind_layers( TCP, BGPHeader, dport=179)
bind_layers( TCP, BGPHeader, sport=179)
bind_layers( BGPHeader, BGPOpen, type=1)
bind_layers( BGPHeader, BGPUpdate, type=2)
# NOTE(review): type=4 (keep-alive) binds BGPHeader to itself; keep-alives
# are header-only, but confirm the self-binding is intentional.
bind_layers( BGPHeader, BGPHeader, type=4)


if __name__ == "__main__":
    # NOTE(review): ``interact`` is not imported in this module; this only
    # works if a star-import above provides it -- confirm.
    interact(mydict=globals(), mybanner="BGP addon .05")
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/carp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/carp.py
new file mode 100644
index 00000000..e785adef
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/carp.py
@@ -0,0 +1,65 @@
+
+# scapy.contrib.description = CARP
+# scapy.contrib.status = loads
+
+from scapy.packet import *
+from scapy.layers.inet import IP
+from scapy.fields import BitField, ByteField, XShortField, IntField, XIntField
+from scapy.utils import checksum
+import struct, hmac, hashlib
+
class CARP(Packet):
    # Common Address Redundancy Protocol advertisement.
    name = "CARP"
    fields_desc = [ BitField("version", 4, 4),
                    BitField("type", 4, 4),
                    ByteField("vhid", 1),
                    ByteField("advskew", 0),
                    ByteField("authlen", 0),
                    ByteField("demotion", 0),
                    ByteField("advbase", 0),
                    XShortField("chksum", 0),
                    XIntField("counter1", 0),
                    XIntField("counter2", 0),
                    XIntField("hmac1", 0),
                    XIntField("hmac2", 0),
                    XIntField("hmac3", 0),
                    XIntField("hmac4", 0),
                    XIntField("hmac5", 0)
                    ]

    def post_build(self, pkt, pay):
        # Insert the 16-bit checksum at bytes 6..7 (version/type nibbles plus
        # the five single-byte fields come first).
        # NOTE(review): chksum defaults to 0, not None, so this branch only
        # runs when the caller explicitly sets chksum=None -- confirm intended.
        if self.chksum == None:
            pkt = pkt[:6] + struct.pack("!H", checksum(pkt)) + pkt[8:]

        # NOTE(review): ``pay`` is dropped here; post_build conventionally
        # returns pkt + pay. CARP carries no payload in practice, but verify.
        return pkt
+
def build_hmac_sha1(pkt, pw = b'\x00' * 20, ip4l = [], ip6l = []):
    """Compute the CARP HMAC-SHA1 over the advertised addresses.

    Returns the 20-byte digest, or None when ``pkt`` has no CARP layer.
    Python 3 fixes: hmac.new() and hash updates require bytes, so the
    password default and the literal prefix are bytes objects now.
    """
    if not pkt.haslayer(CARP):
        return None

    p = pkt[CARP]
    h = hmac.new(pw, digestmod = hashlib.sha1)
    # XXX: this is a dirty hack. it needs to pack version and type into a single 8bit field
    h.update(b'\x21')
    # XXX: mac addy if different from special link layer. comes before vhid
    h.update(struct.pack('!B', p.vhid))

    # Feed the IPv4 addresses smallest-to-largest; network byte order sorts
    # correctly as raw bytes.  Sort once after collecting (the original
    # re-sorted the list on every append with the same end result).
    sl = sorted(inet_aton(i) for i in ip4l)
    for i in sl:
        h.update(i)

    # XXX: do ip6l sorting

    return h.digest()
+
+"""
+XXX: Usually CARP is multicast to 224.0.0.18 but because of virtual setup, it'll
+be unicast between nodes. Uncomment the following line for normal use
+bind_layers(IP, CARP, proto=112, dst='224.0.0.18')
+"""
+bind_layers(IP, CARP, proto=112)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/cdp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/cdp.py
new file mode 100644
index 00000000..ed336162
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/cdp.py
@@ -0,0 +1,306 @@
+#! /usr/bin/env python
+
+# scapy.contrib.description = Cisco Discovery Protocol
+# scapy.contrib.status = loads
+
+#############################################################################
+## ##
+## cdp.py --- Cisco Discovery Protocol (CDP) extension for Scapy ##
+## ##
+## Copyright (C) 2006 Nicolas Bareil <nicolas.bareil AT eads DOT net> ##
+## Arnaud Ebalard <arnaud.ebalard AT eads DOT net> ##
+## EADS/CRC security team ##
+## ##
+## This program is free software; you can redistribute it and/or modify it ##
+## under the terms of the GNU General Public License version 2 as ##
+## published by the Free Software Foundation; version 2. ##
+## ##
+## This program is distributed in the hope that it will be useful, but ##
+## WITHOUT ANY WARRANTY; without even the implied warranty of ##
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ##
+## General Public License for more details. ##
+## ##
+#############################################################################
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.inet6 import *
+
+
+#####################################################################
+# Helpers and constants
+#####################################################################
+
+# CDP TLV classes keyed by type
# CDP TLV classes keyed by type
_cdp_tlv_cls = { 0x0001: "CDPMsgDeviceID",
                 0x0002: "CDPMsgAddr",
                 0x0003: "CDPMsgPortID",
                 0x0004: "CDPMsgCapabilities",
                 0x0005: "CDPMsgSoftwareVersion",
                 0x0006: "CDPMsgPlatform",
                 0x0007: "CDPMsgIPPrefix",
                 0x0008: "CDPMsgProtoHello",
                 0x0009: "CDPMsgVTPMgmtDomain", # CDPv2
                 0x000a: "CDPMsgNativeVLAN",    # CDPv2
                 0x000b: "CDPMsgDuplex",        #
#                0x000c: "CDPMsgGeneric",
#                0x000d: "CDPMsgGeneric",
                 0x000e: "CDPMsgVoIPVLANReply",
                 0x000f: "CDPMsgVoIPVLANQuery",
                 0x0010: "CDPMsgPower",
                 0x0011: "CDPMsgMTU",
#                0x0012: "CDPMsgTrustBitmap",
#                0x0013: "CDPMsgUntrustedPortCoS",
#                0x0014: "CDPMsgSystemName",
#                0x0015: "CDPMsgSystemOID",
                 0x0016: "CDPMsgMgmtAddr",
#                0x0017: "CDPMsgLocation",
                 0x0019: "CDPMsgUnknown19",
#                0x001a: "CDPPowerAvailable"
                 }

# Human-readable TLV type names (used by the XShortEnumField fields below).
_cdp_tlv_types = { 0x0001: "Device ID",
                   0x0002: "Addresses",
                   0x0003: "Port ID",
                   0x0004: "Capabilities",
                   0x0005: "Software Version",
                   0x0006: "Platform",
                   0x0007: "IP Prefix",
                   0x0008: "Protocol Hello",
                   0x0009: "VTP Mangement Domain", # CDPv2
                   0x000a: "Native VLAN",          # CDPv2
                   0x000b: "Duplex",               #
                   0x000c: "CDP Unknown command (send us a pcap file)",
                   0x000d: "CDP Unknown command (send us a pcap file)",
                   0x000e: "VoIP VLAN Reply",
                   0x000f: "VoIP VLAN Query",
                   0x0010: "Power",
                   0x0011: "MTU",
                   0x0012: "Trust Bitmap",
                   0x0013: "Untrusted Port CoS",
                   0x0014: "System Name",
                   0x0015: "System OID",
                   0x0016: "Management Address",
                   0x0017: "Location",
                   0x0018: "CDP Unknown command (send us a pcap file)",
                   0x0019: "CDP Unknown command (send us a pcap file)",
                   0x001a: "Power Available"}

def _CDPGuessPayloadClass(p, **kargs):
    # Peek at the 2-byte TLV type and instantiate the matching message class,
    # defaulting to CDPMsgGeneric for unknown types (raw layer when the
    # buffer is too short to hold a type).
    cls = conf.raw_layer
    if len(p) >= 2:
        t = struct.unpack("!H", p[:2])[0]
        clsname = _cdp_tlv_cls.get(t, "CDPMsgGeneric")
        cls = globals()[clsname]

    return cls(p, **kargs)
+
class CDPMsgGeneric(Packet):
    """Generic CDP TLV: 2-byte type, 2-byte total length, opaque value."""
    name = "CDP Generic Message"
    fields_desc = [ XShortEnumField("type", None, _cdp_tlv_types),
                    FieldLenField("len", None, "val", "!H"),
                    StrLenField("val", "", length_from=lambda x:x.len - 4) ]


    def guess_payload_class(self, p):
        # Trailing bytes are padding; TLV dispatch is driven by CDPv2_HDR's
        # PacketListField instead.
        return conf.padding_layer # _CDPGuessPayloadClass

class CDPMsgDeviceID(CDPMsgGeneric):
    name = "Device ID"
    type = 0x0001

_cdp_addr_record_ptype = {0x01: "NLPID", 0x02: "802.2"}
# NOTE(review): str literals; under Python 3 they never compare equal to a
# bytes slice of a packet -- see _CDPGuessAddrRecord below.
_cdp_addrrecord_proto_ip = "\xcc"
_cdp_addrrecord_proto_ipv6 = "\xaa\xaa\x03\x00\x00\x00\x86\xdd"

class CDPAddrRecord(Packet):
    # One address entry: protocol id bytes followed by the address bytes.
    name = "CDP Address"
    fields_desc = [ ByteEnumField("ptype", 0x01, _cdp_addr_record_ptype),
                    FieldLenField("plen", None, "proto", "B"),
                    StrLenField("proto", None, length_from=lambda x:x.plen),
                    # NOTE(review): length_of normally takes a field-name
                    # string, not a lambda -- confirm against this tree's
                    # FieldLenField implementation.
                    FieldLenField("addrlen", None, length_of=lambda x:x.addr),
                    StrLenField("addr", None, length_from=lambda x:x.addrlen)]

    def guess_payload_class(self, p):
        return conf.padding_layer

class CDPAddrRecordIPv4(CDPAddrRecord):
    name = "CDP Address IPv4"
    fields_desc = [ ByteEnumField("ptype", 0x01, _cdp_addr_record_ptype),
                    FieldLenField("plen", 1, "proto", "B"),
                    StrLenField("proto", _cdp_addrrecord_proto_ip, length_from=lambda x:x.plen),
                    ShortField("addrlen", 4),
                    IPField("addr", "0.0.0.0")]

class CDPAddrRecordIPv6(CDPAddrRecord):
    name = "CDP Address IPv6"
    fields_desc = [ ByteEnumField("ptype", 0x02, _cdp_addr_record_ptype),
                    FieldLenField("plen", 8, "proto", "B"),
                    StrLenField("proto", _cdp_addrrecord_proto_ipv6, length_from=lambda x:x.plen),
                    ShortField("addrlen", 16),
                    IP6Field("addr", "::1")]
+
def _CDPGuessAddrRecord(p, **kargs):
    """Dispatch raw address-record bytes to the matching CDPAddrRecord class.

    Python 3 fixes: indexing bytes already yields an int (the original
    struct.unpack("B", p[1]) raised TypeError), and the module's protocol
    constants are str, so the sliced protocol bytes are latin-1 decoded
    before comparison (latin-1 maps each byte 1:1 and never fails).
    """
    cls = conf.raw_layer
    if len(p) >= 2:
        plen = p[1]
        proto = p[2:plen + 2].decode("latin-1")

        if proto == _cdp_addrrecord_proto_ip:
            clsname = "CDPAddrRecordIPv4"
        elif proto == _cdp_addrrecord_proto_ipv6:
            clsname = "CDPAddrRecordIPv6"
        else:
            clsname = "CDPAddrRecord"

        cls = globals()[clsname]

    return cls(p, **kargs)
+
class CDPMsgAddr(CDPMsgGeneric):
    """Addresses TLV: record count followed by the address records."""
    name = "Addresses"
    fields_desc = [ XShortEnumField("type", 0x0002, _cdp_tlv_types),
                    ShortField("len", None),
                    FieldLenField("naddr", None, "addr", "!I"),
                    PacketListField("addr", [], _CDPGuessAddrRecord, count_from=lambda x:x.naddr) ]

    def post_build(self, pkt, pay):
        # Recompute len as 8 fixed bytes plus 9 per record.
        # NOTE(review): 9 bytes only holds for IPv4 records; IPv6 records are
        # longer -- verify.
        if self.len is None:
            l = 8 + len(self.addr) * 9
            pkt = pkt[:2] + struct.pack("!H", l) + pkt[4:]
        p = pkt + pay
        return p

class CDPMsgPortID(CDPMsgGeneric):
    name = "Port ID"
    fields_desc = [ XShortEnumField("type", 0x0003, _cdp_tlv_types),
                    FieldLenField("len", None, "iface", "!H"),
                    StrLenField("iface", "Port 1", length_from=lambda x:x.len - 4) ]


# Capability flag names, LSB first, padded out to 32 bits.
_cdp_capabilities = [ "Router",
                      "TransparentBridge",
                      "SourceRouteBridge",
                      "Switch",
                      "Host",
                      "IGMPCapable",
                      "Repeater"] + ["Bit%d" % x for x in range(25,0,-1)]


class CDPMsgCapabilities(CDPMsgGeneric):
    name = "Capabilities"
    fields_desc = [ XShortEnumField("type", 0x0004, _cdp_tlv_types),
                    ShortField("len", 8),
                    FlagsField("cap", 0, 32, _cdp_capabilities) ]


class CDPMsgSoftwareVersion(CDPMsgGeneric):
    name = "Software Version"
    type = 0x0005


class CDPMsgPlatform(CDPMsgGeneric):
    name = "Platform"
    type = 0x0006

_cdp_duplex = { 0x00: "Half", 0x01: "Full" }

# ODR Routing
class CDPMsgIPPrefix(CDPMsgGeneric):
    name = "IP Prefix"
    type = 0x0007
    fields_desc = [ XShortEnumField("type", 0x0007, _cdp_tlv_types),
                    ShortField("len", 8),
                    IPField("defaultgw", "192.168.0.1") ]

# TODO : Do me !!!!!! 0x0008
class CDPMsgProtoHello(CDPMsgGeneric):
    name = "Protocol Hello"
    type = 0x0008

class CDPMsgVTPMgmtDomain(CDPMsgGeneric):
    name = "VTP Management Domain"
    type = 0x0009

class CDPMsgNativeVLAN(CDPMsgGeneric):
    name = "Native VLAN"
    fields_desc = [ XShortEnumField("type", 0x000a, _cdp_tlv_types),
                    ShortField("len", 6),
                    ShortField("vlan", 1) ]

class CDPMsgDuplex(CDPMsgGeneric):
    name = "Duplex"
    fields_desc = [ XShortEnumField("type", 0x000b, _cdp_tlv_types),
                    ShortField("len", 5),
                    ByteEnumField("duplex", 0x00, _cdp_duplex) ]
+
class CDPMsgVoIPVLANReply(CDPMsgGeneric):
    name = "VoIP VLAN Reply"
    fields_desc = [ XShortEnumField("type", 0x000e, _cdp_tlv_types),
                    ShortField("len", 7),
                    # NOTE(review): the '?' in this field name prevents plain
                    # attribute access -- confirm intentional.
                    ByteField("status?", 1),
                    ShortField("vlan", 1)]


# TODO : Do me !!! 0x000F
class CDPMsgVoIPVLANQuery(CDPMsgGeneric):
    name = "VoIP VLAN Query"
    type = 0x000f

# fields_desc = [XShortEnumField("type", 0x000f, _cdp_tlv_types),
#                FieldLenField("len", None, "val", "!H") ]


class _CDPPowerField(ShortField):
    # ShortField that renders its value in milliwatts.
    def i2repr(self, pkt, x):
        if x is None:
            x = 0
        return "%d mW" % x


class CDPMsgPower(CDPMsgGeneric):
    name = "Power"
    # Check if field length is fixed (2 bytes)
    fields_desc = [ XShortEnumField("type", 0x0010, _cdp_tlv_types),
                    ShortField("len", 6),
                    _CDPPowerField("power", 1337)]


class CDPMsgMTU(CDPMsgGeneric):
    name = "MTU"
    # Check if field length is fixed (2 bytes)
    fields_desc = [ XShortEnumField("type", 0x0011, _cdp_tlv_types),
                    ShortField("len", 6),
                    ShortField("mtu", 1500)]

class CDPMsgMgmtAddr(CDPMsgAddr):
    name = "Management Address"
    type = 0x0016

class CDPMsgUnknown19(CDPMsgGeneric):
    name = "Unknown CDP Message"
    type = 0x0019

class CDPMsg(CDPMsgGeneric):
    name = "CDP "
    fields_desc = [ XShortEnumField("type", None, _cdp_tlv_types),
                    FieldLenField("len", None, "val", "!H"),
                    StrLenField("val", "", length_from=lambda x:x.len - 4) ]

class _CDPChecksum:
    # Mixin: fill in the 16-bit checksum at bytes 2..3 when left unset.
    def post_build(self, pkt, pay):
        p = pkt + pay
        if self.cksum is None:
            cksum = checksum(p)
            p = p[:2] + struct.pack("!H", cksum) + p[4:]
        return p

class CDPv2_HDR(_CDPChecksum, CDPMsgGeneric):
    """CDPv2 header: version, TTL, checksum, then the TLV list."""
    name = "Cisco Discovery Protocol version 2"
    fields_desc = [ ByteField("vers", 2),
                    ByteField("ttl", 180),
                    XShortField("cksum", None),
                    PacketListField("msg", [], _CDPGuessPayloadClass) ]

# CDP rides over SNAP with Cisco OUI 0x00000c, protocol id 0x2000.
bind_layers(SNAP, CDPv2_HDR, {"code": 0x2000, "OUI": 0xC})
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/chdlc.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/chdlc.py
new file mode 100644
index 00000000..6e483762
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/chdlc.py
@@ -0,0 +1,42 @@
+# http://trac.secdev.org/scapy/ticket/88
+
+# scapy.contrib.description = Cisco HDLC and SLARP
+# scapy.contrib.status = loads
+
+# This layer is based on information from http://www.nethelp.no/net/cisco-hdlc.txt
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.l2 import *
+from scapy.layers.inet import *
+from scapy.layers.inet6 import *
+
class CHDLC(Packet):
    """Cisco HDLC framing: address, control and an EtherType-like proto."""
    name = "Cisco HDLC"
    fields_desc = [ ByteEnumField("address", 0x0f, {0x0f : "unicast", 0x8f :"multicast"}),
                    ByteField("control", 0),
                    XShortField("proto", 0x0800)]

class SLARP(Packet):
    """Serial Line ARP: address request/reply and line keepalive messages."""
    name = "SLARP"
    fields_desc = [ IntEnumField("type", 2, {0 : "request", 1 : "reply", 2 :"line keepalive"}),
                    # Address/mask/unused only exist on request/reply (0/1)...
                    ConditionalField(IPField("address", "192.168.0.1"),
                             lambda pkt : pkt.type == 0 or pkt.type == 1),
                    ConditionalField(IPField("mask", "255.255.255.0"),
                             lambda pkt : pkt.type == 0 or pkt.type == 1),
                    ConditionalField(XShortField("unused", 0),
                             lambda pkt : pkt.type == 0 or pkt.type == 1),
                    # ...while sequence counters and reliability belong to
                    # keepalives (type 2).
                    ConditionalField(IntField("mysequence", 0),
                             lambda pkt : pkt.type == 2),
                    ConditionalField(IntField("yoursequence", 0),
                             lambda pkt : pkt.type == 2),
                    ConditionalField(XShortField("reliability", 0xffff),
                             lambda pkt : pkt.type == 2)]

# Upper-layer dispatch on the CHDLC proto field.
bind_layers( CHDLC, Dot3, proto=0x6558)
bind_layers( CHDLC, IP, proto=0x800)
bind_layers( CHDLC, IPv6, proto=0x86dd)
bind_layers( CHDLC, SLARP, proto=0x8035)
bind_layers( CHDLC, STP, proto=0x4242)

# pcap link type 104 = DLT_C_HDLC (Cisco HDLC).
conf.l2types.register(104, CHDLC)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/dtp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/dtp.py
new file mode 100644
index 00000000..1907300b
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/dtp.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+
+# scapy.contrib.description = DTP
+# scapy.contrib.status = loads
+
+"""
+ DTP Scapy Extension
+ ~~~~~~~~~~~~~~~~~~~
+
+ :version: 2008-12-22
+ :author: Jochen Bartl <lobo@c3a.de>
+
+ :Thanks:
+
+ - TLV code derived from the CDP implementation of scapy. (Thanks to Nicolas Bareil and Arnaud Ebalard)
+ http://trac.secdev.org/scapy/ticket/18
+"""
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.l2 import SNAP,Dot3,LLC
+from scapy.sendrecv import sendp
+
class DtpGenericTlv(Packet):
    """Generic DTP TLV: 2-byte type, 2-byte total length, opaque value."""
    name = "DTP Generic TLV"
    fields_desc = [ XShortField("type", 0x0001),
                    # NOTE(review): length_of normally takes a field-name
                    # string; this lambda returns pkt.value + 4, adding an int
                    # to a string value -- confirm against this tree's
                    # FieldLenField semantics.
                    FieldLenField("length", None, length_of=lambda pkt:pkt.value + 4),
                    StrLenField("value", "", length_from=lambda pkt:pkt.length - 4)
                    ]

    def guess_payload_class(self, p):
        # Surplus bytes become padding so RepeatedTlvListField can carve out
        # the next TLV from them.
        return conf.padding_layer
+
class RepeatedTlvListField(PacketListField):
    """PacketListField variant that consumes TLVs until the buffer is empty."""
    def __init__(self, name, default, cls):
        PacketField.__init__(self, name, default, cls)

    def getfield(self, pkt, s):
        """Dissect TLVs one after another until nothing remains."""
        lst = []
        remain = s
        while len(remain) > 0:
            p = self.m2i(pkt, remain)
            if conf.padding_layer in p:
                # The TLV's padding holds the rest of the buffer; detach it
                # and keep dissecting from there.
                pad = p[conf.padding_layer]
                remain = pad.load
                del(pad.underlayer.payload)
            else:
                remain = b""
            lst.append(p)
        return remain, lst

    def addfield(self, pkt, s, val):
        # Python 3 fixes: ``reduce`` is no longer a builtin (the original
        # raised NameError), and str() of a packet yields a repr rather than
        # its wire form -- serialize each TLV with bytes() and join.
        return s + b"".join(bytes(v) for v in val)
+
# TLV type -> class dispatch table.
_DTP_TLV_CLS = {
    0x0001 : "DTPDomain",
    0x0002 : "DTPStatus",
    0x0003 : "DTPType",
    0x0004 : "DTPNeighbor"
    }

class DTPDomain(DtpGenericTlv):
    """VTP domain name TLV (type 1)."""
    name = "DTP Domain"
    fields_desc = [ ShortField("type", 1),
                    FieldLenField("length", None, "domain", adjust=lambda pkt,x:x + 4),
                    StrLenField("domain", "\x00", length_from=lambda pkt:pkt.length - 4)
                    ]

class DTPStatus(DtpGenericTlv):
    """Trunking status TLV (type 2)."""
    name = "DTP Status"
    fields_desc = [ ShortField("type", 2),
                    FieldLenField("length", None, "status", adjust=lambda pkt,x:x + 4),
                    StrLenField("status", "\x03", length_from=lambda pkt:pkt.length - 4)
                    ]

class DTPType(DtpGenericTlv):
    """Trunk encapsulation type TLV (type 3)."""
    name = "DTP Type"
    fields_desc = [ ShortField("type", 3),
                    FieldLenField("length", None, "dtptype", adjust=lambda pkt,x:x + 4),
                    StrLenField("dtptype", "\xa5", length_from=lambda pkt:pkt.length - 4)
                    ]

class DTPNeighbor(DtpGenericTlv):
    """Neighbor MAC address TLV (type 4, fixed length 10)."""
    name = "DTP Neighbor"
    fields_desc = [ ShortField("type", 4),
                    #FieldLenField("length", None, "neighbor", adjust=lambda pkt,x:x + 4),
                    ShortField("len", 10),
                    MACField("neighbor", None)
                    ]

def _DTPGuessPayloadClass(p, **kargs):
    # Peek at the 2-byte TLV type and instantiate the matching class, falling
    # back to DtpGenericTlv (raw layer for buffers too short to hold a type).
    cls = conf.raw_layer
    if len(p) >= 2:
        t = struct.unpack("!H", p[:2])[0]
        clsname = _DTP_TLV_CLS.get(t, "DtpGenericTlv")
        cls = globals()[clsname]
    return cls(p, **kargs)

class DTP(Packet):
    """Dynamic Trunking Protocol: version byte plus a list of TLVs."""
    name = "DTP"
    fields_desc = [ ByteField("ver", 1),
                    RepeatedTlvListField("tlvlist", [], _DTPGuessPayloadClass)
                    ]

# DTP rides over SNAP with Cisco OUI 0x00000c, protocol id 0x2004.
bind_layers(SNAP, DTP, code=0x2004, OUI=0xc)


def negotiate_trunk(iface=conf.iface, mymac=str(RandMAC())):
    # Send a forged DTP frame claiming trunking support to the multicast
    # DTP address.
    # NOTE(review): both defaults are evaluated once at import time, so every
    # call reuses the same random MAC unless one is passed explicitly.
    print("Trying to negotiate a trunk on interface %s" % iface)
    p = Dot3(src=mymac, dst="01:00:0c:cc:cc:cc")/LLC()/SNAP()/DTP(tlvlist=[DTPDomain(),DTPStatus(),DTPType(),DTPNeighbor(neighbor=mymac)])
    sendp(p)

if __name__ == "__main__":
    from scapy.main import interact
    interact(mydict=globals(), mybanner="DTP")
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/eigrp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/eigrp.py
new file mode 100644
index 00000000..f304ae68
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/eigrp.py
@@ -0,0 +1,488 @@
+#!/usr/bin/env python
+
+# scapy.contrib.description = EIGRP
+# scapy.contrib.status = loads
+
+"""
+ EIGRP Scapy Extension
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ :version: 2009-08-13
+ :copyright: 2009 by Jochen Bartl
+ :e-mail: lobo@c3a.de / jochen.bartl@gmail.com
+ :license: GPL v2
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ :TODO
+
+ - Replace TLV code with a more generic solution
+ * http://trac.secdev.org/scapy/ticket/90
+ - Write function for calculating authentication data
+
+ :Known bugs:
+
+ -
+
+ :Thanks:
+
+ - TLV code derived from the CDP implementation of scapy. (Thanks to Nicolas Bareil and Arnaud Ebalard)
+ http://trac.secdev.org/scapy/ticket/18
+ - IOS / EIGRP Version Representation FIX by Dirk Loss
+"""
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.inet import IP
+from scapy.layers.inet6 import *
+
class EigrpIPField(StrField, IPField):
    """
    This is a special field type for handling ip addresses of destination networks in internal and
    external route updates.

    EIGRP removes zeros from the host portion of the ip address if the netmask is 8, 16 or 24 bits.
    """

    def __init__(self, name, default, length=None, length_from=None):
        StrField.__init__(self, name, default)
        self.length_from = length_from
        if length is not None:
            # Fixed length wins over a callback; bind it via default arg.
            self.length_from = lambda pkt,length=length: length

    def h2i(self, pkt, x):
        return IPField.h2i(self, pkt, x)

    def i2m(self, pkt, x):
        # Truncate the packed address to the bytes covered by the prefix.
        x = inet_aton(x)
        l = self.length_from(pkt)

        if l <= 8:
            return x[:1]
        elif l <= 16:
            return x[:2]
        elif l <= 24:
            return x[:3]
        else:
            return x

    def m2i(self, pkt, x):
        l = self.length_from(pkt)

        # Re-pad the truncated address with zero BYTES: inet_ntoa expects a
        # 4-byte bytes object, and a str pad would raise TypeError on Python 3.
        if l <= 8:
            x += b"\x00\x00\x00"
        elif l <= 16:
            x += b"\x00\x00"
        elif l <= 24:
            x += b"\x00"

        return inet_ntoa(x)

    def prefixlen_to_bytelen(self, l):
        # Number of wire bytes needed for an l-bit IPv4 prefix.
        if l <= 8:
            l = 1
        elif l <= 16:
            l = 2
        elif l <= 24:
            l = 3
        else:
            l = 4

        return l

    def i2len(self, pkt, x):
        l = self.length_from(pkt)
        l = self.prefixlen_to_bytelen(l)

        return l

    def getfield(self, pkt, s):
        l = self.length_from(pkt)
        l = self.prefixlen_to_bytelen(l)

        return s[l:], self.m2i(pkt, s[:l])

    def randval(self):
        return IPField.randval(self)
+
class EigrpIP6Field(StrField, IP6Field, EigrpIPField):
    """
    This is a special field type for handling ip addresses of destination networks in internal and
    external route updates: only the bytes covered by the prefix length go on the wire.
    """

    def __init__(self, name, default, length=None, length_from=None):
        StrField.__init__(self, name, default)
        self.length_from = length_from
        if length is not None:
            self.length_from = lambda pkt,length=length: length

    def any2i(self, pkt, x):
        return IP6Field.any2i(self, pkt, x)

    def i2repr(self, pkt, x):
        return IP6Field.i2repr(self, pkt, x)

    def h2i(self, pkt, x):
        return IP6Field.h2i(self, pkt, x)

    def i2m(self, pkt, x):
        # Truncate the packed address to the bytes covered by the prefix.
        x = inet_pton(socket.AF_INET6, x)
        l = self.length_from(pkt)
        l = self.prefixlen_to_bytelen(l)

        return x[:l]

    def m2i(self, pkt, x):
        l = self.length_from(pkt)

        nbytes = self.prefixlen_to_bytelen(l)
        if l > 128:
            warning("EigrpIP6Field: Prefix length is > 128. Dissection of this packet will fail")
        else:
            # Pad with zero BYTES up to the full 16-byte address; a str pad
            # would raise TypeError on Python 3.
            pad = b"\x00" * (16 - nbytes)
            x += pad

        return inet_ntop(socket.AF_INET6, x)

    def prefixlen_to_bytelen(self, l):
        # Floor division keeps this an int on Python 3 -- "/" would yield a
        # float and break slicing/padding arithmetic downstream.
        l = l // 8

        if l < 16:
            l += 1

        return l

    def i2len(self, pkt, x):
        return EigrpIPField.i2len(self, pkt, x)

    def getfield(self, pkt, s):
        return EigrpIPField.getfield(self, pkt, s)
+
class ThreeBytesField(X3BytesField, ByteField):
    # 3-byte integer field displayed in decimal (ByteField repr) rather
    # than the hex repr inherited from X3BytesField.
    def i2repr(self, pkt, x):
        return ByteField.i2repr(self, pkt, x)
+
+
class EIGRPGeneric(Packet):
    """Catch-all EIGRP TLV: 2-byte type, 2-byte total length, raw value."""
    name = "EIGRP Generic TLV"
    fields_desc = [ XShortField("type", 0x0000),
            FieldLenField("len", None, "value", "!H", adjust=lambda pkt,x: x + 4),
            StrLenField("value", "\x00", length_from=lambda pkt: pkt.len - 4)]

    def guess_payload_class(self, p):
        # Remaining bytes belong to the next TLV in the list, not to this
        # TLV's payload, so mark them as padding for the list dissector.
        return conf.padding_layer
+
class EIGRPParam(EIGRPGeneric):
    # K-value metric weights plus hold time, exchanged in Hello packets.
    name = "EIGRP Parameters"
    fields_desc = [ XShortField("type", 0x0001),
            ShortField("len", 12),
            # Bandwidth
            ByteField("k1", 1),
            # Load
            ByteField("k2", 0),
            # Delay
            ByteField("k3", 1),
            # Reliability
            ByteField("k4", 0),
            # MTU
            ByteField("k5", 0),
            ByteField("reserved", 0),
            ShortField("holdtime", 15)
            ]
+
class EIGRPAuthData(EIGRPGeneric):
    """MD5 authentication TLV; "keysize" is auto-filled from authdata length."""
    name = "EIGRP Authentication Data"
    fields_desc = [ XShortField("type", 0x0002),
            FieldLenField("len", None, "authdata", "!H", adjust=lambda pkt,x: x + 24),
            ShortEnumField("authtype", 2, {2 : "MD5"}),
            ShortField("keysize", None),
            IntField("keyid", 1),
            StrFixedLenField("nullpad", "\x00" * 12, 12),
            StrLenField("authdata", RandString(16), length_from=lambda pkt: pkt.keysize)
            ]

    def post_build(self, p, pay):
        p += pay

        if self.keysize is None:
            keysize = len(self.authdata)
            # Patch the 16-bit keysize in place as raw bytes. The original
            # chr() concatenation produced str and fails on Python 3, where
            # p is a bytes object.
            p = p[:6] + bytes([(keysize >> 8) & 0xff, keysize & 0xff]) + p[8:]

        return p
+
class EIGRPSeq(EIGRPGeneric):
    """Sequence TLV listing a neighbor address (IPv4 or IPv6 by addrlen)."""
    name = "EIGRP Sequence"
    fields_desc = [ XShortField("type", 0x0003),
            ShortField("len", None),
            ByteField("addrlen", 4),
            ConditionalField(IPField("ipaddr", "192.168.0.1"),
                            lambda pkt:pkt.addrlen == 4),
            ConditionalField(IP6Field("ip6addr", "2001::"),
                            lambda pkt:pkt.addrlen == 16)
            ]

    def post_build(self, p, pay):
        p += pay

        if self.len is None:
            l = len(p)
            # Patch the 16-bit length in place as raw bytes; the original
            # chr() concatenation mixes str and bytes and fails on Python 3.
            p = p[:2] + bytes([(l >> 8) & 0xff, l & 0xff]) + p[4:]

        return p
+
class ShortVersionField(ShortField):
    # 16-bit version field rendered as "vMAJOR.MINOR" (major = high byte).
    def i2repr(self, pkt, x):
        try:
            minor = x & 0xff
            major = (x >> 8) & 0xff
        except TypeError:
            return "unknown"
        else:
            # We print a leading 'v' so that these values don't look like floats
            return "v%s.%s" % (major, minor)

    def h2i(self, pkt, x):
        """The field accepts string values like v12.1, v1.1 or integer values.
        String values have to start with a "v" followed by a floating point number.
        Valid major/minor components are between 0 and 255 (integer form: 0-65535).
        """

        if type(x) is str and x.startswith("v") and len(x) <= 8:
            major = int(x.split(".")[0][1:])
            minor = int(x.split(".")[1])

            return (major << 8) | minor

        elif type(x) is int and x >= 0 and x <= 65535:
            return x
        else:
            # Fall back to the default when one exists; otherwise reject.
            if self.default != None:
                warning("set value to default. Format of %r is invalid" % x)
                return self.default
            else:
                raise Scapy_Exception("Format of value is invalid")

    def randval(self):
        return RandShort()
+
class EIGRPSwVer(EIGRPGeneric):
    # Software version TLV: IOS release and EIGRP (TLV) version, each packed
    # as major/minor into one 16-bit ShortVersionField.
    name = "EIGRP Software Version"
    fields_desc = [ XShortField("type", 0x0004),
            ShortField("len", 8),
            ShortVersionField("ios", "v12.0"),
            ShortVersionField("eigrp", "v1.2")
            ]
+
class EIGRPNms(EIGRPGeneric):
    # Next multicast sequence TLV: a single 32-bit sequence number.
    name = "EIGRP Next Multicast Sequence"
    fields_desc = [ XShortField("type", 0x0005),
            ShortField("len", 8),
            IntField("nms", 2)
            ]
+
+# Don't get confused by the term "receive-only". This flag is always set, when you configure
+# one of the stub options. It's also the only flag set, when you configure "eigrp stub receive-only".
+_EIGRP_STUB_FLAGS = ["connected", "static", "summary", "receive-only", "redistributed", "leak-map"]
+
class EIGRPStub(EIGRPGeneric):
    # Stub-router TLV. Default 0x000d = connected + summary + receive-only.
    name = "EIGRP Stub Router"
    fields_desc = [ XShortField("type", 0x0006),
            ShortField("len", 6),
            FlagsField("flags", 0x000d, 16, _EIGRP_STUB_FLAGS)]
+
+# Delay 0xffffffff == Destination Unreachable
class EIGRPIntRoute(EIGRPGeneric):
    # Internal (IPv4) route TLV. "len" = 25 fixed bytes + the truncated
    # destination, whose wire size is derived from "prefixlen".
    name = "EIGRP Internal Route"
    fields_desc = [ XShortField("type", 0x0102),
            FieldLenField("len", None, "dst", "!H", adjust=lambda pkt,x: x + 25),
            IPField("nexthop", "192.168.0.0"),
            IntField("delay", 128000),
            IntField("bandwidth", 256),
            ThreeBytesField("mtu", 1500),
            ByteField("hopcount", 0),
            ByteField("reliability", 255),
            ByteField("load", 0),
            XShortField("reserved", 0),
            ByteField("prefixlen", 24),
            EigrpIPField("dst", "192.168.1.0", length_from=lambda pkt: pkt.prefixlen),
            ]
+
# Originating protocol of an external (redistributed) route.
_EIGRP_EXTERNAL_PROTOCOL_ID = {
                    0x01 : "IGRP",
                    0x02 : "EIGRP",
                    0x03 : "Static Route",
                    0x04 : "RIP",
                    0x05 : "Hello",
                    0x06 : "OSPF",
                    0x07 : "IS-IS",
                    0x08 : "EGP",
                    0x09 : "BGP",
                    0x0A : "IDRP",
                    0x0B : "Connected Link"
                    }

# Flag bits carried in external route TLVs.
_EIGRP_EXTROUTE_FLAGS = ["external", "candidate-default"]
+
class EIGRPExtRoute(EIGRPGeneric):
    # External (IPv4) route TLV: internal-route metric block preceded by the
    # redistribution metadata (originating router/AS/protocol).
    name = "EIGRP External Route"
    fields_desc = [ XShortField("type", 0x0103),
            FieldLenField("len", None, "dst", "!H", adjust=lambda pkt,x: x + 45),
            IPField("nexthop", "192.168.0.0"),
            IPField("originrouter", "192.168.0.1"),
            IntField("originasn", 0),
            IntField("tag", 0),
            IntField("externalmetric", 0),
            ShortField("reserved", 0),
            ByteEnumField("extprotocolid", 3, _EIGRP_EXTERNAL_PROTOCOL_ID),
            FlagsField("flags", 0, 8, _EIGRP_EXTROUTE_FLAGS),
            IntField("delay", 0),
            IntField("bandwidth", 256),
            ThreeBytesField("mtu", 1500),
            ByteField("hopcount", 0),
            ByteField("reliability", 255),
            ByteField("load", 0),
            XShortField("reserved2", 0),
            ByteField("prefixlen", 24),
            EigrpIPField("dst", "192.168.1.0", length_from=lambda pkt: pkt.prefixlen)
            ]
+
class EIGRPv6IntRoute(EIGRPGeneric):
    # IPv6 internal route TLV; same layout as IPv4 but with a 16-byte
    # next hop, hence the +37 length adjustment.
    name = "EIGRP for IPv6 Internal Route"
    fields_desc = [ XShortField("type", 0x0402),
            FieldLenField("len", None, "dst", "!H", adjust=lambda pkt,x: x + 37),
            IP6Field("nexthop", "::"),
            IntField("delay", 128000),
            IntField("bandwidth", 256000),
            ThreeBytesField("mtu", 1500),
            ByteField("hopcount", 1),
            ByteField("reliability", 255),
            ByteField("load", 0),
            XShortField("reserved", 0),
            ByteField("prefixlen", 16),
            EigrpIP6Field("dst", "2001::", length_from=lambda pkt: pkt.prefixlen)
            ]
+
class EIGRPv6ExtRoute(EIGRPGeneric):
    # IPv6 external route TLV. Note: "originrouter" stays an IPv4 router-id
    # even in the IPv6 TLV, as modeled here.
    name = "EIGRP for IPv6 External Route"
    fields_desc = [ XShortField("type", 0x0403),
            FieldLenField("len", None, "dst", "!H", adjust=lambda pkt,x: x + 57),
            IP6Field("nexthop", "::"),
            IPField("originrouter", "192.168.0.1"),
            IntField("originasn", 0),
            IntField("tag", 0),
            IntField("externalmetric", 0),
            ShortField("reserved", 0),
            ByteEnumField("extprotocolid", 3, _EIGRP_EXTERNAL_PROTOCOL_ID),
            FlagsField("flags", 0, 8, _EIGRP_EXTROUTE_FLAGS),
            IntField("delay", 0),
            IntField("bandwidth", 256000),
            ThreeBytesField("mtu", 1500),
            ByteField("hopcount", 1),
            ByteField("reliability", 0),
            ByteField("load", 1),
            XShortField("reserved2", 0),
            ByteField("prefixlen", 8),
            EigrpIP6Field("dst", "::", length_from=lambda pkt: pkt.prefixlen)
            ]
+
# TLV type -> dissector class name, consumed by _EIGRPGuessPayloadClass.
_eigrp_tlv_cls = {
                    0x0001: "EIGRPParam",
                    0x0002: "EIGRPAuthData",
                    0x0003: "EIGRPSeq",
                    0x0004: "EIGRPSwVer",
                    0x0005: "EIGRPNms",
                    0x0006: "EIGRPStub",
                    0x0102: "EIGRPIntRoute",
                    0x0103: "EIGRPExtRoute",
                    0x0402: "EIGRPv6IntRoute",
                    0x0403: "EIGRPv6ExtRoute"
                   }
+
class RepeatedTlvListField(PacketListField):
    """PacketListField variant that dissects a back-to-back run of TLVs."""
    def __init__(self, name, default, cls):
        PacketField.__init__(self, name, default, cls)

    def getfield(self, pkt, s):
        lst = []
        remain = s
        while len(remain) > 0:
            p = self.m2i(pkt, remain)
            if conf.padding_layer in p:
                # Whatever the TLV flagged as padding is the next TLV.
                pad = p[conf.padding_layer]
                remain = pad.load
                del(pad.underlayer.payload)
            else:
                remain = b""  # bytes, to stay consistent with the input buffer
            lst.append(p)
        return remain, lst

    def addfield(self, pkt, s, val):
        # Serialize every TLV and append. bytes() instead of the original
        # reduce(str.__add__, map(str, ...)): "reduce" is not a builtin and
        # packets render to bytes, not str, on Python 3.
        return s + b"".join(bytes(v) for v in val)
+
def _EIGRPGuessPayloadClass(p, **kargs):
    """Pick the EIGRP TLV class matching the 16-bit type at the buffer head."""
    if len(p) < 2:
        return conf.raw_layer(p, **kargs)
    tlv_type = struct.unpack("!H", p[:2])[0]
    tlv_cls = globals()[_eigrp_tlv_cls.get(tlv_type, "EIGRPGeneric")]
    return tlv_cls(p, **kargs)
+
+_EIGRP_OPCODES = { 1 : "Update",
+ 2 : "Request",
+ 3 : "Query",
+ 4 : "Replay",
+ 5 : "Hello",
+ 6 : "IPX SAP",
+ 10 : "SIA Query",
+ 11 : "SIA Reply" }
+
+# The Conditional Receive bit is used for reliable multicast communication.
+# Update-Flag: Not sure if Cisco calls it that way, but it's set when neighbors
+# are exchanging routing information
+_EIGRP_FLAGS = ["init", "cond-recv", "unknown", "update"]
+
class EIGRP(Packet):
    """EIGRP base header; "chksum" is auto-computed over header + TLVs."""
    name = "EIGRP"
    fields_desc = [ ByteField("ver", 2),
            ByteEnumField("opcode", 5, _EIGRP_OPCODES),
            XShortField("chksum", None),
            FlagsField("flags", 0, 32, _EIGRP_FLAGS),
            IntField("seq", 0),
            IntField("ack", 0),
            IntField("asn", 100),
            RepeatedTlvListField("tlvlist", [], _EIGRPGuessPayloadClass)
            ]

    def post_build(self, p, pay):
        p += pay
        if self.chksum is None:
            c = checksum(p)
            # Patch the checksum in as raw bytes; the original chr()
            # concatenation mixes str and bytes and fails on Python 3.
            p = p[:2] + bytes([(c >> 8) & 0xff, c & 0xff]) + p[4:]
        return p

    def mysummary(self):
        summarystr = "EIGRP (AS=%EIGRP.asn% Opcode=%EIGRP.opcode%"
        if self.opcode == 5 and self.ack != 0:
            summarystr += " (ACK)"
        if self.flags != 0:
            summarystr += " Flags=%EIGRP.flags%"

        return self.sprintf(summarystr + ")")
+
# EIGRP runs directly over IP / IPv6, protocol number 88.
bind_layers(IP, EIGRP, proto=88)
bind_layers(IPv6, EIGRP, nh=88)

if __name__ == "__main__":
    # Standalone use: interactive scapy shell with EIGRP preloaded.
    from scapy.main import interact
    interact(mydict=globals(), mybanner="EIGRP")
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/etherip.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/etherip.py
new file mode 100644
index 00000000..e331c146
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/etherip.py
@@ -0,0 +1,19 @@
+
+# http://trac.secdev.org/scapy/ticket/297
+
+# scapy.contrib.description = EtherIP
+# scapy.contrib.status = loads
+
+from scapy.fields import BitField
+from scapy.packet import Packet, bind_layers
+from scapy.layers.inet import IP
+from scapy.layers.l2 import Ether
+
class EtherIP(Packet):
    # RFC 3378 EtherIP header: 4-bit version (always 3) + 12 reserved bits.
    name = "EtherIP / RFC 3378"
    fields_desc = [ BitField("version", 3, 4),
                    BitField("reserved", 0, 12)]

# EtherIP is IP protocol 0x61 (97); its payload is a full Ethernet frame.
bind_layers( IP, EtherIP, frag=0, proto=0x61)
bind_layers( EtherIP, Ether)
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/gsm_um.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/gsm_um.py
new file mode 100644
index 00000000..7b1354a4
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/gsm_um.py
@@ -0,0 +1,13119 @@
+#!/usr/bin/env python
+
# scapy.contrib.description = GSM UM
+# scapy.contrib.status = loads
+
+"""
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+ ####################################################################
+ # This file holds the GSM UM interface implementation for Scapy #
+ # author: Laurent Weber <k@0xbadcab1e.lu> #
+ # #
+ # Some examples on how to use this script: #
+ # http://0xbadcab1e.lu/scapy_gsm_um-howto.txt #
+ # #
+ # tested on: scapy-version: 2.2.0 (dev) #
+ ####################################################################
+
import logging
# The Python 2 "types" names (types.IntType etc.) were removed in Python 3,
# so importing them crashed the whole module. Provide equivalent aliases
# instead, keeping any later type checks working unchanged.
IntType = int
NoneType = type(None)
StringType = str
#from time import sleep
import socket
logging.getLogger("scapy").setLevel(1)
from scapy.all import *
+
+# This method is intended to send gsm air packets. It uses a unix domain
+# socket. It opens a socket, sends the parameter to the socket and
+# closes the socket.
+# typeSock determines the type of the socket, can be:
+# 0 for UDP Socket
+# 1 for Unix Domain Socket
+# 2 for TCP
+
+
def sendum(x, typeSock=0):
    """Send a GSM-UM message to a local socket server.

    typeSock: 0 = UDP to 127.0.0.1:28670 (openBTS default),
              1 = Unix domain socket /tmp/osmoL,
              2 = TCP to 127.0.0.1:43797.
    """
    try:
        # Serialize to raw bytes first: Python 3 sockets refuse str, and
        # scapy packets render to bytes via bytes().
        if isinstance(x, str):
            x = x.encode()
        elif not isinstance(x, bytes):
            x = bytes(x)
        # "==" instead of the original "is" -- identity tests against int
        # literals rely on CPython small-int caching.
        if typeSock == 0:
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            host = '127.0.0.1'
            port = 28670   # default for openBTS
            s.connect((host, port))
        elif typeSock == 1:
            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            s.connect("/tmp/osmoL")
        elif typeSock == 2:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            host = '127.0.0.1'
            port = 43797
            s.connect((host, port))
        s.send(x)
        s.close()
    except Exception:
        # Best-effort by design: report rather than propagate.
        print("[Error]: There was a problem when trying to transmit data.\
               Please make sure you started the socket server.")
+
+# Known Bugs/Problems:
+# If a message uses multiple times the same IE you cannot set the values
+# of this IE's if you use the preconfigured packets. You need to build
+# the IE's by hand and than assemble them as entire messages.
+
+# The ErrorLength class is a custom exception that gets raised when a
+# packet doesn't have the correct size.
+
+
class ErrorLength(Exception):
    # Raised by adapt() when the trailing unset fields of an IE do not
    # align to whole octets -- IEs must be built in complete 8-bit units.
    def __str__(self):
        error = "ERROR: Please make sure you build entire, 8 bit fields."
        return repr(error)
+###
+# This method computes the length of the actual IE.
+# It computes how many "None" fields have to be removed (if any).
+# The method returns an integer containing the number of bytes that have to be
+# cut off the packet.
+# parameter length contains the max length of the IE can be found in
+# 0408
+# The parameter fields contains the value of the fields (not the default but
+# the real, actual value.
+# The parameter fields2 contains fields_desc.
+# Location contains the location of the length field in the IE. Everything
+# after the the length field has to be counted (04.07 11.2.1.1.2)
+
+
def adapt(min_length, max_length, fields, fields2, location=2):
    """Compute how many bytes to strip from an IE whose trailing fields are unset.

    fields   -- the actual field values (trailing Nones mark unset fields)
    fields2  -- the matching fields_desc entries (used for their bit .size)
    location -- position of the length field inside the IE

    Returns [bytes_to_delete, remaining_length]; raises ErrorLength when the
    unset trailing fields do not amount to whole octets.
    """
    # find out how much bytes there are between min_length and the location of
    # the length field
    location = min_length - location
    i = len(fields) - 1
    rm = mysum = 0
    # Walk backwards over the trailing unset (None) fields, summing bit widths.
    while i >= 0:
        if fields[i] is None:
            rm += 1
            try:
                mysum += fields2[i].size
            except AttributeError:  # ByteFields don't have .size
                mysum += 8
        else:
            break
        i -= 1
    if mysum % 8 == 0:
        # "//" keeps the byte count an int on Python 3 ("/" yields a float);
        # "==" replaces the fragile "is" identity tests on integers.
        length = mysum // 8  # Number of bytes we have to delete
        dyn_length = (max_length - min_length - length)
        if dyn_length < 0:
            dyn_length = 0
        if length == max_length:  # Fix for packets that have all values set
            length -= min_length  # to None
        return [length, dyn_length + location]
    else:
        raise ErrorLength()
+
+
def examples(example=None):
    # Interactive help: print canned usage snippets for this module.
    if example == None:
        print("""This command presents some example to introduce scapy
gsm-um to new users.
The following parameters can be used:
    examples("imsiDetach")
    examples("call")
    examples("dissect")""")
    elif example == "imsiDetach":
        print("""
>>> a=imsiDetachIndication()
... a.typeOfId=1; a.odd=1; a.idDigit1=0xF;
... a.idDigit2_1=2; a.idDigit2=7; a.idDigit3_1=0;
... a.idDigit3=7; a.idDigit4_1=7; a.idDigit4=2;
... a.idDigit5_1=0; a.idDigit5=0; a.idDigit6_1=0;
... a.idDigit6=1; a.idDigit7_1=2; a.idDigit7=7;
... a.idDigit8_1=7; a.idDigit8=5; a.idDigit9_1=1; a.idDigit9=4;
>>> hexdump(a)
0000 05 01 00 08 F0 27 07 72 00 01 27 75 14 .....'.r..'u.
>>> sendum(a)
""")
    elif example == "call":
        print("""
If you use an USRP and the testcall function this sets up a phonecall:
>>> sendum(setupMobileOriginated())
>>> sendum(connectAcknowledge())
""")
+
+
+# Section 10.2/3
+class TpPd(Packet):
+ """Skip indicator and transaction identifier and Protocol Discriminator"""
+ name = "Skip Indicator And Transaction Identifier and Protocol \
+Discriminator"
+ fields_desc = [
+ BitField("ti", 0x0, 4),
+ BitField("pd", 0x3, 4)
+ ]
+
+
class MessageType(Packet):
    """Message Type Section 10.4"""
    name = "Message Type"
    fields_desc = [
        # one-octet message type code
        XByteField("mesType", 0x3C)
    ]
+
+
+##
+# Message for Radio Resources management (RR) Section 9.1
+###
+
+# Network to MS
def additionalAssignment(MobileAllocation_presence=0,
                         StartingTime_presence=0):
    """ADDITIONAL ASSIGNMENT Section 9.1.1"""
    # Mandatory part: TpPd / message type 0x3B (00111011) / channel description
    packet = TpPd(pd=0x6) / MessageType(mesType=0x3B) / ChannelDescription()
    # Optional IEs. "== 1" replaces the original "is 1": identity tests on
    # int literals only work by virtue of CPython's small-int caching.
    if MobileAllocation_presence == 1:
        packet = packet / MobileAllocationHdr(ieiMA=0x72, eightBitMA=0x0)
    if StartingTime_presence == 1:
        packet = packet / StartingTimeHdr(ieiST=0x7C, eightBitST=0x0)
    return packet
+
+
+# Network to MS
def assignmentCommand(FrequencyList_presence=0,
                      CellChannelDescription_presence=0,
                      CellChannelDescription_presence1=0,
                      MultislotAllocation_presence=0,
                      ChannelMode_presence=0, ChannelMode_presence1=0,
                      ChannelMode_presence2=0, ChannelMode_presence3=0,
                      ChannelMode_presence4=0, ChannelMode_presence5=0,
                      ChannelMode_presence6=0, ChannelMode_presence7=0,
                      ChannelDescription=0, ChannelMode2_presence=0,
                      MobileAllocation_presence=0, StartingTime_presence=0,
                      FrequencyList_presence1=0,
                      ChannelDescription2_presence=0,
                      ChannelDescription_presence=0,
                      FrequencyChannelSequence_presence=0,
                      MobileAllocation_presence1=0,
                      CipherModeSetting_presence=0,
                      VgcsTargetModeIdentication_presence=0,
                      MultiRateConfiguration_presence=0):
    """ASSIGNMENT COMMAND Section 9.1.2

    Builds the mandatory part and appends every optional IE whose
    *_presence flag equals 1 ("==" replaces the fragile "is 1" tests).
    NOTE(review): "ChannelDescription" and "CellChannelDescription_presence1"
    are accepted but never used, and "ChannelDescription_presence" gates both
    the 0x64 and the 0x1D ChannelDescription IEs. Kept as-is for backward
    compatibility; verify against GSM 04.08 before changing.
    """
    packet = (TpPd(pd=0x6) / MessageType(mesType=0x2e) /  # 101110
              ChannelDescription2() / PowerCommand())
    if FrequencyList_presence == 1:
        packet = packet / FrequencyListHdr(ieiFL=0x05, eightBitFL=0x0)
    if CellChannelDescription_presence == 1:
        packet = packet / CellChannelDescriptionHdr(ieiCCD=0x62, eightBitCCD=0x0)
    if MultislotAllocation_presence == 1:
        packet = packet / MultislotAllocationHdr(ieiMSA=0x10, eightBitMSA=0x0)
    # Up to eight optional ChannelMode IEs, one per channel set.
    mode_presences = (ChannelMode_presence, ChannelMode_presence1,
                      ChannelMode_presence2, ChannelMode_presence3,
                      ChannelMode_presence4, ChannelMode_presence5,
                      ChannelMode_presence6, ChannelMode_presence7)
    mode_ieis = (0x63, 0x11, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18)
    for present, iei in zip(mode_presences, mode_ieis):
        if present == 1:
            packet = packet / ChannelModeHdr(ieiCM=iei, eightBitCM=0x0)
    if ChannelDescription_presence == 1:
        packet = packet / ChannelDescriptionHdr(ieiCD=0x64, eightBitCD=0x0)
    if ChannelMode2_presence == 1:
        packet = packet / ChannelMode2Hdr(ieiCM2=0x66, eightBitCM2=0x0)
    if MobileAllocation_presence == 1:
        packet = packet / MobileAllocationHdr(ieiMA=0x72, eightBitMA=0x0)
    if StartingTime_presence == 1:
        packet = packet / StartingTimeHdr(ieiST=0x7C, eightBitST=0x0)
    if FrequencyList_presence1 == 1:
        packet = packet / FrequencyListHdr(ieiFL=0x19, eightBitFL=0x0)
    if ChannelDescription2_presence == 1:
        packet = packet / ChannelDescription2Hdr(ieiCD2=0x1C, eightBitCD2=0x0)
    if ChannelDescription_presence == 1:
        packet = packet / ChannelDescriptionHdr(ieiCD=0x1D, eightBitCD=0x0)
    if FrequencyChannelSequence_presence == 1:
        packet = packet / FrequencyChannelSequenceHdr(ieiFCS=0x1E, eightBitFCS=0x0)
    if MobileAllocation_presence1 == 1:
        packet = packet / MobileAllocationHdr(ieiMA=0x21, eightBitMA=0x0)
    if CipherModeSetting_presence == 1:
        packet = packet / CipherModeSettingHdr(ieiCMS=0x9, eightBitCMS=0x0)
    if VgcsTargetModeIdentication_presence == 1:
        packet = packet / VgcsTargetModeIdenticationHdr(ieiVTMI=0x01, eightBitVTMI=0x0)
    if MultiRateConfiguration_presence == 1:
        packet = packet / MultiRateConfigurationHdr(ieiMRC=0x03, eightBitMRC=0x0)
    return packet
+
+
+# MS to Network
def assignmentComplete():
    """ASSIGNMENT COMPLETE Section 9.1.3"""
    # Message type 0x29 (00101001) plus the mandatory RR cause IE.
    return TpPd(pd=0x6) / MessageType(mesType=0x29) / RrCause()
+
+
+# MS to Network
def assignmentFailure():
    """ASSIGNMENT FAILURE Section 9.1.4"""
    # Message type 0x2F (00101111) plus the mandatory RR cause IE.
    return TpPd(pd=0x6) / MessageType(mesType=0x2F) / RrCause()
+
+
+# Network to MS
def channelModeModify(VgcsTargetModeIdentication_presence=0,
                      MultiRateConfiguration_presence=0):
    """CHANNEL MODE MODIFY Section 9.1.5"""
    packet = (TpPd(pd=0x6) / MessageType(mesType=0x8) /  # 0001000
              ChannelDescription2() / ChannelMode())
    # BUG FIX: the original tested the names "VgcsTargetModeIdentication" /
    # "MultiRateConfiguration" (not the *_presence parameters), so the
    # optional IEs could never be appended as requested.
    if VgcsTargetModeIdentication_presence == 1:
        packet = packet / VgcsTargetModeIdenticationHdr(ieiVTMI=0x01, eightBitVTMI=0x0)
    if MultiRateConfiguration_presence == 1:
        packet = packet / MultiRateConfigurationHdr(ieiMRC=0x03, eightBitMRC=0x0)
    return packet
+
+
def channelModeModifyAcknowledge():
    """CHANNEL MODE MODIFY ACKNOWLEDGE Section 9.1.6"""
    # Message type 0x17 (00010111) echoing channel description and mode.
    return (TpPd(pd=0x6) / MessageType(mesType=0x17) /
            ChannelDescription2() / ChannelMode())
+
+
+# Network to MS
def channelRelease(BaRange_presence=0, GroupChannelDescription_presence=0,
                   GroupCipherKeyNumber_presence=0, GprsResumption_presence=0,
                   BaListPref_presence=0):
    """CHANNEL RELEASE Section 9.1.7"""
    # Mandatory part: message type 0xD (00001101) + RR cause.
    packet = TpPd(pd=0x6) / MessageType(mesType=0xD) / RrCause()
    # Optional IEs; "== 1" replaces the original fragile "is 1" tests.
    if BaRange_presence == 1:
        packet = packet / BaRangeHdr(ieiBR=0x73, eightBitBR=0x0)
    if GroupChannelDescription_presence == 1:
        packet = packet / GroupChannelDescriptionHdr(ieiGCD=0x74, eightBitGCD=0x0)
    if GroupCipherKeyNumber_presence == 1:
        packet = packet / GroupCipherKeyNumber(ieiGCKN=0x8)
    if GprsResumption_presence == 1:
        packet = packet / GprsResumptionHdr(ieiGR=0xC, eightBitGR=0x0)
    if BaListPref_presence == 1:
        packet = packet / BaListPrefHdr(ieiBLP=0x75, eightBitBLP=0x0)
    return packet
+
+
class ChannelRequest(Packet):
    """Channel request Section 9.1.8"""
    name = "Channel Request"
    fields_desc = [
        # single octet: establishment cause
        ByteField("estCause", 0x0)
    ]
+
+
def channelRequest():
    # Convenience wrapper returning a default CHANNEL REQUEST message.
    return ChannelRequest()
+
+
+# Network to MS
def cipheringModeCommand():
    """CIPHERING MODE COMMAND Section 9.1.9"""
    # Cipher-mode setting and cipher response are two half-octet IEs packed
    # into one octet, hence the combined class instead of separate IEs.
    return (TpPd(pd=0x6) / MessageType(mesType=0x35) /  # 00110101
            RrCause() / CipherModeSettingAndcipherResponse())
+
+
def cipheringModeComplete(MobileId_presence=0):
    """CIPHERING MODE COMPLETE Section 9.1.10"""
    packet = TpPd(pd=0x6) / MessageType(mesType=0x32)  # 00110010
    # "== 1" replaces the original fragile "is 1" identity test.
    if MobileId_presence == 1:
        packet = packet / MobileIdHdr(ieiMI=0x17, eightBitMI=0x0)
    return packet
+
+
+# Network to MS
def classmarkChange(MobileStationClassmark3_presence=0):
    """CLASSMARK CHANGE Section 9.1.11"""
    packet = (TpPd(pd=0x6) / MessageType(mesType=0x16) /  # 00010110
              MobileStationClassmark2())
    # "== 1" replaces the original fragile "is 1" identity test.
    if MobileStationClassmark3_presence == 1:
        packet = packet / MobileStationClassmark3(ieiMSC3=0x20)
    return packet
+
+
+# Network to MS
def classmarkEnquiry():
    """CLASSMARK ENQUIRY Section 9.1.12"""
    # Just the two mandatory headers; message type 0x13 (00010011).
    return TpPd(pd=0x6) / MessageType(mesType=0x13)
+# 9.1.12a Spare
+
+
+# Network to MS
def configurationChangeCommand(ChannelMode_presence=0,
                               ChannelMode_presence1=0,
                               ChannelMode_presence2=0,
                               ChannelMode_presence3=0,
                               ChannelMode_presence4=0,
                               ChannelMode_presence5=0,
                               ChannelMode_presence6=0,
                               ChannelMode_presence7=0):
    """CONFIGURATION CHANGE COMMAND Section 9.1.12b"""
    packet = (TpPd(pd=0x6) / MessageType(mesType=0x30) /  # 00110000
              MultislotAllocation())
    # One optional ChannelMode IE per channel set, each with its own IEI.
    # "== 1" replaces the original fragile "is 1" identity tests.
    presences = (ChannelMode_presence, ChannelMode_presence1,
                 ChannelMode_presence2, ChannelMode_presence3,
                 ChannelMode_presence4, ChannelMode_presence5,
                 ChannelMode_presence6, ChannelMode_presence7)
    ieis = (0x63, 0x11, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18)
    for present, iei in zip(presences, ieis):
        if present == 1:
            packet = packet / ChannelModeHdr(ieiCM=iei, eightBitCM=0x0)
    return packet
+
+
def configurationChangeAcknowledge():
    """CONFIGURATION CHANGE ACKNOWLEDGE Section 9.1.12c"""
    # Message type 0x31 (00110001) plus the mandatory mobile identity IE.
    return TpPd(pd=0x6) / MessageType(mesType=0x31) / MobileId()
+
+
def configurationChangeReject():
    """CONFIGURATION CHANGE REJECT Section 9.1.12d"""
    # Message type 0x33 (00110011) plus the mandatory RR cause IE.
    return TpPd(pd=0x6) / MessageType(mesType=0x33) / RrCause()
+
+
+# Network to MS
def frequencyRedefinition(CellChannelDescription_presence=0):
    """Frequency redefinition Section 9.1.13"""
    packet = (TpPd(pd=0x6) / MessageType(mesType=0x14) /  # 00010100
              ChannelDescription() / MobileAllocation() / StartingTime())
    # "== 1" replaces the original fragile "is 1" identity test.
    if CellChannelDescription_presence == 1:
        packet = packet / CellChannelDescriptionHdr(ieiCCD=0x62, eightBitCCD=0x0)
    return packet
+
+
+# Network to MS
def pdchAssignmentCommand(ChannelDescription_presence=0,
                          CellChannelDescription_presence=0,
                          MobileAllocation_presence=0,
                          StartingTime_presence=0, FrequencyList_presence=0,
                          ChannelDescription_presence1=0,
                          FrequencyChannelSequence_presence=0,
                          MobileAllocation_presence1=0,
                          PacketChannelDescription_presence=0,
                          DedicatedModeOrTBF_presence=0):
    """PDCH ASSIGNMENT COMMAND Section 9.1.13a

    "== 1" replaces the original fragile "is 1" identity tests.
    NOTE(review): the first two optional IEIs (ChannelDescription 0x62,
    CellChannelDescription 0x05) look swapped relative to the values used in
    assignmentCommand -- kept as-is; verify against GSM 04.08.
    """
    packet = TpPd(pd=0x6) / MessageType(mesType=0x23) / ChannelDescription()  # 00100011
    if ChannelDescription_presence == 1:
        packet = packet / ChannelDescriptionHdr(ieiCD=0x62, eightBitCD=0x0)
    if CellChannelDescription_presence == 1:
        packet = packet / CellChannelDescriptionHdr(ieiCCD=0x05, eightBitCCD=0x0)
    if MobileAllocation_presence == 1:
        packet = packet / MobileAllocationHdr(ieiMA=0x72, eightBitMA=0x0)
    if StartingTime_presence == 1:
        packet = packet / StartingTimeHdr(ieiST=0x7C, eightBitST=0x0)
    if FrequencyList_presence == 1:
        packet = packet / FrequencyListHdr(ieiFL=0x19, eightBitFL=0x0)
    if ChannelDescription_presence1 == 1:
        packet = packet / ChannelDescriptionHdr(ieiCD=0x1C, eightBitCD=0x0)
    if FrequencyChannelSequence_presence == 1:
        packet = packet / FrequencyChannelSequenceHdr(ieiFCS=0x1E, eightBitFCS=0x0)
    if MobileAllocation_presence1 == 1:
        packet = packet / MobileAllocationHdr(ieiMA=0x21, eightBitMA=0x0)
    if PacketChannelDescription_presence == 1:
        packet = packet / PacketChannelDescription(ieiPCD=0x22)
    if DedicatedModeOrTBF_presence == 1:
        packet = packet / DedicatedModeOrTBFHdr(ieiDMOT=0x23, eightBitDMOT=0x0)
    return packet
+
+
def gprsSuspensionRequest():
    """GPRS SUSPENSION REQUEST Section 9.1.13b"""
    # All five IEs are mandatory; MessageType keeps its default code here.
    return (TpPd(pd=0x6) / MessageType() / Tlli() /
            RoutingAreaIdentification() / SuspensionCause())
+
+
class HandoverAccess(Packet):
    name = "Handover Access"  # Section 9.1.14
    fields_desc = [
        # single octet carrying the handover reference value
        ByteField("handover", None),
    ]
+
+
+# Network to MS
+def handoverCommand(SynchronizationIndication_presence=0,
+ FrequencyShortList_presence=0, FrequencyList_presence=0,
+ CellChannelDescription_presence=0,
+ MultislotAllocation_presence=0,
+ ChannelMode_presence=0, ChannelMode_presence1=0,
+ ChannelMode_presence2=0,
+ ChannelMode_presence3=0, ChannelMode_presence4=0,
+ ChannelMode_presence5=0,
+ ChannelMode_presence6=0, ChannelMode_presence7=0,
+ ChannelDescription_presence1=0, ChannelMode2_presence=0,
+ FrequencyChannelSequence_presence=0,
+ MobileAllocation_presence=0,
+ StartingTime_presence=0, TimeDifference_presence=0,
+ TimingAdvance_presence=0,
+ FrequencyShortList_presence1=0,
+ FrequencyList_presence1=0,
+ ChannelDescription2_presence=0,
+ ChannelDescription_presence2=0,
+ FrequencyChannelSequence_presence1=0,
+ MobileAllocation_presence1=0,
+ CipherModeSetting_presence=0,
+ VgcsTargetModeIdentication_presence=0,
+ MultiRateConfiguration_presence=0):
+ """HANDOVER COMMAND Section 9.1.15"""
+ name = "Handover Command"
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0x2b) # 00101011
+ c = CellDescription()
+ d = ChannelDescription2()
+ e = HandoverReference()
+ f = PowerCommandAndAccessType()
+ packet = a / b / c / d / e / f
+ if SynchronizationIndication_presence is 1:
+ g = SynchronizationIndicationHdr(ieiSI=0xD, eightBitSI=0x0)
+ packet = packet / g
+ if FrequencyShortList_presence is 1:
+ h = FrequencyShortListHdr(ieiFSL=0x02)
+ packet = packet / h
+ if FrequencyList_presence is 1:
+ i = FrequencyListHdr(ieiFL=0x05, eightBitFL=0x0)
+ packet = packet / i
+ if CellChannelDescription_presence is 1:
+ j = CellChannelDescriptionHdr(ieiCCD=0x62, eightBitCCD=0x0)
+ packet = packet / j
+ if MultislotAllocation_presence is 1:
+ k = MultislotAllocationHdr(ieiMSA=0x10, eightBitMSA=0x0)
+ packet = packet / k
+ if ChannelMode_presence is 1:
+ l = ChannelModeHdr(ieiCM=0x63, eightBitCM=0x0)
+ packet = packet / l
+ if ChannelMode_presence1 is 1:
+ m = ChannelModeHdr(ieiCM=0x11, eightBitCM=0x0)
+ packet = packet / m
+ if ChannelMode_presence2 is 1:
+ n = ChannelModeHdr(ieiCM=0x13, eightBitCM=0x0)
+ packet = packet / n
+ if ChannelMode_presence3 is 1:
+ o = ChannelModeHdr(ieiCM=0x14, eightBitCM=0x0)
+ packet = packet / o
+ if ChannelMode_presence4 is 1:
+ p = ChannelModeHdr(ieiCM=0x15, eightBitCM=0x0)
+ packet = packet / p
+ if ChannelMode_presence5 is 1:
+ q = ChannelModeHdr(ieiCM=0x16, eightBitCM=0x0)
+ packet = packet / q
+ if ChannelMode_presence6 is 1:
+ r = ChannelModeHdr(ieiCM=0x17, eightBitCM=0x0)
+ packet = packet / r
+ if ChannelMode_presence7 is 1:
+ s = ChannelModeHdr(ieiCM=0x18, eightBitCM=0x0)
+ packet = packet / s
+ if ChannelDescription_presence1 is 1:
+ s1 = ChannelDescriptionHdr(ieiCD=0x64, eightBitCD=0x0)
+ packet = packet / s1
+ if ChannelMode2_presence is 1:
+ t = ChannelMode2Hdr(ieiCM2=0x66, eightBitCM2=0x0)
+ packet = packet / t
+ if FrequencyChannelSequence_presence is 1:
+ u = FrequencyChannelSequenceHdr(ieiFCS=0x69, eightBitFCS=0x0)
+ packet = packet / u
+ if MobileAllocation_presence is 1:
+ v = MobileAllocationHdr(ieiMA=0x72, eightBitMA=0x0)
+ packet = packet / v
+ if StartingTime_presence is 1:
+ w = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0)
+ packet = packet / w
+ if TimeDifference_presence is 1:
+ x = TimeDifferenceHdr(ieiTD=0x7B, eightBitTD=0x0)
+ packet = packet / x
+ if TimingAdvance_presence is 1:
+ y = TimingAdvanceHdr(ieiTA=0x7D, eightBitTA=0x0)
+ packet = packet / y
+ if FrequencyShortList_presence1 is 1:
+ z = FrequencyShortListHdr(ieiFSL=0x12)
+ packet = packet / z
+ if FrequencyList_presence1 is 1:
+ aa = FrequencyListHdr(ieiFL=0x19, eightBitFL=0x0)
+ packet = packet / aa
+ if ChannelDescription2_presence is 1:
+ ab = ChannelDescription2Hdr(ieiCD2=0x1C, eightBitCD2=0x0)
+ packet = packet / ab
+ if ChannelDescription_presence2 is 1:
+ ac = ChannelDescriptionHdr(ieiCD=0x1D, eightBitCD=0x0)
+ packet = packet / ac
+ if FrequencyChannelSequence_presence1 is 1:
+ ad = FrequencyChannelSequenceHdr(ieiFCS=0x1E, eightBitFCS=0x0)
+ packet = packet / ad
+ if MobileAllocation_presence1 is 1:
+ ae = MobileAllocationHdr(ieiMA=0x21, eightBitMA=0x0)
+ packet = packet / ae
+ if CipherModeSetting_presence is 1:
+ af = CipherModeSettingHdr(ieiCMS=0x9, eightBitCMS=0x0)
+ packet = packet / af
+ if VgcsTargetModeIdentication_presence is 1:
+ ag = VgcsTargetModeIdenticationHdr(ieiVTMI=0x01, eightBitVTMI=0x0)
+ packet = packet / ag
+ if MultiRateConfiguration_presence is 1:
+ ah = MultiRateConfigurationHdr(ieiMRC=0x03, eightBitMRC=0x0)
+ packet = packet / ah
+ return packet
+
+
+def handoverComplete(MobileTimeDifference_presence=0):
+ """HANDOVER COMPLETE Section 9.1.16"""
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0x2c) # 00101100
+ c = RrCause()
+ packet = a / b / c
+ if MobileTimeDifference_presence is 1:
+ d = MobileTimeDifferenceHdr(ieiMTD=0x77, eightBitMTD=0x0)
+ packet = packet / d
+ return packet
+
+
+def handoverFailure():
+ """HANDOVER FAILURE Section 9.1.17"""
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0x28) # 00101000
+ c = RrCause()
+ packet = a / b / c
+ return packet
+
+
+#The L2 pseudo length of this message is the sum of lengths of all
+#information elements present in the message except
+#the IA Rest Octets and L2 Pseudo Length information elements.
+# Network to MS
+def immediateAssignment(ChannelDescription_presence=0,
+ PacketChannelDescription_presence=0,
+ StartingTime_presence=0):
+ """IMMEDIATE ASSIGNMENT Section 9.1.18"""
+ a = L2PseudoLength()
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x3F) # 00111111
+ d = PageModeAndDedicatedModeOrTBF()
+ packet = a / b / c / d
+ if ChannelDescription_presence is 1:
+ f = ChannelDescription()
+ packet = packet / f
+ if PacketChannelDescription_presence is 1:
+ g = PacketChannelDescription()
+ packet = packet / g
+ h = RequestReference()
+ i = TimingAdvance()
+ j = MobileAllocation()
+ packet = packet / h / i / j
+ if StartingTime_presence is 1:
+ k = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0)
+ packet = packet / k
+ l = IaRestOctets()
+ packet = packet / l
+ return packet
+
+
+#The L2 pseudo length of this message is the sum of lengths of all
+#information elements present in the message except
+#the IAX Rest Octets and L2 Pseudo Length information elements.
+
+# Network to MS
+def immediateAssignmentExtended(StartingTime_presence=0):
+ """IMMEDIATE ASSIGNMENT EXTENDED Section 9.1.19"""
+ a = L2PseudoLength()
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x39) # 00111001
+ d = PageModeAndSpareHalfOctets()
+ f = ChannelDescription()
+ g = RequestReference()
+ h = TimingAdvance()
+ i = MobileAllocation()
+ packet = a / b / c / d / f / g / h / i
+ if StartingTime_presence is 1:
+ j = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0)
+ packet = packet / j
+ k = IaxRestOctets()
+ packet = packet / k
+ return packet
+
+
+# This message has L2 pseudo length 19
+# Network to MS
+def immediateAssignmentReject():
+ """IMMEDIATE ASSIGNMENT REJECT Section 9.1.20"""
+ a = L2PseudoLength(l2pLength=0x13)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x3a) # 00111010
+ d = PageModeAndSpareHalfOctets()
+ f = RequestReference()
+ g = WaitIndication()
+ h = RequestReference()
+ i = WaitIndication()
+ j = RequestReference()
+ k = WaitIndication()
+ l = RequestReference()
+ m = WaitIndication()
+ n = IraRestOctets()
+ packet = a / b / c / d / f / g / h / i / j / k / l / m / n
+ return packet
+
+
+def measurementReport():
+ """MEASUREMENT REPORT Section 9.1.21"""
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0x15) # 00010101
+ c = MeasurementResults()
+ packet = a / b / c
+ return packet
+
+
+# len max 20
+class NotificationFacch():
+ """NOTIFICATION/FACCH Section 9.1.21a"""
+ name = "Notification/facch"
+ fields_desc = [
+ BitField("rr", 0x0, 1),
+ BitField("msgTyoe", 0x0, 5),
+ BitField("layer2Header", 0x0, 2),
+ BitField("frChanDes", 0x0, 24)
+ ]
+
+
+# The L2 pseudo length of this message has a value one
+# Network to MS
+def notificationNch():
+ """NOTIFICATION/NCH Section 9.1.21b"""
+ a = L2PseudoLength(l2pLength=0x01)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x20) # 00100000
+ d = NtNRestOctets()
+ packet = a / b / c / d
+ return packet
+
+
+def notificationResponse():
+ """NOTIFICATION RESPONSE Section 9.1.21d"""
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0x26) # 00100110
+ c = MobileStationClassmark2()
+ d = MobileId()
+ e = DescriptiveGroupOrBroadcastCallReference()
+ packet = a / b / c / d / e
+ return packet
+
+
+# Network to MS
+def rrCellChangeOrder():
+ """RR-CELL CHANGE ORDER Section 9.1.21e"""
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0x8) # 00001000
+ c = CellDescription()
+ d = NcModeAndSpareHalfOctets()
+ packet = a / b / c / d
+ return packet
+
+
+# Network to MS
+def pagingRequestType1(MobileId_presence=0):
+ """PAGING REQUEST TYPE 1 Section 9.1.22"""
+ #The L2 pseudo length of this message is the sum of lengths of all
+ #information elements present in the message except
+ #the P1 Rest Octets and L2 Pseudo Length information elements.
+ a = L2PseudoLength()
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x21) # 00100001
+ d = PageModeAndChannelNeeded()
+ f = MobileId()
+ packet = a / b / c / d / f
+ if MobileId_presence is 1:
+ g = MobileIdHdr(ieiMI=0x17, eightBitMI=0x0)
+ packet = packet / g
+ h = P1RestOctets()
+ packet = packet / h
+ return packet
+
+
+# The L2 pseudo length of this message is the sum of lengths of all
+# information elements present in the message except
+# Network to MS
+def pagingRequestType2(MobileId_presence=0):
+ """PAGING REQUEST TYPE 2 Section 9.1.23"""
+ a = L2PseudoLength()
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x22) # 00100010
+ d = PageModeAndChannelNeeded()
+ f = MobileId()
+ g = MobileId()
+ packet = a / b / c / d / f / g
+ if MobileId_presence is 1:
+ h = MobileIdHdr(ieiMI=0x17, eightBitMI=0x0)
+ packet = packet / h
+ i = P2RestOctets()
+ packet = packet / i
+ return packet
+
+
+# Network to MS
+def pagingRequestType3():
+ """PAGING REQUEST TYPE 3 Section 9.1.24"""
+# This message has a L2 Pseudo Length of 19
+ a = L2PseudoLength(l2pLength=0x13)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x24) # 00100100
+ d = PageModeAndChannelNeeded()
+ e = TmsiPTmsi()
+ f = TmsiPTmsi()
+ g = TmsiPTmsi()
+ h = TmsiPTmsi()
+ i = P3RestOctets()
+ packet = a / b / c / d / e / f / g / h / i
+ return packet
+
+
+def pagingResponse():
+ """PAGING RESPONSE Section 9.1.25"""
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0x27) # 00100111
+ c = CiphKeySeqNrAndSpareHalfOctets()
+ d = MobileStationClassmark2()
+ e = MobileId()
+ packet = a / b / c / d / e
+ return packet
+
+
+# Network to MS
+def partialRelease():
+ """PARTIAL RELEASE Section 9.1.26"""
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0xa) # 00001010
+ c = ChannelDescription()
+ packet = a / b / c
+ return packet
+
+
+def partialReleaseComplete():
+ """PARTIAL RELEASE COMPLETE Section 9.1.27"""
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0xf) # 00001111
+ packet = a / b
+ return packet
+
+
+# Network to MS
+def physicalInformation():
+ """PHYSICAL INFORMATION Section 9.1.28"""
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0x2d) # 00101101
+ c = TimingAdvance()
+ packet = a / b / c
+ return packet
+
+
+def rrInitialisationRequest():
+ """RR Initialisation Request Section 9.1.28.a"""
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0x3c) # 00111100
+ c = CiphKeySeqNrAndMacModeAndChannelCodingRequest()
+ e = MobileStationClassmark2()
+ f = Tlli()
+ g = ChannelRequestDescription()
+ h = GprsMeasurementResults()
+ packet = a / b / c / e / f / g / h
+ return packet
+
+
+def rrStatus():
+ """RR STATUS Section 9.1.29"""
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0x12) # 00010010
+ c = RrCause()
+ packet = a / b / c
+ return packet
+
+
+# It does not
+# follow the basic format. Its length is _25_ bits. The
+# order of bit transmission is defined in GSM 04.04.
+# Network to MS
+class SynchronizationChannelInformation():
+    """SYNCHRONIZATION CHANNEL INFORMATION Section 9.1.30"""
+    # NOTE(review): this class has no Packet base, so fields_desc is inert
+    # and instances cannot be built or dissected.  The 25-bit layout below
+    # (5+3+8+1+5+2+1) is not octet-aligned, which a scapy Packet would
+    # reject -- confirm whether the omission is intentional before fixing.
+    name = "Synchronization Channel Information"
+    fields_desc = [
+        BitField("bsic", 0x0, 5),
+        BitField("t1Hi", 0x0, 3),
+        ByteField("t1Mi", 0x0),
+        BitField("t1Lo", 0x0, 1),
+        BitField("t2", 0x0, 5),
+        BitField("t3Hi", 0x0, 2),
+        BitField("t3Lo", 0x0, 1)
+    ]
+
+
+# This message has a L2 Pseudo Length of 21.
+# Network to MS
+def systemInformationType1():
+ """SYSTEM INFORMATION TYPE 1 Section 9.1.31"""
+ a = L2PseudoLength(l2pLength=0x15)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x19) # 00011001
+ d = CellChannelDescription()
+ e = RachControlParameters()
+ f = Si1RestOctets()
+ packet = a / b / c / d / e / f
+ return packet
+
+
+# This message has a L2 Pseudo Length of 22.
+# Network to MS
+def systemInformationType2():
+ """SYSTEM INFORMATION TYPE 2 Section 9.1.32"""
+ a = L2PseudoLength(l2pLength=0x16)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x1a) # 00011010
+ d = NeighbourCellsDescription()
+ e = NccPermitted()
+ f = RachControlParameters()
+ packet = a / b / c / d / e / f
+ return packet
+
+
+# This message has a L2 pseudo length of 21
+# Network to MS
+def systemInformationType2bis():
+ """SYSTEM INFORMATION TYPE 2bis Section 9.1.33"""
+ a = L2PseudoLength(l2pLength=0x15)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x2) # 00000010
+ d = NeighbourCellsDescription()
+ e = RachControlParameters()
+ f = Si2bisRestOctets()
+ packet = a / b / c / d / e / f
+ return packet
+
+
+# This message has a L2 pseudo length of 18
+# Network to MS
+def systemInformationType2ter():
+ """SYSTEM INFORMATION TYPE 2ter Section 9.1.34"""
+ a = L2PseudoLength(l2pLength=0x12)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x3) # 00000011
+ d = NeighbourCellsDescription2()
+ e = Si2terRestOctets()
+ packet = a / b / c / d / e
+ return packet
+
+
+# This message has a L2 Pseudo Length of 18
+# Network to MS
+def systemInformationType3():
+ """SYSTEM INFORMATION TYPE 3 Section 9.1.35"""
+ a = L2PseudoLength(l2pLength=0x12)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x1b) # 00011011
+ d = CellIdentity()
+ e = LocalAreaId()
+ f = ControlChannelDescription()
+ g = CellOptionsBCCH()
+ h = CellSelectionParameters()
+ i = RachControlParameters()
+ j = Si3RestOctets()
+ packet = a / b / c / d / e / f / g / h / i / j
+ return packet
+
+
+#The L2 pseudo length of this message is the
+#sum of lengths of all information elements present in the message except
+#the SI 4 Rest Octets and L2 Pseudo Length
+# Network to MS
+def systemInformationType4(ChannelDescription_presence=0,
+ MobileAllocation_presence=0):
+ """SYSTEM INFORMATION TYPE 4 Section 9.1.36"""
+ a = L2PseudoLength()
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x1C) # 000111100
+ d = LocalAreaId()
+ e = CellSelectionParameters()
+ f = RachControlParameters()
+ packet = a / b / c / d / e / f
+ if ChannelDescription_presence is 1:
+ g = ChannelDescriptionHdr(ieiCD=0x64, eightBitCD=0x0)
+ packet = packet / g
+ if MobileAllocation_presence is 1:
+ h = MobileAllocationHdr(ieiMA=0x72, eightBitMA=0x0)
+ packet = packet / h
+ i = Si4RestOctets()
+ packet = packet / i
+ return packet
+
+
+#This message has a L2 Pseudo Length of 18
+# Network to MS
+def systemInformationType5():
+ """SYSTEM INFORMATION TYPE 5 Section 9.1.37"""
+ a = L2PseudoLength(l2pLength=0x12)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x35) # 000110101
+ d = NeighbourCellsDescription()
+ packet = a / b / c / d
+ return packet
+
+
+#This message has a L2 Pseudo Length of 18
+# Network to MS
+def systemInformationType5bis():
+ """SYSTEM INFORMATION TYPE 5bis Section 9.1.38"""
+ a = L2PseudoLength(l2pLength=0x12)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x5) # 00000101
+ d = NeighbourCellsDescription()
+ packet = a / b / c / d
+ return packet
+
+
+# This message has a L2 Pseudo Length of 18
+# Network to MS
+def systemInformationType5ter():
+ """SYSTEM INFORMATION TYPE 5ter Section 9.1.39"""
+ a = L2PseudoLength(l2pLength=0x12)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x6) # 00000110
+ d = NeighbourCellsDescription2()
+ packet = a / b / c / d
+ return packet
+
+
+#This message has a L2 Pseudo Length of 11
+# Network to MS
+def systemInformationType6():
+ """SYSTEM INFORMATION TYPE 6 Section 9.1.40"""
+ a = L2PseudoLength(l2pLength=0x0b)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x1e) # 00011011
+ d = CellIdentity()
+ e = LocalAreaId()
+ f = CellOptionsBCCH()
+ g = NccPermitted()
+ h = Si6RestOctets()
+ packet = a / b / c / d / e / f / g
+ return packet
+
+
+# The L2 pseudo length of this message has the value 1
+# Network to MS
+def systemInformationType7():
+ """SYSTEM INFORMATION TYPE 7 Section 9.1.41"""
+ a = L2PseudoLength(l2pLength=0x01)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x37) # 000110111
+ d = Si7RestOctets()
+ packet = a / b / c / d
+ return packet
+
+
+# The L2 pseudo length of this message has the value 1
+# Network to MS
+def systemInformationType8():
+ """SYSTEM INFORMATION TYPE 8 Section 9.1.42"""
+ a = L2PseudoLength(l2pLength=0x01)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x18) # 00011000
+ d = Si8RestOctets()
+ packet = a / b / c / d
+ return packet
+
+
+# The L2 pseudo length of this message has the value 1
+# Network to MS
+def systemInformationType9():
+ """SYSTEM INFORMATION TYPE 9 Section 9.1.43"""
+ a = L2PseudoLength(l2pLength=0x01)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x4) # 00000100
+ d = Si9RestOctets()
+ packet = a / b / c / d
+ return packet
+
+
+# The L2 pseudo length of this message has the value 0
+# Network to MS
+def systemInformationType13():
+ """SYSTEM INFORMATION TYPE 13 Section 9.1.43a"""
+ a = L2PseudoLength(l2pLength=0x00)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x0) # 00000000
+ d = Si13RestOctets()
+ packet = a / b / c / d
+ return packet
+#
+# 9.1.43b / c spare
+#
+
+
+# The L2 pseudo length of this message has the value 1
+# Network to MS
+def systemInformationType16():
+ """SYSTEM INFORMATION TYPE 16 Section 9.1.43d"""
+ a = L2PseudoLength(l2pLength=0x01)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x3d) # 00111101
+ d = Si16RestOctets()
+ packet = a / b / c / d
+ return packet
+
+
+# The L2 pseudo length of this message has the value 1
+# Network to MS
+def systemInformationType17():
+ """SYSTEM INFORMATION TYPE 17 Section 9.1.43e"""
+ a = L2PseudoLength(l2pLength=0x01)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x3e) # 00111110
+ d = Si17RestOctets()
+ packet = a / b / c / d
+ return packet
+
+
+def talkerIndication():
+ """TALKER INDICATION Section 9.1.44"""
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0x11) # 00010001
+ c = MobileStationClassmark2()
+ d = MobileId()
+ packet = a / b / c / d
+ return packet
+
+
+class UplinkAccess():
+ """UPLINK ACCESS Section 9.1.45"""
+ name = "Uplink Access"
+ fields_desc = [
+ ByteField("establishment", 0x0)
+ ]
+
+
+# Network to MS
+def uplinkBusy():
+ """UPLINK BUSY Section 9.1.46"""
+ name = "Uplink Busy"
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0x2a) # 00101010
+ packet = a / b
+ return packet
+
+
+# Network to MS
+class UplinkFree():
+ """UPLINK FREE Section 9.1.47"""
+ name = "Uplink Free"
+ fields_desc = [
+ BitField("pd", 0x0, 1),
+ BitField("msgType", 0x0, 5),
+ BitField("layer2Header", 0x0, 2),
+ BitField("uplinkAccess", 0x0, 1),
+ BitField("lOrH", 0x0, 1), # 0 for L, 1 for H
+ BitField("upIdCode", 0x0, 6),
+ ]
+
+
+def uplinkRelease():
+ """UPLINK RELEASE Section 9.1.48"""
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0xe) # 00001110
+ c = RrCause()
+ packet = a / b / c
+ return packet
+
+
+# Network to MS
+def vgcsUplinkGrant():
+ """VGCS UPLINK GRANT Section 9.1.49"""
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0x9) # 00001001
+ c = RrCause()
+ d = RequestReference()
+ e = TimingAdvance()
+ packet = a / b / c / d / e
+ return packet
+
+
+# Network to MS
+def systemInformationType10():
+    """SYSTEM INFORMATION TYPE 10 Section 9.1.50"""
+    # NOTE(review): this is written like a Packet class body (name and
+    # fields_desc as assignments) but is declared as a function, so the
+    # assignments are discarded and calling it returns None.  It appears
+    # it was meant to be a Packet subclass -- confirm intent before use.
+    name = "SyStem Information Type 10"
+    fields_desc = [
+        BitField("pd", 0x0, 1),
+        BitField("msgType", 0x0, 5),
+        BitField("layer2Header", 0x0, 2),
+        BitField("si10", 0x0, 160)
+    ]
+
+
+# Network to MS
+# The L2 pseudo length of this message has the value 18
+def extendedMeasurementOrder():
+ """EXTENDED MEASUREMENT ORDER Section 9.1.51"""
+ a = L2PseudoLength(l2pLength=0x12)
+ b = TpPd(pd=0x6)
+ c = MessageType(mesType=0x37) # 00110111
+ d = ExtendedMeasurementFrequencyList()
+ packet = a / b / c / d
+ return packet
+
+
+def extendedMeasurementReport():
+ """EXTENDED MEASUREMENT REPORT Section 9.1.52"""
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0x36) # 00110110
+ c = ExtendedMeasurementResults()
+ packet = a / b / c
+ return packet
+
+
+def applicationInformation():
+ """APPLICATION INFORMATION Section 9.1.53"""
+ a = TpPd(pd=0x6)
+ b = MessageType(mesType=0x38) # 00111000
+ c = ApduIDAndApduFlags()
+ e = ApduData()
+ packet = a / b / c / e
+ return packet
+#
+# 9.2 Messages for mobility management
+#
+
+
+# Network to MS
+def authenticationReject():
+ """AUTHENTICATION REJECT Section 9.2.1"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x11) # 00010001
+ packet = a / b
+ return packet
+
+
+# Network to MS
+def authenticationRequest():
+ """AUTHENTICATION REQUEST Section 9.2.2"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x12) # 00010010
+ c = CiphKeySeqNrAndSpareHalfOctets()
+ d = AuthenticationParameterRAND()
+ packet = a / b / c / d
+ return packet
+
+
+def authenticationResponse():
+ """AUTHENTICATION RESPONSE Section 9.2.3"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x14) # 00010100
+ c = AuthenticationParameterSRES()
+ packet = a / b / c
+ return packet
+
+
+def cmReestablishmentRequest(LocalAreaId_presence=0):
+ """CM RE-ESTABLISHMENT REQUEST Section 9.2.4"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x28) # 00101000
+ c = CiphKeySeqNrAndSpareHalfOctets()
+ e = MobileStationClassmark2()
+ f = MobileId()
+ if LocalAreaId_presence is 1:
+ g = LocalAreaId(iei=0x13, eightbit=0x0)
+ packet = packet / g
+ packet = a / b / c / e / f
+ return packet
+
+
+# Network to MS
+def cmServiceAccept():
+ """CM SERVICE ACCEPT Section 9.2.5"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x21) # 00100001
+ packet = a / b
+ return packet
+
+
+# Network to MS
+def cmServicePrompt():
+ """CM SERVICE PROMPT Section 9.2.5a"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x25) # 00100101
+ c = PdAndSapi()
+ packet = a / b / c
+ return packet
+
+
+# Network to MS
+def cmServiceReject():
+ """CM SERVICE REJECT Section 9.2.6"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x22) # 00100010
+ c = RejectCause()
+ packet = a / b / c
+ return packet
+
+
+def cmServiceAbort():
+ """CM SERVICE ABORT Section 9.2.7"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x23) # 00100011
+ packet = a / b
+ return packet
+
+
+# Network to MS
+def abort():
+ """ABORT Section 9.2.8"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x29) # 00101001
+ c = RejectCause()
+ packet = a / b / c
+ return packet
+
+
+def cmServiceRequest(PriorityLevel_presence=0):
+ """CM SERVICE REQUEST Section 9.2.9"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x24) # 00100100
+ c = CmServiceTypeAndCiphKeySeqNr()
+ e = MobileStationClassmark2()
+ f = MobileId()
+ packet = a / b / c / e / f
+ if PriorityLevel_presence is 1:
+ g = PriorityLevelHdr(ieiPL=0x8, eightBitPL=0x0)
+ packet = packet / g
+ return packet
+
+
+# Network to MS
+def identityRequest():
+ """IDENTITY REQUEST Section 9.2.10"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x8) # 00001000
+ c = IdentityTypeAndSpareHalfOctets()
+ packet = a / b / c
+ return packet
+
+
+def identityResponse():
+ """IDENTITY RESPONSE Section 9.2.11"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x9) # 00001001
+ c = MobileId()
+ packet = a / b / c
+ return packet
+
+
+def imsiDetachIndication():
+ """IMSI DETACH INDICATION Section 9.2.12"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x1) # 00000001
+ c = MobileStationClassmark1()
+ d = MobileId()
+ packet = a / b / c / d
+ return packet
+
+
+# Network to MS
+def locationUpdatingAccept(MobileId_presence=0,
+ FollowOnProceed_presence=0,
+ CtsPermission_presence=0):
+ """LOCATION UPDATING ACCEPT Section 9.2.13"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x02) # 00000010
+ c = LocalAreaId()
+ packet = a / b / c
+ if MobileId_presence is 1:
+ d = MobileIdHdr(ieiMI=0x17, eightBitMI=0x0)
+ packet = packet / d
+ if FollowOnProceed_presence is 1:
+ e = FollowOnProceed(ieiFOP=0xA1)
+ packet = packet / e
+ if CtsPermission_presence is 1:
+ f = CtsPermissionHdr(ieiCP=0xA2, eightBitCP=0x0)
+ packet = packet / f
+ return packet
+
+
+# Network to MS
+def locationUpdatingReject():
+ """LOCATION UPDATING REJECT Section 9.2.14"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x4) # 0x00000100
+ c = RejectCause()
+ packet = a / b / c
+ return packet
+
+
+def locationUpdatingRequest():
+ """LOCATION UPDATING REQUEST Section 9.2.15"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x8) # 00001000
+ c = LocationUpdatingTypeAndCiphKeySeqNr()
+ e = LocalAreaId()
+ f = MobileStationClassmark1()
+ g = MobileId()
+ packet = a / b / c / e / f / g
+ return packet
+
+
+# Network to MS
+def mmInformation(NetworkName_presence=0, NetworkName_presence1=0,
+ TimeZone_presence=0, TimeZoneAndTime_presence=0,
+ LsaIdentifier_presence=0):
+ """MM INFORMATION Section 9.2.15a"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x32) # 00110010
+ packet = a / b
+ if NetworkName_presence is 1:
+ c = NetworkNameHdr(ieiNN=0x43, eightBitNN=0x0)
+ packet = packet / c
+ if NetworkName_presence1 is 1:
+ d = NetworkNameHdr(ieiNN=0x45, eightBitNN=0x0)
+ packet = packet / d
+ if TimeZone_presence is 1:
+ e = TimeZoneHdr(ieiTZ=0x46, eightBitTZ=0x0)
+ packet = packet / e
+ if TimeZoneAndTime_presence is 1:
+ f = TimeZoneAndTimeHdr(ieiTZAT=0x47, eightBitTZAT=0x0)
+ packet = packet / f
+ if LsaIdentifier_presence is 1:
+ g = LsaIdentifierHdr(ieiLI=0x48, eightBitLI=0x0)
+ packet = packet / g
+ return packet
+
+
+def mmStatus():
+ """MM STATUS Section 9.2.16"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x31) # 00110001
+ c = RejectCause()
+ packet = a / b / c
+ return packet
+
+
+# Network to MS
+def tmsiReallocationCommand():
+ """TMSI REALLOCATION COMMAND Section 9.2.17"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x1a) # 00011010
+ c = LocalAreaId()
+ d = MobileId()
+ packet = a / b / c / d
+ return packet
+
+
+def tmsiReallocationComplete():
+ """TMSI REALLOCATION COMPLETE Section 9.2.18"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x1b) # 00011011
+ packet = a / b
+ return packet
+
+
+def mmNull():
+ """MM NULL Section 9.2.19"""
+ a = TpPd(pd=0x5)
+ b = MessageType(mesType=0x30) # 00110000
+ packet = a / b
+ return packet
+
+#
+# 9.3 Messages for circuit-switched call control
+#
+
+
+# Network to MS
+def alertingNetToMs(Facility_presence=0, ProgressIndicator_presence=0,
+ UserUser_presence=0):
+ """ALERTING Section 9.3.1.1"""
+ a = TpPd(pd=0x3)
+ b = MessageType(mesType=0x1) # 00000001
+ packet = a / b
+ if Facility_presence is 1:
+ c = FacilityHdr(ieiF=0x1C)
+ packet = packet / c
+ if ProgressIndicator_presence is 1:
+ d = ProgressIndicatorHdr(ieiPI=0x1E)
+ packet = packet / d
+ if UserUser_presence is 1:
+ e = UserUserHdr(ieiUU=0x7E)
+ packet = packet / e
+ return packet
+
+
+def alertingMsToNet(Facility_presence=0, UserUser_presence=0,
+ SsVersionIndicator_presence=0):
+ """ALERTING Section 9.3.1.2"""
+ a = TpPd(pd=0x3)
+ b = MessageType(mesType=0x1) # 00000001
+ packet = a / b
+ if Facility_presence is 1:
+ c = FacilityHdr(ieiF=0x1C, eightBitF=0x0)
+ packet = packet / c
+ if UserUser_presence is 1:
+ d = UserUserHdr(ieiUU=0x7E, eightBitUU=0x0)
+ packet = packet / d
+ if SsVersionIndicator_presence is 1:
+ e = SsVersionIndicatorHdr(ieiSVI=0x7F, eightBitSVI=0x0)
+ packet = packet / e
+ return packet
+
+
+def callConfirmed(RepeatIndicator_presence=0,
+ BearerCapability_presence=0, BearerCapability_presence1=0,
+ Cause_presence=0, CallControlCapabilities_presence=0):
+ """CALL CONFIRMED Section 9.3.2"""
+ a = TpPd(pd=0x3)
+ b = MessageType(mesType=0x8) # 00001000
+ packet = a / b
+ if RepeatIndicator_presence is 1:
+ c = RepeatIndicatorHdr(ieiRI=0xD, eightBitRI=0x0)
+ packet = packet / c
+ if BearerCapability_presence is 1:
+ d = BearerCapabilityHdr(ieiBC=0x04, eightBitBC=0x0)
+ packet = packet / d
+ if BearerCapability_presence1 is 1:
+ e = BearerCapabilityHdr(ieiBC=0x04, eightBitBC=0x0)
+ packet = packet / e
+ if Cause_presence is 1:
+ f = CauseHdr(ieiC=0x08, eightBitC=0x0)
+ packet = packet / f
+ if CallControlCapabilities_presence is 1:
+ g = CallControlCapabilitiesHdr(ieiCCC=0x15, eightBitCCC=0x0)
+ packet = packet / g
+ return packet
+
+
+# Network to MS
+def callProceeding(RepeatIndicator_presence=0,
+ BearerCapability_presence=0,
+ BearerCapability_presence1=0,
+ Facility_presence=0, ProgressIndicator_presence=0,
+ PriorityLevel_presence=0):
+ """CALL PROCEEDING Section 9.3.3"""
+ a = TpPd(pd=0x3)
+ b = MessageType(mesType=0x2) # 00000010
+ packet = a / b
+ if RepeatIndicator_presence is 1:
+ c = RepeatIndicatorHdr(ieiRI=0xD, eightBitRI=0x0)
+ packet = packet / c
+ if BearerCapability_presence is 1:
+ d = BearerCapabilityHdr(ieiBC=0x04, eightBitBC=0x0)
+ packet = packet / d
+ if BearerCapability_presence1 is 1:
+ e = BearerCapabilityHdr(ieiBC=0x04, eightBitBC=0x0)
+ packet = packet / e
+ if Facility_presence is 1:
+ f = FacilityHdr(ieiF=0x1C, eightBitF=0x0)
+ packet = packet / f
+ if ProgressIndicator_presence is 1:
+ g = ProgressIndicatorHdr(ieiPI=0x1E, eightBitPI=0x0)
+ packet = packet / g
+ if PriorityLevel_presence is 1:
+ h = PriorityLevelHdr(ieiPL=0x80, eightBitPL=0x0)
+ packet = packet / h
+ return packet
+
+
+# Network to MS
+def congestionControl(Cause_presence=0):
+ """CONGESTION CONTROL Section 9.3.4"""
+ a = TpPd(pd=0x3)
+ b = MessageType(mesType=0x39) # 00111001
+ c = CongestionLevelAndSpareHalfOctets()
+ packet = a / b / c
+ if Cause_presence is 1:
+ e = CauseHdr(ieiC=0x08, eightBitC=0x0)
+ packet = packet / e
+ return packet
+
+
+# Network to MS
+def connectNetToMs(Facility_presence=0, ProgressIndicator_presence=0,
+ ConnectedNumber_presence=0, ConnectedSubaddress_presence=0,
+ UserUser_presence=0):
+ """CONNECT Section 9.3.5.1"""
+ a = TpPd(pd=0x3)
+ b = MessageType(mesType=0x7) # 00000111
+ packet = a / b
+ if Facility_presence is 1:
+ c = FacilityHdr(ieiF=0x1C, eightBitF=0x0)
+ packet = packet / c
+ if ProgressIndicator_presence is 1:
+ d = ProgressIndicatorHdr(ieiPI=0x1E, eightBitPI=0x0)
+ packet = packet / d
+ if ConnectedNumber_presence is 1:
+ e = ConnectedNumberHdr(ieiCN=0x4C, eightBitCN=0x0)
+ packet = packet / e
+ if ConnectedSubaddress_presence is 1:
+ f = ConnectedSubaddressHdr(ieiCS=0x4D, eightBitCS=0x0)
+ packet = packet / f
+ if UserUser_presence is 1:
+ g = UserUserHdr(ieiUU=0x7F, eightBitUU=0x0)
+ packet = packet / g
+ return packet
+
+
+def connectMsToNet(Facility_presence=0, ConnectedSubaddress_presence=0,
+ UserUser_presence=0, SsVersionIndicator_presence=0):
+ """CONNECT Section 9.3.5.2"""
+ a = TpPd(pd=0x3)
+ b = MessageType(mesType=0x7) # 00000111
+ packet = a / b
+ if Facility_presence is 1:
+ c = FacilityHdr(ieiF=0x1C, eightBitF=0x0)
+ packet = packet / c
+ if ConnectedSubaddress_presence is 1:
+ d = ConnectedSubaddressHdr(ieiCS=0x4D, eightBitCS=0x0)
+ packet = packet / d
+ if UserUser_presence is 1:
+ e = UserUserHdr(ieiUU=0x7F, eightBitUU=0x0)
+ packet = packet / e
+ if SsVersionIndicator_presence is 1:
+ f = SsVersionIndicatorHdr(ieiSVI=0x7F, eightBitSVI=0x0)
+ packet = packet / f
+ return packet
+
+
+def connectAcknowledge():
+ """CONNECT ACKNOWLEDGE Section 9.3.6"""
+ a = TpPd(pd=0x3)
+ b = MessageType(mesType=0xf) # 00001111
+ packet = a / b
+ return packet
+
+
+# Network to MS
+def disconnectNetToMs(Facility_presence=0, ProgressIndicator_presence=0,
+ UserUser_presence=0, AllowedActions_presence=0):
+ """DISCONNECT Section 9.3.7.1"""
+ a = TpPd(pd=0x3)
+ b = MessageType(mesType=0x25) # 00100101
+ c = Cause()
+ packet = a / b / c
+ if Facility_presence is 1:
+ d = FacilityHdr(ieiF=0x1C, eightBitF=0x0)
+ packet = packet / d
+ if ProgressIndicator_presence is 1:
+ e = ProgressIndicatorHdr(ieiPI=0x1E, eightBitPI=0x0)
+ packet = packet / e
+ if UserUser_presence is 1:
+ f = UserUserHdr(ieiUU=0x7E, eightBitUU=0x0)
+ packet = packet / f
+ if AllowedActions_presence is 1:
+ g = AllowedActionsHdr(ieiAA=0x7B, eightBitAA=0x0)
+ packet = packet / g
+ return packet
+
+
def disconnectMsToNet(Facility_presence=0, UserUser_presence=0,
                      SsVersionIndicator_presence=0):
    """DISCONNECT Section 9.3.7.2 (MS to network).

    Mandatory Cause IE; optional IEs appended when the matching
    *_presence flag equals 1 (``==`` replaces the fragile ``is`` test).
    """
    packet = TpPd(pd=0x3) / MessageType(mesType=0x25) / Cause()  # 00100101
    if Facility_presence == 1:
        packet = packet / FacilityHdr(ieiF=0x1C, eightBitF=0x0)
    if UserUser_presence == 1:
        packet = packet / UserUserHdr(ieiUU=0x7E, eightBitUU=0x0)
    if SsVersionIndicator_presence == 1:
        packet = packet / SsVersionIndicatorHdr(ieiSVI=0x7F, eightBitSVI=0x0)
    return packet
+
+
def emergencySetup(BearerCapability_presence=0):
    """EMERGENCY SETUP Section 9.3.8.

    Optional Bearer capability IE appended when the flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = TpPd(pd=0x3) / MessageType(mesType=0xe)  # 00001110
    if BearerCapability_presence == 1:
        packet = packet / BearerCapabilityHdr(ieiBC=0x04, eightBitBC=0x0)
    return packet
+
+
+# Network to MS
def facilityNetToMs():
    """FACILITY Section 9.3.9.1 (network to MS) — mandatory part only."""
    # CC protocol discriminator, message type, then the Facility IE.
    return TpPd(pd=0x3) / MessageType(mesType=0x3a) / Facility()  # 00111010
+
+
def facilityMsToNet(SsVersionIndicator_presence=0):
    """FACILITY Section 9.3.9.2 (MS to network).

    Mandatory Facility IE; optional SS version indicator appended when
    the flag equals 1 (``==`` replaces the fragile ``is`` test).
    """
    packet = TpPd(pd=0x3) / MessageType(mesType=0x3a) / Facility()  # 00111010
    if SsVersionIndicator_presence == 1:
        packet = packet / SsVersionIndicatorHdr(ieiSVI=0x7F, eightBitSVI=0x0)
    return packet
+
+
def hold():
    """HOLD Section 9.3.10 — mandatory part only."""
    return TpPd(pd=0x3) / MessageType(mesType=0x18)  # 00011000
+
+
+# Network to MS
def holdAcknowledge():
    """HOLD ACKNOWLEDGE Section 9.3.11 — mandatory part only."""
    return TpPd(pd=0x3) / MessageType(mesType=0x19)  # 00011001
+
+
+# Network to MS
def holdReject():
    """HOLD REJECT Section 9.3.12 — mandatory Cause IE included."""
    return TpPd(pd=0x3) / MessageType(mesType=0x1a) / Cause()  # 00011010
+
+
def modify(LowLayerCompatibility_presence=0,
           HighLayerCompatibility_presence=0,
           ReverseCallSetupDirection_presence=0):
    """MODIFY Section 9.3.13.

    Mandatory Bearer capability IE; optional IEs appended when the
    matching flag equals 1 (``==`` replaces the fragile ``is`` test).
    """
    packet = TpPd(pd=0x3) / MessageType(mesType=0x17) / BearerCapability()  # 00010111
    if LowLayerCompatibility_presence == 1:
        packet = packet / LowLayerCompatibilityHdr(ieiLLC=0x7C, eightBitLLC=0x0)
    if HighLayerCompatibility_presence == 1:
        packet = packet / HighLayerCompatibilityHdr(ieiHLC=0x7D, eightBitHLC=0x0)
    if ReverseCallSetupDirection_presence == 1:
        packet = packet / ReverseCallSetupDirectionHdr(ieiRCSD=0xA3)
    return packet
+
+
def modifyComplete(LowLayerCompatibility_presence=0,
                   HighLayerCompatibility_presence=0,
                   ReverseCallSetupDirection_presence=0):
    """MODIFY COMPLETE Section 9.3.14.

    Mandatory Bearer capability IE; optional IEs appended when the
    matching flag equals 1 (``==`` replaces the fragile ``is`` test).
    """
    packet = TpPd(pd=0x3) / MessageType(mesType=0x1f) / BearerCapability()  # 00011111
    if LowLayerCompatibility_presence == 1:
        packet = packet / LowLayerCompatibilityHdr(ieiLLC=0x7C, eightBitLLC=0x0)
    if HighLayerCompatibility_presence == 1:
        packet = packet / HighLayerCompatibilityHdr(ieiHLC=0x7D, eightBitHLC=0x0)
    if ReverseCallSetupDirection_presence == 1:
        # Was ReverseCallSetupDirection(...): the header variant is what
        # modify() uses for the same type-2 IE — made consistent here.
        packet = packet / ReverseCallSetupDirectionHdr(ieiRCSD=0xA3)
    return packet
+
+
def modifyReject(LowLayerCompatibility_presence=0,
                 HighLayerCompatibility_presence=0):
    """MODIFY REJECT Section 9.3.15.

    Mandatory Bearer capability and Cause IEs; optional IEs appended
    when the matching flag equals 1 (``==`` replaces ``is``).
    """
    packet = (TpPd(pd=0x3) / MessageType(mesType=0x13) /  # 00010011
              BearerCapability() / Cause())
    if LowLayerCompatibility_presence == 1:
        packet = packet / LowLayerCompatibilityHdr(ieiLLC=0x7C, eightBitLLC=0x0)
    if HighLayerCompatibility_presence == 1:
        packet = packet / HighLayerCompatibilityHdr(ieiHLC=0x7D, eightBitHLC=0x0)
    return packet
+
+
def notify():
    """NOTIFY Section 9.3.16 — mandatory Notification indicator IE."""
    return TpPd(pd=0x3) / MessageType(mesType=0x3e) / NotificationIndicator()  # 00111110
+
+
+# Network to MS
def progress(UserUser_presence=0):
    """PROGRESS Section 9.3.17 (network to MS).

    Mandatory Progress indicator IE; optional User-user IE appended
    when the flag equals 1 (``==`` replaces the fragile ``is`` test).
    """
    packet = TpPd(pd=0x3) / MessageType(mesType=0x3) / ProgressIndicator()  # 00000011
    if UserUser_presence == 1:
        packet = packet / UserUserHdr()
    return packet
+
+
+# Network to MS
def ccEstablishment():
    """CC-ESTABLISHMENT Section 9.3.17a (network to MS)."""
    # Mandatory Setup container IE after the two header octets.
    return TpPd(pd=0x3) / MessageType(mesType=0x4) / SetupContainer()  # 00000100
+
+
def ccEstablishmentConfirmed(RepeatIndicator_presence=0,
                             BearerCapability_presence=0,
                             BearerCapability_presence1=0,
                             Cause_presence=0):
    """CC-ESTABLISHMENT CONFIRMED Section 9.3.17b.

    Optional IEs appended when the matching *_presence flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = TpPd(pd=0x3) / MessageType(mesType=0x6)  # 00000110
    if RepeatIndicator_presence == 1:
        packet = packet / RepeatIndicatorHdr(ieiRI=0xD, eightBitRI=0x0)
    if BearerCapability_presence == 1:
        packet = packet / BearerCapabilityHdr(ieiBC=0x04, eightBitBC=0x0)
    if BearerCapability_presence1 == 1:
        packet = packet / BearerCapabilityHdr(ieiBC=0x04, eightBitBC=0x0)
    if Cause_presence == 1:
        packet = packet / CauseHdr(ieiC=0x08, eightBitC=0x0)
    return packet
+
+
+# Network to MS
def releaseNetToMs():
    """RELEASE Section 9.3.18.1 (network to MS)."""
    header = TpPd(pd=0x3) / MessageType(mesType=0x2d)  # 00101101
    # Two Cause IEs, a Facility IE and a User-user IE are always appended.
    ies = (CauseHdr(ieiC=0x08, eightBitC=0x0) /
           CauseHdr(ieiC=0x08, eightBitC=0x0) /
           FacilityHdr(ieiF=0x1C, eightBitF=0x0) /
           UserUserHdr(ieiUU=0x7E, eightBitUU=0x0))
    return header / ies
+
+
def releaseMsToNet(Cause_presence=0, Cause_presence1=0,
                   Facility_presence=0, UserUser_presence=0,
                   SsVersionIndicator_presence=0):
    """RELEASE Section 9.3.18.2 (MS to network).

    Optional IEs appended when the matching *_presence flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = TpPd(pd=0x3) / MessageType(mesType=0x2d)  # 00101101
    if Cause_presence == 1:
        packet = packet / CauseHdr(ieiC=0x08, eightBitC=0x0)
    if Cause_presence1 == 1:
        packet = packet / CauseHdr(ieiC=0x08, eightBitC=0x0)
    if Facility_presence == 1:
        packet = packet / FacilityHdr(ieiF=0x1C, eightBitF=0x0)
    if UserUser_presence == 1:
        packet = packet / UserUserHdr(ieiUU=0x7E, eightBitUU=0x0)
    if SsVersionIndicator_presence == 1:
        packet = packet / SsVersionIndicatorHdr(ieiSVI=0x7F, eightBitSVI=0x0)
    return packet
+
+
+# Network to MS
def recall():
    """RECALL Section 9.3.18a (network to MS)."""
    # Mandatory Recall type and Facility IEs after the header octets.
    return (TpPd(pd=0x3) / MessageType(mesType=0xb) /  # 00001011
            RecallType() / Facility())
+
+
+# Network to MS
def releaseCompleteNetToMs(Cause_presence=0, Facility_presence=0,
                           UserUser_presence=0):
    """RELEASE COMPLETE Section 9.3.19.1 (network to MS).

    Optional IEs appended when the matching *_presence flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = TpPd(pd=0x3) / MessageType(mesType=0x2a)  # 00101010
    if Cause_presence == 1:
        packet = packet / CauseHdr(ieiC=0x08, eightBitC=0x0)
    if Facility_presence == 1:
        packet = packet / FacilityHdr(ieiF=0x1C, eightBitF=0x0)
    if UserUser_presence == 1:
        packet = packet / UserUserHdr(ieiUU=0x7E, eightBitUU=0x0)
    return packet
+
+
def releaseCompleteMsToNet(Cause_presence=0, Facility_presence=0,
                           UserUser_presence=0, SsVersionIndicator_presence=0):
    """RELEASE COMPLETE Section 9.3.19.2 (MS to network).

    Optional IEs appended when the matching *_presence flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = TpPd(pd=0x3) / MessageType(mesType=0x2a)  # 00101010
    if Cause_presence == 1:
        packet = packet / CauseHdr(ieiC=0x08, eightBitC=0x0)
    if Facility_presence == 1:
        packet = packet / FacilityHdr(ieiF=0x1C, eightBitF=0x0)
    if UserUser_presence == 1:
        packet = packet / UserUserHdr(ieiUU=0x7E, eightBitUU=0x0)
    if SsVersionIndicator_presence == 1:
        packet = packet / SsVersionIndicatorHdr(ieiSVI=0x7F, eightBitSVI=0x0)
    return packet
+
+
def retrieve():
    """RETRIEVE Section 9.3.20 — mandatory part only."""
    return TpPd(pd=0x3) / MessageType(mesType=0x1c)  # 00011100
+
+
+# Network to MS
def retrieveAcknowledge():
    """RETRIEVE ACKNOWLEDGE Section 9.3.21 — mandatory part only."""
    return TpPd(pd=0x3) / MessageType(mesType=0x1d)  # 00011101
+
+
+# Network to MS
def retrieveReject():
    """RETRIEVE REJECT Section 9.3.22 — mandatory Cause IE included."""
    return TpPd(pd=0x3) / MessageType(mesType=0x1e) / Cause()  # 00011110
+
+
+# Network to MS
def setupMobileTerminated(RepeatIndicator_presence=0,
                          BearerCapability_presence=0,
                          BearerCapability_presence1=0,
                          Facility_presence=0, ProgressIndicator_presence=0,
                          Signal_presence=0,
                          CallingPartyBcdNumber_presence=0,
                          CallingPartySubaddress_presence=0,
                          CalledPartyBcdNumber_presence=0,
                          CalledPartySubaddress_presence=0,
                          # RecallType_presence=0,
                          RedirectingPartyBcdNumber_presence=0,
                          RedirectingPartySubaddress_presence=0,
                          RepeatIndicator_presence1=0,
                          LowLayerCompatibility_presence=0,
                          LowLayerCompatibility_presence1=0,
                          RepeatIndicator_presence2=0,
                          HighLayerCompatibility_presence=0,
                          HighLayerCompatibility_presence1=0,
                          UserUser_presence=0, PriorityLevel_presence=0,
                          AlertingPattern_presence=0):
    """SETUP Section 9.3.23.1 (mobile terminated).

    Optional IEs appended when the matching *_presence flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = TpPd(pd=0x3) / MessageType(mesType=0x5)  # 00000101
    if RepeatIndicator_presence == 1:
        packet = packet / RepeatIndicatorHdr(ieiRI=0xD, eightBitRI=0x0)
    if BearerCapability_presence == 1:
        packet = packet / BearerCapabilityHdr(ieiBC=0x04, eightBitBC=0x0)
    if BearerCapability_presence1 == 1:
        packet = packet / BearerCapabilityHdr(ieiBC=0x04, eightBitBC=0x0)
    if Facility_presence == 1:
        packet = packet / FacilityHdr(ieiF=0x1C, eightBitF=0x0)
    if ProgressIndicator_presence == 1:
        packet = packet / ProgressIndicatorHdr(ieiPI=0x1E, eightBitPI=0x0)
    if Signal_presence == 1:
        packet = packet / SignalHdr(ieiS=0x34, eightBitS=0x0)
    if CallingPartyBcdNumber_presence == 1:
        packet = packet / CallingPartyBcdNumberHdr(ieiCPBN=0x5C, eightBitCPBN=0x0)
    if CallingPartySubaddress_presence == 1:
        packet = packet / CallingPartySubaddressHdr(ieiCPS=0x5D, eightBitCPS=0x0)
    if CalledPartyBcdNumber_presence == 1:
        packet = packet / CalledPartyBcdNumberHdr(ieiCPBN=0x5E, eightBitCPBN=0x0)
    if CalledPartySubaddress_presence == 1:
        packet = packet / CalledPartySubaddressHdr(ieiCPS=0x6D, eightBitCPS=0x0)
    if RedirectingPartyBcdNumber_presence == 1:
        packet = packet / RedirectingPartyBcdNumberHdr(ieiRPBN=0x74, eightBitRPBN=0x0)
    if RedirectingPartySubaddress_presence == 1:
        # BUG FIX: the original called the int flag itself
        # (RedirectingPartySubaddress_presence(...)) -> TypeError.
        # Field names assumed analogous to CallingPartySubaddressHdr —
        # TODO confirm against the class definition.
        packet = packet / RedirectingPartySubaddressHdr(ieiRPS=0x75,
                                                        eightBitRPS=0x0)
    if RepeatIndicator_presence1 == 1:
        packet = packet / RepeatIndicatorHdr(ieiRI=0xD0, eightBitRI=0x0)
    if LowLayerCompatibility_presence == 1:
        packet = packet / LowLayerCompatibilityHdr(ieiLLC=0x7C, eightBitLLC=0x0)
    if LowLayerCompatibility_presence1 == 1:
        packet = packet / LowLayerCompatibilityHdr(ieiLLC=0x7C, eightBitLLC=0x0)
    if RepeatIndicator_presence2 == 1:
        packet = packet / RepeatIndicatorHdr(ieiRI=0xD, eightBitRI=0x0)
    if HighLayerCompatibility_presence == 1:
        packet = packet / HighLayerCompatibilityHdr(ieiHLC=0x7D, eightBitHLC=0x0)
    if HighLayerCompatibility_presence1 == 1:
        packet = packet / HighLayerCompatibilityHdr(ieiHLC=0x7D, eightBitHLC=0x0)
    if UserUser_presence == 1:
        packet = packet / UserUserHdr(ieiUU=0x7E, eightBitUU=0x0)
    if PriorityLevel_presence == 1:
        packet = packet / PriorityLevelHdr(ieiPL=0x8, eightBitPL=0x0)
    if AlertingPattern_presence == 1:
        packet = packet / AlertingPatternHdr(ieiAP=0x19, eightBitAP=0x0)
    return packet
+
+
def setupMobileOriginated(RepeatIndicator_presence=0,
                          BearerCapability_presence=0,
                          BearerCapability_presence1=0,
                          Facility_presence=0,
                          CallingPartySubaddress_presence=0,
                          CalledPartyBcdNumber_presence=0,
                          CalledPartySubaddress_presence=0,
                          RepeatIndicator_presence1=0,
                          LowLayerCompatibility_presence=0,
                          LowLayerCompatibility_presence1=0,
                          RepeatIndicator_presence2=0,
                          HighLayerCompatibility_presence=0,
                          HighLayerCompatibility_presence1=0,
                          UserUser_presence=0, SsVersionIndicator_presence=0,
                          ClirSuppression_presence=0,
                          ClirInvocation_presence=0,
                          CallControlCapabilities_presence=0,
                          Facility_presence1=0,
                          Facility_presence2=0):
    """SETUP Section 9.3.23.2 (mobile originated).

    Optional IEs appended when the matching *_presence flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = TpPd(pd=0x3) / MessageType(mesType=0x5)  # 00000101
    if RepeatIndicator_presence == 1:
        packet = packet / RepeatIndicatorHdr(ieiRI=0xD, eightBitRI=0x0)
    if BearerCapability_presence == 1:
        packet = packet / BearerCapabilityHdr(ieiBC=0x04, eightBitBC=0x0)
    if BearerCapability_presence1 == 1:
        packet = packet / BearerCapabilityHdr(ieiBC=0x04, eightBitBC=0x0)
    if Facility_presence == 1:
        packet = packet / FacilityHdr(ieiF=0x1C, eightBitF=0x0)
    if CallingPartySubaddress_presence == 1:
        packet = packet / CallingPartySubaddressHdr(ieiCPS=0x5D, eightBitCPS=0x0)
    if CalledPartyBcdNumber_presence == 1:
        packet = packet / CalledPartyBcdNumberHdr(ieiCPBN=0x5E, eightBitCPBN=0x0)
    if CalledPartySubaddress_presence == 1:
        packet = packet / CalledPartySubaddressHdr(ieiCPS=0x6D, eightBitCPS=0x0)
    if RepeatIndicator_presence1 == 1:
        packet = packet / RepeatIndicatorHdr(ieiRI=0xD0, eightBitRI=0x0)
    if LowLayerCompatibility_presence == 1:
        packet = packet / LowLayerCompatibilityHdr(ieiLLC=0x7C, eightBitLLC=0x0)
    if LowLayerCompatibility_presence1 == 1:
        packet = packet / LowLayerCompatibilityHdr(ieiLLC=0x7C, eightBitLLC=0x0)
    if RepeatIndicator_presence2 == 1:
        packet = packet / RepeatIndicatorHdr(ieiRI=0xD, eightBitRI=0x0)
    if HighLayerCompatibility_presence == 1:
        packet = packet / HighLayerCompatibilityHdr(ieiHLC=0x7D, eightBitHLC=0x0)
    if HighLayerCompatibility_presence1 == 1:
        packet = packet / HighLayerCompatibilityHdr(ieiHLC=0x7D, eightBitHLC=0x0)
    if UserUser_presence == 1:
        packet = packet / UserUserHdr(ieiUU=0x7E, eightBitUU=0x0)
    if SsVersionIndicator_presence == 1:
        packet = packet / SsVersionIndicatorHdr(ieiSVI=0x7F, eightBitSVI=0x0)
    if ClirSuppression_presence == 1:
        packet = packet / ClirSuppressionHdr(ieiCS=0xA1, eightBitCS=0x0)
    if ClirInvocation_presence == 1:
        packet = packet / ClirInvocationHdr(ieiCI=0xA2, eightBitCI=0x0)
    if CallControlCapabilities_presence == 1:
        packet = packet / CallControlCapabilitiesHdr(ieiCCC=0x15, eightBitCCC=0x0)
    if Facility_presence1 == 1:
        packet = packet / FacilityHdr(ieiF=0x1D, eightBitF=0x0)
    if Facility_presence2 == 1:
        packet = packet / FacilityHdr(ieiF=0x1B, eightBitF=0x0)
    return packet
+
+
def startCc(CallControlCapabilities_presence=0):
    """START CC Section 9.3.23a.

    Optional Call control capabilities IE appended when the flag equals
    1.  BUG FIX: the original assigned ``paclet / c`` — a typo for
    ``packet`` that raised NameError whenever the flag was set.
    """
    packet = TpPd(pd=0x3) / MessageType(mesType=0x9)  # 00001001
    if CallControlCapabilities_presence == 1:
        packet = packet / CallControlCapabilitiesHdr(ieiCCC=0x15, eightBitCCC=0x0)
    return packet
+
+
def startDtmf():
    """START DTMF Section 9.3.24 — mandatory Keypad facility IE."""
    return (TpPd(pd=0x3) / MessageType(mesType=0x35) /  # 00110101
            KeypadFacilityHdr(ieiKF=0x2C, eightBitKF=0x0))
+
+
+# Network to MS
def startDtmfAcknowledge():
    """START DTMF ACKNOWLEDGE Section 9.3.25 (network to MS)."""
    return (TpPd(pd=0x3) / MessageType(mesType=0x32) /  # 00110010
            KeypadFacilityHdr(ieiKF=0x2C, eightBitKF=0x0))
+
+
+# Network to MS
def startDtmfReject():
    """START DTMF REJECT Section 9.3.26 (network to MS)."""
    return TpPd(pd=0x3) / MessageType(mesType=0x37) / Cause()  # 00110111
+
+
def status(AuxiliaryStates_presence=0):
    """STATUS Section 9.3.27.

    Mandatory Cause and Call state IEs; optional Auxiliary states IE
    appended when the flag equals 1 (``==`` replaces ``is``).
    """
    packet = (TpPd(pd=0x3) / MessageType(mesType=0x3d) /  # 00111101
              Cause() / CallState())
    if AuxiliaryStates_presence == 1:
        packet = packet / AuxiliaryStatesHdr(ieiAS=0x24, eightBitAS=0x0)
    return packet
+
+
def statusEnquiry():
    """STATUS ENQUIRY Section 9.3.28 — mandatory part only."""
    return TpPd(pd=0x3) / MessageType(mesType=0x34)  # 00110100
+
+
def stopDtmf():
    """STOP DTMF Section 9.3.29 — mandatory part only."""
    return TpPd(pd=0x3) / MessageType(mesType=0x31)  # 00110001
+
+
+# Network to MS
def stopDtmfAcknowledge():
    """STOP DTMF ACKNOWLEDGE Section 9.3.30 (network to MS)."""
    return TpPd(pd=0x3) / MessageType(mesType=0x32)  # 00110010
+
+
def userInformation(MoreData_presence=0):
    """USER INFORMATION Section 9.3.31.

    Mandatory User-user IE; optional More data IE appended when the
    flag equals 1 (``==`` replaces the fragile ``is`` test).
    """
    packet = TpPd(pd=0x3) / MessageType(mesType=0x20) / UserUser()  # 00100000
    if MoreData_presence == 1:
        packet = packet / MoreDataHdr(ieiMD=0xA0, eightBitMD=0x0)
    return packet
+
+#
+# 9.4 GPRS Mobility Management Messages
+#
+
+
def attachRequest(PTmsiSignature_presence=0, GprsTimer_presence=0,
                  TmsiStatus_presence=0):
    """ATTACH REQUEST Section 9.4.1.

    Mandatory GMM attach-request IEs followed by optional IEs when the
    matching *_presence flag equals 1 (``==`` replaces ``is``).
    """
    packet = (TpPd(pd=0x3) / MessageType(mesType=0x1) /  # 00000001
              MsNetworkCapability() / AttachTypeAndCiphKeySeqNr() /
              DrxParameter() / MobileId() / RoutingAreaIdentification() /
              MsRadioAccessCapability())
    if PTmsiSignature_presence == 1:
        packet = packet / PTmsiSignature(ieiPTS=0x19)
    if GprsTimer_presence == 1:
        packet = packet / GprsTimer(ieiGT=0x17)
    if TmsiStatus_presence == 1:
        packet = packet / TmsiStatus(ieiTS=0x9)
    return packet
+
+
def attachAccept(PTmsiSignature_presence=0, GprsTimer_presence=0,
                 MobileId_presence=0, MobileId_presence1=0,
                 GmmCause_presence=0):
    """ATTACH ACCEPT Section 9.4.2.

    Mandatory result/timer/RAI IEs followed by optional IEs when the
    matching *_presence flag equals 1 (``==`` replaces ``is``).
    """
    packet = (TpPd(pd=0x3) / MessageType(mesType=0x2) /  # 00000010
              AttachResult() / ForceToStandby() / GprsTimer() /
              RadioPriorityAndSpareHalfOctets() / RoutingAreaIdentification())
    if PTmsiSignature_presence == 1:
        packet = packet / PTmsiSignature(ieiPTS=0x19)
    if GprsTimer_presence == 1:
        packet = packet / GprsTimer(ieiGT=0x17)
    if MobileId_presence == 1:
        packet = packet / MobileIdHdr(ieiMI=0x18, eightBitMI=0x0)
    if MobileId_presence1 == 1:
        packet = packet / MobileIdHdr(ieiMI=0x23, eightBitMI=0x0)
    if GmmCause_presence == 1:
        packet = packet / GmmCause(ieiGC=0x25)
    return packet
+
+
def attachComplete():
    """ATTACH COMPLETE Section 9.4.3 — mandatory part only."""
    return TpPd(pd=0x3) / MessageType(mesType=0x3)  # 00000011
+
+
def attachReject():
    """ATTACH REJECT Section 9.4.4.

    BUG FIX: the original used mesType=0x1, which is ATTACH REQUEST
    (see attachRequest above); TS 24.008 table 10.4 defines ATTACH
    REJECT as 0x04.
    """
    return TpPd(pd=0x3) / MessageType(mesType=0x4) / GmmCause()  # 00000100
+
+
def detachRequest(GmmCause_presence=0):
    """DETACH REQUEST Section 9.4.5 (network originated).

    Optional GMM cause IE appended when the flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = (TpPd(pd=0x3) / MessageType(mesType=0x5) /  # 00000101
              DetachTypeAndForceToStandby())
    if GmmCause_presence == 1:
        packet = packet / GmmCause(ieiGC=0x25)
    return packet
+
+
def detachRequestMsOriginating():
    """DETACH REQUEST Section 9.4.5.2 (MS originated)."""
    return (TpPd(pd=0x3) / MessageType(mesType=0x5) /  # 00000101
            DetachTypeAndSpareHalfOctets())
+
+
def detachAcceptMsTerminated():
    """DETACH ACCEPT Section 9.4.6.1 (MS terminated)."""
    return TpPd(pd=0x3) / MessageType(mesType=0x6)  # 00000110
+
+
def detachAcceptMsOriginating():
    """DETACH ACCEPT Section 9.4.6.2 (MS originated)."""
    return (TpPd(pd=0x3) / MessageType(mesType=0x6) /  # 00000110
            ForceToStandbyAndSpareHalfOctets())
+
+
def ptmsiReallocationCommand(PTmsiSignature_presence=0):
    """P-TMSI REALLOCATION COMMAND Section 9.4.7.

    Optional P-TMSI signature appended when the flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = (TpPd(pd=0x3) / MessageType(mesType=0x10) /  # 00010000
              MobileId() / RoutingAreaIdentification() /
              ForceToStandbyAndSpareHalfOctets())
    if PTmsiSignature_presence == 1:
        packet = packet / PTmsiSignature(ieiPTS=0x19)
    return packet
+
+
def ptmsiReallocationComplete():
    """P-TMSI REALLOCATION COMPLETE Section 9.4.8 — mandatory part only."""
    return TpPd(pd=0x3) / MessageType(mesType=0x11)  # 00010001
+
+
def authenticationAndCipheringRequest(
        AuthenticationParameterRAND_presence=0,
        CiphKeySeqNr_presence=0):
    """AUTHENTICATION AND CIPHERING REQUEST Section 9.4.9.

    Optional RAND and CKSN IEs appended when the matching flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = (TpPd(pd=0x3) / MessageType(mesType=0x12) /  # 00010010
              CipheringAlgorithmAndImeisvRequest() /
              ForceToStandbyAndAcReferenceNumber())
    if AuthenticationParameterRAND_presence == 1:
        packet = packet / AuthenticationParameterRAND(ieiAPR=0x21)
    if CiphKeySeqNr_presence == 1:
        packet = packet / CiphKeySeqNrHdr(ieiCKSN=0x08, eightBitCKSN=0x0)
    return packet
+
+
def authenticationAndCipheringResponse(
        AuthenticationParameterSRES_presence=0,
        MobileId_presence=0):
    """AUTHENTICATION AND CIPHERING RESPONSE Section 9.4.10.

    Optional SRES and Mobile identity IEs appended when the matching
    flag equals 1 (``==`` replaces the fragile ``is`` test).
    """
    packet = (TpPd(pd=0x3) / MessageType(mesType=0x13) /  # 00010011
              AcReferenceNumberAndSpareHalfOctets())
    if AuthenticationParameterSRES_presence == 1:
        packet = packet / AuthenticationParameterSRES(ieiAPS=0x22)
    if MobileId_presence == 1:
        packet = packet / MobileIdHdr(ieiMI=0x23, eightBitMI=0x0)
    return packet
+
+
def authenticationAndCipheringReject():
    """AUTHENTICATION AND CIPHERING REJECT Section 9.4.11."""
    return TpPd(pd=0x3) / MessageType(mesType=0x14)  # 00010100
+
+
def identityRequest():
    """IDENTITY REQUEST Section 9.4.12."""
    return (TpPd(pd=0x3) / MessageType(mesType=0x15) /  # 00010101
            IdentityType2AndforceToStandby())
+
+
def identityResponse():
    """IDENTITY RESPONSE Section 9.4.13 — mandatory Mobile identity IE."""
    return TpPd(pd=0x3) / MessageType(mesType=0x16) / MobileId()  # 00010110
+
+
def routingAreaUpdateRequest(PTmsiSignature_presence=0,
                             GprsTimer_presence=0,
                             DrxParameter_presence=0,
                             TmsiStatus_presence=0):
    """ROUTING AREA UPDATE REQUEST Section 9.4.14.

    Optional IEs appended when the matching *_presence flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = (TpPd(pd=0x3) / MessageType(mesType=0x8) /  # 00001000
              UpdateTypeAndCiphKeySeqNr() / RoutingAreaIdentification() /
              MsNetworkCapability())
    if PTmsiSignature_presence == 1:
        packet = packet / PTmsiSignature(ieiPTS=0x19)
    if GprsTimer_presence == 1:
        packet = packet / GprsTimer(ieiGT=0x17)
    if DrxParameter_presence == 1:
        packet = packet / DrxParameter(ieiDP=0x27)
    if TmsiStatus_presence == 1:
        packet = packet / TmsiStatus(ieiTS=0x9)
    return packet
+
+
def routingAreaUpdateAccept(PTmsiSignature_presence=0,
                            MobileId_presence=0, MobileId_presence1=0,
                            ReceiveNpduNumbersList_presence=0,
                            GprsTimer_presence=0, GmmCause_presence=0):
    """ROUTING AREA UPDATE ACCEPT Section 9.4.15.

    Optional IEs appended when the matching *_presence flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = (TpPd(pd=0x3) / MessageType(mesType=0x9) /  # 00001001
              ForceToStandbyAndUpdateResult() / GprsTimer() /
              RoutingAreaIdentification())
    if PTmsiSignature_presence == 1:
        packet = packet / PTmsiSignature(ieiPTS=0x19)
    if MobileId_presence == 1:
        packet = packet / MobileIdHdr(ieiMI=0x18, eightBitMI=0x0)
    if MobileId_presence1 == 1:
        packet = packet / MobileIdHdr(ieiMI=0x23, eightBitMI=0x0)
    if ReceiveNpduNumbersList_presence == 1:
        packet = packet / ReceiveNpduNumbersList(ieiRNNL=0x26)
    if GprsTimer_presence == 1:
        packet = packet / GprsTimer(ieiGT=0x17)
    if GmmCause_presence == 1:
        packet = packet / GmmCause(ieiGC=0x25)
    return packet
+
+
def routingAreaUpdateComplete(ReceiveNpduNumbersList_presence=0):
    """ROUTING AREA UPDATE COMPLETE Section 9.4.16.

    Optional Receive N-PDU numbers list appended when the flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = TpPd(pd=0x3) / MessageType(mesType=0xa)  # 00001010
    if ReceiveNpduNumbersList_presence == 1:
        packet = packet / ReceiveNpduNumbersList(ieiRNNL=0x26)
    return packet
+
+
def routingAreaUpdateReject():
    """ROUTING AREA UPDATE REJECT Section 9.4.17."""
    return (TpPd(pd=0x3) / MessageType(mesType=0xb) /  # 00001011
            GmmCause() / ForceToStandbyAndSpareHalfOctets())
+
+
def gmmStatus():
    """GMM STATUS Section 9.4.18 — mandatory GMM cause IE."""
    return TpPd(pd=0x3) / MessageType(mesType=0x20) / GmmCause()  # 00100000
+
+
def gmmInformation(NetworkName_presence=0, NetworkName_presence1=0,
                   TimeZone_presence=0, TimeZoneAndTime_presence=0,
                   LsaIdentifier_presence=0):
    """GMM INFORMATION Section 9.4.19.

    Optional IEs appended when the matching *_presence flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = TpPd(pd=0x3) / MessageType(mesType=0x21)  # 00100001
    if NetworkName_presence == 1:
        packet = packet / NetworkNameHdr(ieiNN=0x43, eightBitNN=0x0)
    if NetworkName_presence1 == 1:
        packet = packet / NetworkNameHdr(ieiNN=0x45, eightBitNN=0x0)
    if TimeZone_presence == 1:
        packet = packet / TimeZoneHdr(ieiTZ=0x46, eightBitTZ=0x0)
    if TimeZoneAndTime_presence == 1:
        packet = packet / TimeZoneAndTimeHdr(ieiTZAT=0x47, eightBitTZAT=0x0)
    if LsaIdentifier_presence == 1:
        packet = packet / LsaIdentifierHdr(ieiLI=0x48, eightBitLI=0x0)
    return packet
+
+#
+# 9.5 GPRS Session Management Messages
+#
+
+
def activatePdpContextRequest(AccessPointName_presence=0,
                              ProtocolConfigurationOptions_presence=0):
    """ACTIVATE PDP CONTEXT REQUEST Section 9.5.1 (SM, pd=0x8).

    Optional APN and PCO IEs appended when the matching flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = (TpPd(pd=0x8) / MessageType(mesType=0x41) /  # 01000001
              NetworkServiceAccessPointIdentifier() /
              LlcServiceAccessPointIdentifier() /
              QualityOfService() / PacketDataProtocolAddress())
    if AccessPointName_presence == 1:
        packet = packet / AccessPointName(ieiAPN=0x28)
    if ProtocolConfigurationOptions_presence == 1:
        packet = packet / ProtocolConfigurationOptions(ieiPCO=0x27)
    return packet
+
+
def activatePdpContextAccept(PacketDataProtocolAddress_presence=0,
                             ProtocolConfigurationOptions_presence=0):
    """ACTIVATE PDP CONTEXT ACCEPT Section 9.5.2.

    Optional PDP address and PCO IEs appended when the matching flag
    equals 1 (``==`` replaces the fragile ``is`` identity test).
    """
    packet = (TpPd(pd=0x8) / MessageType(mesType=0x42) /  # 01000010
              LlcServiceAccessPointIdentifier() / QualityOfService() /
              RadioPriorityAndSpareHalfOctets())
    if PacketDataProtocolAddress_presence == 1:
        packet = packet / PacketDataProtocolAddress(ieiPDPA=0x2B)
    if ProtocolConfigurationOptions_presence == 1:
        packet = packet / ProtocolConfigurationOptions(ieiPCO=0x27)
    return packet
+
+
def activatePdpContextReject(ProtocolConfigurationOptions_presence=0):
    """ACTIVATE PDP CONTEXT REJECT Section 9.5.3.

    Mandatory SM cause; optional PCO IE appended when the flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = TpPd(pd=0x8) / MessageType(mesType=0x43) / SmCause()  # 01000011
    if ProtocolConfigurationOptions_presence == 1:
        packet = packet / ProtocolConfigurationOptions(ieiPCO=0x27)
    return packet
+
+
def requestPdpContextActivation(AccessPointName_presence=0):
    """REQUEST PDP CONTEXT ACTIVATION Section 9.5.4.

    Mandatory PDP address; optional APN IE appended when the flag
    equals 1 (``==`` replaces the fragile ``is`` identity test).
    """
    packet = (TpPd(pd=0x8) / MessageType(mesType=0x44) /  # 01000100
              PacketDataProtocolAddress())
    if AccessPointName_presence == 1:
        packet = packet / AccessPointName(ieiAPN=0x28)
    return packet
+
+
def requestPdpContextActivationReject():
    """REQUEST PDP CONTEXT ACTIVATION REJECT Section 9.5.5."""
    return TpPd(pd=0x8) / MessageType(mesType=0x45) / SmCause()  # 01000101
+
+
def modifyPdpContextRequest():
    """MODIFY PDP CONTEXT REQUEST Section 9.5.6."""
    return (TpPd(pd=0x8) / MessageType(mesType=0x48) /  # 01001000
            RadioPriorityAndSpareHalfOctets() /
            LlcServiceAccessPointIdentifier() / QualityOfService())
+
+
def modifyPdpContextAccept():
    """MODIFY PDP CONTEXT ACCEPT Section 9.5.7.

    BUG FIX: the original used mesType=0x45, which is REQUEST PDP
    CONTEXT ACTIVATION REJECT (see above); TS 24.008 table 10.4a
    defines MODIFY PDP CONTEXT ACCEPT as 0x49.
    """
    return TpPd(pd=0x8) / MessageType(mesType=0x49)  # 01001001
+
+
def deactivatePdpContextRequest():
    """DEACTIVATE PDP CONTEXT REQUEST Section 9.5.8."""
    return TpPd(pd=0x8) / MessageType(mesType=0x46) / SmCause()  # 01000110
+
+
def deactivatePdpContextAccept():
    """DEACTIVATE PDP CONTEXT ACCEPT Section 9.5.9."""
    return TpPd(pd=0x8) / MessageType(mesType=0x47)  # 01000111
+
+
def activateAaPdpContextRequest(AccessPointName_presence=0,
                                ProtocolConfigurationOptions_presence=0,
                                GprsTimer_presence=0):
    """ACTIVATE AA PDP CONTEXT REQUEST Section 9.5.10.

    Optional IEs appended when the matching *_presence flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = (TpPd(pd=0x8) / MessageType(mesType=0x50) /  # 01010000
              NetworkServiceAccessPointIdentifier() /
              LlcServiceAccessPointIdentifier() /
              QualityOfService() / PacketDataProtocolAddress())
    if AccessPointName_presence == 1:
        packet = packet / AccessPointName(ieiAPN=0x28)
    if ProtocolConfigurationOptions_presence == 1:
        packet = packet / ProtocolConfigurationOptions(ieiPCO=0x27)
    if GprsTimer_presence == 1:
        packet = packet / GprsTimer(ieiGT=0x29)
    return packet
+
+
def activateAaPdpContextAccept(ProtocolConfigurationOptions_presence=0,
                               GprsTimer_presence=0):
    """ACTIVATE AA PDP CONTEXT ACCEPT Section 9.5.11.

    Optional PCO and GPRS timer IEs appended when the matching flag
    equals 1 (``==`` replaces the fragile ``is`` identity test).
    """
    packet = (TpPd(pd=0x8) / MessageType(mesType=0x51) /  # 01010001
              LlcServiceAccessPointIdentifier() / QualityOfService() /
              MobileId() / PacketDataProtocolAddress() /
              RadioPriorityAndSpareHalfOctets())
    if ProtocolConfigurationOptions_presence == 1:
        packet = packet / ProtocolConfigurationOptions(ieiPCO=0x27)
    if GprsTimer_presence == 1:
        packet = packet / GprsTimer(ieiGT=0x29)
    return packet
+
+
def activateAaPdpContextReject(ProtocolConfigurationOptions_presence=0):
    """ACTIVATE AA PDP CONTEXT REJECT Section 9.5.12.

    Mandatory SM cause; optional PCO IE appended when the flag equals 1
    (``==`` replaces the fragile ``is`` identity test).
    """
    packet = TpPd(pd=0x8) / MessageType(mesType=0x52) / SmCause()  # 01010010
    if ProtocolConfigurationOptions_presence == 1:
        packet = packet / ProtocolConfigurationOptions(ieiPCO=0x27)
    return packet
+
+
def deactivateAaPdpContextRequest():
    """DEACTIVATE AA PDP CONTEXT REQUEST Section 9.5.13."""
    return (TpPd(pd=0x8) / MessageType(mesType=0x53) /  # 01010011
            AaDeactivationCauseAndSpareHalfOctets())
+
+
def deactivateAaPdpContextAccept():
    """DEACTIVATE AA PDP CONTEXT ACCEPT Section 9.5.14."""
    return TpPd(pd=0x8) / MessageType(mesType=0x54)  # 01010100
+
+
def smStatus():
    """SM STATUS Section 9.5.15 — mandatory SM cause IE."""
    return TpPd(pd=0x8) / MessageType(mesType=0x55) / SmCause()  # 01010101
+
+
+# ============================================#
+# Information Elements contents (Section 10) #
+# =========================================== #
+
+####
+# This section contains the elements we need to build the messages
+####
+
+#
+# Common information elements:
+#
class CellIdentityHdr(Packet):
    """ Cell identity Section 10.5.1.1 """
    # Type-3 IE: 1-bit "eightBit" flag + 7-bit IEI, then the two
    # cell-identity value octets.  Field order is the wire format.
    name = "Cell Identity"
    fields_desc = [
        BitField("eightBitCI", None, 1),
        XBitField("ieiCI", None, 7),
        ByteField("ciValue1", 0x0),
        ByteField("ciValue2", 0x0)
    ]
+
+
class CiphKeySeqNrHdr(Packet):
    """ Ciphering Key Sequence Number Section 10.5.1.2 """
    # Type-1 (half-octet value) IE: 4-bit IEI, spare bit, 3-bit key
    # sequence number — one octet in total.
    name = "Cipher Key Sequence Number"
    fields_desc = [
        XBitField("ieiCKSN", None, 4),
        BitField("spare", 0x0, 1),
        BitField("keySeq", 0x0, 3)
    ]
+
+
+# Fix 1/2 len problem
# Fix 1/2 len problem
class CiphKeySeqNrAndSpareHalfOctets(Packet):
    # Combines the CKSN half-octet with a spare half-octet so the
    # packed result stays byte-aligned (scapy fields must total whole
    # octets per layer).
    name = "Cipher Key Sequence Number and Spare Half Octets"
    fields_desc = [
        BitField("spare", 0x0, 1),
        BitField("keySeq", 0x0, 3),
        BitField("spareHalfOctets", 0x0, 4)
    ]
+
+
+# Fix 1/2 len problem
# Fix 1/2 len problem
class CiphKeySeqNrAndMacModeAndChannelCodingRequest(Packet):
    # Packs the CKSN half-octet together with the MAC-mode/coding-scheme
    # half-octet into one byte-aligned layer.
    name = "Cipher Key Sequence Number and Mac Mode And Channel Coding Request"
    fields_desc = [
        BitField("spare", 0x0, 1),
        BitField("keySeq", 0x0, 3),
        BitField("macMode", 0x0, 2),
        BitField("cs", 0x0, 2)
    ]
+
+
class LocalAreaIdHdr(Packet):
    """ Local Area Identification Section 10.5.1.3 """
    # IEI octet followed by MCC/MNC digits in BCD nibble order and the
    # two location-area-code octets.
    name = "Location Area Identification"
    fields_desc = [
        BitField("eightBitLAI", None, 1),
        XBitField("ieiLAI", None, 7),
        BitField("mccDigit2", 0x0, 4),
        BitField("mccDigit1", 0x0, 4),
        BitField("mncDigit3", 0x0, 4),
        BitField("mccDigit3", 0x0, 4),
        BitField("mncDigit2", 0x0, 4),
        BitField("mncDigit1", 0x0, 4),
        ByteField("lac1", 0x0),
        ByteField("lac2", 0x0)
    ]
+#
+# The Mobile Identity is a type 4 information element with a minimum
+# length of 3 octet and 11 octets length maximal.
+#
+
+
+# len 3 - 11
+class MobileIdHdr(Packet):
+ """ Mobile Identity Section 10.5.1.4 """
+ name = "Mobile Identity"
+ fields_desc = [
+ BitField("eightBitMI", 0x0, 1),
+ XBitField("ieiMI", 0x0, 7),
+
+ XByteField("lengthMI", None),
+
+ BitField("idDigit1", 0x0, 4),
+ BitField("oddEven", 0x0, 1),
+ BitField("typeOfId", 0x0, 3),
+
+ BitField("idDigit2_1", None, 4), # optional
+ BitField("idDigit2", None, 4),
+
+ BitField("idDigit3_1", None, 4),
+ BitField("idDigit3", None, 4),
+
+ BitField("idDigit4_1", None, 4),
+ BitField("idDigit4", None, 4),
+
+ BitField("idDigit5_1", None, 4),
+ BitField("idDigit5", None, 4),
+
+ BitField("idDigit6_1", None, 4),
+ BitField("idDigit6", None, 4),
+ BitField("idDigit7_1", None, 4),
+ BitField("idDigit7", None, 4),
+ BitField("idDigit8_1", None, 4),
+ BitField("idDigit8", None, 4),
+ BitField("idDigit9_1", None, 4),
+ BitField("idDigit9", None, 4),
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i, None)) # this list holds the values of
+# the variables, the INTERESSTING value!
+ res = adapt(3, 11, a, self.fields_desc)
+ if self.lengthMI is None:
+ p = p[:1] + struct.pack(">B", res[1]) + p[2:]
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ print(repr(p))
+ return p + pay
+
+
+class MobileStationClassmark1Hdr(Packet):
+    """ Mobile Station Classmark 1 Section 10.5.1.5 """
+    name = "Mobile Station Classmark 1"
+    fields_desc = [
+        BitField("eightBitiMSC1", None, 1),
+        XBitField("ieiMSC1", None, 7),
+        # One classmark octet: revision level, controlled early classmark
+        # sending, A5/1 availability and RF power capability.
+        BitField("spare", 0x0, 1),
+        BitField("revisionLvl", 0x0, 2),
+        BitField("esInd", 0x0, 1),
+        BitField("a51", 0x0, 1),
+        BitField("rfPowerCap", 0x0, 3)
+    ]
+
+
+class MobileStationClassmark2Hdr(Packet):
+    """ Mobile Station Classmark 2 Section 10.5.1.6 """
+    name = "Mobile Station Classmark 2"
+    fields_desc = [
+        BitField("eightBitMSC2", None, 1),
+        XBitField("ieiMSC2", None, 7),
+        # Fixed contents length of 3 octets.
+        XByteField("lengthMSC2", 0x3),
+        # Octet 3: same layout as Classmark 1.
+        BitField("spare", 0x0, 1),
+        BitField("revisionLvl", 0x0, 2),
+        BitField("esInd", 0x0, 1),
+        BitField("a51", 0x0, 1),
+        BitField("rfPowerCap", 0x0, 3),
+        # Octet 4: pseudo-sync, SS screening, SMS capability, VBS/VGCS, FC.
+        BitField("spare1", 0x0, 1),
+        BitField("psCap", 0x0, 1),
+        BitField("ssScreenInd", 0x0, 2),
+        BitField("smCaPabi", 0x0, 1),
+        BitField("vbs", 0x0, 1),
+        BitField("vgcs", 0x0, 1),
+        BitField("fc", 0x0, 1),
+        # Octet 5: CM3 indicator, LCS VA, SoLSA, CMSP, A5/3 and A5/2.
+        BitField("cm3", 0x0, 1),
+        BitField("spare2", 0x0, 1),
+        BitField("lcsvaCap", 0x0, 1),
+        BitField("spare3", 0x0, 1),
+        BitField("soLsa", 0x0, 1),
+        BitField("cmsp", 0x0, 1),
+        BitField("a53", 0x0, 1),
+        BitField("a52", 0x0, 1)
+    ]
+
+
+# len max 14
+class MobileStationClassmark3(Packet):
+    """ Mobile Station Classmark 3 Section 10.5.1.7 """
+    # Classmark 3 is a CSN.1-encoded value (max 14 octets); it is modeled
+    # here only as opaque bytes.
+    name = "Mobile Station Classmark 3"
+    fields_desc = [
+        # FIXME: decode the CSN.1 structure instead of raw octets.
+        ByteField("ieiMSC3", 0x0),
+        ByteField("byte2", 0x0),
+        ByteField("byte3", 0x0),
+        ByteField("byte4", 0x0),
+        ByteField("byte5", 0x0),
+        ByteField("byte6", 0x0),
+        ByteField("byte7", 0x0),
+        ByteField("byte8", 0x0),
+        ByteField("byte9", 0x0),
+        ByteField("byte10", 0x0),
+        ByteField("byte11", 0x0),
+        ByteField("byte12", 0x0),
+        ByteField("byte13", 0x0),
+        ByteField("byte14", 0x0)
+    ]
+
+
+class SpareHalfOctets(Packet):
+    """ Spare Half Octet Section 10.5.1.8 """
+    name = "Spare Half Octet"
+    fields_desc = [
+        # "filler" pads the half-octet IE up to a full octet.
+        BitField("filler", None, 4),
+        BitField("spareHalfOctets", 0x0, 4)
+    ]
+
+
+class DescriptiveGroupOrBroadcastCallReferenceHdr(Packet):
+    """ Descriptive group or broadcast call reference Section 10.5.1.9 """
+    name = "Descriptive Group or Broadcast Call Reference"
+    fields_desc = [
+        BitField("eightBitDGOBCR", None, 1),
+        XBitField("ieiDGOBCR", None, 7),
+        # 27-bit binary call reference followed by SF/AF flags,
+        # call priority and ciphering information.
+        BitField("binCallRef", 0x0, 27),
+        BitField("sf", 0x0, 1),
+        BitField("fa", 0x0, 1),
+        BitField("callPrio", 0x0, 3),
+        BitField("cipherInfo", 0x0, 4),
+        BitField("spare1", 0x0, 1),
+        BitField("spare2", 0x0, 1),
+        BitField("spare3", 0x0, 1),
+        BitField("spare4", 0x0, 1)
+    ]
+
+
+class GroupCipherKeyNumber(Packet):
+    """ Group Cipher Key Number reference Section 10.5.1.10 """
+    name = "Group Cipher Key Number"
+    fields_desc = [
+        # Type 1 IE: 4-bit IEI plus 4-bit key number in one octet.
+        XBitField("ieiGCKN", None, 4),
+        BitField("groupCipher", 0x0, 4)
+    ]
+
+
+class PdAndSapiHdr(Packet):
+    """ PD and SAPI $(CCBS)$ Section 10.5.1.10a """
+    name = "PD and SAPI $(CCBS)$"
+    fields_desc = [
+        BitField("eightBitPAS", None, 1),
+        XBitField("ieiPAS", None, 7),
+        # SAPI (service access point) and protocol discriminator.
+        BitField("spare", 0x0, 1),
+        BitField("spare1", 0x0, 1),
+        BitField("sapi", 0x0, 2),
+        BitField("pd", 0x0, 4)
+    ]
+
+
+class PriorityLevelHdr(Packet):
+    """ Priority Level Section 10.5.1.11 """
+    name = "Priority Level"
+    fields_desc = [
+        # Type 1 IE: 4-bit IEI plus priority value in one octet.
+        XBitField("ieiPL", None, 4),
+        BitField("spare", 0x0, 1),
+        BitField("callPrio", 0x0, 3)
+    ]
+
+#
+# Radio Resource management information elements
+#
+
+
+# len 6 to max for L3 message (251)
+class BaRangeHdr(Packet):
+ """ BA Range Section 10.5.2.1a """
+ name = "BA Range"
+ fields_desc = [
+ BitField("eightBitBR", None, 1),
+ XBitField("ieiBR", None, 7),
+
+ XByteField("lengthBR", None),
+#error: byte format requires -128 <= number <= 127
+ ByteField("nrOfRanges", 0x0),
+# # rX = range X
+# # L o = Lower H i = higher
+# # H p = high Part Lp = low Part
+ ByteField("r1LoHp", 0x0),
+
+ BitField("r1LoLp", 0x0, 3),
+ BitField("r1HiHp", 0x0, 5),
+
+ BitField("r1HiLp", 0x0, 4),
+ BitField("r2LoHp", 0x0, 4),
+ # optional
+ BitField("r2LoLp", None, 5),
+ BitField("r2HiHp", None, 3),
+
+ ByteField("r2HiLp", None),
+ ByteField("r3LoHp", None),
+
+ BitField("r3LoLp", None, 5),
+ BitField("r3HiHp", None, 3),
+
+ ByteField("r3HiLp", None),
+ ByteField("r4LoHp", None),
+
+ BitField("r4LoLp", None, 5),
+ BitField("r4HiHp", None, 3),
+ ByteField("r4HiLp", None),
+ ByteField("r5LoHp", None),
+
+ BitField("r5LoLp", None, 5),
+ BitField("r5HiHp", None, 3),
+ ByteField("r5HiLp", None),
+ ByteField("r6LoHp", None),
+
+ BitField("r6LoLp", None, 5),
+ BitField("r6HiHp", None, 3),
+ ByteField("r6HiLp", None),
+ ByteField("r7LoHp", None),
+
+ BitField("r7LoLp", None, 5),
+ BitField("r7HiHp", None, 3),
+ ByteField("r7HiLp", None),
+ ByteField("r8LoHp", None),
+
+ BitField("r8LoLp", None, 5),
+ BitField("r8HiHp", None, 3),
+ ByteField("r8HiLp", None),
+ ByteField("r9LoHp", None),
+
+ BitField("r9LoLp", None, 5),
+ BitField("r9HiHp", None, 3),
+ ByteField("r9HiLp", None),
+ ByteField("r10LoHp", None),
+
+ BitField("r10LoLp", None, 5),
+ BitField("r10HiHp", None, 3),
+ ByteField("r10HiLp", None),
+ ByteField("r11LoHp", None),
+
+ BitField("r11LoLp", None, 5),
+ BitField("r11HiHp", None, 3),
+ ByteField("r11HiLp", None),
+ ByteField("r12LoHp", None),
+
+ BitField("r12LoLp", None, 5),
+ BitField("r12HiHp", None, 3),
+ ByteField("r12HiLp", None),
+ ByteField("r13LoHp", None),
+
+ BitField("r13LoLp", None, 5),
+ BitField("r13HiHp", None, 3),
+ ByteField("r13HiLp", None),
+ ByteField("r14LoHp", None),
+
+ BitField("r14LoLp", None, 5),
+ BitField("r14HiHp", None, 3),
+ ByteField("r14HiLp", None),
+ ByteField("r15LoHp", None),
+
+ BitField("r15LoLp", None, 5),
+ BitField("r15HiHp", None, 3),
+ ByteField("r15HiLp", None),
+ ByteField("r16LoHp", None),
+
+ BitField("r16LoLp", None, 5),
+ BitField("r16HiHp", None, 3),
+ ByteField("r16HiLp", None),
+ ByteField("r17LoHp", None),
+
+ BitField("r17LoLp", None, 5),
+ BitField("r17HiHp", None, 3),
+ ByteField("r17HiLp", None),
+ ByteField("r18LoHp", None),
+
+ BitField("r18LoLp", None, 5),
+ BitField("r18HiHp", None, 3),
+ ByteField("r18HiLp", None),
+ ByteField("r19LoHp", None),
+
+ BitField("r19LoLp", None, 5),
+ BitField("r19HiHp", None, 3),
+ ByteField("r19HiLp", None),
+ ByteField("r20LoHp", None),
+
+ BitField("r20LoLp", None, 5),
+ BitField("r20HiHp", None, 3),
+ ByteField("r20HiLp", None),
+ ByteField("r21LoHp", None),
+
+ BitField("r21LoLp", None, 5),
+ BitField("r21HiHp", None, 3),
+ ByteField("r21HiLp", None),
+ ByteField("r22LoHp", None),
+
+ BitField("r22LoLp", None, 5),
+ BitField("r22HiHp", None, 3),
+ ByteField("r22HiLp", None),
+ ByteField("r23LoHp", None),
+
+ BitField("r23LoLp", None, 5),
+ BitField("r23HiHp", None, 3),
+ ByteField("r23HiLp", None),
+ ByteField("r24LoHp", None),
+
+ BitField("r24LoLp", None, 5),
+ BitField("r24HiHp", None, 3),
+ ByteField("r24HiLp", None),
+ ByteField("r25LoHp", None),
+
+ BitField("r25LoLp", None, 5),
+ BitField("r25HiHp", None, 3),
+ ByteField("r25HiLp", None),
+ ByteField("r26LoHp", None),
+
+ BitField("r26LoLp", None, 5),
+ BitField("r26HiHp", None, 3),
+ ByteField("r26HiLp", None),
+ ByteField("r27LoHp", None),
+
+ BitField("r27LoLp", None, 5),
+ BitField("r27HiHp", None, 3),
+ ByteField("r27HiLp", None),
+ ByteField("r28LoHp", None),
+
+ BitField("r28LoLp", None, 5),
+ BitField("r28HiHp", None, 3),
+ ByteField("r28HiLp", None),
+ ByteField("r29LoHp", None),
+
+ BitField("r29LoLp", None, 5),
+ BitField("r29HiHp", None, 3),
+ ByteField("r29HiLp", None),
+ ByteField("r30LoHp", None),
+
+ BitField("r30LoLp", None, 5),
+ BitField("r30HiHp", None, 3),
+ ByteField("r30HiLp", None),
+ ByteField("r31LoHp", None),
+
+ BitField("r31LoLp", None, 5),
+ BitField("r31HiHp", None, 3),
+ ByteField("r31HiLp", None),
+ ByteField("r32LoHp", None),
+
+ BitField("r32LoLp", None, 5),
+ BitField("r32HiHp", None, 3),
+ ByteField("r32HiLp", None),
+ ByteField("r33LoHp", None),
+
+ BitField("r33LoLp", None, 5),
+ BitField("r33HiHp", None, 3),
+ ByteField("r33HiLp", None),
+ ByteField("r34LoHp", None),
+
+ BitField("r34LoLp", None, 5),
+ BitField("r34HiHp", None, 3),
+ ByteField("r34HiLp", None),
+ ByteField("r35LoHp", None),
+
+ BitField("r35LoLp", None, 5),
+ BitField("r35HiHp", None, 3),
+ ByteField("r35HiLp", None),
+ ByteField("r36LoHp", None),
+
+ BitField("r36LoLp", None, 5),
+ BitField("r36HiHp", None, 3),
+ ByteField("r36HiLp", None),
+ ByteField("r37LoHp", None),
+
+ BitField("r37LoLp", None, 5),
+ BitField("r37HiHp", None, 3),
+ ByteField("r37HiLp", None),
+ ByteField("r38LoHp", None),
+
+ BitField("r38LoLp", None, 5),
+ BitField("r38HiHp", None, 3),
+ ByteField("r38HiLp", None),
+ ByteField("r39LoHp", None),
+
+ BitField("r39LoLp", None, 5),
+ BitField("r39HiHp", None, 3),
+ ByteField("r39HiLp", None),
+ ByteField("r40LoHp", None),
+
+ BitField("r40LoLp", None, 5),
+ BitField("r40HiHp", None, 3),
+ ByteField("r40HiLp", None),
+ ByteField("r41LoHp", None),
+
+ BitField("r41LoLp", None, 5),
+ BitField("r41HiHp", None, 3),
+ ByteField("r41HiLp", None),
+ ByteField("r42LoHp", None),
+
+ BitField("r42LoLp", None, 5),
+ BitField("r42HiHp", None, 3),
+ ByteField("r42HiLp", None),
+ ByteField("r43LoHp", None),
+
+ BitField("r43LoLp", None, 5),
+ BitField("r43HiHp", None, 3),
+ ByteField("r43HiLp", None),
+ ByteField("r44LoHp", None),
+
+ BitField("r44LoLp", None, 5),
+ BitField("r44HiHp", None, 3),
+ ByteField("r44HiLp", None),
+ ByteField("r45LoHp", None),
+
+ BitField("r45LoLp", None, 5),
+ BitField("r45HiHp", None, 3),
+ ByteField("r45HiLp", None),
+ ByteField("r46LoHp", None),
+
+ BitField("r46LoLp", None, 5),
+ BitField("r46HiHp", None, 3),
+ ByteField("r46HiLp", None),
+ ByteField("r47LoHp", None),
+
+ BitField("r47LoLp", None, 5),
+ BitField("r47HiHp", None, 3),
+ ByteField("r47HiLp", None),
+ ByteField("r48LoHp", None),
+
+ BitField("r48LoLp", None, 5),
+ BitField("r48HiHp", None, 3),
+ ByteField("r48HiLp", None),
+ ByteField("r49LoHp", None),
+
+ BitField("r49LoLp", None, 5),
+ BitField("r49HiHp", None, 3),
+ ByteField("r49HiLp", None),
+ ByteField("r50LoHp", None),
+
+ BitField("r50LoLp", None, 5),
+ BitField("r50HiHp", None, 3),
+ ByteField("r50HiLp", None),
+ ByteField("r51LoHp", None),
+
+ BitField("r51LoLp", None, 5),
+ BitField("r51HiHp", None, 3),
+ ByteField("r51HiLp", None),
+ ByteField("r52LoHp", None),
+
+ BitField("r52LoLp", None, 5),
+ BitField("r52HiHp", None, 3),
+ ByteField("r52HiLp", None),
+ ByteField("r53LoHp", None),
+
+ BitField("r53LoLp", None, 5),
+ BitField("r53HiHp", None, 3),
+ ByteField("r53HiLp", None),
+ ByteField("r54LoHp", None),
+
+ BitField("r54LoLp", None, 5),
+ BitField("r54HiHp", None, 3),
+ ByteField("r54HiLp", None),
+ ByteField("r55LoHp", None),
+
+ BitField("r55LoLp", None, 5),
+ BitField("r55HiHp", None, 3),
+ ByteField("r55HiLp", None),
+ ByteField("r56LoHp", None),
+
+ BitField("r56LoLp", None, 5),
+ BitField("r56HiHp", None, 3),
+ ByteField("r56HiLp", None),
+ ByteField("r57LoHp", None),
+
+ BitField("r57LoLp", None, 5),
+ BitField("r57HiHp", None, 3),
+ ByteField("r57HiLp", None),
+ ByteField("r58LoHp", None),
+
+ BitField("r58LoLp", None, 5),
+ BitField("r58HiHp", None, 3),
+ ByteField("r58HiLp", None),
+ ByteField("r59LoHp", None),
+
+ BitField("r59LoLp", None, 5),
+ BitField("r59HiHp", None, 3),
+ ByteField("r59HiLp", None),
+ ByteField("r60LoHp", None),
+
+ BitField("r60LoLp", None, 5),
+ BitField("r60HiHp", None, 3),
+ ByteField("r60HiLp", None),
+ ByteField("r61LoHp", None),
+
+ BitField("r61LoLp", None, 5),
+ BitField("r61HiHp", None, 3),
+ ByteField("r61HiLp", None),
+ ByteField("r62LoHp", None),
+
+ BitField("r62LoLp", None, 5),
+ BitField("r62HiHp", None, 3),
+ ByteField("r62HiLp", None),
+ ByteField("r63LoHp", None),
+
+ BitField("r63LoLp", None, 5),
+ BitField("r63HiHp", None, 3),
+ ByteField("r63HiLp", None),
+ ByteField("r64LoHp", None),
+
+ BitField("r64LoLp", None, 5),
+ BitField("r64HiHp", None, 3),
+ ByteField("r64HiLp", None),
+ ByteField("r65LoHp", None),
+
+ BitField("r65LoLp", None, 5),
+ BitField("r65HiHp", None, 3),
+ ByteField("r65HiLp", None),
+ ByteField("r66LoHp", None),
+
+ BitField("r66LoLp", None, 5),
+ BitField("r66HiHp", None, 3),
+ ByteField("r66HiLp", None),
+ ByteField("r67LoHp", None),
+
+ BitField("r67LoLp", None, 5),
+ BitField("r67HiHp", None, 3),
+ ByteField("r67HiLp", None),
+ ByteField("r68LoHp", None),
+
+ BitField("r68LoLp", None, 5),
+ BitField("r68HiHp", None, 3),
+ ByteField("r68HiLp", None),
+ ByteField("r69LoHp", None),
+
+ BitField("r69LoLp", None, 5),
+ BitField("r69HiHp", None, 3),
+ ByteField("r69HiLp", None),
+ ByteField("r70LoHp", None),
+
+ BitField("r70LoLp", None, 5),
+ BitField("r70HiHp", None, 3),
+ ByteField("r70HiLp", None),
+ ByteField("r71LoHp", None),
+
+ BitField("r71LoLp", None, 5),
+ BitField("r71HiHp", None, 3),
+ ByteField("r71HiLp", None),
+ ByteField("r72LoHp", None),
+
+ BitField("r72LoLp", None, 5),
+ BitField("r72HiHp", None, 3),
+ ByteField("r72HiLp", None),
+ ByteField("r73LoHp", None),
+
+ BitField("r73LoLp", None, 5),
+ BitField("r73HiHp", None, 3),
+ ByteField("r73HiLp", None),
+ ByteField("r74LoHp", None),
+
+ BitField("r74LoLp", None, 5),
+ BitField("r74HiHp", None, 3),
+ ByteField("r74HiLp", None),
+ ByteField("r75LoHp", None),
+
+ BitField("r75LoLp", None, 5),
+ BitField("r75HiHp", None, 3),
+ ByteField("r75HiLp", None),
+ ByteField("r76LoHp", None),
+
+ BitField("r76LoLp", None, 5),
+ BitField("r76HiHp", None, 3),
+ ByteField("r76HiLp", None),
+ ByteField("r77LoHp", None),
+
+ BitField("r77LoLp", None, 5),
+ BitField("r77HiHp", None, 3),
+ ByteField("r77HiLp", None),
+ ByteField("r78LoHp", None),
+
+ BitField("r78LoLp", None, 5),
+ BitField("r78HiHp", None, 3),
+ ByteField("r78HiLp", None),
+ ByteField("r79LoHp", None),
+
+ BitField("r79LoLp", None, 5),
+ BitField("r79HiHp", None, 3),
+ ByteField("r79HiLp", None),
+ ByteField("r80LoHp", None),
+
+ BitField("r80LoLp", None, 5),
+ BitField("r80HiHp", None, 3),
+ ByteField("r80HiLp", None),
+ ByteField("r81LoHp", None),
+
+ BitField("r81LoLp", None, 5),
+ BitField("r81HiHp", None, 3),
+ ByteField("r81HiLp", None),
+ ByteField("r82LoHp", None),
+
+ BitField("r82LoLp", None, 5),
+ BitField("r82HiHp", None, 3),
+ ByteField("r82HiLp", None),
+ ByteField("r83LoHp", None),
+
+ BitField("r83LoLp", None, 5),
+ BitField("r83HiHp", None, 3),
+ ByteField("r83HiLp", None),
+ ByteField("r84LoHp", None),
+
+ BitField("r84LoLp", None, 5),
+ BitField("r84HiHp", None, 3),
+ ByteField("r84HiLp", None)
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ print("i is %s" % (i,))
+ aList.append(self.fields_desc[i].name)
+ print("aList %s" % (len(aList)))
+ print("self.fields_desc %s" % (len(self.fields_desc)))
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(6, 251, a, self.fields_desc)
+ if self.lengthBR is None:
+ p = p[:1] + struct.pack(">B", res[1]) + p[2:]
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ return p + pay
+
+
+# len 3 to max for L3 message (251)
+class BaListPrefHdr(Packet):
+    """ BA List Pref Section 10.5.2.1c """
+    # Type 4 IE, 3 octets to max L3 length; only a single fixed-size
+    # range/frequency entry is modeled here.
+    name = "BA List Pref"
+    fields_desc = [
+        # FIXME dynamic
+        BitField("eightBitBLP", None, 1),
+        XBitField("ieiBLP", None, 7),
+
+        XByteField("lengthBLP", None),
+
+        BitField("fixBit", 0x0, 1),
+        BitField("rangeLower", 0x0, 10),
+        BitField("fixBit2", 0x0, 1),
+        BitField("rangeUpper", 0x0, 10),
+        BitField("baFreq", 0x0, 10),
+        BitField("sparePad", 0x0, 8)
+    ]
+
+
+# len 17 || Have a look at the specs for the field format
+# Bit map 0 format
+# Range 1024 format
+# Range 512 format
+# Range 256 format
+# Range 128 format
+# Variable bit map format
+class CellChannelDescriptionHdr(Packet):
+    """ Cell Channel Description Section 10.5.2.1b """
+    # Fixed length of 17 octets; modeled here as the bit map 0 format
+    # (one bit per ARFCN). Other formats (Range 1024/512/256/128,
+    # variable bit map) share the same octet span.
+    name = "Cell Channel Description "
+    fields_desc = [
+        BitField("eightBitCCD", None, 1),
+        XBitField("ieiCCD", None, 7),
+        BitField("bit128", 0x0, 1),
+        BitField("bit127", 0x0, 1),
+        BitField("spare1", 0x0, 1),
+        BitField("spare2", 0x0, 1),
+        BitField("bit124", 0x0, 1),
+        BitField("bit123", 0x0, 1),
+        BitField("bit122", 0x0, 1),
+        BitField("bit121", 0x0, 1),
+        # Each ByteField covers the eight ARFCNs down from its number.
+        ByteField("bit120", 0x0),
+        ByteField("bit112", 0x0),
+        ByteField("bit104", 0x0),
+        ByteField("bit96", 0x0),
+        ByteField("bit88", 0x0),
+        ByteField("bit80", 0x0),
+        ByteField("bit72", 0x0),
+        ByteField("bit64", 0x0),
+        ByteField("bit56", 0x0),
+        ByteField("bit48", 0x0),
+        ByteField("bit40", 0x0),
+        ByteField("bit32", 0x0),
+        ByteField("bit24", 0x0),
+        ByteField("bit16", 0x0),
+        ByteField("bit8", 0x0)
+    ]
+
+
+class CellDescriptionHdr(Packet):
+    """ Cell Description Section 10.5.2.2 """
+    name = "Cell Description"
+    fields_desc = [
+        BitField("eightBitCD", None, 1),
+        XBitField("ieiCD", None, 7),
+        # BCCH ARFCN split into high (2 bits) and low (8 bits) parts,
+        # plus NCC/BCC of the base station identity code.
+        BitField("bcchHigh", 0x0, 2),
+        BitField("ncc", 0x0, 3),
+        BitField("bcc", 0x0, 3),
+        ByteField("bcchLow", 0x0)
+    ]
+
+
+class CellOptionsBCCHHdr(Packet):
+    """ Cell Options (BCCH) Section 10.5.2.3 """
+    name = "Cell Options (BCCH)"
+    fields_desc = [
+        BitField("eightBitCOB", None, 1),
+        XBitField("ieiCOB", None, 7),
+        # Power control, DTX indicator and radio link timeout.
+        BitField("spare", 0x0, 1),
+        BitField("pwrc", 0x0, 1),
+        BitField("dtx", 0x0, 2),
+        BitField("rLinkTout", 0x0, 4)
+    ]
+
+
+class CellOptionsSACCHHdr(Packet):
+    """ Cell Options (SACCH) Section 10.5.2.3a """
+    name = "Cell Options (SACCH)"
+    fields_desc = [
+        BitField("eightBitCOS", None, 1),
+        XBitField("ieiCOS", None, 7),
+        # NOTE(review): the field name "dtx" appears twice below; in the
+        # SACCH variant the DTX indicator is split across two bit
+        # positions, but duplicate scapy field names shadow each other —
+        # verify against TS 04.08 10.5.2.3a before relying on either.
+        BitField("dtx", 0x0, 1),
+        BitField("pwrc", 0x0, 1),
+        BitField("dtx", 0x0, 1),
+        BitField("rLinkTout", 0x0, 4)
+    ]
+
+
+class CellSelectionParametersHdr(Packet):
+    """ Cell Selection Parameters Section 10.5.2.4 """
+    name = "Cell Selection Parameters"
+    fields_desc = [
+        BitField("eightBitCSP", None, 1),
+        XBitField("ieiCSP", None, 7),
+        # Cell reselect hysteresis and maximum TX power level.
+        BitField("cellReselect", 0x0, 3),
+        BitField("msTxPwrMax", 0x0, 5),
+        BitField("acs", None, 1),
+        BitField("neci", None, 1),
+        BitField("rxlenAccMin", None, 6)
+    ]
+
+
+class MacModeAndChannelCodingRequestHdr(Packet):
+    """ MAC Mode and Channel Coding Requested Section 10.5.2.4a """
+    name = "MAC Mode and Channel Coding Requested"
+    fields_desc = [
+        # Type 1 IE: 4-bit IEI, MAC mode and coding scheme in one octet.
+        XBitField("ieiMMACCR", None, 4),
+        BitField("macMode", 0x0, 2),
+        BitField("cs", 0x0, 2)
+    ]
+
+
+class ChannelDescriptionHdr(Packet):
+    """ Channel Description Section 10.5.2.5 """
+    name = "Channel Description"
+    fields_desc = [
+        BitField("eightBitCD", None, 1),
+        XBitField("ieiCD", None, 7),
+
+        BitField("channelTyp", 0x0, 5),
+        BitField("tn", 0x0, 3),
+
+        BitField("tsc", 0x0, 3),
+        # h selects the last-octet layout; only the hopping (h=1)
+        # variant with MAIO/HSN is modeled here.
+        BitField("h", 0x1, 1),
+        # if h=1 maybe we find a better solution here...
+        BitField("maioHi", 0x0, 4),
+
+        BitField("maioLo", 0x0, 2),
+        BitField("hsn", 0x0, 6)
+        # The non-hopping (h=0) alternative would be:
+        #BitField("spare", 0x0, 2),
+        #BitField("arfcnHigh", 0x0, 2),
+        #ByteField("arfcnLow", 0x0)
+    ]
+
+
+class ChannelDescription2Hdr(Packet):
+    """ Channel Description 2 Section 10.5.2.5a """
+    name = "Channel Description 2"
+    fields_desc = [
+        BitField("eightBitCD2", None, 1),
+        XBitField("ieiCD2", None, 7),
+        BitField("channelTyp", 0x0, 5),
+        BitField("tn", 0x0, 3),
+        BitField("tsc", 0x0, 3),
+        # Only the non-hopping (h=0) variant with ARFCN is modeled here.
+        BitField("h", 0x0, 1),
+        # if h=1 the alternative layout would be:
+        # BitField("maioHi", 0x0, 4),
+        # BitField("maioLo", 0x0, 2),
+        # BitField("hsn", 0x0, 6)
+        BitField("spare", 0x0, 2),
+        BitField("arfcnHigh", 0x0, 2),
+        ByteField("arfcnLow", 0x0)
+    ]
+
+
+class ChannelModeHdr(Packet):
+    """ Channel Mode Section 10.5.2.6 """
+    name = "Channel Mode"
+    fields_desc = [
+        BitField("eightBitCM", None, 1),
+        XBitField("ieiCM", None, 7),
+        # One-octet channel mode value.
+        ByteField("mode", 0x0)
+    ]
+
+
+class ChannelMode2Hdr(Packet):
+    """ Channel Mode 2 Section 10.5.2.7 """
+    name = "Channel Mode 2"
+    fields_desc = [
+        BitField("eightBitCM2", None, 1),
+        XBitField("ieiCM2", None, 7),
+        # One-octet channel mode value.
+        ByteField("mode", 0x0)
+    ]
+
+
+class ChannelNeededHdr(Packet):
+    """ Channel Needed Section 10.5.2.8 """
+    name = "Channel Needed"
+    fields_desc = [
+        # Type 1 IE: 4-bit IEI plus two 2-bit channel indications.
+        XBitField("ieiCN", None, 4),
+        BitField("channel2", 0x0, 2),
+        BitField("channel1", 0x0, 2),
+    ]
+
+
+class ChannelRequestDescriptionHdr(Packet):
+    """Channel Request Description Section 10.5.2.8a """
+    name = "Channel Request Description"
+    fields_desc = [
+        BitField("eightBitCRD", None, 1),
+        XBitField("ieiCRD", None, 7),
+        # mt selects one of two mutually exclusive layouts; the two
+        # "spare" fields below can never be present at the same time.
+        BitField("mt", 0x0, 1),
+        ConditionalField(BitField("spare", 0x0, 39),
+                         lambda pkt: pkt.mt == 0),
+        ConditionalField(BitField("spare", 0x0, 3),
+                         lambda pkt: pkt.mt == 1),
+        ConditionalField(BitField("priority", 0x0, 2),
+                         lambda pkt: pkt.mt == 1),
+        ConditionalField(BitField("rlcMode", 0x0, 1),
+                         lambda pkt: pkt.mt == 1),
+        ConditionalField(BitField("llcFrame", 0x1, 1),
+                         lambda pkt: pkt.mt == 1),
+        ConditionalField(ByteField("reqBandMsb", 0x0),
+                         lambda pkt: pkt.mt == 1),
+        ConditionalField(ByteField("reqBandLsb", 0x0),
+                         lambda pkt: pkt.mt == 1),
+        ConditionalField(ByteField("rlcMsb", 0x0),
+                         lambda pkt: pkt.mt == 1),
+        ConditionalField(ByteField("rlcLsb", 0x0),
+                         lambda pkt: pkt.mt == 1)
+    ]
+
+
+class CipherModeSettingHdr(Packet):
+    """Cipher Mode Setting Section 10.5.2.9 """
+    name = "Cipher Mode Setting"
+    fields_desc = [
+        # Type 1 IE: algorithm identifier and start-ciphering flag.
+        XBitField("ieiCMS", None, 4),
+        BitField("algoId", 0x0, 3),
+        BitField("sc", 0x0, 1),
+    ]
+
+
+class CipherResponseHdr(Packet):
+    """Cipher Response Section 10.5.2.10 """
+    name = "Cipher Response"
+    fields_desc = [
+        # Type 1 IE: 4-bit IEI plus the cipher-response bit.
+        XBitField("ieiCR", None, 4),
+        BitField("spare", 0x0, 3),
+        BitField("cr", 0x0, 1),
+    ]
+
+
+# This packet fixes the problem with the 1/2 Byte length. Concatenation
+# of cipherModeSetting and cipherResponse
+class CipherModeSettingAndcipherResponse(Packet):
+    # Concatenation of CipherModeSetting and CipherResponse so the
+    # combined contents end on a whole-octet boundary (1/2-octet fix).
+    name = "Cipher Mode Setting And Cipher Response"
+    fields_desc = [
+        BitField("algoId", 0x0, 3),
+        BitField("sc", 0x0, 1),
+        BitField("spare", 0x0, 3),
+        BitField("cr", 0x0, 1)
+    ]
+
+
+class ControlChannelDescriptionHdr(Packet):
+    """Control Channel Description Section 10.5.2.11 """
+    name = "Control Channel Description"
+    fields_desc = [
+        BitField("eightBitCCD", None, 1),
+        XBitField("ieiCCD", None, 7),
+
+        # NOTE(review): "spare" is declared twice in this class (here and
+        # below); duplicate scapy field names shadow each other — the
+        # second occurrence should probably be renamed.
+        BitField("spare", 0x0, 1),
+        BitField("att", 0x0, 1),
+        BitField("bsAgBlksRes", 0x0, 3),
+        BitField("ccchConf", 0x0, 3),
+
+        BitField("spare", 0x0, 1),
+        BitField("spare1", 0x0, 1),
+        BitField("spare2", 0x0, 1),
+        BitField("spare3", 0x0, 1),
+        BitField("spare4", 0x0, 1),
+        BitField("bsPaMfrms", 0x0, 3),
+
+        # Periodic location update timer, in decihours.
+        ByteField("t3212", 0x0)
+    ]
+
+
+class FrequencyChannelSequenceHdr(Packet):
+    """Frequency Channel Sequence Section 10.5.2.12"""
+    name = "Frequency Channel Sequence"
+    fields_desc = [
+        BitField("eightBitFCS", None, 1),
+        XBitField("ieiFCS", None, 7),
+        # Lowest ARFCN followed by sixteen 4-bit increments.
+        BitField("spare", 0x0, 1),
+        BitField("lowestArfcn", 0x0, 7),
+        BitField("skipArfcn01", 0x0, 4),
+        BitField("skipArfcn02", 0x0, 4),
+        BitField("skipArfcn03", 0x0, 4),
+        BitField("skipArfcn04", 0x0, 4),
+        BitField("skipArfcn05", 0x0, 4),
+        BitField("skipArfcn06", 0x0, 4),
+        BitField("skipArfcn07", 0x0, 4),
+        BitField("skipArfcn08", 0x0, 4),
+        BitField("skipArfcn09", 0x0, 4),
+        BitField("skipArfcn10", 0x0, 4),
+        BitField("skipArfcn11", 0x0, 4),
+        BitField("skipArfcn12", 0x0, 4),
+        BitField("skipArfcn13", 0x0, 4),
+        BitField("skipArfcn14", 0x0, 4),
+        BitField("skipArfcn15", 0x0, 4),
+        BitField("skipArfcn16", 0x0, 4)
+    ]
+
+
+class FrequencyListHdr(Packet):
+    """Frequency List Section 10.5.2.13"""
+    name = "Frequency List"
+    # Problem:
+    # There are several formats for the Frequency List information
+    # element, distinguished by the "format indicator" subfield.
+    # Some formats are frequency bit maps, the others use a special encoding
+    # scheme. Only the bit map 0 format is modeled below.
+    fields_desc = [
+        BitField("eightBitFL", None, 1),
+        XBitField("ieiFL", None, 7),
+        XByteField("lengthFL", None),
+
+        BitField("formatID", 0x0, 2),
+        BitField("spare", 0x0, 2),
+        BitField("arfcn124", 0x0, 1),
+        BitField("arfcn123", 0x0, 1),
+        BitField("arfcn122", 0x0, 1),
+        BitField("arfcn121", 0x0, 1),
+
+        # Each ByteField covers the eight ARFCNs down from its number.
+        ByteField("arfcn120", 0x0),
+        ByteField("arfcn112", 0x0),
+        ByteField("arfcn104", 0x0),
+        ByteField("arfcn96", 0x0),
+        ByteField("arfcn88", 0x0),
+        ByteField("arfcn80", 0x0),
+        ByteField("arfcn72", 0x0),
+        ByteField("arfcn64", 0x0),
+        ByteField("arfcn56", 0x0),
+        ByteField("arfcn48", 0x0),
+        ByteField("arfcn40", 0x0),
+        ByteField("arfcn32", 0x0),
+        ByteField("arfcn24", 0x0),
+        ByteField("arfcn16", 0x0),
+        ByteField("arfcn8", 0x0)
+    ]
+
+
+class FrequencyShortListHdr(Packet):
+    """Frequency Short List Section 10.5.2.14"""
+    name = "Frequency Short List"
+    # Fixed length of 10 octets.
+    # This element is encoded exactly as the Frequency List information
+    # element, except that it has a fixed length instead of a variable
+    # length and does not contain a length indicator and that it shall
+    # not be encoded in bitmap 0 format. Modeled as opaque octets.
+    fields_desc = [
+        ByteField("ieiFSL", 0x0),
+        ByteField("byte2", 0x0),
+        ByteField("byte3", 0x0),
+        ByteField("byte4", 0x0),
+        ByteField("byte5", 0x0),
+        ByteField("byte6", 0x0),
+        ByteField("byte7", 0x0),
+        ByteField("byte8", 0x0),
+        ByteField("byte9", 0x0),
+        ByteField("byte10", 0x0)
+    ]
+
+
+class FrequencyShortListHdr2(Packet):
+    """Frequency Short List2 Section 10.5.2.14a"""
+    # Fixed 8-octet variant, modeled as opaque octets.
+    name = "Frequency Short List 2"
+    fields_desc = [
+        ByteField("byte1", 0x0),
+        ByteField("byte2", 0x0),
+        ByteField("byte3", 0x0),
+        ByteField("byte4", 0x0),
+        ByteField("byte5", 0x0),
+        ByteField("byte6", 0x0),
+        ByteField("byte7", 0x0),
+        ByteField("byte8", 0x0)
+    ]
+
+
+# len 4 to 13
+class GroupChannelDescriptionHdr(Packet):
+ """Group Channel Description Section 10.5.2.14b"""
+ name = "Group Channel Description"
+ fields_desc = [
+ BitField("eightBitGCD", None, 1),
+ XBitField("ieiGCD", None, 7),
+
+ XByteField("lengthGCD", None),
+
+ BitField("channelType", 0x0, 5),
+ BitField("tn", 0x0, 3),
+
+ BitField("tsc", 0x0, 3),
+ BitField("h", 0x0, 1),
+ # if h == 0 the packet looks the following way:
+ ConditionalField(BitField("spare", 0x0, 2),
+ lambda pkt: pkt. h == 0x0),
+ ConditionalField(BitField("arfcnHi", 0x0, 2),
+ lambda pkt: pkt. h == 0x0),
+ ConditionalField(ByteField("arfcnLo", None),
+ lambda pkt: pkt. h == 0x0),
+ # if h == 1 the packet looks the following way:
+ ConditionalField(BitField("maioHi", 0x0, 4),
+ lambda pkt: pkt. h == 0x1),
+ ConditionalField(BitField("maioLo", None, 2),
+ lambda pkt: pkt. h == 0x1),
+ ConditionalField(BitField("hsn", None, 6),
+ lambda pkt: pkt. h == 0x1),
+ # finished with conditional fields
+ ByteField("maC6", None),
+ ByteField("maC7", None),
+ ByteField("maC8", None),
+ ByteField("maC9", None),
+ ByteField("maC10", None),
+ ByteField("maC11", None),
+ ByteField("maC12", None),
+ ByteField("maC13", None),
+ ByteField("maC14", None)
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(4, 13, a, self.fields_desc)
+ if self.lengthGCD is None:
+ p = p[:1] + struct.pack(">B", res[1]) + p[2:]
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ return p + pay
+
+
+class GprsResumptionHdr(Packet):
+    """GPRS Resumption Section 10.5.2.14c"""
+    name = "GPRS Resumption"
+    fields_desc = [
+        # Type 1 IE: 4-bit IEI plus the resumption ack bit.
+        XBitField("ieiGR", None, 4),
+        BitField("spare", 0x0, 3),
+        BitField("ack", 0x0, 1)
+    ]
+
+
+class HandoverReferenceHdr(Packet):
+    """Handover Reference Section 10.5.2.15"""
+    name = "Handover Reference"
+    fields_desc = [
+        BitField("eightBitHR", None, 1),
+        XBitField("ieiHR", None, 7),
+        # One-octet handover reference value.
+        ByteField("handoverRef", 0x0)
+    ]
+
+
+# len 1-12
+class IaRestOctets(Packet):
+ """IA Rest Octets Section 10.5.2.16"""
+ name = "IA Rest Octets"
+ fields_desc = [
+ ByteField("ieiIRO", 0x0),
+ # FIXME brainfuck packet
+ XByteField("lengthIRO", None),
+ ByteField("byte2", None),
+ ByteField("byte3", None),
+ ByteField("byte4", None),
+ ByteField("byte5", None),
+ ByteField("byte6", None),
+ ByteField("byte7", None),
+ ByteField("byte8", None),
+ ByteField("byte9", None),
+ ByteField("byte10", None),
+ ByteField("byte11", None)
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(1, 12, a, self.fields_desc)
+ if self.lengthIRO is None:
+ if res[1] < 0: # FIXME better fix
+ res[1] = 0
+ p = p[:1] + struct.pack(">B", res[1]) + p[2:]
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ return p + pay
+
+
+class IraRestOctetsHdr(Packet):
+    """IAR Rest Octets Section 10.5.2.17"""
+    name = "IAR Rest Octets"
+    fields_desc = [
+        BitField("eightBitIRO", None, 1),
+        XBitField("ieiIRO", None, 7),
+        # The spare bits below default to the repeating rest-octet
+        # padding pattern 0b00101011 (0x2B).
+        BitField("spare01", 0x0, 1),
+        BitField("spare02", 0x0, 1),
+        BitField("spare03", 0x1, 1),
+        BitField("spare04", 0x0, 1),
+        BitField("spare05", 0x1, 1),
+        BitField("spare06", 0x0, 1),
+        BitField("spare07", 0x1, 1),
+        BitField("spare08", 0x1, 1),
+        BitField("spare09", 0x0, 1),
+        BitField("spare10", 0x0, 1),
+        BitField("spare11", 0x1, 1),
+        BitField("spare12", 0x0, 1),
+        BitField("spare13", 0x1, 1),
+        BitField("spare14", 0x0, 1),
+        BitField("spare15", 0x1, 1),
+        BitField("spare16", 0x1, 1),
+        BitField("spare17", 0x0, 1),
+        BitField("spare18", 0x0, 1),
+        BitField("spare19", 0x1, 1),
+        BitField("spare20", 0x0, 1),
+        BitField("spare21", 0x1, 1),
+        BitField("spare22", 0x0, 1),
+        BitField("spare23", 0x1, 1),
+        BitField("spare24", 0x1, 1)
+    ]
+
+
+# len is 1 to 5 what do we do with the variable size? no lenght
+# field?! WTF
+class IaxRestOctetsHdr(Packet):
+    """IAX Rest Octets Section 10.5.2.18"""
+    # Variable length 1 to 5 octets with no length indicator; the first
+    # octet defaults to the 0x2B rest-octet padding pattern and the
+    # remaining octets are optional.
+    name = "IAX Rest Octets"
+    fields_desc = [
+        BitField("eightBitIRO", None, 1),
+        XBitField("ieiIRO", None, 7),
+        BitField("spare01", 0x0, 1),
+        BitField("spare02", 0x0, 1),
+        BitField("spare03", 0x1, 1),
+        BitField("spare04", 0x0, 1),
+        BitField("spare05", 0x1, 1),
+        BitField("spare06", 0x0, 1),
+        BitField("spare07", 0x1, 1),
+        BitField("spare08", 0x1, 1),
+        ByteField("spareB1", None),
+        ByteField("spareB2", None),
+        ByteField("spareB3", None)
+    ]
+
+
+class L2PseudoLengthHdr(Packet):
+    """L2 Pseudo Length Section 10.5.2.19"""
+    name = "L2 Pseudo Length"
+    fields_desc = [
+        BitField("eightBitPL", None, 1),
+        XBitField("ieiPL", None, 7),
+        # 6-bit pseudo length followed by the fixed trailing bits 0 and 1.
+        BitField("l2pLength", None, 6),
+        BitField("bit2", 0x0, 1),
+        BitField("bit1", 0x1, 1)
+    ]
+
+
+class MeasurementResultsHdr(Packet):
+    """Measurement Results Section 10.5.2.20"""
+    name = "Measurement Results"
+    fields_desc = [
+        BitField("eightBitMR", None, 1),
+        XBitField("ieiMR", None, 7),
+        # Serving-cell measurements.
+        BitField("baUsed", 0x0, 1),
+        BitField("dtxUsed", 0x0, 1),
+        BitField("rxLevFull", 0x0, 6),
+        BitField("spare", 0x0, 1),
+        BitField("measValid", 0x0, 1),
+        BitField("rxLevSub", 0x0, 6),
+        BitField("spare0", 0x0, 1),
+        BitField("rxqualFull", 0x0, 3),
+        BitField("rxqualSub", 0x0, 3),
+        # Neighbour-cell measurements (up to six cells).
+        BitField("noNcellHi", 0x0, 1),
+        BitField("noNcellLo", 0x0, 2),
+        BitField("rxlevC1", 0x0, 6),
+        BitField("bcchC1", 0x0, 5),
+        BitField("bsicC1Hi", 0x0, 3),
+        BitField("bsicC1Lo", 0x0, 3),
+        BitField("rxlevC2", 0x0, 5),
+        BitField("rxlevC2Lo", 0x0, 1),
+        BitField("bcchC2", 0x0, 5),
+        # NOTE(review): "bsicC1Hi" is declared a second time here — it
+        # probably should be "bsicC2Hi"; duplicate scapy field names
+        # shadow each other. Confirm against TS 04.08 10.5.2.20.
+        BitField("bsicC1Hi", 0x0, 2),
+        BitField("bscicC2Lo", 0x0, 4),
+        BitField("bscicC2Hi", 0x0, 4),
+
+        BitField("rxlevC3Lo", 0x0, 2),
+        BitField("bcchC3", 0x0, 5),
+        BitField("rxlevC3Hi", 0x0, 1),
+
+        BitField("bsicC3Lo", 0x0, 5),
+        BitField("bsicC3Hi", 0x0, 3),
+
+        BitField("rxlevC4Lo", 0x0, 3),
+        BitField("bcchC4", 0x0, 5),
+
+        BitField("bsicC4", 0x0, 6),
+        BitField("rxlevC5Hi", 0x0, 2),
+
+        BitField("rxlevC5Lo", 0x0, 4),
+        BitField("bcchC5Hi", 0x0, 4),
+
+        BitField("bcchC5Lo", 0x0, 1),
+        BitField("bsicC5", 0x0, 6),
+        BitField("rxlevC6", 0x0, 1),
+
+        BitField("rxlevC6Lo", 0x0, 5),
+        BitField("bcchC6Hi", 0x0, 3),
+
+        BitField("bcchC6Lo", 0x0, 3),
+        BitField("bsicC6", 0x0, 5)
+    ]
+
+
+class GprsMeasurementResultsHdr(Packet):
+ """GPRS Measurement Results Section 10.5.2.20a"""
+ name = "GPRS Measurement Results"
+ fields_desc = [
+ BitField("eightBitGMR", None, 1),
+ XBitField("ieiGMR", None, 7),
+ BitField("cValue", 0x0, 6),
+ BitField("rxqualHi", 0x0, 2),
+ BitField("rxqL", 0x0, 1),
+ BitField("spare", 0x0, 1),
+ BitField("signVar", 0x0, 6)
+ ]
+
+
+# len 3 to 10
# len 3 to 10
class MobileAllocationHdr(Packet):
    """Mobile Allocation Section 10.5.2.21

    Variable-length IE (3-10 octets): only ``maC64`` is mandatory, the
    remaining MA octets are optional and trimmed in ``post_build``.
    """
    name = "Mobile Allocation"
    fields_desc = [
        BitField("eightBitMA", None, 1),
        XBitField("ieiMA", None, 7),
        XByteField("lengthMA", None),
        ByteField("maC64", 0x12),
        ByteField("maC56", None),  # optional fields start here
        ByteField("maC48", None),
        ByteField("maC40", None),
        ByteField("maC32", None),
        ByteField("maC24", None),
        ByteField("maC16", None),
        ByteField("maC8", None)
        ]

    def post_build(self, p, pay):
        # Collect the current field values in declaration order so that
        # adapt() can work out how many trailing optional octets are unset.
        values = [getattr(self, fld.name) for fld in self.fields_desc]
        res = adapt(3, 10, values, self.fields_desc)
        if self.lengthMA is None:
            # Patch the auto-computed length into the second octet.
            p = p[:1] + struct.pack(">B", res[1]) + p[2:]
        # FIX: was `res[0] is not 0` — identity comparison with an int
        # literal is implementation-dependent (SyntaxWarning on CPython 3.8+).
        if res[0] != 0:
            p = p[:-res[0]]  # strip the unused optional octets
        return p + pay
+
+
+class MobileTimeDifferenceHdr(Packet):
+ """Mobile Time Difference Section 10.5.2.21a"""
+ name = "Mobile Time Difference"
+ fields_desc = [
+ BitField("eightBitMTD", None, 1),
+ XBitField("ieiMTD", None, 7),
+ XByteField("lengthMTD", 0x5),
+ ByteField("valueHi", 0x0),
+ ByteField("valueCnt", 0x0),
+ BitField("valueLow", 0x0, 5),
+ BitField("spare", 0x0, 1),
+ BitField("spare1", 0x0, 1),
+ BitField("spare2", 0x0, 1)
+ ]
+
+
+# min 4 octets max 8
# min 4 octets max 8
class MultiRateConfigurationHdr(Packet):
    """MultiRate configuration Section 10.5.2.21aa

    Variable-length IE (4-8 octets); the threshold/hysteresis octets are
    optional and trimmed in ``post_build``.
    """
    name = "MultiRate Configuration"
    fields_desc = [
        BitField("eightBitMRC", None, 1),
        XBitField("ieiMRC", None, 7),

        XByteField("lengthMRC", None),

        BitField("mrVersion", 0x0, 3),
        BitField("spare", 0x0, 1),
        BitField("icmi", 0x0, 1),
        # FIX: the two fields below were both also named "spare"; with
        # duplicate names getattr() returns the first field's value for
        # all three, corrupting the length computation in post_build.
        BitField("spare1", 0x0, 1),
        BitField("startMode", 0x0, 2),

        ByteField("amrCodec", 0x0),

        BitField("spare2", None, 2),
        BitField("threshold1", None, 6),

        BitField("hysteresis1", None, 4),
        BitField("threshold2", None, 4),

        BitField("threshold2cnt", None, 2),
        BitField("hysteresis2", None, 4),
        BitField("threshold3", None, 2),

        BitField("threshold3cnt", None, 4),
        BitField("hysteresis3", None, 4)
        ]

    def post_build(self, p, pay):
        # Field values in declaration order, for the optional-octet trim.
        values = [getattr(self, fld.name) for fld in self.fields_desc]
        res = adapt(4, 8, values, self.fields_desc)
        if self.lengthMRC is None:
            # Patch the auto-computed length into the second octet.
            p = p[:1] + struct.pack(">B", res[1]) + p[2:]
        # FIX: was `res[0] is not 0` (identity comparison with a literal).
        if res[0] != 0:
            p = p[:-res[0]]
        return p + pay
+
+
+# len 3 to 12
# len 3 to 12
class MultislotAllocationHdr(Packet):
    """Multislot Allocation Section 10.5.2.21b

    Variable-length IE (3-12 octets). The UA octet is only present when
    the extension bit ``ext0`` is 0; trailing channel-set octets are
    optional and trimmed in ``post_build``.
    """
    name = "Multislot Allocation"
    fields_desc = [
        BitField("eightBitMSA", None, 1),
        XBitField("ieiMSA", None, 7),
        XByteField("lengthMSA", None),
        BitField("ext0", 0x1, 1),
        BitField("da", 0x0, 7),
        ConditionalField(BitField("ext1", 0x1, 1),  # optional
                         lambda pkt: pkt.ext0 == 0),
        ConditionalField(BitField("ua", 0x0, 7),
                         lambda pkt: pkt.ext0 == 0),
        ByteField("chan1", None),
        ByteField("chan2", None),
        ByteField("chan3", None),
        ByteField("chan4", None),
        ByteField("chan5", None),
        ByteField("chan6", None),
        ByteField("chan7", None),
        ByteField("chan8", None)
        ]

    def post_build(self, p, pay):
        # Field values in declaration order, for the optional-octet trim.
        values = [getattr(self, fld.name) for fld in self.fields_desc]
        res = adapt(3, 12, values, self.fields_desc)
        # FIX: was `res[0] is not 0` (identity comparison with a literal).
        if res[0] != 0:
            p = p[:-res[0]]
        if self.lengthMSA is None:
            # Length is derived from the truncated packet, so patch it in
            # after trimming (order matters here).
            p = p[:1] + struct.pack(">B", len(p) - 2) + p[2:]
        return p + pay
+
+
+class NcModeHdr(Packet):
+ """NC mode Section 10.5.2.21c"""
+ name = "NC Mode"
+ fields_desc = [
+ XBitField("ieiNM", None, 4),
+ BitField("spare", 0x0, 2),
+ BitField("ncMode", 0x0, 2)
+ ]
+
+
+# Fix for len problem
+# concatenation NC Mode And Spare Half Octets
+class NcModeAndSpareHalfOctets(Packet):
+ name = "NC Mode And Spare Half Octets"
+ fields_desc = [
+ BitField("spare", 0x0, 2),
+ BitField("ncMode", 0x0, 2),
+ BitField("spareHalfOctets", 0x0, 4)
+ ]
+
+
+class NeighbourCellsDescriptionHdr(Packet):
+ """Neighbour Cells Description Section 10.5.2.22"""
+ name = "Neighbour Cells Description"
+ fields_desc = [
+ BitField("eightBitNCD", None, 1),
+ XBitField("ieiNCD", None, 7),
+ BitField("bit128", 0x0, 1),
+ BitField("bit127", 0x0, 1),
+ BitField("extInd", 0x0, 1),
+ BitField("baInd", 0x0, 1),
+ BitField("bit124", 0x0, 1),
+ BitField("bit123", 0x0, 1),
+ BitField("bit122", 0x0, 1),
+ BitField("bit121", 0x0, 1),
+ BitField("120bits", 0x0, 120)
+ ]
+
+
+class NeighbourCellsDescription2Hdr(Packet):
+ """Neighbour Cells Description 2 Section 10.5.2.22a"""
+ name = "Neighbour Cells Description 2"
+ fields_desc = [
+ BitField("eightBitNCD2", None, 1),
+ XBitField("ieiNCD2", None, 7),
+ BitField("bit128", 0x0, 1),
+ BitField("multiband", 0x0, 2),
+ BitField("baInd", 0x0, 1),
+ BitField("bit124", 0x0, 1),
+ BitField("bit123", 0x0, 1),
+ BitField("bit122", 0x0, 1),
+ BitField("bit121", 0x0, 1),
+ BitField("120bits", 0x0, 120)
+ ]
+
+
+class NtNRestOctets(Packet):
+ """NT/N Rest Octets Section 10.5.2.22c"""
+ name = "NT/N Rest Octets"
+ fields_desc = [
+ BitField("nln", 0x0, 2),
+ BitField("ncnInfo", 0x0, 4),
+ BitField("spare", 0x0, 2)
+ ]
+
+
+#
+# The following packet has no length info!
+#
+# len 1-18
+class P1RestOctets(Packet):
+ """P1 Rest Octets Section 10.5.2.23"""
+ name = "P1 Rest Octets"
+ fields_desc = [
+ BitField("nln", 0x0, 2),
+ BitField("nlnStatus", 0x0, 1),
+ BitField("prio1", 0x0, 3),
+ BitField("prio2", 0x0, 3),
+ # optional
+ BitField("pageIndication1", 0x0, 1),
+ BitField("pageIndication2", 0x0, 1),
+ BitField("spare", 0x0, 5),
+ ByteField("spareB1", None),
+ ByteField("spareB2", None),
+ ByteField("spareB3", None),
+ ByteField("spareB4", None),
+ ByteField("spareB5", None),
+ ByteField("spareB6", None),
+ ByteField("spareB7", None),
+ ByteField("spareB8", None),
+ ByteField("spareB9", None),
+ ByteField("spareB10", None),
+ ByteField("spareB11", None),
+ ByteField("spareB12", None),
+ ByteField("spareB13", None),
+ ByteField("spareB14", None),
+ ByteField("spareB15", None),
+ ByteField("spareB16", None),
+ ]
+
+
+# len 2-12
+class P2RestOctets(Packet):
+ """P2 Rest Octets Section 10.5.2.24"""
+ name = "P2 Rest Octets"
+ fields_desc = [
+ BitField("cn3", 0x0, 2),
+ BitField("nln", 0x0, 2),
+ BitField("nlnStatus", 0x0, 1),
+ BitField("prio1", 0x0, 3),
+
+ BitField("prio2", 0x0, 3),
+ BitField("prio3", 0x0, 3),
+ BitField("pageIndication3", 0x0, 1),
+ BitField("spare", 0x0, 1),
+
+        # optional (no length field!)
+ ByteField("spareB1", None),
+ ByteField("spareB2", None),
+ ByteField("spareB3", None),
+ ByteField("spareB4", None),
+
+ ByteField("spareB5", None),
+ ByteField("spareB6", None),
+ ByteField("spareB7", None),
+ ByteField("spareB8", None),
+
+ ByteField("spareB9", None),
+ ByteField("spareB10", None)
+ ]
+
+
+# len 4
+class P3RestOctets(Packet):
+ """P3 Rest Octets Section 10.5.2.25"""
+ name = "P3 Rest Octets"
+ fields_desc = [
+ BitField("cn3", 0x0, 2),
+ BitField("cn4", 0x0, 2),
+ BitField("nln", 0x0, 2),
+ BitField("nlnStatus", 0x0, 1),
+ BitField("prio1", 0x0, 3),
+ BitField("prio2", 0x0, 3),
+ BitField("prio3", 0x0, 3),
+ BitField("prio4", 0x0, 3),
+ BitField("spare", 0x0, 5)
+ ]
+
+
+# len 4
+# strange packet, lots of valid formats
+
+# ideas for the dynamic packets:
+# 1] for user interaction: Create an interactive "builder" based on a
+# Q/A process (not very scapy like)
+# 2] for usage in scripts, create an alternative packet for every
+# possible packet layout
+#
+
+
class PacketChannelDescription(Packet):
    """Packet Channel Description Section 10.5.2.25a

    This IE has multiple possible layouts selected by the chooser bits;
    only the first layout is modelled here. A fuller implementation would
    need per-layout alternatives (see the design notes above this class).
    """
    name = "Packet Channel Description"
    fields_desc = [
        ByteField("ieiPCD", None),
        BitField("chanType", 0x0, 5),  # channel type for layout 1
        BitField("tn", 0x0, 3),        # timeslot number
        BitField("tsc", 0x0, 3),       # training sequence code
        BitField("chooser1", 0x0, 1),  # selects among the alternative
        BitField("chooser2", 0x0, 1),  # layouts (only one modelled)
        BitField("spare1", 0x0, 1),
        BitField("arfcn", 0x0, 10),
        ]
+
+
+class DedicatedModeOrTBFHdr(Packet):
+ """Dedicated mode or TBF Section 10.5.2.25b"""
+ name = "Dedicated Mode or TBF"
+ fields_desc = [
+ XBitField("ieiDMOT", None, 4),
+ BitField("spare", 0x0, 1),
+ BitField("tma", 0x0, 1),
+ BitField("downlink", 0x0, 1),
+ BitField("td", 0x0, 1)
+ ]
+
+
+# FIXME add implementation
+class RrPacketUplinkAssignment(Packet):
+ """RR Packet Uplink Assignment Section 10.5.2.25c"""
+ name = "RR Packet Uplink Assignment"
+ fields_desc = [
+ # Fill me
+ ]
+
+
+class PageModeHdr(Packet):
+ """Page Mode Section 10.5.2.26"""
+ name = "Page Mode"
+ fields_desc = [
+ XBitField("ieiPM", None, 4),
+ BitField("spare", 0x0, 1),
+ BitField("spare1", 0x0, 1),
+ BitField("pm", 0x0, 2)
+ ]
+
+
+# Fix for 1/2 len problem
+# concatenation: pageMode and dedicatedModeOrTBF
# Fix for 1/2 len problem
# concatenation: pageMode and dedicatedModeOrTBF
class PageModeAndDedicatedModeOrTBF(Packet):
    """Merged half-octet IEs: Page Mode + Dedicated Mode or TBF."""
    name = "Page Mode and Dedicated Mode Or TBF"
    fields_desc = [
        BitField("spare", 0x0, 1),
        BitField("spare1", 0x0, 1),
        BitField("pm", 0x0, 2),
        # FIX: this field was a second "spare"; duplicate names break
        # scapy attribute access. It is the DMOT half-octet's spare bit.
        BitField("spare2", 0x0, 1),
        BitField("tma", 0x0, 1),
        BitField("downlink", 0x0, 1),
        BitField("td", 0x0, 1)
        ]
+
+
+# Fix for 1/2 len problem
+# concatenation: pageMode and spareHalfOctets
+class PageModeAndSpareHalfOctets(Packet):
+ name = "Page Mode and Spare Half Octets"
+ fields_desc = [
+ BitField("spare", 0x0, 1),
+ BitField("spare1", 0x0, 1),
+ BitField("pm", 0x0, 2),
+ BitField("spareHalfOctets", 0x0, 4)
+ ]
+
+
+# Fix for 1/2 len problem
+# concatenation: pageMode and Channel Needed
+class PageModeAndChannelNeeded(Packet):
+ name = "Page Mode and Channel Needed"
+ fields_desc = [
+ BitField("spare", 0x0, 1),
+ BitField("spare1", 0x0, 1),
+ BitField("pm", 0x0, 2),
+ BitField("channel2", 0x0, 2),
+ BitField("channel1", 0x0, 2)
+ ]
+
+
class NccPermittedHdr(Packet):
    """NCC Permitted Section 10.5.2.27"""
    # FIX: display name was misspelled "NCC Permited".
    name = "NCC Permitted"
    fields_desc = [
        BitField("eightBitNP", None, 1),
        XBitField("ieiNP", None, 7),
        ByteField("nccPerm", 0x0)  # bitmap of permitted NCCs
        ]
+
+
+class PowerCommandHdr(Packet):
+ """Power Command Section 10.5.2.28"""
+ name = "Power Command"
+ fields_desc = [
+ BitField("eightBitPC", None, 1),
+ XBitField("ieiPC", None, 7),
+ BitField("spare", 0x0, 1),
+ BitField("spare1", 0x0, 1),
+ BitField("spare2", 0x0, 1),
+ BitField("powerLvl", 0x0, 5)
+ ]
+
+
+class PowerCommandAndAccessTypeHdr(Packet):
+ """Power Command and access type Section 10.5.2.28a"""
+ name = "Power Command and Access Type"
+ fields_desc = [
+ BitField("eightBitPCAAT", None, 1),
+ XBitField("ieiPCAAT", None, 7),
+ BitField("atc", 0x0, 1),
+ BitField("spare", 0x0, 1),
+ BitField("spare1", 0x0, 1),
+ BitField("powerLvl", 0x0, 5)
+ ]
+
+
+class RachControlParametersHdr(Packet):
+ """RACH Control Parameters Section 10.5.2.29"""
+ name = "RACH Control Parameters"
+ fields_desc = [
+ BitField("eightBitRCP", None, 1),
+ XBitField("ieiRCP", None, 7),
+ BitField("maxRetrans", 0x0, 2),
+ BitField("txInteger", 0x0, 4),
+ BitField("cellBarrAccess", 0x0, 1),
+ BitField("re", 0x0, 1),
+ BitField("ACC15", 0x0, 1),
+ BitField("ACC14", 0x0, 1),
+ BitField("ACC13", 0x0, 1),
+ BitField("ACC12", 0x0, 1),
+ BitField("ACC11", 0x0, 1),
+ BitField("ACC10", 0x0, 1),
+ BitField("ACC09", 0x0, 1),
+ BitField("ACC08", 0x0, 1),
+ BitField("ACC07", 0x0, 1),
+ BitField("ACC06", 0x0, 1),
+ BitField("ACC05", 0x0, 1),
+ BitField("ACC04", 0x0, 1),
+ BitField("ACC03", 0x0, 1),
+ BitField("ACC02", 0x0, 1),
+ BitField("ACC01", 0x0, 1),
+ BitField("ACC00", 0x0, 1),
+ ]
+
+
+class RequestReferenceHdr(Packet):
+ """Request Reference Section 10.5.2.30"""
+ name = "Request Reference"
+ fields_desc = [
+ BitField("eightBitRR", None, 1),
+ XBitField("ieiRR", None, 7),
+ ByteField("ra", 0x0),
+ BitField("t1", 0x0, 5),
+ BitField("t3Hi", 0x0, 3),
+ BitField("t3Lo", 0x0, 3),
+ BitField("t2", 0x0, 5)
+ ]
+
+
+class RrCauseHdr(Packet):
+ """RR Cause Section 10.5.2.31"""
+ name = "RR Cause"
+ fields_desc = [
+ BitField("eightBitRC", None, 1),
+ XBitField("ieiRC", None, 7),
+ ByteField("rrCause", 0x0)
+ ]
+
+
+class Si1RestOctets(Packet):
+ """SI 1 Rest Octets Section 10.5.2.32"""
+ name = "SI 1 Rest Octets"
+ fields_desc = [
+ ByteField("nchPos", 0x0)
+ ]
+
+
+class Si2bisRestOctets(Packet):
+ """SI 2bis Rest Octets Section 10.5.2.33"""
+ name = "SI 2bis Rest Octets"
+ fields_desc = [
+ ByteField("spare", 0x0)
+ ]
+
+
+class Si2terRestOctets(Packet):
+ """SI 2ter Rest Octets Section 10.5.2.33a"""
+ name = "SI 2ter Rest Octets"
+ fields_desc = [
+ ByteField("spare1", 0x0),
+ ByteField("spare2", 0x0),
+ ByteField("spare3", 0x0),
+ ByteField("spare4", 0x0)
+ ]
+
+
+# len 5
+class Si3RestOctets(Packet):
+ """SI 3 Rest Octets Section 10.5.2.34"""
+ name = "SI 3 Rest Octets"
+ fields_desc = [
+ ByteField("byte1", 0x0),
+ ByteField("byte2", 0x0),
+ ByteField("byte3", 0x0),
+ ByteField("byte4", 0x0),
+ ByteField("byte5", 0x0)
+ ]
+
+
+# len 1 to 11
# len 1 to 11
class Si4RestOctets(Packet):
    """SI 4 Rest Octets Section 10.5.2.35

    Variable-length (1-11 octets): the first octet is the length, all
    content octets are optional and trimmed in ``post_build``.
    """
    name = "SI 4 Rest Octets"
    fields_desc = [
        XByteField("lengthSI4", None),
        ByteField("byte2", None),
        ByteField("byte3", None),
        ByteField("byte4", None),
        ByteField("byte5", None),
        ByteField("byte6", None),
        ByteField("byte7", None),
        ByteField("byte8", None),
        ByteField("byte9", None),
        ByteField("byte10", None),
        ByteField("byte11", None)
        ]

    def post_build(self, p, pay):
        # Field values in declaration order, for the optional-octet trim.
        values = [getattr(self, fld.name) for fld in self.fields_desc]
        res = adapt(1, 11, values, self.fields_desc, 1)
        if self.lengthSI4 is None:
            # The length is the first octet of this IE.
            p = struct.pack(">B", res[1]) + p[1:]
        # FIX: `res[0] is not 0` and `len(p) is 1` identity-compared int
        # literals; use value equality.
        if res[0] != 0:
            p = p[:-res[0]]
        if len(p) == 1:
            # The content length can be 0 even though the IE itself is
            # mandatory, so emit nothing rather than a bare length octet.
            p = ''
        return p + pay
+
+
class Si6RestOctets(Packet):
    """SI 6 Rest Octets Section 10.5.2.35a"""
    # FIX: display name said "SI 4 Rest Octets" (copy-paste from Si4RestOctets).
    name = "SI 6 Rest Octets"
    fields_desc = [
        # FIXME: field layout not yet implemented
        ]
+
+
+# len 21
+class Si7RestOctets(Packet):
+ """SI 7 Rest Octets Section 10.5.2.36"""
+ name = "SI 7 Rest Octets"
+ fields_desc = [
+ # FIXME
+ XByteField("lengthSI7", 0x15),
+ ByteField("byte2", 0x0),
+ ByteField("byte3", 0x0),
+ ByteField("byte4", 0x0),
+ ByteField("byte5", 0x0),
+ ByteField("byte6", 0x0),
+ ByteField("byte7", 0x0),
+ ByteField("byte8", 0x0),
+ ByteField("byte9", 0x0),
+ ByteField("byte10", 0x0),
+ ByteField("byte11", 0x0),
+ ByteField("byte12", 0x0),
+ ByteField("byte13", 0x0),
+ ByteField("byte14", 0x0),
+ ByteField("byte15", 0x0),
+ ByteField("byte16", 0x0),
+ ByteField("byte17", 0x0),
+ ByteField("byte18", 0x0),
+ ByteField("byte19", 0x0),
+ ByteField("byte20", 0x0),
+ ByteField("byte21", 0x0)
+ ]
+
+
+# len 21
+class Si8RestOctets(Packet):
+ """SI 8 Rest Octets Section 10.5.2.37"""
+ name = "SI 8 Rest Octets"
+ fields_desc = [
+ # FIXME
+ XByteField("lengthSI8", 0x15),
+ ByteField("byte2", 0x0),
+ ByteField("byte3", 0x0),
+ ByteField("byte4", 0x0),
+ ByteField("byte5", 0x0),
+ ByteField("byte6", 0x0),
+ ByteField("byte7", 0x0),
+ ByteField("byte8", 0x0),
+ ByteField("byte9", 0x0),
+ ByteField("byte10", 0x0),
+ ByteField("byte11", 0x0),
+ ByteField("byte12", 0x0),
+ ByteField("byte13", 0x0),
+ ByteField("byte14", 0x0),
+ ByteField("byte15", 0x0),
+ ByteField("byte16", 0x0),
+ ByteField("byte17", 0x0),
+ ByteField("byte18", 0x0),
+ ByteField("byte19", 0x0),
+ ByteField("byte20", 0x0),
+ ByteField("byte21", 0x0)
+ ]
+
+
+#len 17
+class Si9RestOctets(Packet):
+ """SI 9 Rest Octets Section 10.5.2.37a"""
+ name = "SI 9 Rest Octets"
+ fields_desc = [
+ # FIXME
+ XByteField("lengthSI9", 0x11),
+ ByteField("byte2", 0x0),
+ ByteField("byte3", 0x0),
+ ByteField("byte4", 0x0),
+ ByteField("byte5", 0x0),
+ ByteField("byte6", 0x0),
+ ByteField("byte7", 0x0),
+ ByteField("byte8", 0x0),
+ ByteField("byte9", 0x0),
+ ByteField("byte10", 0x0),
+ ByteField("byte11", 0x0),
+ ByteField("byte12", 0x0),
+ ByteField("byte13", 0x0),
+ ByteField("byte14", 0x0),
+ ByteField("byte15", 0x0),
+ ByteField("byte16", 0x0),
+ ByteField("byte17", 0x0)
+ ]
+
+
+# len 21
# len 21
class Si13RestOctets(Packet):
    """SI 13 Rest Octets Section 10.5.2.37b

    Fixed 21-octet IE; content octets are not decoded yet (FIXME).
    """
    name = "SI 13 Rest Octets"
    fields_desc = [
        # FIXME
        # NOTE(review): "lengthSI3" is presumably a typo for "lengthSI13";
        # left unchanged because callers may reference it by this name.
        XByteField("lengthSI3", 0x15),
        ByteField("byte2", 0x0),
        ByteField("byte3", 0x0),
        ByteField("byte4", 0x0),
        ByteField("byte5", 0x0),
        ByteField("byte6", 0x0),
        ByteField("byte7", 0x0),
        ByteField("byte8", 0x0),
        ByteField("byte9", 0x0),
        ByteField("byte10", 0x0),
        ByteField("byte11", 0x0),
        ByteField("byte12", 0x0),
        ByteField("byte13", 0x0),
        ByteField("byte14", 0x0),
        ByteField("byte15", 0x0),
        ByteField("byte16", 0x0),
        ByteField("byte17", 0x0),
        ByteField("byte18", 0x0),
        ByteField("byte19", 0x0),
        ByteField("byte20", 0x0),
        ByteField("byte21", 0x0)
        ]
+
+
+# 10.5.2.37c [spare]
+# 10.5.2.37d [spare]
+
+
+# len 21
+class Si16RestOctets(Packet):
+ """SI 16 Rest Octets Section 10.5.2.37e"""
+ name = "SI 16 Rest Octets"
+ fields_desc = [
+ # FIXME
+ XByteField("lengthSI16", 0x15),
+ ByteField("byte2", 0x0),
+ ByteField("byte3", 0x0),
+ ByteField("byte4", 0x0),
+ ByteField("byte5", 0x0),
+ ByteField("byte6", 0x0),
+ ByteField("byte7", 0x0),
+ ByteField("byte8", 0x0),
+ ByteField("byte9", 0x0),
+ ByteField("byte10", 0x0),
+ ByteField("byte11", 0x0),
+ ByteField("byte12", 0x0),
+ ByteField("byte13", 0x0),
+ ByteField("byte14", 0x0),
+ ByteField("byte15", 0x0),
+ ByteField("byte16", 0x0),
+ ByteField("byte17", 0x0),
+ ByteField("byte18", 0x0),
+ ByteField("byte19", 0x0),
+ ByteField("byte20", 0x0),
+ ByteField("byte21", 0x0)
+ ]
+
+
+# len 21
+class Si17RestOctets(Packet):
+ """SI 17 Rest Octets Section 10.5.2.37f"""
+ name = "SI 17 Rest Octets"
+ fields_desc = [
+ # FIXME
+ XByteField("lengthSI17", 0x15),
+ ByteField("byte2", 0x0),
+ ByteField("byte3", 0x0),
+ ByteField("byte4", 0x0),
+ ByteField("byte5", 0x0),
+ ByteField("byte6", 0x0),
+ ByteField("byte7", 0x0),
+ ByteField("byte8", 0x0),
+ ByteField("byte9", 0x0),
+ ByteField("byte10", 0x0),
+ ByteField("byte11", 0x0),
+ ByteField("byte12", 0x0),
+ ByteField("byte13", 0x0),
+ ByteField("byte14", 0x0),
+ ByteField("byte15", 0x0),
+ ByteField("byte16", 0x0),
+ ByteField("byte17", 0x0),
+ ByteField("byte18", 0x0),
+ ByteField("byte19", 0x0),
+ ByteField("byte20", 0x0),
+ ByteField("byte21", 0x0)
+ ]
+
+
+class StartingTimeHdr(Packet):
+ """Starting Time Section 10.5.2.38"""
+ name = "Starting Time"
+ fields_desc = [
+ BitField("eightBitST", None, 1),
+ XBitField("ieiST", None, 7),
+ ByteField("ra", 0x0),
+ BitField("t1", 0x0, 5),
+ BitField("t3Hi", 0x0, 3),
+ BitField("t3Lo", 0x0, 3),
+ BitField("t2", 0x0, 5)
+ ]
+
+
+class SynchronizationIndicationHdr(Packet):
+ """Synchronization Indication Section 10.5.2.39"""
+ name = "Synchronization Indication"
+ fields_desc = [
+ XBitField("ieiSI", None, 4),
+ BitField("nci", 0x0, 1),
+ BitField("rot", 0x0, 1),
+ BitField("si", 0x0, 2)
+ ]
+
+
+class TimingAdvanceHdr(Packet):
+ """Timing Advance Section 10.5.2.40"""
+ name = "Timing Advance"
+ fields_desc = [
+ BitField("eightBitTA", None, 1),
+ XBitField("ieiTA", None, 7),
+ BitField("spare", 0x0, 1),
+ BitField("spare1", 0x0, 1),
+ BitField("timingVal", 0x0, 6)
+ ]
+
+
+class TimeDifferenceHdr(Packet):
+ """ Time Difference Section 10.5.2.41"""
+ name = "Time Difference"
+ fields_desc = [
+ BitField("eightBitTD", None, 1),
+ XBitField("ieiTD", None, 7),
+ XByteField("lengthTD", 0x3),
+ ByteField("timeValue", 0x0)
+ ]
+
+
+class TlliHdr(Packet):
+ """ TLLI Section Section 10.5.2.41a"""
+ name = "TLLI"
+ fields_desc = [
+ BitField("eightBitT", None, 1),
+ XBitField("ieiT", None, 7),
+ ByteField("value", 0x0),
+ ByteField("value1", 0x0),
+ ByteField("value2", 0x0),
+ ByteField("value3", 0x0)
+ ]
+
+
+class TmsiPTmsiHdr(Packet):
+ """ TMSI/P-TMSI Section 10.5.2.42"""
+ name = "TMSI/P-TMSI"
+ fields_desc = [
+ BitField("eightBitTPT", None, 1),
+ XBitField("ieiTPT", None, 7),
+ ByteField("value", 0x0),
+ ByteField("value1", 0x0),
+ ByteField("value2", 0x0),
+ ByteField("value3", 0x0)
+ ]
+
+
+class VgcsTargetModeIdenticationHdr(Packet):
+ """ VGCS target Mode Indication 10.5.2.42a"""
+ name = "VGCS Target Mode Indication"
+ fields_desc = [
+ BitField("eightBitVTMI", None, 1),
+ XBitField("ieiVTMI", None, 7),
+ XByteField("lengthVTMI", 0x2),
+ BitField("targerMode", 0x0, 2),
+ BitField("cipherKeyNb", 0x0, 4),
+ BitField("spare", 0x0, 1),
+ BitField("spare1", 0x0, 1)
+ ]
+
+
+class WaitIndicationHdr(Packet):
+ """ Wait Indication Section 10.5.2.43"""
+ name = "Wait Indication"
+ fields_desc = [ # asciiart of specs strange
+ BitField("eightBitWI", None, 1),
+ XBitField("ieiWI", None, 7),
+ ByteField("timeoutVal", 0x0)
+ ]
+
+
+# len 17
# len 17
class ExtendedMeasurementResultsHdr(Packet):
    """EXTENDED MEASUREMENT RESULTS Section 10.5.2.45

    Reports rxLev for carriers C0-C20; the 6-bit values straddle octet
    boundaries, hence the Hi/Lo split fields.
    """
    name = "Extended Measurement Results"
    fields_desc = [
        BitField("eightBitEMR", None, 1),
        XBitField("ieiEMR", None, 7),

        BitField("scUsed", None, 1),
        BitField("dtxUsed", None, 1),
        BitField("rxLevC0", None, 6),

        BitField("rxLevC1", None, 6),
        BitField("rxLevC2Hi", None, 2),

        BitField("rxLevC2Lo", None, 4),
        BitField("rxLevC3Hi", None, 4),

        BitField("rxLevC3Lo", None, 3),
        BitField("rxLevC4", None, 5),

        BitField("rxLevC5", None, 6),
        BitField("rxLevC6Hi", None, 2),

        BitField("rxLevC6Lo", None, 4),
        BitField("rxLevC7Hi", None, 4),

        BitField("rxLevC7Lo", None, 2),
        BitField("rxLevC8", None, 6),

        BitField("rxLevC9", None, 6),
        BitField("rxLevC10Hi", None, 2),

        BitField("rxLevC10Lo", None, 4),
        BitField("rxLevC11Hi", None, 4),

        # FIX: this field was named "rxLevC13Lo", which breaks the Hi/Lo
        # pairing pattern (C11Hi above must be completed by C11Lo) and
        # collides conceptually with the real C13 field below.
        BitField("rxLevC11Lo", None, 2),
        BitField("rxLevC12", None, 6),

        BitField("rxLevC13", None, 6),
        BitField("rxLevC14Hi", None, 2),

        BitField("rxLevC14Lo", None, 4),
        BitField("rxLevC15Hi", None, 4),

        BitField("rxLevC15Lo", None, 2),
        BitField("rxLevC16", None, 6),

        BitField("rxLevC17", None, 6),
        BitField("rxLevC18Hi", None, 2),

        BitField("rxLevC18Lo", None, 4),
        BitField("rxLevC19Hi", None, 4),

        BitField("rxLevC19Lo", None, 2),
        BitField("rxLevC20", None, 6)
        ]
+
+
+# len 17
+class ExtendedMeasurementFrequencyListHdr(Packet):
+ """Extended Measurement Frequency List Section 10.5.2.46"""
+ name = "Extended Measurement Frequency List"
+ fields_desc = [
+ BitField("eightBitEMFL", None, 1),
+ XBitField("ieiEMFL", None, 7),
+
+ BitField("bit128", 0x0, 1),
+ BitField("bit127", 0x0, 1),
+ BitField("spare", 0x0, 1),
+ BitField("seqCode", 0x0, 1),
+ BitField("bit124", 0x0, 1),
+ BitField("bit123", 0x0, 1),
+ BitField("bit122", 0x0, 1),
+ BitField("bit121", 0x0, 1),
+
+ BitField("bitsRest", 0x0, 128)
+ ]
+
+
+class SuspensionCauseHdr(Packet):
+ """Suspension Cause Section 10.5.2.47"""
+ name = "Suspension Cause"
+ fields_desc = [
+ BitField("eightBitSC", None, 1),
+ XBitField("ieiSC", None, 7),
+ ByteField("suspVal", 0x0)
+ ]
+
+
+class ApduIDHdr(Packet):
+ """APDU Flags Section 10.5.2.48"""
+ name = "Apdu Id"
+ fields_desc = [
+ XBitField("ieiAI", None, 4),
+ BitField("id", None, 4)
+ ]
+
+
+class ApduFlagsHdr(Packet):
+ """APDU Flags Section 10.5.2.49"""
+ name = "Apdu Flags"
+ fields_desc = [
+ XBitField("iei", None, 4),
+ BitField("spare", 0x0, 1),
+ BitField("cr", 0x0, 1),
+ BitField("firstSeg", 0x0, 1),
+ BitField("lastSeg", 0x0, 1)
+ ]
+
+
+# Fix 1/2 len problem
# Fix 1/2 len problem
class ApduIDAndApduFlags(Packet):
    """Merged half-octet IEs: APDU ID + APDU Flags."""
    # FIX: display name was misspelled "Apu Id and Apdu Flags".
    name = "Apdu Id and Apdu Flags"
    fields_desc = [
        BitField("id", None, 4),
        BitField("spare", 0x0, 1),
        BitField("cr", 0x0, 1),
        BitField("firstSeg", 0x0, 1),
        BitField("lastSeg", 0x0, 1)
        ]
+
+
+# len 2 to max L3 (251) (done)
+class ApduDataHdr(Packet):
+ """APDU Data Section 10.5.2.50"""
+ name = "Apdu Data"
+ fields_desc = [
+ BitField("eightBitAD", None, 1),
+ XBitField("ieiAD", None, 7),
+ XByteField("lengthAD", None),
+ #optional
+ ByteField("apuInfo1", None),
+ ByteField("apuInfo2", None),
+ ByteField("apuInfo3", None),
+ ByteField("apuInfo4", None),
+ ByteField("apuInfo5", None),
+ ByteField("apuInfo6", None),
+ ByteField("apuInfo7", None),
+ ByteField("apuInfo8", None),
+ ByteField("apuInfo9", None),
+ ByteField("apuInfo10", None),
+ ByteField("apuInfo11", None),
+ ByteField("apuInfo12", None),
+ ByteField("apuInfo13", None),
+ ByteField("apuInfo14", None),
+ ByteField("apuInfo15", None),
+ ByteField("apuInfo16", None),
+ ByteField("apuInfo17", None),
+ ByteField("apuInfo18", None),
+ ByteField("apuInfo19", None),
+ ByteField("apuInfo20", None),
+ ByteField("apuInfo21", None),
+ ByteField("apuInfo22", None),
+ ByteField("apuInfo23", None),
+ ByteField("apuInfo24", None),
+ ByteField("apuInfo25", None),
+ ByteField("apuInfo26", None),
+ ByteField("apuInfo27", None),
+ ByteField("apuInfo28", None),
+ ByteField("apuInfo29", None),
+ ByteField("apuInfo30", None),
+ ByteField("apuInfo31", None),
+ ByteField("apuInfo32", None),
+ ByteField("apuInfo33", None),
+ ByteField("apuInfo34", None),
+ ByteField("apuInfo35", None),
+ ByteField("apuInfo36", None),
+ ByteField("apuInfo37", None),
+ ByteField("apuInfo38", None),
+ ByteField("apuInfo39", None),
+ ByteField("apuInfo40", None),
+ ByteField("apuInfo41", None),
+ ByteField("apuInfo42", None),
+ ByteField("apuInfo43", None),
+ ByteField("apuInfo44", None),
+ ByteField("apuInfo45", None),
+ ByteField("apuInfo46", None),
+ ByteField("apuInfo47", None),
+ ByteField("apuInfo48", None),
+ ByteField("apuInfo49", None),
+ ByteField("apuInfo50", None),
+ ByteField("apuInfo51", None),
+ ByteField("apuInfo52", None),
+ ByteField("apuInfo53", None),
+ ByteField("apuInfo54", None),
+ ByteField("apuInfo55", None),
+ ByteField("apuInfo56", None),
+ ByteField("apuInfo57", None),
+ ByteField("apuInfo58", None),
+ ByteField("apuInfo59", None),
+ ByteField("apuInfo60", None),
+ ByteField("apuInfo61", None),
+ ByteField("apuInfo62", None),
+ ByteField("apuInfo63", None),
+ ByteField("apuInfo64", None),
+ ByteField("apuInfo65", None),
+ ByteField("apuInfo66", None),
+ ByteField("apuInfo67", None),
+ ByteField("apuInfo68", None),
+ ByteField("apuInfo69", None),
+ ByteField("apuInfo70", None),
+ ByteField("apuInfo71", None),
+ ByteField("apuInfo72", None),
+ ByteField("apuInfo73", None),
+ ByteField("apuInfo74", None),
+ ByteField("apuInfo75", None),
+ ByteField("apuInfo76", None),
+ ByteField("apuInfo77", None),
+ ByteField("apuInfo78", None),
+ ByteField("apuInfo79", None),
+ ByteField("apuInfo80", None),
+ ByteField("apuInfo81", None),
+ ByteField("apuInfo82", None),
+ ByteField("apuInfo83", None),
+ ByteField("apuInfo84", None),
+ ByteField("apuInfo85", None),
+ ByteField("apuInfo86", None),
+ ByteField("apuInfo87", None),
+ ByteField("apuInfo88", None),
+ ByteField("apuInfo89", None),
+ ByteField("apuInfo90", None),
+ ByteField("apuInfo91", None),
+ ByteField("apuInfo92", None),
+ ByteField("apuInfo93", None),
+ ByteField("apuInfo94", None),
+ ByteField("apuInfo95", None),
+ ByteField("apuInfo96", None),
+ ByteField("apuInfo97", None),
+ ByteField("apuInfo98", None),
+ ByteField("apuInfo99", None),
+ ByteField("apuInfo100", None),
+ ByteField("apuInfo101", None),
+ ByteField("apuInfo102", None),
+ ByteField("apuInfo103", None),
+ ByteField("apuInfo104", None),
+ ByteField("apuInfo105", None),
+ ByteField("apuInfo106", None),
+ ByteField("apuInfo107", None),
+ ByteField("apuInfo108", None),
+ ByteField("apuInfo109", None),
+ ByteField("apuInfo110", None),
+ ByteField("apuInfo111", None),
+ ByteField("apuInfo112", None),
+ ByteField("apuInfo113", None),
+ ByteField("apuInfo114", None),
+ ByteField("apuInfo115", None),
+ ByteField("apuInfo116", None),
+ ByteField("apuInfo117", None),
+ ByteField("apuInfo118", None),
+ ByteField("apuInfo119", None),
+ ByteField("apuInfo120", None),
+ ByteField("apuInfo121", None),
+ ByteField("apuInfo122", None),
+ ByteField("apuInfo123", None),
+ ByteField("apuInfo124", None),
+ ByteField("apuInfo125", None),
+ ByteField("apuInfo126", None),
+ ByteField("apuInfo127", None),
+ ByteField("apuInfo128", None),
+ ByteField("apuInfo129", None),
+ ByteField("apuInfo130", None),
+ ByteField("apuInfo131", None),
+ ByteField("apuInfo132", None),
+ ByteField("apuInfo133", None),
+ ByteField("apuInfo134", None),
+ ByteField("apuInfo135", None),
+ ByteField("apuInfo136", None),
+ ByteField("apuInfo137", None),
+ ByteField("apuInfo138", None),
+ ByteField("apuInfo139", None),
+ ByteField("apuInfo140", None),
+ ByteField("apuInfo141", None),
+ ByteField("apuInfo142", None),
+ ByteField("apuInfo143", None),
+ ByteField("apuInfo144", None),
+ ByteField("apuInfo145", None),
+ ByteField("apuInfo146", None),
+ ByteField("apuInfo147", None),
+ ByteField("apuInfo148", None),
+ ByteField("apuInfo149", None),
+ ByteField("apuInfo150", None),
+ ByteField("apuInfo151", None),
+ ByteField("apuInfo152", None),
+ ByteField("apuInfo153", None),
+ ByteField("apuInfo154", None),
+ ByteField("apuInfo155", None),
+ ByteField("apuInfo156", None),
+ ByteField("apuInfo157", None),
+ ByteField("apuInfo158", None),
+ ByteField("apuInfo159", None),
+ ByteField("apuInfo160", None),
+ ByteField("apuInfo161", None),
+ ByteField("apuInfo162", None),
+ ByteField("apuInfo163", None),
+ ByteField("apuInfo164", None),
+ ByteField("apuInfo165", None),
+ ByteField("apuInfo166", None),
+ ByteField("apuInfo167", None),
+ ByteField("apuInfo168", None),
+ ByteField("apuInfo169", None),
+ ByteField("apuInfo170", None),
+ ByteField("apuInfo171", None),
+ ByteField("apuInfo172", None),
+ ByteField("apuInfo173", None),
+ ByteField("apuInfo174", None),
+ ByteField("apuInfo175", None),
+ ByteField("apuInfo176", None),
+ ByteField("apuInfo177", None),
+ ByteField("apuInfo178", None),
+ ByteField("apuInfo179", None),
+ ByteField("apuInfo180", None),
+ ByteField("apuInfo181", None),
+ ByteField("apuInfo182", None),
+ ByteField("apuInfo183", None),
+ ByteField("apuInfo184", None),
+ ByteField("apuInfo185", None),
+ ByteField("apuInfo186", None),
+ ByteField("apuInfo187", None),
+ ByteField("apuInfo188", None),
+ ByteField("apuInfo189", None),
+ ByteField("apuInfo190", None),
+ ByteField("apuInfo191", None),
+ ByteField("apuInfo192", None),
+ ByteField("apuInfo193", None),
+ ByteField("apuInfo194", None),
+ ByteField("apuInfo195", None),
+ ByteField("apuInfo196", None),
+ ByteField("apuInfo197", None),
+ ByteField("apuInfo198", None),
+ ByteField("apuInfo199", None),
+ ByteField("apuInfo200", None),
+ ByteField("apuInfo201", None),
+ ByteField("apuInfo202", None),
+ ByteField("apuInfo203", None),
+ ByteField("apuInfo204", None),
+ ByteField("apuInfo205", None),
+ ByteField("apuInfo206", None),
+ ByteField("apuInfo207", None),
+ ByteField("apuInfo208", None),
+ ByteField("apuInfo209", None),
+ ByteField("apuInfo210", None),
+ ByteField("apuInfo211", None),
+ ByteField("apuInfo212", None),
+ ByteField("apuInfo213", None),
+ ByteField("apuInfo214", None),
+ ByteField("apuInfo215", None),
+ ByteField("apuInfo216", None),
+ ByteField("apuInfo217", None),
+ ByteField("apuInfo218", None),
+ ByteField("apuInfo219", None),
+ ByteField("apuInfo220", None),
+ ByteField("apuInfo221", None),
+ ByteField("apuInfo222", None),
+ ByteField("apuInfo223", None),
+ ByteField("apuInfo224", None),
+ ByteField("apuInfo225", None),
+ ByteField("apuInfo226", None),
+ ByteField("apuInfo227", None),
+ ByteField("apuInfo228", None),
+ ByteField("apuInfo229", None),
+ ByteField("apuInfo230", None),
+ ByteField("apuInfo231", None),
+ ByteField("apuInfo232", None),
+ ByteField("apuInfo233", None),
+ ByteField("apuInfo234", None),
+ ByteField("apuInfo235", None),
+ ByteField("apuInfo236", None),
+ ByteField("apuInfo237", None),
+ ByteField("apuInfo238", None),
+ ByteField("apuInfo239", None),
+ ByteField("apuInfo240", None),
+ ByteField("apuInfo241", None),
+ ByteField("apuInfo242", None),
+ ByteField("apuInfo243", None),
+ ByteField("apuInfo244", None),
+ ByteField("apuInfo245", None),
+ ByteField("apuInfo246", None),
+ ByteField("apuInfo247", None),
+ ByteField("apuInfo248", None),
+ ByteField("apuInfo249", None)
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(2, 251, a, self.fields_desc)
+ if self.lengthAD is None:
+ p = p[:1] + struct.pack(">B", res[1]) + p[2:]
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ return p + pay
+
+#
+# 10.5.3 Mobility management information elements
+#
+
+
class AuthenticationParameterRAND(Packet):
    """Authentication parameter RAND Section 10.5.3.1"""
    name = "Authentication Parameter Rand"
    # IE identifier octet followed by the 128-bit (16-octet) RAND challenge.
    fields_desc = [ByteField("ieiAPR", None),
                   BitField("randValue", 0x0, 128)]
+
+
class AuthenticationParameterSRES(Packet):
    """Authentication parameter SRES Section 10.5.3.2"""
    name = "Authentication Parameter Sres"
    # IE identifier octet followed by the 40-bit (5-octet) SRES response.
    fields_desc = [ByteField("ieiAPS", None),
                   BitField("sresValue", 0x0, 40)]
+
+
class CmServiceType(Packet):
    """CM service type Section 10.5.3.3"""
    name = "CM Service Type"
    # Half-octet IE: 4-bit IEI plus 4-bit service type code.
    fields_desc = [XBitField("ieiCST", 0x0, 4),
                   BitField("serviceType", 0x0, 4)]
+
+
class CmServiceTypeAndCiphKeySeqNr(Packet):
    """Combined CM service type + cipher key sequence number half-octets."""
    name = "CM Service Type and Cipher Key Sequence Number"
    fields_desc = [BitField("keySeq", 0x0, 3),      # cipher key sequence nr
                   BitField("spare", 0x0, 1),
                   BitField("serviceType", 0x0, 4)]  # CM service type code
+
+
class IdentityType(Packet):
    """Identity type Section 10.5.3.4"""
    name = "Identity Type"
    # Half-octet IE: 4-bit IEI, spare bit, 3-bit identity type code.
    fields_desc = [XBitField("ieiIT", 0x0, 4),
                   BitField("spare", 0x0, 1),
                   BitField("idType", 0x1, 3)]
+
+
+# Fix 1/2 len problem
class IdentityTypeAndSpareHalfOctet(Packet):
    """Identity type padded with a spare half octet (works around the
    half-octet-length problem when building full octets)."""
    name = "Identity Type and Spare Half Octet"
    fields_desc = [BitField("spare", 0x0, 1),
                   BitField("idType", 0x1, 3),
                   BitField("spareHalfOctets", 0x0, 4)]
+
+
class LocationUpdatingType(Packet):
    """Location updating type Section 10.5.3.5"""
    name = "Location Updating Type"
    # NOTE(review): the field is literally named "for" (follow-on request
    # bit); that is fine for scapy since field names are plain strings,
    # but it cannot be read via attribute syntax.
    fields_desc = [XBitField("ieiLUT", 0x0, 4),
                   BitField("for", 0x0, 1),
                   BitField("spare", 0x0, 1),
                   BitField("lut", 0x0, 2)]
+
+
class LocationUpdatingTypeAndCiphKeySeqNr(Packet):
    """Combined location updating type + cipher key sequence number."""
    name = "Location Updating Type and Cipher Key Sequence Number"
    # NOTE(review): "spare" appears twice in this field list; scapy keeps
    # both on the wire, but attribute access only reaches one of them —
    # confirm this is intentional before renaming.
    fields_desc = [BitField("for", 0x0, 1),
                   BitField("spare", 0x0, 1),
                   BitField("lut", 0x0, 2),
                   BitField("spare", 0x0, 1),
                   BitField("keySeq", 0x0, 3)]
+
+
+# len 3 to L3 max (251) (done)
class NetworkNameHdr(Packet):
    """Network Name Section 10.5.3.5a

    Variable-length IE: IEI, length octet, one coding octet, then up to
    248 optional text-string octets. The 248 optional ByteFields are
    generated instead of being spelled out by hand.
    """
    name = "Network Name"
    fields_desc = [
        BitField("eightBitNN", None, 1),
        XBitField("ieiNN", None, 7),

        XByteField("lengthNN", None),

        BitField("ext1", 0x1, 1),
        BitField("codingScheme", 0x0, 3),
        BitField("addCi", 0x0, 1),
        BitField("nbSpare", 0x0, 3)
    ] + [
        # optional text string octets txtString1 .. txtString248
        ByteField("txtString%d" % i, None) for i in range(1, 249)
    ]

    def post_build(self, p, pay):
        """Drop unset trailing txtString octets and patch the length octet."""
        vals = [getattr(self, fld.name) for fld in self.fields_desc]
        # adapt() -> (trailing bytes to strip, resulting IE length)
        res = adapt(3, 251, vals, self.fields_desc)
        if self.lengthNN is None:
            p = p[:1] + struct.pack(">B", res[1]) + p[2:]
        if res[0] != 0:  # was "is not 0": int identity test is fragile
            p = p[:-res[0]]
        return p + pay
+
+
class RejectCause(Packet):
    """Reject cause Section 10.5.3.6"""
    name = "Reject Cause"
    # Fixed two-octet IE: identifier plus reject cause value.
    fields_desc = [ByteField("ieiRC", 0x0),
                   ByteField("rejCause", 0x0)]
+
+
class FollowOnProceed(Packet):
    """Follow-on Proceed Section 10.5.3.7"""
    name = "Follow-on Proceed"
    # Single-octet IE consisting of the identifier alone.
    fields_desc = [ByteField("ieiFOP", 0x0)]
+
+
class TimeZoneHdr(Packet):
    """Time Zone Section 10.5.3.8"""
    name = "Time Zone"
    fields_desc = [BitField("eightBitTZ", None, 1),
                   XBitField("ieiTZ", None, 7),
                   ByteField("timeZone", 0x0)]   # time zone value octet
+
+
class TimeZoneAndTimeHdr(Packet):
    """Time Zone and Time Section 10.5.3.9"""
    name = "Time Zone and Time"
    # One octet per timestamp component, followed by the time-zone octet.
    fields_desc = [BitField("eightBitTZAT", None, 1),
                   XBitField("ieiTZAT", None, 7),
                   ByteField("year", 0x0),
                   ByteField("month", 0x0),
                   ByteField("day", 0x0),
                   ByteField("hour", 0x0),
                   ByteField("minute", 0x0),
                   ByteField("second", 0x0),
                   ByteField("timeZone", 0x0)]
+
+
class CtsPermissionHdr(Packet):
    """CTS permission Section 10.5.3.10"""
    name = "Cts Permission"
    # Single-octet IE: identifier only.
    fields_desc = [BitField("eightBitCP", None, 1),
                   XBitField("ieiCP", None, 7)]
+
+
class LsaIdentifierHdr(Packet):
    """LSA Identifier Section 10.5.3.11"""
    name = "Lsa Identifier"
    # Identifier octet followed by the 3-octet LSA ID.
    fields_desc = [BitField("eightBitLI", None, 1),
                   XBitField("ieiLI", None, 7),
                   ByteField("lsaID", 0x0),
                   ByteField("lsaID1", 0x0),
                   ByteField("lsaID2", 0x0)]
+
+
+#
+# 10.5.4 Call control information elements
+#
+
+#10.5.4.1 Extensions of codesets
+# This is only text and no packet
+
class LockingShiftProcedureHdr(Packet):
    """Locking shift procedure Section 10.5.4.2"""
    name = "Locking Shift Procedure"
    fields_desc = [XBitField("ieiLSP", None, 4),
                   BitField("lockShift", 0x0, 1),   # 0 = locking shift
                   BitField("codesetId", 0x0, 3)]   # target codeset
+
+
class NonLockingShiftProcedureHdr(Packet):
    """Non-locking shift procedure Section 10.5.4.3"""
    name = "Non-locking Shift Procedure"
    fields_desc = [XBitField("ieiNLSP", None, 4),
                   BitField("nonLockShift", 0x1, 1),  # 1 = non-locking shift
                   BitField("codesetId", 0x0, 3)]     # target codeset
+
+
class AuxiliaryStatesHdr(Packet):
    """Auxiliary states Section 10.5.4.4"""
    name = "Auxiliary States"
    # Fixed-length IE (length octet defaults to 3): hold and multi-party
    # auxiliary call states.
    fields_desc = [BitField("eightBitAS", None, 1),
                   XBitField("ieiAS", None, 7),
                   XByteField("lengthAS", 0x3),
                   BitField("ext", 0x1, 1),
                   BitField("spare", 0x0, 3),
                   BitField("holdState", 0x0, 2),
                   BitField("mptyState", 0x0, 2)]
+
+
+# len 3 to 15
+class BearerCapabilityHdr(Packet):
+ """Bearer capability Section 10.5.4.5"""
+ name = "Bearer Capability"
+ fields_desc = [
+ BitField("eightBitBC", None, 1),
+ XBitField("ieiBC", None, 7),
+
+ XByteField("lengthBC", None),
+
+ BitField("ext0", 0x1, 1),
+ BitField("radioChReq", 0x1, 2),
+ BitField("codingStd", 0x0, 1),
+ BitField("transMode", 0x0, 1),
+ BitField("infoTransCa", 0x0, 3),
+ # optional
+ ConditionalField(BitField("ext1", 0x1, 1),
+ lambda pkt: pkt.ext0 == 0),
+ ConditionalField(BitField("coding", None, 1),
+ lambda pkt: pkt.ext0 == 0),
+ ConditionalField(BitField("spare", None, 2),
+ lambda pkt: pkt.ext0 == 0),
+ ConditionalField(BitField("speechVers", 0x0, 4),
+ lambda pkt: pkt.ext0 == 0),
+
+ ConditionalField(BitField("ext2", 0x1, 1),
+ lambda pkt: pkt.ext1 == 0),
+ ConditionalField(BitField("compress", None, 1),
+ lambda pkt: pkt.ext1 == 0),
+ ConditionalField(BitField("structure", None, 2),
+ lambda pkt: pkt.ext1 == 0),
+ ConditionalField(BitField("dupMode", None, 1),
+ lambda pkt: pkt.ext1 == 0),
+ ConditionalField(BitField("config", None, 1),
+ lambda pkt: pkt.ext1 == 0),
+ ConditionalField(BitField("nirr", None, 1),
+ lambda pkt: pkt.ext1 == 0),
+ ConditionalField(BitField("establi", 0x0, 1),
+ lambda pkt: pkt.ext1 == 0),
+
+ BitField("ext3", None, 1),
+ BitField("accessId", None, 2),
+ BitField("rateAda", None, 2),
+ BitField("signaling", None, 3),
+
+ ConditionalField(BitField("ext4", None, 1),
+ lambda pkt: pkt.ext3 == 0),
+ ConditionalField(BitField("otherITC", None, 2),
+ lambda pkt: pkt.ext3 == 0),
+ ConditionalField(BitField("otherRate", None, 2),
+ lambda pkt: pkt.ext3 == 0),
+ ConditionalField(BitField("spare1", 0x0, 3),
+ lambda pkt: pkt.ext3 == 0),
+
+ ConditionalField(BitField("ext5", 0x1, 1),
+ lambda pkt: pkt.ext4 == 0),
+ ConditionalField(BitField("hdr", None, 1),
+ lambda pkt: pkt.ext4 == 0),
+ ConditionalField(BitField("multiFr", None, 1),
+ lambda pkt: pkt.ext4 == 0),
+ ConditionalField(BitField("mode", None, 1),
+ lambda pkt: pkt.ext4 == 0),
+ ConditionalField(BitField("lli", None, 1),
+ lambda pkt: pkt.ext4 == 0),
+ ConditionalField(BitField("assig", None, 1),
+ lambda pkt: pkt.ext4 == 0),
+ ConditionalField(BitField("inbNeg", None, 1),
+ lambda pkt: pkt.ext4 == 0),
+ ConditionalField(BitField("spare2", 0x0, 1),
+ lambda pkt: pkt.ext4 == 0),
+
+ BitField("ext6", None, 1),
+ BitField("layer1Id", None, 2),
+ BitField("userInf", None, 4),
+ BitField("sync", None, 1),
+
+ ConditionalField(BitField("ext7", None, 1),
+ lambda pkt: pkt.ext6 == 0),
+ ConditionalField(BitField("stopBit", None, 1),
+ lambda pkt: pkt.ext6 == 0),
+ ConditionalField(BitField("negoc", None, 1),
+ lambda pkt: pkt.ext6 == 0),
+ ConditionalField(BitField("nbDataBit", None, 1),
+ lambda pkt: pkt.ext6 == 0),
+ ConditionalField(BitField("userRate", None, 4),
+ lambda pkt: pkt.ext6 == 0),
+
+ ConditionalField(BitField("ext8", None, 1),
+ lambda pkt: pkt.ext7 == 0),
+ ConditionalField(BitField("interRate", None, 2),
+ lambda pkt: pkt.ext7 == 0),
+ ConditionalField(BitField("nicTX", None, 1),
+ lambda pkt: pkt.ext7 == 0),
+ ConditionalField(BitField("nicRX", None, 1),
+ lambda pkt: pkt.ext7 == 0),
+ ConditionalField(BitField("parity", None, 3),
+ lambda pkt: pkt.ext7 == 0),
+
+ ConditionalField(BitField("ext9", None, 1),
+ lambda pkt: pkt.ext8 == 0),
+ ConditionalField(BitField("connEle", None, 2),
+ lambda pkt: pkt.ext8 == 0),
+ ConditionalField(BitField("modemType", None, 5),
+ lambda pkt: pkt.ext8 == 0),
+
+ ConditionalField(BitField("ext10", None, 1),
+ lambda pkt: pkt.ext9 == 0),
+ ConditionalField(BitField("otherModemType", None, 2),
+ lambda pkt: pkt.ext9 == 0),
+ ConditionalField(BitField("netUserRate", None, 5),
+ lambda pkt: pkt.ext9 == 0),
+
+ ConditionalField(BitField("ext11", None, 1),
+ lambda pkt: pkt.ext10 == 0),
+ ConditionalField(BitField("chanCoding", None, 4),
+ lambda pkt: pkt.ext10 == 0),
+ ConditionalField(BitField("maxTrafficChan", None, 3),
+ lambda pkt: pkt.ext10 == 0),
+
+ ConditionalField(BitField("ext12", None, 1),
+ lambda pkt: pkt.ext11 == 0),
+ ConditionalField(BitField("uimi", None, 3),
+ lambda pkt: pkt.ext11 == 0),
+ ConditionalField(BitField("airInterfaceUserRate", None, 4),
+ lambda pkt: pkt.ext11 == 0),
+
+ ConditionalField(BitField("ext13", 0x1, 1),
+ lambda pkt: pkt.ext12 == 0),
+ ConditionalField(BitField("layer2Ch", None, 2),
+ lambda pkt: pkt.ext12 == 0),
+ ConditionalField(BitField("userInfoL2", 0x0, 5),
+ lambda pkt: pkt.ext12 == 0)
+ ]
+
+# We have a bug here. packet is not working if used in message
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(3, 15, a, self.fields_desc)
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ # avoids a bug. find better way
+ if len(p) is 5:
+ p = p[:-2]
+ if self.lengthBC is None:
+ print("len von a %s" % (len(p),))
+ p = p[:1] + struct.pack(">B", len(p)-3) + p[2:]
+ return p + pay
+
+
class CallControlCapabilitiesHdr(Packet):
    """Call Control Capabilities Section 10.5.4.5a"""
    name = "Call Control Capabilities"
    # Fixed-length IE (length octet defaults to 3).
    fields_desc = [BitField("eightBitCCC", None, 1),
                   XBitField("ieiCCC", None, 7),
                   XByteField("lengthCCC", 0x3),
                   BitField("spare", 0x0, 6),
                   BitField("pcp", 0x0, 1),
                   BitField("dtmf", 0x0, 1)]
+
+
class CallStateHdr(Packet):
    """Call State Section 10.5.4.6"""
    name = "Call State"
    fields_desc = [BitField("eightBitCS", None, 1),
                   XBitField("ieiCS", None, 7),
                   BitField("codingStd", 0x0, 2),    # coding standard
                   BitField("stateValue", 0x0, 6)]   # call state value
+
+
+# len 3 to 43
class CalledPartyBcdNumberHdr(Packet):
    """Called party BCD number Section 10.5.4.7

    Variable-length IE (3 to 43 octets). Each optional octet packs two
    BCD digits, the even-numbered digit in the high nibble — hence the
    (nbDigit2, nbDigit1), (nbDigit4, nbDigit3), ... field ordering, here
    generated instead of spelled out as 80 hand-written BitFields.
    """
    name = "Called Party BCD Number"
    fields_desc = [
        BitField("eightBitCPBN", None, 1),
        XBitField("ieiCPBN", None, 7),
        XByteField("lengthCPBN", None),
        BitField("ext", 0x1, 1),
        BitField("typeNb", 0x0, 3),
        BitField("nbPlanId", 0x0, 4)
    ] + [
        # optional digit nibbles nbDigit1 .. nbDigit80, emitted pairwise
        # as (even, odd) to match the BCD octet layout
        BitField("nbDigit%d" % n, None, 4)
        for odd in range(1, 81, 2) for n in (odd + 1, odd)
    ]

    def post_build(self, p, pay):
        """Drop unset trailing digit octets and patch the length octet."""
        vals = [getattr(self, fld.name) for fld in self.fields_desc]
        # adapt() -> (trailing bytes to strip, resulting IE length);
        # the extra argument 2 is passed exactly as in the original.
        res = adapt(3, 43, vals, self.fields_desc, 2)
        if self.lengthCPBN is None:
            p = p[:1] + struct.pack(">B", res[1]) + p[2:]
        if res[0] != 0:  # was "is not 0": int identity test is fragile
            p = p[:-res[0]]
        return p + pay
+
+
+# len 2 to 23
class CalledPartySubaddressHdr(Packet):
    """Called party subaddress Section 10.5.4.8

    Variable-length IE (2 to 23 octets): header, one optional type
    octet, then up to 20 generated subaddress information octets.
    """
    name = "Called Party Subaddress"
    fields_desc = [
        BitField("eightBitCPS", None, 1),
        XBitField("ieiCPS", None, 7),
        XByteField("lengthCPS", None),
        # optional
        BitField("ext", None, 1),
        BitField("subAddr", None, 3),
        BitField("oddEven", None, 1),
        BitField("spare", None, 3)
    ] + [
        # optional subaddress octets subInfo0 .. subInfo19
        ByteField("subInfo%d" % i, None) for i in range(20)
    ]

    def post_build(self, p, pay):
        """Drop unset trailing subInfo octets and patch the length octet."""
        vals = [getattr(self, fld.name) for fld in self.fields_desc]
        res = adapt(2, 23, vals, self.fields_desc)
        if self.lengthCPS is None:
            p = p[:1] + struct.pack(">B", res[1]) + p[2:]
        if res[0] != 0:  # was "is not 0": int identity test is fragile
            p = p[:-res[0]]
        return p + pay
+
+
+# len 3 to 14
+class CallingPartyBcdNumberHdr(Packet):
+ """Called party subaddress Section 10.5.4.9"""
+ name = "Called Party Subaddress"
+ fields_desc = [
+ BitField("eightBitCPBN", None, 1),
+ XBitField("ieiCPBN", None, 7),
+ XByteField("lengthCPBN", None),
+ BitField("ext", 0x1, 1),
+ BitField("typeNb", 0x0, 3),
+ BitField("nbPlanId", 0x0, 4),
+ # optional
+ ConditionalField(BitField("ext1", 0x1, 1),
+ lambda pkt: pkt.ext == 0),
+ ConditionalField(BitField("presId", None, 2),
+ lambda pkt: pkt.ext == 0),
+ ConditionalField(BitField("spare", None, 3),
+ lambda pkt: pkt.ext == 0),
+ ConditionalField(BitField("screenId", 0x0, 2),
+ lambda pkt: pkt.ext == 0),
+
+ BitField("nbDigit2", None, 4),
+ BitField("nbDigit1", None, 4),
+
+ BitField("nbDigit4", None, 4),
+ BitField("nbDigit3", None, 4),
+
+ BitField("nbDigit6", None, 4),
+ BitField("nbDigit5", None, 4),
+
+ BitField("nbDigit8", None, 4),
+ BitField("nbDigit7", None, 4),
+
+ BitField("nbDigit10", None, 4),
+ BitField("nbDigit9", None, 4),
+
+ BitField("nbDigit12", None, 4),
+ BitField("nbDigit11", None, 4),
+
+ BitField("nbDigit14", None, 4),
+ BitField("nbDigit13", None, 4),
+
+ BitField("nbDigit16", None, 4),
+ BitField("nbDigit15", None, 4),
+
+ BitField("nbDigit18", None, 4),
+ BitField("nbDigit17", None, 4),
+
+ BitField("nbDigit20", None, 4),
+ BitField("nbDigit19", None, 4),
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(4, 14, a, self.fields_desc)
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ if self.lengthCPBN is None:
+ p = p[:1] + struct.pack(">B", len(p)-2) + p[2:]
+ return p + pay
+
+
+# len 2 to 23
+class CallingPartySubaddressHdr(Packet):
+ """Calling party subaddress Section 10.5.4.10"""
+ name = "Calling Party Subaddress"
+ fields_desc = [
+ BitField("eightBitCPS", None, 1),
+ XBitField("ieiCPS", None, 7),
+ XByteField("lengthCPS", None),
+ # optional
+ BitField("ext1", None, 1),
+ BitField("typeAddr", None, 3),
+ BitField("oddEven", None, 1),
+ BitField("spare", None, 3),
+
+ ByteField("subInfo0", None),
+ ByteField("subInfo1", None),
+ ByteField("subInfo2", None),
+ ByteField("subInfo3", None),
+ ByteField("subInfo4", None),
+ ByteField("subInfo5", None),
+ ByteField("subInfo6", None),
+ ByteField("subInfo7", None),
+ ByteField("subInfo8", None),
+ ByteField("subInfo9", None),
+ ByteField("subInfo10", None),
+ ByteField("subInfo11", None),
+ ByteField("subInfo12", None),
+ ByteField("subInfo13", None),
+ ByteField("subInfo14", None),
+ ByteField("subInfo15", None),
+ ByteField("subInfo16", None),
+ ByteField("subInfo17", None),
+ ByteField("subInfo18", None),
+ ByteField("subInfo19", None)
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(2, 23, a, self.fields_desc)
+ if self.lengthCPS is None:
+ p = p[:1] + struct.pack(">B", res[1]) + p[2:]
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ return p + pay
+
+
+# len 4 to 32
class CauseHdr(Packet):
    """Cause Section 10.5.4.11

    Variable-length IE (4 to 32 octets). The trailing diagnostic octets
    are generated; their (misspelled) "diagnositc" names are preserved
    because field names are part of the packet's external interface.
    """
    name = "Cause"
    fields_desc = [
        BitField("eightBitC", None, 1),
        XBitField("ieiC", None, 7),

        XByteField("lengthC", None),

        BitField("ext", 0x1, 1),
        BitField("codingStd", 0x0, 2),
        BitField("spare", 0x0, 1),
        BitField("location", 0x0, 4),

        # recommendation octet present only when the ext bit is 0
        ConditionalField(BitField("ext1", 0x1, 1),
                         lambda pkt: pkt.ext == 0),
        ConditionalField(BitField("recommendation", 0x0, 7),
                         lambda pkt: pkt.ext == 0),
        # optional
        BitField("ext2", None, 1),
        BitField("causeValue", None, 7)
    ] + [
        # optional diagnostic octets diagnositc0 .. diagnositc26
        ByteField("diagnositc%d" % i, None) for i in range(27)
    ]

    def post_build(self, p, pay):
        """Drop unset trailing diagnostic octets and fill the length octet."""
        vals = [getattr(self, fld.name) for fld in self.fields_desc]
        res = adapt(4, 32, vals, self.fields_desc)
        if res[0] != 0:  # was "is not 0": int identity test is fragile
            p = p[:-res[0]]
        if self.lengthC is None:
            # length counts everything after the length octet
            p = p[:1] + struct.pack(">B", len(p) - 2) + p[2:]
        return p + pay
+
+
class ClirSuppressionHdr(Packet):
    """CLIR suppression Section 10.5.4.11a"""
    name = "Clir Suppression"
    # Single-octet IE: identifier only.
    fields_desc = [BitField("eightBitCS", None, 1),
                   XBitField("ieiCS", None, 7)]
+
+
class ClirInvocationHdr(Packet):
    """CLIR invocation Section 10.5.4.11b"""
    name = "Clir Invocation"
    # Single-octet IE: identifier only.
    fields_desc = [BitField("eightBitCI", None, 1),
                   XBitField("ieiCI", None, 7)]
+
+
class CongestionLevelHdr(Packet):
    """Congestion level Section 10.5.4.12"""
    name = "Congestion Level"
    # Half-octet IE: 4-bit IEI plus 4-bit congestion level.
    fields_desc = [XBitField("ieiCL", None, 4),
                   BitField("notDef", 0x0, 4)]
+
+
+# Fix 1/2 len problem
class CongestionLevelAndSpareHalfOctets(Packet):
    """Congestion level padded with a spare half octet (works around the
    half-octet-length problem when building full octets)."""
    name = "Congestion Level and Spare Half Octets"
    fields_desc = [BitField("ieiCL", 0x0, 4),
                   BitField("spareHalfOctets", 0x0, 4)]
+
+
+# len 3 to 14
+class ConnectedNumberHdr(Packet):
+ """Connected number Section 10.5.4.13"""
+ name = "Connected Number"
+ fields_desc = [
+ BitField("eightBitCN", None, 1),
+ XBitField("ieiCN", None, 7),
+
+ XByteField("lengthCN", None),
+
+ BitField("ext", 0x1, 1),
+ BitField("typeNb", 0x0, 3),
+ BitField("typePlanId", 0x0, 4),
+ # optional
+ ConditionalField(BitField("ext1", 0x1, 1),
+ lambda pkt: pkt.ext == 0),
+ ConditionalField(BitField("presId", None, 2),
+ lambda pkt: pkt.ext == 0),
+ ConditionalField(BitField("spare", None, 3),
+ lambda pkt: pkt.ext == 0),
+ ConditionalField(BitField("screenId", None, 2),
+ lambda pkt: pkt.ext == 0),
+
+ BitField("nbDigit2", None, 4),
+ BitField("nbDigit1", None, 4),
+
+ BitField("nbDigit4", None, 4),
+ BitField("nbDigit3", None, 4),
+
+ BitField("nbDigit6", None, 4),
+ BitField("nbDigit5", None, 4),
+
+ BitField("nbDigit8", None, 4),
+ BitField("nbDigit7", None, 4),
+
+ BitField("nbDigit10", None, 4),
+ BitField("nbDigit9", None, 4),
+
+ BitField("nbDigit12", None, 4),
+ BitField("nbDigit11", None, 4),
+
+ BitField("nbDigit14", None, 4),
+ BitField("nbDigit13", None, 4),
+
+ BitField("nbDigit16", None, 4),
+ BitField("nbDigit15", None, 4),
+
+ BitField("nbDigit18", None, 4),
+ BitField("nbDigit17", None, 4),
+
+ BitField("nbDigit20", None, 4),
+ BitField("nbDigit19", None, 4)
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ sum1 = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(3, 14, a, self.fields_desc)
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ if self.lengthCN is None:
+ p = p[:1] + struct.pack(">B", len(p)-2) + p[2:]
+ return p + pay
+
+
+# len 2 to 23
+class ConnectedSubaddressHdr(Packet):
+ """Connected subaddress Section 10.5.4.14"""
+ name = "Connected Subaddress"
+ fields_desc = [
+ BitField("eightBitCS", None, 1),
+ XBitField("ieiCS", None, 7),
+
+ XByteField("lengthCS", None),
+ # optional
+ BitField("ext", None, 1),
+ BitField("typeOfSub", None, 3),
+ BitField("oddEven", None, 1),
+ BitField("spare", None, 3),
+
+ ByteField("subInfo0", None),
+ ByteField("subInfo1", None),
+ ByteField("subInfo2", None),
+ ByteField("subInfo3", None),
+ ByteField("subInfo4", None),
+ ByteField("subInfo5", None),
+ ByteField("subInfo6", None),
+ ByteField("subInfo7", None),
+ ByteField("subInfo8", None),
+ ByteField("subInfo9", None),
+ ByteField("subInfo10", None),
+ ByteField("subInfo11", None),
+ ByteField("subInfo12", None),
+ ByteField("subInfo13", None),
+ ByteField("subInfo14", None),
+ ByteField("subInfo15", None),
+ ByteField("subInfo16", None),
+ ByteField("subInfo17", None),
+ ByteField("subInfo18", None),
+ ByteField("subInfo19", None)
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(2, 23, a, self.fields_desc)
+ if self.lengthCS is None:
+ p = p[:1] + struct.pack(">B", res[1]) + p[2:]
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ return p + pay
+
+
+# len 2 to 251 (maximum L3 message length)
+class FacilityHdr(Packet):
+ """Facility Section 10.5.4.15"""
+ name = "Facility"
+ fields_desc = [
+ BitField("eightBitF", None, 1),
+ XBitField("ieiF", None, 7),
+ XByteField("lengthF", None),
+ # optional
+ ByteField("facilityInfo1", None),
+ ByteField("facilityInfo2", None),
+ ByteField("facilityInfo3", None),
+ ByteField("facilityInfo4", None),
+ ByteField("facilityInfo5", None),
+ ByteField("facilityInfo6", None),
+ ByteField("facilityInfo7", None),
+ ByteField("facilityInfo8", None),
+ ByteField("facilityInfo9", None),
+ ByteField("facilityInfo10", None),
+ ByteField("facilityInfo11", None),
+ ByteField("facilityInfo12", None),
+ ByteField("facilityInfo13", None),
+ ByteField("facilityInfo14", None),
+ ByteField("facilityInfo15", None),
+ ByteField("facilityInfo16", None),
+ ByteField("facilityInfo17", None),
+ ByteField("facilityInfo18", None),
+ ByteField("facilityInfo19", None),
+ ByteField("facilityInfo20", None),
+ ByteField("facilityInfo21", None),
+ ByteField("facilityInfo22", None),
+ ByteField("facilityInfo23", None),
+ ByteField("facilityInfo24", None),
+ ByteField("facilityInfo25", None),
+ ByteField("facilityInfo26", None),
+ ByteField("facilityInfo27", None),
+ ByteField("facilityInfo28", None),
+ ByteField("facilityInfo29", None),
+ ByteField("facilityInfo30", None),
+ ByteField("facilityInfo31", None),
+ ByteField("facilityInfo32", None),
+ ByteField("facilityInfo33", None),
+ ByteField("facilityInfo34", None),
+ ByteField("facilityInfo35", None),
+ ByteField("facilityInfo36", None),
+ ByteField("facilityInfo37", None),
+ ByteField("facilityInfo38", None),
+ ByteField("facilityInfo39", None),
+ ByteField("facilityInfo40", None),
+ ByteField("facilityInfo41", None),
+ ByteField("facilityInfo42", None),
+ ByteField("facilityInfo43", None),
+ ByteField("facilityInfo44", None),
+ ByteField("facilityInfo45", None),
+ ByteField("facilityInfo46", None),
+ ByteField("facilityInfo47", None),
+ ByteField("facilityInfo48", None),
+ ByteField("facilityInfo49", None),
+ ByteField("facilityInfo50", None),
+ ByteField("facilityInfo51", None),
+ ByteField("facilityInfo52", None),
+ ByteField("facilityInfo53", None),
+ ByteField("facilityInfo54", None),
+ ByteField("facilityInfo55", None),
+ ByteField("facilityInfo56", None),
+ ByteField("facilityInfo57", None),
+ ByteField("facilityInfo58", None),
+ ByteField("facilityInfo59", None),
+ ByteField("facilityInfo60", None),
+ ByteField("facilityInfo61", None),
+ ByteField("facilityInfo62", None),
+ ByteField("facilityInfo63", None),
+ ByteField("facilityInfo64", None),
+ ByteField("facilityInfo65", None),
+ ByteField("facilityInfo66", None),
+ ByteField("facilityInfo67", None),
+ ByteField("facilityInfo68", None),
+ ByteField("facilityInfo69", None),
+ ByteField("facilityInfo70", None),
+ ByteField("facilityInfo71", None),
+ ByteField("facilityInfo72", None),
+ ByteField("facilityInfo73", None),
+ ByteField("facilityInfo74", None),
+ ByteField("facilityInfo75", None),
+ ByteField("facilityInfo76", None),
+ ByteField("facilityInfo77", None),
+ ByteField("facilityInfo78", None),
+ ByteField("facilityInfo79", None),
+ ByteField("facilityInfo80", None),
+ ByteField("facilityInfo81", None),
+ ByteField("facilityInfo82", None),
+ ByteField("facilityInfo83", None),
+ ByteField("facilityInfo84", None),
+ ByteField("facilityInfo85", None),
+ ByteField("facilityInfo86", None),
+ ByteField("facilityInfo87", None),
+ ByteField("facilityInfo88", None),
+ ByteField("facilityInfo89", None),
+ ByteField("facilityInfo90", None),
+ ByteField("facilityInfo91", None),
+ ByteField("facilityInfo92", None),
+ ByteField("facilityInfo93", None),
+ ByteField("facilityInfo94", None),
+ ByteField("facilityInfo95", None),
+ ByteField("facilityInfo96", None),
+ ByteField("facilityInfo97", None),
+ ByteField("facilityInfo98", None),
+ ByteField("facilityInfo99", None),
+ ByteField("facilityInfo100", None),
+ ByteField("facilityInfo101", None),
+ ByteField("facilityInfo102", None),
+ ByteField("facilityInfo103", None),
+ ByteField("facilityInfo104", None),
+ ByteField("facilityInfo105", None),
+ ByteField("facilityInfo106", None),
+ ByteField("facilityInfo107", None),
+ ByteField("facilityInfo108", None),
+ ByteField("facilityInfo109", None),
+ ByteField("facilityInfo110", None),
+ ByteField("facilityInfo111", None),
+ ByteField("facilityInfo112", None),
+ ByteField("facilityInfo113", None),
+ ByteField("facilityInfo114", None),
+ ByteField("facilityInfo115", None),
+ ByteField("facilityInfo116", None),
+ ByteField("facilityInfo117", None),
+ ByteField("facilityInfo118", None),
+ ByteField("facilityInfo119", None),
+ ByteField("facilityInfo120", None),
+ ByteField("facilityInfo121", None),
+ ByteField("facilityInfo122", None),
+ ByteField("facilityInfo123", None),
+ ByteField("facilityInfo124", None),
+ ByteField("facilityInfo125", None),
+ ByteField("facilityInfo126", None),
+ ByteField("facilityInfo127", None),
+ ByteField("facilityInfo128", None),
+ ByteField("facilityInfo129", None),
+ ByteField("facilityInfo130", None),
+ ByteField("facilityInfo131", None),
+ ByteField("facilityInfo132", None),
+ ByteField("facilityInfo133", None),
+ ByteField("facilityInfo134", None),
+ ByteField("facilityInfo135", None),
+ ByteField("facilityInfo136", None),
+ ByteField("facilityInfo137", None),
+ ByteField("facilityInfo138", None),
+ ByteField("facilityInfo139", None),
+ ByteField("facilityInfo140", None),
+ ByteField("facilityInfo141", None),
+ ByteField("facilityInfo142", None),
+ ByteField("facilityInfo143", None),
+ ByteField("facilityInfo144", None),
+ ByteField("facilityInfo145", None),
+ ByteField("facilityInfo146", None),
+ ByteField("facilityInfo147", None),
+ ByteField("facilityInfo148", None),
+ ByteField("facilityInfo149", None),
+ ByteField("facilityInfo150", None),
+ ByteField("facilityInfo151", None),
+ ByteField("facilityInfo152", None),
+ ByteField("facilityInfo153", None),
+ ByteField("facilityInfo154", None),
+ ByteField("facilityInfo155", None),
+ ByteField("facilityInfo156", None),
+ ByteField("facilityInfo157", None),
+ ByteField("facilityInfo158", None),
+ ByteField("facilityInfo159", None),
+ ByteField("facilityInfo160", None),
+ ByteField("facilityInfo161", None),
+ ByteField("facilityInfo162", None),
+ ByteField("facilityInfo163", None),
+ ByteField("facilityInfo164", None),
+ ByteField("facilityInfo165", None),
+ ByteField("facilityInfo166", None),
+ ByteField("facilityInfo167", None),
+ ByteField("facilityInfo168", None),
+ ByteField("facilityInfo169", None),
+ ByteField("facilityInfo170", None),
+ ByteField("facilityInfo171", None),
+ ByteField("facilityInfo172", None),
+ ByteField("facilityInfo173", None),
+ ByteField("facilityInfo174", None),
+ ByteField("facilityInfo175", None),
+ ByteField("facilityInfo176", None),
+ ByteField("facilityInfo177", None),
+ ByteField("facilityInfo178", None),
+ ByteField("facilityInfo179", None),
+ ByteField("facilityInfo180", None),
+ ByteField("facilityInfo181", None),
+ ByteField("facilityInfo182", None),
+ ByteField("facilityInfo183", None),
+ ByteField("facilityInfo184", None),
+ ByteField("facilityInfo185", None),
+ ByteField("facilityInfo186", None),
+ ByteField("facilityInfo187", None),
+ ByteField("facilityInfo188", None),
+ ByteField("facilityInfo189", None),
+ ByteField("facilityInfo190", None),
+ ByteField("facilityInfo191", None),
+ ByteField("facilityInfo192", None),
+ ByteField("facilityInfo193", None),
+ ByteField("facilityInfo194", None),
+ ByteField("facilityInfo195", None),
+ ByteField("facilityInfo196", None),
+ ByteField("facilityInfo197", None),
+ ByteField("facilityInfo198", None),
+ ByteField("facilityInfo199", None),
+ ByteField("facilityInfo200", None),
+ ByteField("facilityInfo201", None),
+ ByteField("facilityInfo202", None),
+ ByteField("facilityInfo203", None),
+ ByteField("facilityInfo204", None),
+ ByteField("facilityInfo205", None),
+ ByteField("facilityInfo206", None),
+ ByteField("facilityInfo207", None),
+ ByteField("facilityInfo208", None),
+ ByteField("facilityInfo209", None),
+ ByteField("facilityInfo210", None),
+ ByteField("facilityInfo211", None),
+ ByteField("facilityInfo212", None),
+ ByteField("facilityInfo213", None),
+ ByteField("facilityInfo214", None),
+ ByteField("facilityInfo215", None),
+ ByteField("facilityInfo216", None),
+ ByteField("facilityInfo217", None),
+ ByteField("facilityInfo218", None),
+ ByteField("facilityInfo219", None),
+ ByteField("facilityInfo220", None),
+ ByteField("facilityInfo221", None),
+ ByteField("facilityInfo222", None),
+ ByteField("facilityInfo223", None),
+ ByteField("facilityInfo224", None),
+ ByteField("facilityInfo225", None),
+ ByteField("facilityInfo226", None),
+ ByteField("facilityInfo227", None),
+ ByteField("facilityInfo228", None),
+ ByteField("facilityInfo229", None),
+ ByteField("facilityInfo230", None),
+ ByteField("facilityInfo231", None),
+ ByteField("facilityInfo232", None),
+ ByteField("facilityInfo233", None),
+ ByteField("facilityInfo234", None),
+ ByteField("facilityInfo235", None),
+ ByteField("facilityInfo236", None),
+ ByteField("facilityInfo237", None),
+ ByteField("facilityInfo238", None),
+ ByteField("facilityInfo239", None),
+ ByteField("facilityInfo240", None),
+ ByteField("facilityInfo241", None),
+ ByteField("facilityInfo242", None),
+ ByteField("facilityInfo243", None),
+ ByteField("facilityInfo244", None),
+ ByteField("facilityInfo245", None),
+ ByteField("facilityInfo246", None),
+ ByteField("facilityInfo247", None),
+ ByteField("facilityInfo248", None),
+ ByteField("facilityInfo249", None)
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(2, 251, a, self.fields_desc)
+ if self.lengthF is None:
+ p = p[:1] + struct.pack(">B", res[1]) + p[2:]
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ return p + pay
+
+
+# len 2 to 5
+class HighLayerCompatibilityHdr(Packet):
+ """High layer compatibility Section 10.5.4.16"""
+ name = "High Layer Compatibility"
+ fields_desc = [
+ BitField("eightBitHLC", None, 1),
+ XBitField("ieiHLC", None, 7),
+
+ XByteField("lengthHLC", None),
+ # optional
+ BitField("ext", None, 1),
+ BitField("codingStd", None, 2),
+ BitField("interpret", None, 3),
+ BitField("presMeth", None, 2),
+
+ BitField("ext1", None, 1),
+ BitField("highLayerId", None, 7),
+
+ ConditionalField(BitField("ext2", 0x1, 1),
+ lambda pkt: pkt.ext1 == 0),
+ ConditionalField(BitField("exHiLayerId", 0x0, 7),
+ lambda pkt: pkt.ext1 == 0)
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(2, 5, a, self.fields_desc)
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ if self.lengthHLC is None:
+ p = p[:1] + struct.pack(">B", len(p)-2) + p[2:]
+ return p + pay
+#
+# 10.5.4.16.1 Static conditions for the high layer
+# compatibility IE contents
+#
+
+
+class KeypadFacilityHdr(Packet):
+    """Keypad facility Section 10.5.4.17"""
+    name = "Keypad Facility"
+    # Fixed 2-octet IE: IEI octet, then a spare bit and 7-bit keypad value.
+    fields_desc = [
+ BitField("eightBitKF", None, 1),
+ XBitField("ieiKF", None, 7),
+ BitField("spare", 0x0, 1),
+ BitField("keyPadInfo", 0x0, 7)
+ ]
+
+
+# len 2 to 15
+class LowLayerCompatibilityHdr(Packet):
+ """Low layer compatibility Section 10.5.4.18"""
+ name = "Low Layer Compatibility"
+ fields_desc = [
+ BitField("eightBitLLC", None, 1),
+ XBitField("ieiLLC", None, 7),
+
+ XByteField("lengthLLC", None),
+ # optional
+ ByteField("rest0", None),
+ ByteField("rest1", None),
+ ByteField("rest2", None),
+ ByteField("rest3", None),
+ ByteField("rest4", None),
+ ByteField("rest5", None),
+ ByteField("rest6", None),
+ ByteField("rest7", None),
+ ByteField("rest8", None),
+ ByteField("rest9", None),
+ ByteField("rest10", None),
+ ByteField("rest11", None),
+ ByteField("rest12", None)
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(2, 15, a, self.fields_desc)
+ if self.lengthLLC is None:
+ p = p[:1] + struct.pack(">B", res[1]) + p[2:]
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ return p + pay
+
+
+class MoreDataHdr(Packet):
+    """More data Section 10.5.4.19"""
+    name = "More Data"
+    # Single-octet IE: one flag bit followed by a 7-bit IEI.
+    fields_desc = [
+ BitField("eightBitMD", None, 1),
+ XBitField("ieiMD", None, 7),
+ ]
+
+
+class NotificationIndicatorHdr(Packet):
+    """Notification indicator Section 10.5.4.20"""
+    name = "Notification Indicator"
+    # Fixed 2-octet IE: IEI octet, then ext bit (1) + 7-bit description.
+    fields_desc = [
+ BitField("eightBitNI", None, 1),
+ XBitField("ieiNI", None, 7),
+ BitField("ext", 0x1, 1),
+ BitField("notifDesc", 0x0, 7)
+ ]
+
+
+class ProgressIndicatorHdr(Packet):
+    """Progress indicator Section 10.5.4.21"""
+    name = "Progress Indicator"
+    # Fixed-size IE: length octet defaults to 0x2 (two content octets).
+    fields_desc = [
+ BitField("eightBitPI", None, 1),
+ XBitField("ieiPI", None, 7),
+ XByteField("lengthPI", 0x2),
+ BitField("ext", 0x1, 1),
+ BitField("codingStd", 0x0, 2),
+ BitField("spare", 0x0, 1),
+ BitField("location", 0x0, 4),
+ BitField("ext1", 0x1, 1),
+ BitField("progressDesc", 0x0, 7)
+ ]
+
+
+class RecallTypeHdr(Packet):
+    """Recall type $(CCBS)$ Section 10.5.4.21a"""
+    name = "Recall Type $(CCBS)$"
+    # Fixed 2-octet IE: IEI octet, then 5 spare bits + 3-bit recall type.
+    fields_desc = [
+ BitField("eightBitRT", None, 1),
+ XBitField("ieiRT", None, 7),
+ BitField("spare", 0x0, 5),
+ BitField("recallType", 0x0, 3)
+ ]
+
+
+# len 3 to 19
+class RedirectingPartyBcdNumberHdr(Packet):
+ """Redirecting party BCD number Section 10.5.4.21b"""
+ name = "Redirecting Party BCD Number"
+ fields_desc = [
+ BitField("eightBitRPBN", None, 1),
+ XBitField("ieiRPBN", None, 7),
+
+ XByteField("lengthRPBN", None),
+
+ BitField("ext", 0x1, 1),
+ BitField("typeNb", 0x0, 3),
+ BitField("numberingPlan", 0x0, 4),
+ # optional
+ ConditionalField(BitField("ext1", 0x1, 1),
+ lambda pkt: pkt.ext == 0),
+ ConditionalField(BitField("presId", None, 2),
+ lambda pkt: pkt.ext == 0),
+ ConditionalField(BitField("spare", None, 3),
+ lambda pkt: pkt.ext == 0),
+ ConditionalField(BitField("screenId", None, 2),
+ lambda pkt: pkt.ext == 0),
+
+ BitField("nbDigit2", None, 4),
+ BitField("nbDigit1", None, 4),
+
+ BitField("nbDigit4", None, 4),
+ BitField("nbDigit3", None, 4),
+
+ BitField("nbDigit6", None, 4),
+ BitField("nbDigit5", None, 4),
+
+ BitField("nbDigit8", None, 4),
+ BitField("nbDigit7", None, 4),
+
+ BitField("nbDigit10", None, 4),
+ BitField("nbDigit9", None, 4),
+
+ BitField("nbDigit12", None, 4),
+ BitField("nbDigit11", None, 4),
+
+ BitField("nbDigit14", None, 4),
+ BitField("nbDigit13", None, 4),
+
+ BitField("nbDigit16", None, 4),
+ BitField("nbDigit15", None, 4),
+
+ BitField("nbDigit18", None, 4),
+ BitField("nbDigit17", None, 4),
+
+ BitField("nbDigit20", None, 4),
+ BitField("nbDigit19", None, 4),
+
+ BitField("nbDigit22", None, 4),
+ BitField("nbDigit21", None, 4),
+
+ BitField("nbDigit24", None, 4),
+ BitField("nbDigit23", None, 4),
+
+ BitField("nbDigit26", None, 4),
+ BitField("nbDigit25", None, 4),
+
+ BitField("nbDigit28", None, 4),
+ BitField("nbDigit27", None, 4),
+
+ BitField("nbDigit30", None, 4),
+ BitField("nbDigit29", None, 4),
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(3, 19, a, self.fields_desc)
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ if self.lengthRPBN is None:
+ p = p[:1] + struct.pack(">B", len(p)-2) + p[2:]
+ return p + pay
+
+
+# length 2 to 23
+class RedirectingPartySubaddressHdr(Packet):
+ """Redirecting party subaddress Section 10.5.4.21c"""
+ name = "Redirecting Party BCD Number"
+ fields_desc = [
+ BitField("eightBitRPS", None, 1),
+ XBitField("ieiRPS", None, 7),
+
+ XByteField("lengthRPS", None),
+ # optional
+ BitField("ext", None, 1),
+ BitField("typeSub", None, 3),
+ BitField("oddEven", None, 1),
+ BitField("spare", None, 3),
+
+ ByteField("subInfo0", None),
+ ByteField("subInfo1", None),
+ ByteField("subInfo2", None),
+ ByteField("subInfo3", None),
+ ByteField("subInfo4", None),
+ ByteField("subInfo5", None),
+ ByteField("subInfo6", None),
+ ByteField("subInfo7", None),
+ ByteField("subInfo8", None),
+ ByteField("subInfo9", None),
+ ByteField("subInfo10", None),
+ ByteField("subInfo11", None),
+ ByteField("subInfo12", None),
+ ByteField("subInfo13", None),
+ ByteField("subInfo14", None),
+ ByteField("subInfo15", None),
+ ByteField("subInfo16", None),
+ ByteField("subInfo17", None),
+ ByteField("subInfo18", None),
+ ByteField("subInfo19", None)
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(2, 23, a, self.fields_desc)
+ if self.lengthRPS is None:
+ p = p[:1] + struct.pack(">B", res[1]) + p[2:]
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ return p + pay
+
+
+class RepeatIndicatorHdr(Packet):
+    """Repeat indicator Section 10.5.4.22"""
+    name = "Repeat Indicator"
+    # Half-octet IE: 4-bit IEI plus 4-bit repeat indication in one byte.
+    fields_desc = [
+ XBitField("ieiRI", None, 4),
+ BitField("repeatIndic", 0x0, 4)
+ ]
+
+
+class ReverseCallSetupDirectionHdr(Packet):
+    """Reverse call setup direction Section 10.5.4.22a"""
+    name = "Reverse Call Setup Direction"
+    # Single-octet IE consisting of the IEI byte only.
+    fields_desc = [
+ ByteField("ieiRCSD", 0x0)
+ ]
+
+
+# len 2 to 251 (no explicit upper bound; limited by the max L3 message size)
+class SetupContainerHdr(Packet):
+ """SETUP Container $(CCBS)$ Section 10.5.4.22b"""
+ name = "Setup Container $(CCBS)$"
+ fields_desc = [
+ BitField("eightBitSC", None, 1),
+ XBitField("ieiSC", None, 7),
+ XByteField("lengthSC", None),
+ # optional
+ ByteField("mess1", None),
+ ByteField("mess2", None),
+ ByteField("mess3", None),
+ ByteField("mess4", None),
+ ByteField("mess5", None),
+ ByteField("mess6", None),
+ ByteField("mess7", None),
+ ByteField("mess8", None),
+ ByteField("mess9", None),
+ ByteField("mess10", None),
+ ByteField("mess11", None),
+ ByteField("mess12", None),
+ ByteField("mess13", None),
+ ByteField("mess14", None),
+ ByteField("mess15", None),
+ ByteField("mess16", None),
+ ByteField("mess17", None),
+ ByteField("mess18", None),
+ ByteField("mess19", None),
+ ByteField("mess20", None),
+ ByteField("mess21", None),
+ ByteField("mess22", None),
+ ByteField("mess23", None),
+ ByteField("mess24", None),
+ ByteField("mess25", None),
+ ByteField("mess26", None),
+ ByteField("mess27", None),
+ ByteField("mess28", None),
+ ByteField("mess29", None),
+ ByteField("mess30", None),
+ ByteField("mess31", None),
+ ByteField("mess32", None),
+ ByteField("mess33", None),
+ ByteField("mess34", None),
+ ByteField("mess35", None),
+ ByteField("mess36", None),
+ ByteField("mess37", None),
+ ByteField("mess38", None),
+ ByteField("mess39", None),
+ ByteField("mess40", None),
+ ByteField("mess41", None),
+ ByteField("mess42", None),
+ ByteField("mess43", None),
+ ByteField("mess44", None),
+ ByteField("mess45", None),
+ ByteField("mess46", None),
+ ByteField("mess47", None),
+ ByteField("mess48", None),
+ ByteField("mess49", None),
+ ByteField("mess50", None),
+ ByteField("mess51", None),
+ ByteField("mess52", None),
+ ByteField("mess53", None),
+ ByteField("mess54", None),
+ ByteField("mess55", None),
+ ByteField("mess56", None),
+ ByteField("mess57", None),
+ ByteField("mess58", None),
+ ByteField("mess59", None),
+ ByteField("mess60", None),
+ ByteField("mess61", None),
+ ByteField("mess62", None),
+ ByteField("mess63", None),
+ ByteField("mess64", None),
+ ByteField("mess65", None),
+ ByteField("mess66", None),
+ ByteField("mess67", None),
+ ByteField("mess68", None),
+ ByteField("mess69", None),
+ ByteField("mess70", None),
+ ByteField("mess71", None),
+ ByteField("mess72", None),
+ ByteField("mess73", None),
+ ByteField("mess74", None),
+ ByteField("mess75", None),
+ ByteField("mess76", None),
+ ByteField("mess77", None),
+ ByteField("mess78", None),
+ ByteField("mess79", None),
+ ByteField("mess80", None),
+ ByteField("mess81", None),
+ ByteField("mess82", None),
+ ByteField("mess83", None),
+ ByteField("mess84", None),
+ ByteField("mess85", None),
+ ByteField("mess86", None),
+ ByteField("mess87", None),
+ ByteField("mess88", None),
+ ByteField("mess89", None),
+ ByteField("mess90", None),
+ ByteField("mess91", None),
+ ByteField("mess92", None),
+ ByteField("mess93", None),
+ ByteField("mess94", None),
+ ByteField("mess95", None),
+ ByteField("mess96", None),
+ ByteField("mess97", None),
+ ByteField("mess98", None),
+ ByteField("mess99", None),
+ ByteField("mess100", None),
+ ByteField("mess101", None),
+ ByteField("mess102", None),
+ ByteField("mess103", None),
+ ByteField("mess104", None),
+ ByteField("mess105", None),
+ ByteField("mess106", None),
+ ByteField("mess107", None),
+ ByteField("mess108", None),
+ ByteField("mess109", None),
+ ByteField("mess110", None),
+ ByteField("mess111", None),
+ ByteField("mess112", None),
+ ByteField("mess113", None),
+ ByteField("mess114", None),
+ ByteField("mess115", None),
+ ByteField("mess116", None),
+ ByteField("mess117", None),
+ ByteField("mess118", None),
+ ByteField("mess119", None),
+ ByteField("mess120", None),
+ ByteField("mess121", None),
+ ByteField("mess122", None),
+ ByteField("mess123", None),
+ ByteField("mess124", None),
+ ByteField("mess125", None),
+ ByteField("mess126", None),
+ ByteField("mess127", None),
+ ByteField("mess128", None),
+ ByteField("mess129", None),
+ ByteField("mess130", None),
+ ByteField("mess131", None),
+ ByteField("mess132", None),
+ ByteField("mess133", None),
+ ByteField("mess134", None),
+ ByteField("mess135", None),
+ ByteField("mess136", None),
+ ByteField("mess137", None),
+ ByteField("mess138", None),
+ ByteField("mess139", None),
+ ByteField("mess140", None),
+ ByteField("mess141", None),
+ ByteField("mess142", None),
+ ByteField("mess143", None),
+ ByteField("mess144", None),
+ ByteField("mess145", None),
+ ByteField("mess146", None),
+ ByteField("mess147", None),
+ ByteField("mess148", None),
+ ByteField("mess149", None),
+ ByteField("mess150", None),
+ ByteField("mess151", None),
+ ByteField("mess152", None),
+ ByteField("mess153", None),
+ ByteField("mess154", None),
+ ByteField("mess155", None),
+ ByteField("mess156", None),
+ ByteField("mess157", None),
+ ByteField("mess158", None),
+ ByteField("mess159", None),
+ ByteField("mess160", None),
+ ByteField("mess161", None),
+ ByteField("mess162", None),
+ ByteField("mess163", None),
+ ByteField("mess164", None),
+ ByteField("mess165", None),
+ ByteField("mess166", None),
+ ByteField("mess167", None),
+ ByteField("mess168", None),
+ ByteField("mess169", None),
+ ByteField("mess170", None),
+ ByteField("mess171", None),
+ ByteField("mess172", None),
+ ByteField("mess173", None),
+ ByteField("mess174", None),
+ ByteField("mess175", None),
+ ByteField("mess176", None),
+ ByteField("mess177", None),
+ ByteField("mess178", None),
+ ByteField("mess179", None),
+ ByteField("mess180", None),
+ ByteField("mess181", None),
+ ByteField("mess182", None),
+ ByteField("mess183", None),
+ ByteField("mess184", None),
+ ByteField("mess185", None),
+ ByteField("mess186", None),
+ ByteField("mess187", None),
+ ByteField("mess188", None),
+ ByteField("mess189", None),
+ ByteField("mess190", None),
+ ByteField("mess191", None),
+ ByteField("mess192", None),
+ ByteField("mess193", None),
+ ByteField("mess194", None),
+ ByteField("mess195", None),
+ ByteField("mess196", None),
+ ByteField("mess197", None),
+ ByteField("mess198", None),
+ ByteField("mess199", None),
+ ByteField("mess200", None),
+ ByteField("mess201", None),
+ ByteField("mess202", None),
+ ByteField("mess203", None),
+ ByteField("mess204", None),
+ ByteField("mess205", None),
+ ByteField("mess206", None),
+ ByteField("mess207", None),
+ ByteField("mess208", None),
+ ByteField("mess209", None),
+ ByteField("mess210", None),
+ ByteField("mess211", None),
+ ByteField("mess212", None),
+ ByteField("mess213", None),
+ ByteField("mess214", None),
+ ByteField("mess215", None),
+ ByteField("mess216", None),
+ ByteField("mess217", None),
+ ByteField("mess218", None),
+ ByteField("mess219", None),
+ ByteField("mess220", None),
+ ByteField("mess221", None),
+ ByteField("mess222", None),
+ ByteField("mess223", None),
+ ByteField("mess224", None),
+ ByteField("mess225", None),
+ ByteField("mess226", None),
+ ByteField("mess227", None),
+ ByteField("mess228", None),
+ ByteField("mess229", None),
+ ByteField("mess230", None),
+ ByteField("mess231", None),
+ ByteField("mess232", None),
+ ByteField("mess233", None),
+ ByteField("mess234", None),
+ ByteField("mess235", None),
+ ByteField("mess236", None),
+ ByteField("mess237", None),
+ ByteField("mess238", None),
+ ByteField("mess239", None),
+ ByteField("mess240", None),
+ ByteField("mess241", None),
+ ByteField("mess242", None),
+ ByteField("mess243", None),
+ ByteField("mess244", None),
+ ByteField("mess245", None),
+ ByteField("mess246", None),
+ ByteField("mess247", None),
+ ByteField("mess248", None),
+ ByteField("mess249", None),
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(2, 251, a, self.fields_desc)
+ if self.lengthSC is None:
+ p = p[:1] + struct.pack(">B", res[1]) + p[2:]
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ return p + pay
+
+
+class SignalHdr(Packet):
+    """Signal Section 10.5.4.23"""
+    name = "Signal"
+    # Fixed 2-octet IE: IEI octet followed by the signal value byte.
+    fields_desc = [
+ BitField("eightBitS", None, 1),
+ XBitField("ieiS", None, 7),
+ ByteField("sigValue", 0x0)
+ ]
+
+
+# length 2 to max for L3 message (251)
+class SsVersionIndicatorHdr(Packet):
+ """SS Version Indicator Section 10.5.4.24"""
+ name = "SS Version Indicator"
+ fields_desc = [
+ BitField("eightBitSVI", None, 1),
+ XBitField("ieiSVI", None, 7),
+ XByteField("lengthSVI", None),
+ # optional
+ ByteField("info1", None),
+ ByteField("info2", None),
+ ByteField("info3", None),
+ ByteField("info4", None),
+ ByteField("info5", None),
+ ByteField("info6", None),
+ ByteField("info7", None),
+ ByteField("info8", None),
+ ByteField("info9", None),
+ ByteField("info10", None),
+ ByteField("info11", None),
+ ByteField("info12", None),
+ ByteField("info13", None),
+ ByteField("info14", None),
+ ByteField("info15", None),
+ ByteField("info16", None),
+ ByteField("info17", None),
+ ByteField("info18", None),
+ ByteField("info19", None),
+ ByteField("info20", None),
+ ByteField("info21", None),
+ ByteField("info22", None),
+ ByteField("info23", None),
+ ByteField("info24", None),
+ ByteField("info25", None),
+ ByteField("info26", None),
+ ByteField("info27", None),
+ ByteField("info28", None),
+ ByteField("info29", None),
+ ByteField("info30", None),
+ ByteField("info31", None),
+ ByteField("info32", None),
+ ByteField("info33", None),
+ ByteField("info34", None),
+ ByteField("info35", None),
+ ByteField("info36", None),
+ ByteField("info37", None),
+ ByteField("info38", None),
+ ByteField("info39", None),
+ ByteField("info40", None),
+ ByteField("info41", None),
+ ByteField("info42", None),
+ ByteField("info43", None),
+ ByteField("info44", None),
+ ByteField("info45", None),
+ ByteField("info46", None),
+ ByteField("info47", None),
+ ByteField("info48", None),
+ ByteField("info49", None),
+ ByteField("info50", None),
+ ByteField("info51", None),
+ ByteField("info52", None),
+ ByteField("info53", None),
+ ByteField("info54", None),
+ ByteField("info55", None),
+ ByteField("info56", None),
+ ByteField("info57", None),
+ ByteField("info58", None),
+ ByteField("info59", None),
+ ByteField("info60", None),
+ ByteField("info61", None),
+ ByteField("info62", None),
+ ByteField("info63", None),
+ ByteField("info64", None),
+ ByteField("info65", None),
+ ByteField("info66", None),
+ ByteField("info67", None),
+ ByteField("info68", None),
+ ByteField("info69", None),
+ ByteField("info70", None),
+ ByteField("info71", None),
+ ByteField("info72", None),
+ ByteField("info73", None),
+ ByteField("info74", None),
+ ByteField("info75", None),
+ ByteField("info76", None),
+ ByteField("info77", None),
+ ByteField("info78", None),
+ ByteField("info79", None),
+ ByteField("info80", None),
+ ByteField("info81", None),
+ ByteField("info82", None),
+ ByteField("info83", None),
+ ByteField("info84", None),
+ ByteField("info85", None),
+ ByteField("info86", None),
+ ByteField("info87", None),
+ ByteField("info88", None),
+ ByteField("info89", None),
+ ByteField("info90", None),
+ ByteField("info91", None),
+ ByteField("info92", None),
+ ByteField("info93", None),
+ ByteField("info94", None),
+ ByteField("info95", None),
+ ByteField("info96", None),
+ ByteField("info97", None),
+ ByteField("info98", None),
+ ByteField("info99", None),
+ ByteField("info100", None),
+ ByteField("info101", None),
+ ByteField("info102", None),
+ ByteField("info103", None),
+ ByteField("info104", None),
+ ByteField("info105", None),
+ ByteField("info106", None),
+ ByteField("info107", None),
+ ByteField("info108", None),
+ ByteField("info109", None),
+ ByteField("info110", None),
+ ByteField("info111", None),
+ ByteField("info112", None),
+ ByteField("info113", None),
+ ByteField("info114", None),
+ ByteField("info115", None),
+ ByteField("info116", None),
+ ByteField("info117", None),
+ ByteField("info118", None),
+ ByteField("info119", None),
+ ByteField("info120", None),
+ ByteField("info121", None),
+ ByteField("info122", None),
+ ByteField("info123", None),
+ ByteField("info124", None),
+ ByteField("info125", None),
+ ByteField("info126", None),
+ ByteField("info127", None),
+ ByteField("info128", None),
+ ByteField("info129", None),
+ ByteField("info130", None),
+ ByteField("info131", None),
+ ByteField("info132", None),
+ ByteField("info133", None),
+ ByteField("info134", None),
+ ByteField("info135", None),
+ ByteField("info136", None),
+ ByteField("info137", None),
+ ByteField("info138", None),
+ ByteField("info139", None),
+ ByteField("info140", None),
+ ByteField("info141", None),
+ ByteField("info142", None),
+ ByteField("info143", None),
+ ByteField("info144", None),
+ ByteField("info145", None),
+ ByteField("info146", None),
+ ByteField("info147", None),
+ ByteField("info148", None),
+ ByteField("info149", None),
+ ByteField("info150", None),
+ ByteField("info151", None),
+ ByteField("info152", None),
+ ByteField("info153", None),
+ ByteField("info154", None),
+ ByteField("info155", None),
+ ByteField("info156", None),
+ ByteField("info157", None),
+ ByteField("info158", None),
+ ByteField("info159", None),
+ ByteField("info160", None),
+ ByteField("info161", None),
+ ByteField("info162", None),
+ ByteField("info163", None),
+ ByteField("info164", None),
+ ByteField("info165", None),
+ ByteField("info166", None),
+ ByteField("info167", None),
+ ByteField("info168", None),
+ ByteField("info169", None),
+ ByteField("info170", None),
+ ByteField("info171", None),
+ ByteField("info172", None),
+ ByteField("info173", None),
+ ByteField("info174", None),
+ ByteField("info175", None),
+ ByteField("info176", None),
+ ByteField("info177", None),
+ ByteField("info178", None),
+ ByteField("info179", None),
+ ByteField("info180", None),
+ ByteField("info181", None),
+ ByteField("info182", None),
+ ByteField("info183", None),
+ ByteField("info184", None),
+ ByteField("info185", None),
+ ByteField("info186", None),
+ ByteField("info187", None),
+ ByteField("info188", None),
+ ByteField("info189", None),
+ ByteField("info190", None),
+ ByteField("info191", None),
+ ByteField("info192", None),
+ ByteField("info193", None),
+ ByteField("info194", None),
+ ByteField("info195", None),
+ ByteField("info196", None),
+ ByteField("info197", None),
+ ByteField("info198", None),
+ ByteField("info199", None),
+ ByteField("info200", None),
+ ByteField("info201", None),
+ ByteField("info202", None),
+ ByteField("info203", None),
+ ByteField("info204", None),
+ ByteField("info205", None),
+ ByteField("info206", None),
+ ByteField("info207", None),
+ ByteField("info208", None),
+ ByteField("info209", None),
+ ByteField("info210", None),
+ ByteField("info211", None),
+ ByteField("info212", None),
+ ByteField("info213", None),
+ ByteField("info214", None),
+ ByteField("info215", None),
+ ByteField("info216", None),
+ ByteField("info217", None),
+ ByteField("info218", None),
+ ByteField("info219", None),
+ ByteField("info220", None),
+ ByteField("info221", None),
+ ByteField("info222", None),
+ ByteField("info223", None),
+ ByteField("info224", None),
+ ByteField("info225", None),
+ ByteField("info226", None),
+ ByteField("info227", None),
+ ByteField("info228", None),
+ ByteField("info229", None),
+ ByteField("info230", None),
+ ByteField("info231", None),
+ ByteField("info232", None),
+ ByteField("info233", None),
+ ByteField("info234", None),
+ ByteField("info235", None),
+ ByteField("info236", None),
+ ByteField("info237", None),
+ ByteField("info238", None),
+ ByteField("info239", None),
+ ByteField("info240", None),
+ ByteField("info241", None),
+ ByteField("info242", None),
+ ByteField("info243", None),
+ ByteField("info244", None),
+ ByteField("info245", None),
+ ByteField("info246", None),
+ ByteField("info247", None),
+ ByteField("info248", None),
+ ByteField("info249", None),
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(2, 251, a, self.fields_desc)
+ if self.lengthSVI is None:
+ p = p[:1] + struct.pack(">B", res[1]) + p[2:]
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ return p + pay
+
+
# length 3 to 35 or 131
class UserUserHdr(Packet):
    """User-user information element (Section 10.5.4.25).

    The length octet is deliberately left to the caller: leaving
    ``lengthUU`` as None makes post_build() compute it, while setting it
    explicitly lets the user pick any (possibly wrong) length
    => more fuzzing options.
    """
    name = "User-User"
    fields_desc = [
        BitField("eightBitUU", None, 1),
        XBitField("ieiUU", None, 7),

        XByteField("lengthUU", None),  # dynamic length of field depending
                                       # of the type of message;
                                       # auto-computed when None

        ByteField("userUserPD", 0x0),
        # optional user-user information octets (up to 32 for the short
        # form, up to 131 for the long form)
    ] + [ByteField("userUserInfo%d" % i, None) for i in range(1, 132)]

    def post_build(self, p, pay):
        """Drop unset optional octets and fill in the length octet."""
        # current value of every declared field, in declaration order
        vals = [getattr(self, fld.name) for fld in self.fields_desc]
        res = adapt(3, 131, vals, self.fields_desc)
        if self.lengthUU is None:
            p = p[:1] + struct.pack(">B", res[1]) + p[2:]
        # was 'res[0] is not 0': identity comparison on ints only works
        # by accident of CPython small-int interning; use '!='
        if res[0] != 0:
            p = p[:-res[0]]
        return p + pay
+
+
class AlertingPatternHdr(Packet):
    """Alerting Pattern information element (Section 10.5.4.26)."""
    name = "Alerting Pattern"
    fields_desc = [
        BitField("eightBitAP", None, 1),  # extension bit of the IEI octet
        XBitField("ieiAP", None, 7),      # information element identifier
        XByteField("lengthAP", 3),        # fixed-length IE
        BitField("spare", 0, 4),
        BitField("alertingValue", 0, 4),
    ]
+
+
class AllowedActionsHdr(Packet):
    """Allowed actions $(CCBS)$ information element (Section 10.5.4.26)."""
    name = "Allowed Actions $(CCBS)$"
    fields_desc = [
        BitField("eightBitAA", None, 1),
        XBitField("ieiAA", None, 7),
        # NOTE(review): the field name "lengthAP" looks copied from
        # AlertingPatternHdr; kept unchanged so existing callers still work
        XByteField("lengthAP", 3),
        BitField("CCBS", 0, 1),
        BitField("spare", 0, 7),
    ]
+
+
+#
+# 10.5.5 GPRS mobility management information elements
+#
+
class AttachResult(Packet):
    """Attach result information element (Section 10.5.5.1)."""
    name = "Attach Result"
    fields_desc = [
        XBitField("ieiAR", 0, 4),
        BitField("spare", 0, 1),
        BitField("result", 1, 3),
    ]
+
+
class AttachTypeHdr(Packet):
    """Attach type information element (Section 10.5.5.2)."""
    name = "Attach Type"
    fields_desc = [
        XBitField("ieiAT", None, 4),
        BitField("spare", 0, 1),
        BitField("type", 1, 3),
    ]
+
+
# Fix 1/2 len problem
class AttachTypeAndCiphKeySeqNr(Packet):
    """Attach Type combined with a spare half octet (half-octet IE pair)."""
    name = "Attach Type and Cipher Key Sequence Number"
    fields_desc = [
        BitField("spare", 0, 1),
        BitField("type", 1, 3),
        BitField("spareHalfOctets", 0, 4),
    ]
+
+
class CipheringAlgorithm(Packet):
    """Ciphering algorithm information element (Section 10.5.5.3)."""
    name = "Ciphering Algorithm"
    fields_desc = [
        XBitField("ieiCA", 0, 4),
        BitField("spare", 0, 1),
        BitField("type", 1, 3),
    ]
+
+
# Fix 1/2 len problem
class CipheringAlgorithmAndImeisvRequest(Packet):
    """Ciphering Algorithm combined with IMEISV Request (half-octet pair)."""
    name = "Ciphering Algorithm and Imeisv Request"
    fields_desc = [
        BitField("spare", 0, 1),  # NOTE(review): "spare" is declared twice
        BitField("type", 1, 3),
        BitField("spare", 0, 1),  # duplicate name kept to preserve layout
        BitField("imeisvVal", 0, 3),
    ]
+
+
# [Spare]
class TmsiStatus(Packet):
    """[Spare] TMSI status information element (Section 10.5.5.4)."""
    name = "[Spare] TMSI Status"
    fields_desc = [
        XBitField("ieiTS", None, 4),
        BitField("spare", 0, 3),
        BitField("flag", 1, 1),
    ]
+
+
class DetachType(Packet):
    """Detach type information element (Section 10.5.5.5)."""
    name = "Detach Type"
    fields_desc = [
        XBitField("ieiDT", 0, 4),
        BitField("poweroff", 0, 1),
        BitField("type", 1, 3),
    ]
+
+
# Fix 1/2 len problem
class DetachTypeAndForceToStandby(Packet):
    """Detach Type combined with Force To Standby (half-octet IE pair)."""
    name = "Detach Type and Force To Standby"
    fields_desc = [
        BitField("poweroff", 0, 1),
        BitField("type", 1, 3),
        BitField("spare", 0, 1),
        BitField("forceStandby", 0, 3),
    ]
+
+
# Fix 1/2 len problem
class DetachTypeAndSpareHalfOctets(Packet):
    """Detach Type combined with a spare half octet (half-octet IE pair)."""
    name = "Detach Type and Spare Half Octets"
    fields_desc = [
        BitField("poweroff", 0, 1),
        BitField("type", 1, 3),
        BitField("spareHalfOctets", 0, 4),
    ]
+
+
class DrxParameter(Packet):
    """DRX parameter information element (Section 10.5.5.6)."""
    name = "DRX Parameter"
    fields_desc = [
        ByteField("ieiDP", 0),
        ByteField("splitPG", 0),
        BitField("spare", 0, 4),
        BitField("splitCCCH", 0, 1),
        BitField("NonDrxTimer", 1, 3),
    ]
+
+
class ForceToStandby(Packet):
    """Force to standby information element (Section 10.5.5.7)."""
    name = "Force To Standby"
    fields_desc = [
        XBitField("ieiFTS", 0, 4),
        BitField("spare", 0, 1),
        BitField("forceStandby", 0, 3),
    ]
+
+
# Fix 1/2 len problem
class ForceToStandbyAndAcReferenceNumber(Packet):
    """Force To Standby combined with A&C Reference Number (half-octet pair)."""
    name = "Force To Standby And Ac Reference Number"
    fields_desc = [
        BitField("spare", 0, 1),
        BitField("forceStandby", 0, 3),
        BitField("acRefVal", 0, 4),
    ]
+
+
# Fix 1/2 len problem
class ForceToStandbyAndUpdateResult(Packet):
    """Force To Standby combined with Update Result (half-octet IE pair)."""
    name = "Force To Standby And Update Result"
    fields_desc = [
        BitField("spare", 0, 1),  # NOTE(review): "spare" is declared twice
        BitField("forceStandby", 0, 3),
        BitField("spare", 0, 1),  # duplicate name kept to preserve layout
        BitField("updateResVal", 0, 3),
    ]
+
+
# Fix 1/2 len problem
class ForceToStandbyAndSpareHalfOctets(Packet):
    """Force To Standby combined with a spare half octet."""
    name = "Force To Standby And Spare Half Octets"
    fields_desc = [
        BitField("spare", 0, 1),
        BitField("forceStandby", 0, 3),
        BitField("spareHalfOctets", 0, 4),
    ]
+
+
class PTmsiSignature(Packet):
    """P-TMSI signature information element (Section 10.5.5.8)."""
    name = "P-TMSI Signature"
    fields_desc = [
        ByteField("ieiPTS", 0),
        # NOTE(review): "sgnature" is a typo for "signature"; renaming it
        # would break callers that set the field by name, so it is kept
        BitField("sgnature", 0, 24),
    ]
+
+
class IdentityType2(Packet):
    """Identity type 2 information element (Section 10.5.5.9)."""
    name = "Identity Type 2"
    fields_desc = [
        XBitField("ieiIT2", 0, 4),
        BitField("spare", 0, 1),
        BitField("typeOfIdentity", 0, 3),
    ]
+
+
# Fix 1/2 len problem
class IdentityType2AndforceToStandby(Packet):
    """Identity Type 2 combined with Force To Standby (half-octet pair)."""
    name = "Identity Type 2 and Force to Standby"
    fields_desc = [
        BitField("spare", 0, 1),  # NOTE(review): "spare" is declared twice
        BitField("typeOfIdentity", 0, 3),
        BitField("spare", 0, 1),  # duplicate name kept to preserve layout
        BitField("forceStandby", 0, 3),
    ]
+
+
class ImeisvRequest(Packet):
    """IMEISV request information element (Section 10.5.5.10)."""
    name = "IMEISV Request"
    fields_desc = [
        XBitField("ieiIR", 0, 4),
        BitField("spare", 0, 1),
        BitField("imeisvVal", 0, 3),
    ]
+
+
# Fix 1/2 len problem
class ImeisvRequestAndForceToStandby(Packet):
    """IMEISV Request combined with a spare half octet.

    NOTE(review): despite the class name, the second half octet is
    "spareHalfOctets", not a Force-To-Standby value — confirm intent.
    """
    name = "IMEISV Request and Force To Standby"
    fields_desc = [
        BitField("spare", 0, 1),
        BitField("imeisvVal", 0, 3),
        BitField("spareHalfOctets", 0, 4),
    ]
+
+
# length 4 to 19
class ReceiveNpduNumbersList(Packet):
    """Receive N-PDU Numbers list information element (Section 10.5.5.11).

    Variable-length IE, 4 to 19 octets overall; ``lengthRNNL`` is
    auto-computed in post_build() when left as None.
    """
    name = "Receive N-PDU Numbers list"
    fields_desc = [
        ByteField("ieiRNNL", 0x0),

        XByteField("lengthRNNL", None),

        BitField("nbList0", 0x0, 16),
        # optional additional list octets
    ] + [ByteField("nbList%d" % i, None) for i in range(1, 16)]

    def post_build(self, p, pay):
        """Drop unset optional octets and fill in the length octet."""
        # current value of every declared field, in declaration order
        vals = [getattr(self, fld.name) for fld in self.fields_desc]
        res = adapt(4, 19, vals, self.fields_desc)
        if self.lengthRNNL is None:
            p = p[:1] + struct.pack(">B", res[1]) + p[2:]
        # was 'res[0] is not 0': identity comparison on ints is unreliable
        if res[0] != 0:
            p = p[:-res[0]]
        return p + pay
+
+
class MsNetworkCapability(Packet):
    """MS network capability information element (Section 10.5.5.12)."""
    name = "MS Network Capability"
    fields_desc = [
        ByteField("ieiMNC", 0),
        XByteField("lengthMNC", 3),
        ByteField("msNetValue", 0),
    ]
+
+
# length 6 to 14
class MsRadioAccessCapability(Packet):
    """MS Radio Access capability Section 10.5.5.12a

    Hand-laid bit layout; field order defines the wire format, so it must
    not be reordered.
    """
    name = "MS Radio Access Capability"
    fields_desc = [
        ByteField("ieiMRAC", 0x24),

        # total IE length octet; left as None (not auto-computed here)
        XByteField("lengthMRAC", None),

        BitField("spare1", 0x0, 1),  # NOTE(review): name duplicated below

        BitField("accessCap", 0x0, 4),
        BitField("accessTechType", 0x0, 4),
        # access capability
        BitField("bool", 0x0, 1),
        BitField("lengthContent", 0x0, 7),
        BitField("spare1", 0x0, 1),  # NOTE(review): duplicate of "spare1"
                                     # above — attribute access is ambiguous
        # content
        BitField("pwrCap", 0x0, 3),
        BitField("bool1", 0x0, 1),
        # a51..a57: presumably A5/x ciphering availability bits — TODO confirm
        BitField("a51", 0x0, 1),
        BitField("a52", 0x0, 1),
        BitField("a53", 0x0, 1),
        BitField("a54", 0x0, 1),

        BitField("a55", 0x0, 1),
        BitField("a56", 0x0, 1),
        BitField("a57", 0x0, 1),
        BitField("esInd", 0x0, 1),
        BitField("ps", 0x0, 1),
        BitField("vgcs", 0x0, 1),
        BitField("vbs", 0x0, 1),
        BitField("bool2", 0x0, 1),
        # multislot
        BitField("bool3", 0x0, 1),
        BitField("hscsd", 0x0, 5),

        BitField("bool4", 0x0, 1),
        BitField("gprs", 0x0, 5),
        BitField("gprsExt", 0x0, 1),
        BitField("bool5", 0x0, 1),

        BitField("smsVal", 0x0, 4),
        BitField("smVal", 0x0, 4)
    ]
+
+
+# 10.5.5.13 Spare
+# This is intentionally left spare.
+
class GmmCause(Packet):
    """GMM cause information element (Section 10.5.5.14)."""
    name = "GMM Cause"
    fields_desc = [
        ByteField("ieiGC", 0),
        ByteField("causeValue", 0),
    ]
+
+
class RoutingAreaIdentification(Packet):
    """Routing area identification information element (Section 10.5.5.15).

    Layout: MCC digits 2/1, MNC digit 3 + MCC digit 3, MNC digits 2/1,
    a 2-octet LAC and a 1-octet RAC.
    """
    name = "Routing Area Identification"
    fields_desc = [
        ByteField("ieiRAI", 0x0),
        BitField("mccDigit2", 0x0, 4),
        BitField("mccDigit1", 0x0, 4),
        BitField("mncDigit3", 0x0, 4),
        BitField("mccDigit3", 0x0, 4),
        # fixed: these two were duplicated as "mccDigit2"/"mccDigit1",
        # shadowing the real MCC fields; this octet carries the MNC digits
        BitField("mncDigit2", 0x0, 4),
        BitField("mncDigit1", 0x0, 4),
        # fixed: was "LAC", "LAC1", "LAC" — duplicate names collided;
        # the RAI carries a 2-octet LAC followed by a 1-octet RAC
        ByteField("LAC1", 0x0),
        ByteField("LAC2", 0x0),
        ByteField("RAC", 0x0)
    ]
+# 10.5.5.16 Spare
+# This is intentionally left spare.
+
+
class UpdateResult(Packet):
    """Update result information element (Section 10.5.5.17)."""
    name = "Update Result"
    fields_desc = [
        XBitField("ieiUR", 0, 4),
        BitField("spare", 0, 1),
        BitField("updateResVal", 0, 3),
    ]
+
+
class UpdateType(Packet):
    """Update type information element (Section 10.5.5.18)."""
    name = "Update Type"
    fields_desc = [
        XBitField("ieiUT", 0, 4),
        BitField("spare", 0, 1),
        BitField("updateTypeVal", 0, 3),
    ]
+
+
# Fix 1/2 len problem
class UpdateTypeAndCiphKeySeqNr(Packet):
    """Update Type combined with Cipher Key Sequence Number (half-octet pair)."""
    name = "Update Type and Cipher Key Sequence Number"
    fields_desc = [
        BitField("spare", 0, 1),  # NOTE(review): "spare" is declared twice
        BitField("updateTypeVal", 0, 3),
        BitField("spare", 0, 1),  # duplicate name kept to preserve layout
        BitField("keySeq", 0, 3),
    ]
+
+
class AcReferenceNumber(Packet):
    """A&C reference number information element (Section 10.5.5.19)."""
    name = "A&C Reference Number"
    fields_desc = [
        XBitField("ieiARN", 0, 4),
        BitField("acRefVal", 0, 4),
    ]
+
+
# Fix 1/2 len problem
class AcReferenceNumberAndSpareHalfOctets(Packet):
    """A&C Reference Number combined with a spare half octet."""
    name = "A&C Reference Number and Spare Half Octets"
    fields_desc = [
        BitField("acRefVal", 0, 4),
        BitField("spareHalfOctets", 0, 4),
    ]
+#
+# 10.5.6 Session management information elements
+#
+# length 3 to 102
+
+
class AccessPointName(Packet):
    """Access Point Name information element (Section 10.5.6.1).

    Variable-length IE, 3 to 102 octets overall; ``lengthAPN`` is
    auto-computed in post_build() when left as None.
    """
    name = "Access Point Name"
    fields_desc = [
        ByteField("ieiAPN", 0x0),
        XByteField("lengthAPN", None),
        ByteField("apName", 0x0),
        # optional additional APN octets
    ] + [ByteField("apName%d" % i, None) for i in range(1, 100)]

    def post_build(self, p, pay):
        """Drop unset optional octets and fill in the length octet."""
        # current value of every declared field, in declaration order
        vals = [getattr(self, fld.name) for fld in self.fields_desc]
        res = adapt(3, 102, vals, self.fields_desc)
        if self.lengthAPN is None:
            p = p[:1] + struct.pack(">B", res[1]) + p[2:]
        # was 'res[0] is not 0': identity comparison on ints is unreliable
        if res[0] != 0:
            p = p[:-res[0]]
        return p + pay
+
+
class NetworkServiceAccessPointIdentifier(Packet):
    """Network service access point identifier IE (Section 10.5.6.2)."""
    name = "Network Service Access Point Identifier"
    fields_desc = [
        ByteField("ieiNSAPI", 0),
        BitField("spare", 0, 4),
        BitField("nsapiVal", 0, 4),
    ]
+
+
# length 2 to 253
class ProtocolConfigurationOptions(Packet):
    """Protocol configuration options information element (Section 10.5.6.3).

    Variable-length IE, 2 to 253 octets overall; ``lengthPCO`` is
    auto-computed in post_build() when left as None.  The body is a
    sequence of up to 83 (protocol id, length, content) octet triples.
    """
    name = "Protocol Configuration Options"
    fields_desc = [
        ByteField("ieiPCO", 0x0),

        XByteField("lengthPCO", None),
        # optional configuration-protocol octet
        BitField("ext", None, 1),
        BitField("spare", None, 4),
        BitField("configProto", None, 3),
    ] + [
        fld
        for i in range(1, 84)
        for fld in (ByteField("protoId%d" % i, None),
                    ByteField("lenProto%d" % i, None),
                    ByteField("proto%dContent" % i, None))
    ]

    def post_build(self, p, pay):
        """Drop unset optional octets and fill in the length octet."""
        # current value of every declared field, in declaration order
        vals = [getattr(self, fld.name) for fld in self.fields_desc]
        res = adapt(2, 253, vals, self.fields_desc)
        if self.lengthPCO is None:
            p = p[:1] + struct.pack(">B", res[1]) + p[2:]
        # was 'res[0] is not 0': identity comparison on ints is unreliable
        if res[0] != 0:
            p = p[:-res[0]]
        return p + pay
+
+
# len 4 to 20
class PacketDataProtocolAddress(Packet):
    """Packet data protocol address Section 10.5.6.4"""
    name = "Packet Data Protocol Address"
    fields_desc = [
        ByteField("ieiPDPA", 0x0),

        XByteField("lengthPDPA", None),   # filled in by post_build when None

        BitField("spare", 0x0, 4),
        BitField("pdpTypeOrga", 0x0, 4),

        ByteField("pdpTypeNb", 0x0),
        # optional address octets (up to 16, e.g. an IPv4/IPv6 address --
        # interpretation depends on pdpTypeOrga/pdpTypeNb; not enforced here)
        ByteField("addressInfo1", None),
        ByteField("addressInfo2", None),
        ByteField("addressInfo3", None),
        ByteField("addressInfo4", None),
        ByteField("addressInfo5", None),
        ByteField("addressInfo6", None),
        ByteField("addressInfo7", None),
        ByteField("addressInfo8", None),
        ByteField("addressInfo9", None),
        ByteField("addressInfo10", None),
        ByteField("addressInfo11", None),
        ByteField("addressInfo12", None),
        ByteField("addressInfo13", None),
        ByteField("addressInfo14", None),
        ByteField("addressInfo15", None),
        ByteField("addressInfo16", None)
    ]

    def post_build(self, p, pay):
        """Fill in lengthPDPA (if unset) and strip trailing unset optional
        address octets, as computed by the file-local adapt() helper."""
        vals = [getattr(self, fld.name) for fld in self.fields_desc]
        # adapt() returns (number of trailing bytes to strip, length value)
        res = adapt(4, 20, vals, self.fields_desc)
        if self.lengthPDPA is None:
            p = p[:1] + struct.pack(">B", res[1]) + p[2:]
        # BUG FIX: original used "is not 0" — identity comparison on an int.
        if res[0] != 0:
            p = p[:-res[0]]
        return p + pay
+
+
class QualityOfService(Packet):
    """Quality of service Section 10.5.6.5"""
    name = "Quality of Service"
    # NOTE(review): the field name "spare" appears three times below; scapy
    # attribute access only reaches the first one — confirm whether distinct
    # names (spare1/spare2/...) were intended.
    fields_desc = [
        ByteField("ieiQOS", 0x0),        # information element identifier
        XByteField("lengthQOS", 0x5),    # fixed IE length

        BitField("spare", 0x0, 2),
        BitField("delayClass", 0x0, 3),
        BitField("reliaClass", 0x0, 3),

        BitField("peak", 0x0, 4),
        BitField("spare", 0x0, 1),
        BitField("precedenceCl", 0x0, 3),

        BitField("spare", 0x0, 3),
        BitField("mean", 0x0, 5)
    ]
+
+
class SmCause(Packet):
    """SM cause Section 10.5.6.6"""
    name = "SM Cause"
    fields_desc = [
        ByteField("ieiSC", 0x0),     # information element identifier
        ByteField("causeVal", 0x0)   # cause value
    ]
+
+# 10.5.6.7 Spare
+# This is intentionally left spare.
+
+
class AaDeactivationCause(Packet):
    """AA deactivation cause Section 10.5.6.8"""
    # Half-octet (type 1) IE: IEI nibble plus a 4-bit value.
    name = "AA Deactivation Cause"
    fields_desc = [
        XBitField("ieiADC", 0x0, 4),
        BitField("spare", 0x0, 1),
        BitField("aaVal", 0x0, 3)
    ]
+
+
# Fix 1/2 len problem: combines the half-octet IE with a spare half octet so
# the total is byte-aligned (scapy packets must end on byte boundaries).
class AaDeactivationCauseAndSpareHalfOctets(Packet):
    name = "AA Deactivation Cause and Spare Half Octets"
    fields_desc = [
        BitField("spare", 0x0, 1),
        BitField("aaVal", 0x0, 3),
        BitField("spareHalfOctets", 0x0, 4)
    ]
+
+
class LlcServiceAccessPointIdentifier(Packet):
    """LLC service access point identifier Section 10.5.6.9"""
    name = "LLC Service Access Point Identifier"
    fields_desc = [
        # IEI defaults to None (unset) here, unlike most IEs in this file
        # which default to 0x0 — presumably so it is omitted when unused;
        # TODO confirm against callers.
        ByteField("ieiLSAPI", None),
        BitField("spare", 0x0, 4),
        BitField("llcVal", 0x0, 4)
    ]
+
+
+#
+# 10.5.7 GPRS Common information elements
+#
+
+# 10.5.7.1 [Spare]
+
class RadioPriority(Packet):
    """Radio priority Section 10.5.7.2"""
    # Half-octet (type 1) IE: IEI nibble plus a 4-bit value.
    name = "Radio Priority"
    fields_desc = [
        XBitField("ieiRP", 0x0, 4),
        BitField("spare", 0x1, 1),
        BitField("rplv", 0x0, 3)   # radio priority level
    ]
+
+
# Fix 1/2 len problem: combines the half-octet IE with a spare half octet so
# the total is byte-aligned (scapy packets must end on byte boundaries).
class RadioPriorityAndSpareHalfOctets(Packet):
    name = "Radio Priority and Spare Half Octets"
    fields_desc = [
        BitField("spare", 0x1, 1),
        BitField("rplv", 0x0, 3),
        BitField("spareHalfOctets", 0x0, 4)
    ]
+
+
class GprsTimer(Packet):
    """GPRS Timer Section 10.5.7.3"""
    name = "GPRS Timer"
    fields_desc = [
        ByteField("ieiGT", 0x0),
        BitField("unit", 0x0, 3),      # timer unit
        BitField("timerVal", 0x0, 5)   # timer value in the given unit
    ]
+
+
class CellIdentity(Packet):
    """ Cell identity Section 10.5.1.1 """
    name = "Cell Identity"
    fields_desc = [
        ByteField("ciValue1", 0x0),   # CI value, high byte
        ByteField("ciValue2", 0x0)    # CI value, low byte
    ]
+
+
class CiphKeySeqNr(Packet):
    """ Ciphering Key Sequence Number Section 10.5.1.2 """
    # Half-octet IE: 4 bits only; must be combined with another half octet.
    name = "Cipher Key Sequence Number"
    fields_desc = [
        BitField("spare", 0x0, 1),
        BitField("keySeq", 0x0, 3)
    ]
+
+
class LocalAreaId(Packet):
    """ Local Area Identification Section 10.5.1.3 """
    name = "Location Area Identification"
    # MCC/MNC digits are swapped within each octet (BCD, nibble-swapped
    # on-air encoding), hence the digit2/digit1 ordering below.
    fields_desc = [
        BitField("mccDigit2", 0x0, 4),
        BitField("mccDigit1", 0x0, 4),
        BitField("mncDigit3", 0x0, 4),
        BitField("mccDigit3", 0x0, 4),
        BitField("mncDigit2", 0x0, 4),
        BitField("mncDigit1", 0x0, 4),
        ByteField("lac1", 0x0),   # location area code, high byte
        ByteField("lac2", 0x0)    # location area code, low byte
    ]
+#
+# The Mobile Identity is a type 4 information element with a minimum
+# length of 3 octet and 11 octets length maximal.
+#
+
+
# len 3 - 11
class MobileId(Packet):
    """ Mobile Identity Section 10.5.1.4 """
    name = "Mobile Identity"
    fields_desc = [
        XByteField("lengthMI", None),   # filled in by post_build when None
        BitField("idDigit1", 0x0, 4),
        BitField("oddEven", 0x0, 1),
        BitField("typeOfId", 0x0, 3),
        # optional BCD digit pairs (nibble-swapped on-air encoding)
        BitField("idDigit2_1", None, 4),
        BitField("idDigit2", None, 4),
        BitField("idDigit3_1", None, 4),
        BitField("idDigit3", None, 4),
        BitField("idDigit4_1", None, 4),
        BitField("idDigit4", None, 4),
        BitField("idDigit5_1", None, 4),
        BitField("idDigit5", None, 4),
        BitField("idDigit6_1", None, 4),
        BitField("idDigit6", None, 4),
        BitField("idDigit7_1", None, 4),
        BitField("idDigit7", None, 4),
        BitField("idDigit8_1", None, 4),
        BitField("idDigit8", None, 4),
        BitField("idDigit9_1", None, 4),
        BitField("idDigit9", None, 4),
    ]

    def post_build(self, p, pay):
        """Fill in lengthMI (if unset) and strip trailing unset optional
        digit octets, as computed by the file-local adapt() helper."""
        vals = [getattr(self, fld.name) for fld in self.fields_desc]
        # adapt() returns (number of trailing bytes to strip, length value)
        res = adapt(2, 10, vals, self.fields_desc, 1)
        if self.lengthMI is None:
            p = struct.pack(">B", res[1]) + p[1:]
        # BUG FIX: original used "is not 0" — identity comparison on an int.
        if res[0] != 0:
            p = p[:-res[0]]
        return p + pay
+
+
class MobileStationClassmark1(Packet):
    """ Mobile Station Classmark 1 Section 10.5.1.5 """
    name = "Mobile Station Classmark 1"
    fields_desc = [
        BitField("spare", 0x0, 1),
        BitField("revisionLvl", 0x0, 2),   # MS revision level
        BitField("esInd", 0x0, 1),         # early classmark sending indicator
        BitField("a51", 0x0, 1),           # A5/1 algorithm availability
        BitField("rfPowerCap", 0x0, 3)     # RF power capability
    ]
+
+
class MobileStationClassmark2(Packet):
    """ Mobile Station Classmark 2 Section 10.5.1.6 """
    name = "Mobile Station Classmark 2"
    fields_desc = [
        XByteField("lengthMSC2", 0x3),     # fixed IE length
        # octet 3 — same layout as Classmark 1
        BitField("spare", 0x0, 1),
        BitField("revisionLvl", 0x0, 2),
        BitField("esInd", 0x0, 1),
        BitField("a51", 0x0, 1),
        BitField("rfPowerCap", 0x0, 3),
        # octet 4
        BitField("spare1", 0x0, 1),
        BitField("psCap", 0x0, 1),
        BitField("ssScreenInd", 0x0, 2),
        BitField("smCaPabi", 0x0, 1),
        BitField("vbs", 0x0, 1),
        BitField("vgcs", 0x0, 1),
        BitField("fc", 0x0, 1),
        # octet 5
        BitField("cm3", 0x0, 1),
        BitField("spare2", 0x0, 1),
        BitField("lcsvaCap", 0x0, 1),
        BitField("spare3", 0x0, 1),
        BitField("soLsa", 0x0, 1),
        BitField("cmsp", 0x0, 1),
        BitField("a53", 0x0, 1),
        BitField("a52", 0x0, 1)
    ]
+
+
class DescriptiveGroupOrBroadcastCallReference(Packet):
    """ Descriptive group or broadcast call reference Section 10.5.1.9 """
    name = "Descriptive Group or Broadcast Call Reference"
    fields_desc = [
        BitField("binCallRef", 0x0, 27),   # binary coded call reference
        BitField("sf", 0x0, 1),
        BitField("fa", 0x0, 1),
        BitField("callPrio", 0x0, 3),
        BitField("cipherInfo", 0x0, 4),
        BitField("spare1", 0x0, 1),
        BitField("spare2", 0x0, 1),
        BitField("spare3", 0x0, 1),
        BitField("spare4", 0x0, 1)
    ]
+
+
class PdAndSapi(Packet):
    """ PD and SAPI $(CCBS)$ Section 10.5.1.10a """
    name = "PD and SAPI $(CCBS)$"
    fields_desc = [
        BitField("spare", 0x0, 1),
        BitField("spare1", 0x0, 1),
        BitField("sapi", 0x0, 2),   # service access point identifier
        BitField("pd", 0x0, 4)      # protocol discriminator
    ]
+
+
class PriorityLevel(Packet):
    """ Priority Level Section 10.5.1.11 """
    # Half-octet IE: 4 bits only.
    name = "Priority Level"
    fields_desc = [
        BitField("spare", 0x0, 1),
        BitField("callPrio", 0x0, 3)
    ]
+
+#
+# Radio Resource management information elements
+#
+
+
# len 6 to max for L3 message (251)
class BaRange(Packet):
    """ BA Range Section 10.5.2.1a """
    name = "BA Range"
    # Field naming: rX = range X; Lo = lower, Hi = higher bound;
    # Hp = high part, Lp = low part of the split value.
    # Range 1 has irregular bit splits (3/5 and 4/4) and is spelled out;
    # ranges 2..84 follow a perfectly regular 4-field pattern and are
    # generated programmatically below instead of ~330 hand-written lines.
    # The resulting fields_desc is identical (same names, order, defaults).
    fields_desc = [
        XByteField("lengthBR", None),   # filled in by post_build when None
        # error: byte format requires -128 <= number <= 127
        ByteField("nrOfRanges", 0x0),
        ByteField("r1LoHp", 0x0),

        BitField("r1LoLp", 0x0, 3),
        BitField("r1HiHp", 0x0, 5),

        BitField("r1HiLp", 0x0, 4),
        BitField("r2LoHp", 0x0, 4),
    ]
    # optional ranges 2..84: each contributes rXLoLp(5b), rXHiHp(3b),
    # rXHiLp(8b) and — except for the last range — r(X+1)LoHp(8b).
    for _i in range(2, 85):
        fields_desc.append(BitField("r%dLoLp" % _i, None, 5))
        fields_desc.append(BitField("r%dHiHp" % _i, None, 3))
        fields_desc.append(ByteField("r%dHiLp" % _i, None))
        if _i < 84:
            fields_desc.append(ByteField("r%dLoHp" % (_i + 1), None))
    del _i   # do not leak the loop variable as a class attribute

    def post_build(self, p, pay):
        """Fill in lengthBR (if unset) and strip trailing unset optional
        range octets, as computed by the file-local adapt() helper."""
        vals = [getattr(self, fld.name) for fld in self.fields_desc]
        # adapt() returns (number of trailing bytes to strip, length value)
        res = adapt(5, 253, vals, self.fields_desc, 1)
        if self.lengthBR is None:
            p = struct.pack(">B", res[1]) + p[1:]
        # BUG FIX: original used "is not 0" — identity comparison on an int.
        if res[0] != 0:
            p = p[:-res[0]]
        return p + pay
+
+
# len 3 to max for L3 message (251)
class BaListPref(Packet):
    """ BA List Pref Section 10.5.2.1c """
    name = "BA List Pref"
    fields_desc = [
        # NOTE(review): lengthBLP defaults to None but this class has no
        # post_build to fill it in — confirm callers always set it.
        XByteField("lengthBLP", None),

        BitField("fixBit", 0x0, 1),
        BitField("rangeLower", 0x0, 10),
        BitField("fixBit2", 0x0, 1),
        BitField("rangeUpper", 0x0, 10),
        BitField("baFreq", 0x0, 10),
        BitField("sparePad", 0x0, 8)
    ]
+
+
# len 17 || Have a look at the specs for the field format
# Bit map 0 format
# Range 1024 format
# Range 512 format
# Range 256 format
# Range 128 format
# Variable bit map format
class CellChannelDescription(Packet):
    """ Cell Channel Description Section 10.5.2.1b """
    # Implements only the "bit map 0" layout: one bit per ARFCN, from 128
    # down to 1; the other formats listed above are not modeled here.
    name = "Cell Channel Description "
    fields_desc = [
        BitField("bit128", 0x0, 1),
        BitField("bit127", 0x0, 1),
        BitField("spare1", 0x0, 1),
        BitField("spare2", 0x0, 1),
        BitField("bit124", 0x0, 1),
        BitField("bit123", 0x0, 1),
        BitField("bit122", 0x0, 1),
        BitField("bit121", 0x0, 1),
        # each ByteField covers the next 8 ARFCN bits, named by its MSB
        ByteField("bit120", 0x0),
        ByteField("bit112", 0x0),
        ByteField("bit104", 0x0),
        ByteField("bit96", 0x0),
        ByteField("bit88", 0x0),
        ByteField("bit80", 0x0),
        ByteField("bit72", 0x0),
        ByteField("bit64", 0x0),
        ByteField("bit56", 0x0),
        ByteField("bit48", 0x0),
        ByteField("bit40", 0x0),
        ByteField("bit32", 0x0),
        ByteField("bit24", 0x0),
        ByteField("bit16", 0x0),
        ByteField("bit8", 0x0)
    ]
+
+
class CellDescription(Packet):
    """ Cell Description Section 10.5.2.2 """
    name = "Cell Description"
    fields_desc = [
        BitField("bcchHigh", 0x0, 2),   # BCCH ARFCN, high bits
        BitField("ncc", 0x0, 3),        # network colour code
        BitField("bcc", 0x0, 3),        # base station colour code
        ByteField("bcchLow", 0x0)       # BCCH ARFCN, low byte
    ]
+
+
class CellOptionsBCCH(Packet):
    """ Cell Options (BCCH) Section 10.5.2.3 """
    name = "Cell Options (BCCH)"
    fields_desc = [
        BitField("spare", 0x0, 1),
        BitField("pwrc", 0x0, 1),
        BitField("dtx", 0x0, 2),
        BitField("rLinkTout", 0x0, 4)   # radio link timeout
    ]
+
+
class CellOptionsSACCH(Packet):
    """ Cell Options (SACCH) Section 10.5.2.3a """
    name = "Cell Options (SACCH)"
    # NOTE(review): "dtx" is declared twice (scapy attribute access only
    # reaches the first), and the declared bits total 7, not 8 — looks like
    # the second dtx should be 2 bits per the spec's split DTX field;
    # verify against 3GPP TS 44.018 before changing.
    fields_desc = [
        BitField("dtx", 0x0, 1),
        BitField("pwrc", 0x0, 1),
        BitField("dtx", 0x0, 1),
        BitField("rLinkTout", 0x0, 4)
    ]
+
+
class CellSelectionParameters(Packet):
    """ Cell Selection Parameters Section 10.5.2.4 """
    name = "Cell Selection Parameters"
    fields_desc = [
        BitField("cellReselect", 0x0, 3),   # cell reselect hysteresis
        BitField("msTxPwrMax", 0x0, 5),     # max MS TX power on CCH
        BitField("acs", None, 1),
        BitField("neci", None, 1),
        BitField("rxlenAccMin", None, 6)    # RXLEV access minimum
    ]
+
+
class MacModeAndChannelCodingRequest(Packet):
    """ MAC Mode and Channel Coding Requested Section 10.5.2.4a """
    # Half-octet IE: 4 bits only.
    name = "MAC Mode and Channel Coding Requested"
    fields_desc = [
        BitField("macMode", 0x0, 2),
        BitField("cs", 0x0, 2)   # coding scheme
    ]
+
+
class ChannelDescription(Packet):
    """ Channel Description Section 10.5.2.5 """
    # This variant hard-codes the hopping layout (h defaults to 1:
    # MAIO + HSN fields); see ChannelDescription2 for the h=0 layout.
    name = "Channel Description"
    fields_desc = [

        BitField("channelTyp", 0x0, 5),
        BitField("tn", 0x0, 3),         # timeslot number

        BitField("tsc", 0x0, 3),        # training sequence code
        BitField("h", 0x1, 1),          # hopping channel indicator
        BitField("maioHi", 0x0, 4),

        BitField("maioLo", 0x0, 2),
        BitField("hsn", 0x0, 6)         # hopping sequence number
    ]
+
+
class ChannelDescription2(Packet):
    """ Channel Description 2 Section 10.5.2.5a """
    # This variant hard-codes the non-hopping layout (h defaults to 0:
    # spare + ARFCN fields); the commented-out fields show the h=1 layout.
    name = "Channel Description 2"
    fields_desc = [
        BitField("channelTyp", 0x0, 5),
        BitField("tn", 0x0, 3),         # timeslot number
        BitField("tsc", 0x0, 3),        # training sequence code
        BitField("h", 0x0, 1),          # hopping channel indicator
        # if h=1
        # BitField("maioHi", 0x0, 4),
        # BitField("maioLo", 0x0, 2),
        # BitField("hsn", 0x0, 6)
        BitField("spare", 0x0, 2),
        BitField("arfcnHigh", 0x0, 2),
        ByteField("arfcnLow", 0x0)
    ]
+
+
class ChannelMode(Packet):
    """ Channel Mode Section 10.5.2.6 """
    name = "Channel Mode"
    fields_desc = [
        ByteField("mode", 0x0)   # channel mode value
    ]
+
+
class ChannelMode2(Packet):
    """ Channel Mode 2 Section 10.5.2.7 """
    name = "Channel Mode 2"
    fields_desc = [
        ByteField("mode", 0x0)   # channel mode value
    ]
+
+
class ChannelNeeded(Packet):
    """ Channel Needed Section 10.5.2.8 """
    # Half-octet IE: 4 bits only.
    name = "Channel Needed"
    fields_desc = [
        BitField("channel2", 0x0, 2),
        BitField("channel1", 0x0, 2),
    ]
+
+
class ChannelRequestDescription(Packet):
    """Channel Request Description Section 10.5.2.8a """
    name = "Channel Request Description"
    # Layout is selected by the mt bit: mt=0 is all spare padding,
    # mt=1 carries the actual request parameters.
    # NOTE(review): "spare" is declared twice (under mutually exclusive
    # conditions), which scapy resolves to the first on attribute access.
    fields_desc = [
        BitField("mt", 0x0, 1),
        ConditionalField(BitField("spare", 0x0, 39),
                         lambda pkt: pkt.mt == 0),
        ConditionalField(BitField("spare", 0x0, 3),
                         lambda pkt: pkt.mt == 1),
        ConditionalField(BitField("priority", 0x0, 2),
                         lambda pkt: pkt.mt == 1),
        ConditionalField(BitField("rlcMode", 0x0, 1),
                         lambda pkt: pkt.mt == 1),
        ConditionalField(BitField("llcFrame", 0x1, 1),
                         lambda pkt: pkt.mt == 1),
        ConditionalField(ByteField("reqBandMsb", 0x0),
                         lambda pkt: pkt.mt == 1),
        ConditionalField(ByteField("reqBandLsb", 0x0),
                         lambda pkt: pkt.mt == 1),
        ConditionalField(ByteField("rlcMsb", 0x0),
                         lambda pkt: pkt.mt == 1),
        ConditionalField(ByteField("rlcLsb", 0x0),
                         lambda pkt: pkt.mt == 1)
    ]
+
+
class CipherModeSetting(Packet):
    """Cipher Mode Setting Section 10.5.2.9 """
    # Half-octet IE: 4 bits only.
    name = "Cipher Mode Setting"
    fields_desc = [
        BitField("algoId", 0x0, 3),   # ciphering algorithm identifier
        BitField("sc", 0x0, 1),       # start ciphering
    ]
+
+
class CipherResponse(Packet):
    """Cipher Response Section 10.5.2.10 """
    # Half-octet IE: 4 bits only.
    name = "Cipher Response"
    fields_desc = [
        BitField("spare", 0x0, 3),
        BitField("cr", 0x0, 1),
    ]
+
+
class ControlChannelDescription(Packet):
    """Control Channel Description Section 10.5.2.11 """
    name = "Control Channel Description"
    # NOTE(review): the field name "spare" appears twice below; scapy
    # attribute access only reaches the first one.
    fields_desc = [

        BitField("spare", 0x0, 1),
        BitField("att", 0x0, 1),           # attach/detach allowed
        BitField("bsAgBlksRes", 0x0, 3),
        BitField("ccchConf", 0x0, 3),

        BitField("spare", 0x0, 1),
        BitField("spare1", 0x0, 1),
        BitField("spare2", 0x0, 1),
        BitField("spare3", 0x0, 1),
        BitField("spare4", 0x0, 1),
        BitField("bsPaMfrms", 0x0, 3),

        ByteField("t3212", 0x0)            # periodic update timer value
    ]
+
+
class FrequencyChannelSequence(Packet):
    """Frequency Channel Sequence Section 10.5.2.12"""
    name = "Frequency Channel Sequence"
    fields_desc = [
        BitField("spare", 0x0, 1),
        BitField("lowestArfcn", 0x0, 7),
        # 16 nibble-sized increments relative to the lowest ARFCN
        BitField("skipArfcn01", 0x0, 4),
        BitField("skipArfcn02", 0x0, 4),
        BitField("skipArfcn03", 0x0, 4),
        BitField("skipArfcn04", 0x0, 4),
        BitField("skipArfcn05", 0x0, 4),
        BitField("skipArfcn06", 0x0, 4),
        BitField("skipArfcn07", 0x0, 4),
        BitField("skipArfcn08", 0x0, 4),
        BitField("skipArfcn09", 0x0, 4),
        BitField("skipArfcn10", 0x0, 4),
        BitField("skipArfcn11", 0x0, 4),
        BitField("skipArfcn12", 0x0, 4),
        BitField("skipArfcn13", 0x0, 4),
        BitField("skipArfcn14", 0x0, 4),
        BitField("skipArfcn15", 0x0, 4),
        BitField("skipArfcn16", 0x0, 4)
    ]
+
+
class FrequencyList(Packet):
    """Frequency List Section 10.5.2.13"""
    name = "Frequency List"
    # Problem:
    # There are several formats for the Frequency List information
    # element, distinguished by the "format indicator" subfield.
    # Some formats are frequency bit maps, the others use a special encoding
    # scheme. Only the bit-map layout is modeled below.
    fields_desc = [
        # NOTE(review): lengthFL defaults to None but this class has no
        # post_build to fill it in — confirm callers always set it.
        XByteField("lengthFL", None),

        BitField("formatID", 0x0, 2),
        BitField("spare", 0x0, 2),
        BitField("arfcn124", 0x0, 1),
        BitField("arfcn123", 0x0, 1),
        BitField("arfcn122", 0x0, 1),
        BitField("arfcn121", 0x0, 1),
        # each ByteField covers the next 8 ARFCN bits, named by its MSB
        ByteField("arfcn120", 0x0),
        ByteField("arfcn112", 0x0),
        ByteField("arfcn104", 0x0),
        ByteField("arfcn96", 0x0),
        ByteField("arfcn88", 0x0),
        ByteField("arfcn80", 0x0),
        ByteField("arfcn72", 0x0),
        ByteField("arfcn64", 0x0),
        ByteField("arfcn56", 0x0),
        ByteField("arfcn48", 0x0),
        ByteField("arfcn40", 0x0),
        ByteField("arfcn32", 0x0),
        ByteField("arfcn24", 0x0),
        ByteField("arfcn16", 0x0),
        ByteField("arfcn8", 0x0)
    ]
+
+
# len 4 to 13
class GroupChannelDescription(Packet):
    """Group Channel Description Section 10.5.2.14b"""
    name = "Group Channel Description"
    fields_desc = [
        XByteField("lengthGCD", None),   # filled in by post_build when None

        BitField("channelType", 0x0, 5),
        BitField("tn", 0x0, 3),          # timeslot number

        BitField("tsc", 0x0, 3),         # training sequence code
        BitField("h", 0x0, 1),           # hopping channel indicator
        # if h == 0 the packet looks the following way:
        ConditionalField(BitField("spare", 0x0, 2),
                         lambda pkt: pkt. h == 0x0),
        ConditionalField(BitField("arfcnHi", 0x0, 2),
                         lambda pkt: pkt. h == 0x0),
        ConditionalField(ByteField("arfcnLo", None),
                         lambda pkt: pkt. h == 0x0),
        # if h == 1 the packet looks the following way:
        ConditionalField(BitField("maioHi", 0x0, 4),
                         lambda pkt: pkt. h == 0x1),
        ConditionalField(BitField("maioLo", None, 2),
                         lambda pkt: pkt. h == 0x1),
        ConditionalField(BitField("hsn", None, 6),
                         lambda pkt: pkt. h == 0x1),
        # finished with conditional fields; optional mobile allocation octets
        ByteField("maC6", None),
        ByteField("maC7", None),
        ByteField("maC8", None),
        ByteField("maC9", None),
        ByteField("maC10", None),
        ByteField("maC11", None),
        ByteField("maC12", None),
        ByteField("maC13", None),
        ByteField("maC14", None)
    ]

    def post_build(self, p, pay):
        """Fill in lengthGCD (if unset) and strip trailing unset optional
        octets, as computed by the file-local adapt() helper."""
        vals = [getattr(self, fld.name) for fld in self.fields_desc]
        # adapt() returns (number of trailing bytes to strip, length value)
        res = adapt(4, 13, vals, self.fields_desc, 1)
        if self.lengthGCD is None:
            p = struct.pack(">B", res[1]) + p[1:]
        # BUG FIX: original used "is not 0" — identity comparison on an int.
        if res[0] != 0:
            p = p[:-res[0]]
        return p + pay
+
+
class GprsResumption(Packet):
    """GPRS Resumption Section 10.5.2.14c"""
    # Half-octet IE: 4 bits only.
    name = "GPRS Resumption"
    fields_desc = [
        BitField("spare", 0x0, 3),
        BitField("ack", 0x0, 1)   # resumption acknowledgement
    ]
+
+
class HandoverReference(Packet):
    """Handover Reference Section 10.5.2.15"""
    name = "Handover Reference"
    fields_desc = [
        ByteField("handoverRef", 0x0)   # handover reference value
    ]
+
+
class IraRestOctets(Packet):
    """IAR Rest Octets Section 10.5.2.17"""
    name = "IAR Rest Octets"
    # Each group of 8 spare bits defaults to 0,0,1,0,1,0,1,1 = 0x2B,
    # presumably the standard GSM rest-octet padding value — TODO confirm.
    fields_desc = [
        BitField("spare01", 0x0, 1),
        BitField("spare02", 0x0, 1),
        BitField("spare03", 0x1, 1),
        BitField("spare04", 0x0, 1),
        BitField("spare05", 0x1, 1),
        BitField("spare06", 0x0, 1),
        BitField("spare07", 0x1, 1),
        BitField("spare08", 0x1, 1),
        BitField("spare09", 0x0, 1),
        BitField("spare10", 0x0, 1),
        BitField("spare11", 0x1, 1),
        BitField("spare12", 0x0, 1),
        BitField("spare13", 0x1, 1),
        BitField("spare14", 0x0, 1),
        BitField("spare15", 0x1, 1),
        BitField("spare16", 0x1, 1),
        BitField("spare17", 0x0, 1),
        BitField("spare18", 0x0, 1),
        BitField("spare19", 0x1, 1),
        BitField("spare20", 0x0, 1),
        BitField("spare21", 0x1, 1),
        BitField("spare22", 0x0, 1),
        BitField("spare23", 0x1, 1),
        BitField("spare24", 0x1, 1)
    ]
+
+
# len is 1 to 5 what do we do with the variable size? no lenght
# field?! WTF
class IaxRestOctets(Packet):
    """IAX Rest Octets Section 10.5.2.18"""
    name = "IAX Rest Octets"
    # First octet's bit defaults spell 0,0,1,0,1,0,1,1 = 0x2B, presumably
    # the standard GSM rest-octet padding value — TODO confirm.
    fields_desc = [
        BitField("spare01", 0x0, 1),
        BitField("spare02", 0x0, 1),
        BitField("spare03", 0x1, 1),
        BitField("spare04", 0x0, 1),
        BitField("spare05", 0x1, 1),
        BitField("spare06", 0x0, 1),
        BitField("spare07", 0x1, 1),
        BitField("spare08", 0x1, 1),
        # optional padding octets
        ByteField("spareB1", None),
        ByteField("spareB2", None),
        ByteField("spareB3", None)
    ]
+
+
class L2PseudoLength(Packet):
    """L2 Pseudo Length Section 10.5.2.19"""
    name = "L2 Pseudo Length"
    fields_desc = [
        BitField("l2pLength", None, 6),   # pseudo length value
        BitField("bit2", 0x0, 1),
        BitField("bit1", 0x1, 1)
    ]
+
+
class MeasurementResults(Packet):
    """Measurement Results Section 10.5.2.20"""
    name = "Measurement Results"
    # Neighbour-cell fields (C1..C6) are split across octet boundaries,
    # hence the Hi/Lo part pairs below.
    fields_desc = [
        BitField("baUsed", 0x0, 1),
        BitField("dtxUsed", 0x0, 1),
        BitField("rxLevFull", 0x0, 6),

        BitField("spare", 0x0, 1),
        BitField("measValid", 0x0, 1),
        BitField("rxLevSub", 0x0, 6),

        BitField("spare0", 0x0, 1),
        BitField("rxqualFull", 0x0, 3),
        BitField("rxqualSub", 0x0, 3),
        BitField("noNcellHi", 0x0, 1),

        BitField("noNcellLo", 0x0, 2),
        BitField("rxlevC1", 0x0, 6),

        BitField("bcchC1", 0x0, 5),
        BitField("bsicC1Hi", 0x0, 3),

        BitField("bsicC1Lo", 0x0, 3),
        BitField("rxlevC2", 0x0, 5),

        BitField("rxlevC2Lo", 0x0, 1),
        BitField("bcchC2", 0x0, 5),
        BitField("bsicC2Hi", 0x0, 2),

        BitField("bscicC2Lo", 0x0, 4),
        BitField("bscicC2Hi", 0x0, 4),

        BitField("rxlevC3Lo", 0x0, 2),
        BitField("bcchC3", 0x0, 5),
        BitField("rxlevC3Hi", 0x0, 1),

        BitField("bsicC3Lo", 0x0, 5),
        BitField("bsicC3Hi", 0x0, 3),

        BitField("rxlevC4Lo", 0x0, 3),
        BitField("bcchC4", 0x0, 5),

        BitField("bsicC4", 0x0, 6),
        BitField("rxlevC5Hi", 0x0, 2),

        BitField("rxlevC5Lo", 0x0, 4),
        BitField("bcchC5Hi", 0x0, 4),

        BitField("bcchC5Lo", 0x0, 1),
        BitField("bsicC5", 0x0, 6),
        BitField("rxlevC6", 0x0, 1),

        BitField("rxlevC6Lo", 0x0, 5),
        BitField("bcchC6Hi", 0x0, 3),

        BitField("bcchC6Lo", 0x0, 3),
        BitField("bsicC6", 0x0, 5)
    ]
+
+
class GprsMeasurementResults(Packet):
    """GPRS Measurement Results Section 10.5.2.20a"""
    name = "GPRS Measurement Results"
    fields_desc = [
        BitField("cValue", 0x0, 6),
        BitField("rxqualHi", 0x0, 2),
        BitField("rxqL", 0x0, 1),
        BitField("spare", 0x0, 1),
        BitField("signVar", 0x0, 6)   # signal variance
    ]
+
+
+# len 3 to 10
+class MobileAllocation(Packet):
+ """Mobile Allocation Section 10.5.2.21"""
+ name = "Mobile Allocation"
+ fields_desc = [
+ XByteField("lengthMA", None),
+ ByteField("maC64", 0x12),
+ ByteField("maC56", None), # optional fields start here
+ ByteField("maC48", None),
+ ByteField("maC40", None),
+ ByteField("maC32", None),
+ ByteField("maC24", None),
+ ByteField("maC16", None),
+ ByteField("maC8", None)
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(2, 9, a, self.fields_desc, 1)
+ if self.lengthMA is None:
+ p = struct.pack(">B", res[1]) + p[1:]
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ return p + pay
+
+
+class MobileTimeDifference(Packet):
+    """Mobile Time Difference Section 10.5.2.21a"""
+    name = "Mobile Time Difference"
+    # Fixed length IE: length octet (0x5) + 21-bit value split across
+    # two full octets and the top 5 bits of the last octet.
+    fields_desc = [
+        XByteField("lengthMTD", 0x5),
+        ByteField("valueHi", 0x0),
+        ByteField("valueCnt", 0x0),
+        BitField("valueLow", 0x0, 5),
+        BitField("spare", 0x0, 1),
+        BitField("spare1", 0x0, 1),
+        BitField("spare2", 0x0, 1)
+    ]
+
+
+# min 4 octets max 8
+class MultiRateConfiguration(Packet):
+ """ MultiRate configuration Section 10.5.2.21aa"""
+ name = "MultiRate Configuration"
+ # This packet has a variable length and hence structure. This packet
+ # implements the longuest possible packet. If you biuild a shorter
+ # packet, for example having only 6 bytes, the last 4 bytes are named
+ # "Spare" in the specs. Here they are named "threshold2"
+ fields_desc = [
+ XByteField("lengthMRC", None),
+
+ BitField("mrVersion", 0x0, 3),
+ BitField("spare", 0x0, 1),
+ BitField("icmi", 0x0, 1),
+ BitField("spare", 0x0, 1),
+ BitField("startMode", 0x0, 2),
+
+ ByteField("amrCodec", None),
+
+ BitField("spare", None, 2),
+ BitField("threshold1", None, 6),
+
+ BitField("hysteresis1", None, 4),
+ BitField("threshold2", None, 4),
+
+ BitField("threshold2cnt", None, 2),
+ BitField("hysteresis2", None, 4),
+ BitField("threshold3", None, 2),
+
+ BitField("threshold3cnt", None, 4),
+ BitField("hysteresis3", None, 4)
+ ]
+
+ def post_build(self, p, pay):
+ # we set the length
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(3, 7, a, self.fields_desc, 1)
+ if self.lengthMRC is None:
+ p = struct.pack(">B", res[1]) + p[1:]
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ return p + pay
+
+
+# len 2 to 11
+class MultislotAllocation(Packet):
+ """Multislot Allocation Section 10.5.2.21b"""
+ name = "Multislot Allocation"
+ fields_desc = [
+ XByteField("lengthMSA", None),
+ BitField("ext0", 0x1, 1),
+ BitField("da", 0x0, 7),
+ ConditionalField(BitField("ext1", 0x1, 1), # optional
+ lambda pkt: pkt.ext0 == 0),
+ ConditionalField(BitField("ua", 0x0, 7),
+ lambda pkt: pkt.ext0 == 0),
+ ByteField("chan1", None),
+ ByteField("chan2", None),
+ ByteField("chan3", None),
+ ByteField("chan4", None),
+ ByteField("chan5", None),
+ ByteField("chan6", None),
+ ByteField("chan7", None),
+ ByteField("chan8", None)
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(1, 11, a, self.fields_desc, 1)
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ if self.lengthMSA is None:
+ p = struct.pack(">B", len(p)-1) + p[1:]
+ return p + pay
+
+
+class NcMode(Packet):
+    """NC mode Section 10.5.2.21c"""
+    name = "NC Mode"
+    # Half-octet IE: 2 spare bits + 2-bit NC mode value.
+    fields_desc = [
+        BitField("spare", 0x0, 2),
+        BitField("ncMode", 0x0, 2)
+    ]
+
+
+class NeighbourCellsDescription(Packet):
+    """Neighbour Cells Description Section 10.5.2.22"""
+    name = "Neighbour Cells Description"
+    # 16-octet BCCH frequency list: 8 individually named leading bits
+    # (incl. EXT-IND and BA-IND) followed by the remaining 120 bits of
+    # the bitmap, kept as one opaque field.
+    fields_desc = [
+        BitField("bit128", 0x0, 1),
+        BitField("bit127", 0x0, 1),
+        BitField("extInd", 0x0, 1),
+        BitField("baInd", 0x0, 1),
+        BitField("bit124", 0x0, 1),
+        BitField("bit123", 0x0, 1),
+        BitField("bit122", 0x0, 1),
+        BitField("bit121", 0x0, 1),
+        BitField("120bits", 0x0, 120)
+    ]
+
+
+class NeighbourCellsDescription2(Packet):
+    """Neighbour Cells Description 2 Section 10.5.2.22a"""
+    name = "Neighbour Cells Description 2"
+    # Like NeighbourCellsDescription but with a 2-bit multiband
+    # indicator in place of bits 127/126.
+    fields_desc = [
+        BitField("bit128", 0x0, 1),
+        BitField("multiband", 0x0, 2),
+        BitField("baInd", 0x0, 1),
+        BitField("bit124", 0x0, 1),
+        BitField("bit123", 0x0, 1),
+        BitField("bit122", 0x0, 1),
+        BitField("bit121", 0x0, 1),
+        BitField("120bits", 0x0, 120)
+    ]
+
+
+# len 4
+# strange packet, lots of valid formats
+
+# ideas for the dynamic packets:
+# 1] for user interaction: Create an interactive "builder" based on a
+# Q/A process (not very scapy like)
+# 2] for usage in scripts, create an alternative packet for every
+# possible packet layout
+#
+
+class DedicatedModeOrTBF(Packet):
+    """Dedicated mode or TBF Section 10.5.2.25b"""
+    name = "Dedicated Mode or TBF"
+    # Half-octet IE: spare, TMA, downlink and TD flag bits.
+    fields_desc = [
+        BitField("spare", 0x0, 1),
+        BitField("tma", 0x0, 1),
+        BitField("downlink", 0x0, 1),
+        BitField("td", 0x0, 1)
+    ]
+
+
+class PageMode(Packet):
+    """Page Mode Section 10.5.2.26"""
+    name = "Page Mode"
+    # Half-octet IE: 2 spare bits + 2-bit page mode.
+    fields_desc = [
+        BitField("spare", 0x0, 1),
+        BitField("spare1", 0x0, 1),
+        BitField("pm", 0x0, 2)
+    ]
+
+
+class NccPermitted(Packet):
+    """NCC Permitted Section 10.5.2.27"""
+    # NOTE(review): display name "NCC Permited" is misspelled but is a
+    # runtime string — left untouched to avoid changing dissector output.
+    name = "NCC Permited"
+    # One octet: bitmap of permitted NCCs.
+    fields_desc = [
+        ByteField("nccPerm", 0x0)
+    ]
+
+
+class PowerCommand(Packet):
+    """Power Command Section 10.5.2.28"""
+    name = "Power Command"
+    # One octet: 3 spare bits + 5-bit power level.
+    fields_desc = [
+        BitField("spare", 0x0, 1),
+        BitField("spare1", 0x0, 1),
+        BitField("spare2", 0x0, 1),
+        BitField("powerLvl", 0x0, 5)
+    ]
+
+
+class PowerCommandAndAccessType(Packet):
+    """Power Command and access type Section 10.5.2.28a"""
+    name = "Power Command and Access Type"
+    # Same layout as PowerCommand, with the first bit carrying the
+    # access-type control (ATC) flag instead of being spare.
+    fields_desc = [
+        BitField("atc", 0x0, 1),
+        BitField("spare", 0x0, 1),
+        BitField("spare1", 0x0, 1),
+        BitField("powerLvl", 0x0, 5)
+    ]
+
+
+class RachControlParameters(Packet):
+    """RACH Control Parameters Section 10.5.2.29"""
+    name = "RACH Control Parameters"
+    # 3 octets: retransmission/tx-integer/cell-barring controls followed
+    # by the 16-bit access-class bitmap (ACC15 .. ACC00, MSB first).
+    fields_desc = [
+        BitField("maxRetrans", 0x0, 2),
+        BitField("txInteger", 0x0, 4),
+        BitField("cellBarrAccess", 0x0, 1),
+        BitField("re", 0x0, 1),
+        BitField("ACC15", 0x0, 1),
+        BitField("ACC14", 0x0, 1),
+        BitField("ACC13", 0x0, 1),
+        BitField("ACC12", 0x0, 1),
+        BitField("ACC11", 0x0, 1),
+        BitField("ACC10", 0x0, 1),
+        BitField("ACC09", 0x0, 1),
+        BitField("ACC08", 0x0, 1),
+        BitField("ACC07", 0x0, 1),
+        BitField("ACC06", 0x0, 1),
+        BitField("ACC05", 0x0, 1),
+        BitField("ACC04", 0x0, 1),
+        BitField("ACC03", 0x0, 1),
+        BitField("ACC02", 0x0, 1),
+        BitField("ACC01", 0x0, 1),
+        BitField("ACC00", 0x0, 1),
+    ]
+
+
+class RequestReference(Packet):
+    """Request Reference Section 10.5.2.30"""
+    name = "Request Reference"
+    # RA octet + reduced frame number split as T1 / T3(hi,lo) / T2.
+    fields_desc = [
+        ByteField("ra", 0x0),
+        BitField("t1", 0x0, 5),
+        BitField("t3Hi", 0x0, 3),
+        BitField("t3Lo", 0x0, 3),
+        BitField("t2", 0x0, 5)
+    ]
+
+
+class RrCause(Packet):
+    """RR Cause Section 10.5.2.31"""
+    name = "RR Cause"
+    # One octet: radio-resource cause value.
+    fields_desc = [
+        ByteField("rrCause", 0x0)
+    ]
+
+
+class StartingTime(Packet):
+    """Starting Time Section 10.5.2.38"""
+    name = "Starting Time"
+    # NOTE(review): layout is copied verbatim from RequestReference,
+    # including the leading "ra" octet — confirm against the spec, which
+    # describes Starting Time as only the T1/T3/T2 frame number.
+    fields_desc = [
+        ByteField("ra", 0x0),
+        BitField("t1", 0x0, 5),
+        BitField("t3Hi", 0x0, 3),
+        BitField("t3Lo", 0x0, 3),
+        BitField("t2", 0x0, 5)
+    ]
+
+
+class SynchronizationIndication(Packet):
+    """Synchronization Indication Section 10.5.2.39"""
+    name = "Synchronization Indication"
+    # Half-octet IE: NCI, ROT and 2-bit SI value.
+    fields_desc = [
+        BitField("nci", 0x0, 1),
+        BitField("rot", 0x0, 1),
+        BitField("si", 0x0, 2)
+    ]
+
+
+class TimingAdvance(Packet):
+    """Timing Advance Section 10.5.2.40"""
+    name = "Timing Advance"
+    # One octet: 2 spare bits + 6-bit timing advance value.
+    fields_desc = [
+        BitField("spare", 0x0, 1),
+        BitField("spare1", 0x0, 1),
+        BitField("timingVal", 0x0, 6)
+    ]
+
+
+class TimeDifference(Packet):
+    """ Time Difference Section 10.5.2.41"""
+    name = "Time Difference"
+    # Length octet (0x3) + one value octet.
+    fields_desc = [
+        XByteField("lengthTD", 0x3),
+        ByteField("timeValue", 0x0)
+    ]
+
+
+class Tlli(Packet):
+    """ TLLI Section Section 10.5.2.41a"""
+    name = "TLLI"
+    # 32-bit temporary logical link identifier, stored octet-wise.
+    fields_desc = [
+        ByteField("value", 0x0),
+        ByteField("value1", 0x0),
+        ByteField("value2", 0x0),
+        ByteField("value3", 0x0)
+    ]
+
+
+class TmsiPTmsi(Packet):
+    """ TMSI/P-TMSI Section 10.5.2.42"""
+    name = "TMSI/P-TMSI"
+    # 32-bit TMSI or P-TMSI, stored octet-wise like Tlli.
+    fields_desc = [
+        ByteField("value", 0x0),
+        ByteField("value1", 0x0),
+        ByteField("value2", 0x0),
+        ByteField("value3", 0x0)
+    ]
+
+
+# NOTE(review): class name misspells "Indication" as "Identication";
+# renaming would break external references, so it is kept.
+class VgcsTargetModeIdentication(Packet):
+    """ VGCS target Mode Indication 10.5.2.42a"""
+    name = "VGCS Target Mode Indication"
+    fields_desc = [
+        XByteField("lengthVTMI", 0x2),
+        BitField("targerMode", 0x0, 2),
+        BitField("cipherKeyNb", 0x0, 4),
+        BitField("spare", 0x0, 1),
+        BitField("spare1", 0x0, 1)
+    ]
+
+
+class WaitIndication(Packet):
+    """ Wait Indication Section 10.5.2.43"""
+    name = "Wait Indication"
+    # One octet: T3122 timeout value.
+    fields_desc = [  # asciiart of specs strange
+        ByteField("timeoutVal", 0x0)
+    ]
+
+
+#class Si10RestOctets(Packet):
+# """SI10 rest octets 10.5.2.44"""
+# name = "SI10 rest octets"
+# fields_desc = [
+
+
+# len 17
+class ExtendedMeasurementResults(Packet):
+    """EXTENDED MEASUREMENT RESULTS Section 10.5.2.45"""
+    name = "Extended Measurement Results"
+    # 21 RXLEV values packed 6 bits each; values crossing an octet
+    # boundary are split into Hi/Lo parts. Blank lines mark octets.
+    # NOTE(review): "rxLevC13Lo" below, paired with rxLevC11Hi's octet,
+    # looks like it should be "rxLevC11Lo" — confirm before renaming
+    # (field names are part of the public interface).
+    fields_desc = [
+
+        BitField("scUsed", None, 1),
+        BitField("dtxUsed", None, 1),
+        BitField("rxLevC0", None, 6),
+
+        BitField("rxLevC1", None, 6),
+        BitField("rxLevC2Hi", None, 2),
+
+        BitField("rxLevC2Lo", None, 4),
+        BitField("rxLevC3Hi", None, 4),
+
+        BitField("rxLevC3Lo", None, 3),
+        BitField("rxLevC4", None, 5),
+
+        BitField("rxLevC5", None, 6),
+        BitField("rxLevC6Hi", None, 2),
+
+        BitField("rxLevC6Lo", None, 4),
+        BitField("rxLevC7Hi", None, 4),
+
+        BitField("rxLevC7Lo", None, 2),
+        BitField("rxLevC8", None, 6),
+
+        BitField("rxLevC9", None, 6),
+        BitField("rxLevC10Hi", None, 2),
+
+        BitField("rxLevC10Lo", None, 4),
+        BitField("rxLevC11Hi", None, 4),
+
+        BitField("rxLevC13Lo", None, 2),
+        BitField("rxLevC12", None, 6),
+
+        BitField("rxLevC13", None, 6),
+        BitField("rxLevC14Hi", None, 2),
+
+        BitField("rxLevC14Lo", None, 4),
+        BitField("rxLevC15Hi", None, 4),
+
+        BitField("rxLevC15Lo", None, 2),
+        BitField("rxLevC16", None, 6),
+
+
+        BitField("rxLevC17", None, 6),
+        BitField("rxLevC18Hi", None, 2),
+
+        BitField("rxLevC18Lo", None, 4),
+        BitField("rxLevC19Hi", None, 4),
+
+        BitField("rxLevC19Lo", None, 2),
+        BitField("rxLevC20", None, 6)
+    ]
+
+
+# len 17
+class ExtendedMeasurementFrequencyList(Packet):
+    """Extended Measurement Frequency List Section 10.5.2.46"""
+    name = "Extended Measurement Frequency List"
+    # 17 octets: 8 named leading bits (incl. SEQ-CODE) + the remaining
+    # 128 bits of the frequency bitmap as one opaque field.
+    fields_desc = [
+
+        BitField("bit128", 0x0, 1),
+        BitField("bit127", 0x0, 1),
+        BitField("spare", 0x0, 1),
+        BitField("seqCode", 0x0, 1),
+        BitField("bit124", 0x0, 1),
+        BitField("bit123", 0x0, 1),
+        BitField("bit122", 0x0, 1),
+        BitField("bit121", 0x0, 1),
+
+        BitField("bitsRest", 0x0, 128)
+    ]
+
+
+class SuspensionCause(Packet):
+    """Suspension Cause Section 10.5.2.47"""
+    name = "Suspension Cause"
+    # One octet: suspension cause value.
+    fields_desc = [
+        ByteField("suspVal", 0x0)
+    ]
+
+
+class ApduID(Packet):
+    """APDU Flags Section 10.5.2.48"""
+    name = "Apdu Id"
+    # Half-octet IE: 4-bit protocol identifier.
+    fields_desc = [
+        BitField("id", None, 4)
+    ]
+
+
+class ApduFlags(Packet):
+    """APDU Flags Section 10.5.2.49"""
+    name = "Apdu Flags"
+    # Half-octet IE: C/R flag plus first/last segmentation flags.
+    fields_desc = [
+        BitField("spare", 0x0, 1),
+        BitField("cr", 0x0, 1),
+        BitField("firstSeg", 0x0, 1),
+        BitField("lastSeg", 0x0, 1)
+    ]
+
+
+# len 1 to max L3 (251) (done)
+class ApduData(Packet):
+ """APDU Data Section 10.5.2.50"""
+ name = "Apdu Data"
+ fields_desc = [
+ XByteField("lengthAD", None),
+ #optional
+ ByteField("apuInfo1", None),
+ ByteField("apuInfo2", None),
+ ByteField("apuInfo3", None),
+ ByteField("apuInfo4", None),
+ ByteField("apuInfo5", None),
+ ByteField("apuInfo6", None),
+ ByteField("apuInfo7", None),
+ ByteField("apuInfo8", None),
+ ByteField("apuInfo9", None),
+ ByteField("apuInfo10", None),
+ ByteField("apuInfo11", None),
+ ByteField("apuInfo12", None),
+ ByteField("apuInfo13", None),
+ ByteField("apuInfo14", None),
+ ByteField("apuInfo15", None),
+ ByteField("apuInfo16", None),
+ ByteField("apuInfo17", None),
+ ByteField("apuInfo18", None),
+ ByteField("apuInfo19", None),
+ ByteField("apuInfo20", None),
+ ByteField("apuInfo21", None),
+ ByteField("apuInfo22", None),
+ ByteField("apuInfo23", None),
+ ByteField("apuInfo24", None),
+ ByteField("apuInfo25", None),
+ ByteField("apuInfo26", None),
+ ByteField("apuInfo27", None),
+ ByteField("apuInfo28", None),
+ ByteField("apuInfo29", None),
+ ByteField("apuInfo30", None),
+ ByteField("apuInfo31", None),
+ ByteField("apuInfo32", None),
+ ByteField("apuInfo33", None),
+ ByteField("apuInfo34", None),
+ ByteField("apuInfo35", None),
+ ByteField("apuInfo36", None),
+ ByteField("apuInfo37", None),
+ ByteField("apuInfo38", None),
+ ByteField("apuInfo39", None),
+ ByteField("apuInfo40", None),
+ ByteField("apuInfo41", None),
+ ByteField("apuInfo42", None),
+ ByteField("apuInfo43", None),
+ ByteField("apuInfo44", None),
+ ByteField("apuInfo45", None),
+ ByteField("apuInfo46", None),
+ ByteField("apuInfo47", None),
+ ByteField("apuInfo48", None),
+ ByteField("apuInfo49", None),
+ ByteField("apuInfo50", None),
+ ByteField("apuInfo51", None),
+ ByteField("apuInfo52", None),
+ ByteField("apuInfo53", None),
+ ByteField("apuInfo54", None),
+ ByteField("apuInfo55", None),
+ ByteField("apuInfo56", None),
+ ByteField("apuInfo57", None),
+ ByteField("apuInfo58", None),
+ ByteField("apuInfo59", None),
+ ByteField("apuInfo60", None),
+ ByteField("apuInfo61", None),
+ ByteField("apuInfo62", None),
+ ByteField("apuInfo63", None),
+ ByteField("apuInfo64", None),
+ ByteField("apuInfo65", None),
+ ByteField("apuInfo66", None),
+ ByteField("apuInfo67", None),
+ ByteField("apuInfo68", None),
+ ByteField("apuInfo69", None),
+ ByteField("apuInfo70", None),
+ ByteField("apuInfo71", None),
+ ByteField("apuInfo72", None),
+ ByteField("apuInfo73", None),
+ ByteField("apuInfo74", None),
+ ByteField("apuInfo75", None),
+ ByteField("apuInfo76", None),
+ ByteField("apuInfo77", None),
+ ByteField("apuInfo78", None),
+ ByteField("apuInfo79", None),
+ ByteField("apuInfo80", None),
+ ByteField("apuInfo81", None),
+ ByteField("apuInfo82", None),
+ ByteField("apuInfo83", None),
+ ByteField("apuInfo84", None),
+ ByteField("apuInfo85", None),
+ ByteField("apuInfo86", None),
+ ByteField("apuInfo87", None),
+ ByteField("apuInfo88", None),
+ ByteField("apuInfo89", None),
+ ByteField("apuInfo90", None),
+ ByteField("apuInfo91", None),
+ ByteField("apuInfo92", None),
+ ByteField("apuInfo93", None),
+ ByteField("apuInfo94", None),
+ ByteField("apuInfo95", None),
+ ByteField("apuInfo96", None),
+ ByteField("apuInfo97", None),
+ ByteField("apuInfo98", None),
+ ByteField("apuInfo99", None),
+ ByteField("apuInfo100", None),
+ ByteField("apuInfo101", None),
+ ByteField("apuInfo102", None),
+ ByteField("apuInfo103", None),
+ ByteField("apuInfo104", None),
+ ByteField("apuInfo105", None),
+ ByteField("apuInfo106", None),
+ ByteField("apuInfo107", None),
+ ByteField("apuInfo108", None),
+ ByteField("apuInfo109", None),
+ ByteField("apuInfo110", None),
+ ByteField("apuInfo111", None),
+ ByteField("apuInfo112", None),
+ ByteField("apuInfo113", None),
+ ByteField("apuInfo114", None),
+ ByteField("apuInfo115", None),
+ ByteField("apuInfo116", None),
+ ByteField("apuInfo117", None),
+ ByteField("apuInfo118", None),
+ ByteField("apuInfo119", None),
+ ByteField("apuInfo120", None),
+ ByteField("apuInfo121", None),
+ ByteField("apuInfo122", None),
+ ByteField("apuInfo123", None),
+ ByteField("apuInfo124", None),
+ ByteField("apuInfo125", None),
+ ByteField("apuInfo126", None),
+ ByteField("apuInfo127", None),
+ ByteField("apuInfo128", None),
+ ByteField("apuInfo129", None),
+ ByteField("apuInfo130", None),
+ ByteField("apuInfo131", None),
+ ByteField("apuInfo132", None),
+ ByteField("apuInfo133", None),
+ ByteField("apuInfo134", None),
+ ByteField("apuInfo135", None),
+ ByteField("apuInfo136", None),
+ ByteField("apuInfo137", None),
+ ByteField("apuInfo138", None),
+ ByteField("apuInfo139", None),
+ ByteField("apuInfo140", None),
+ ByteField("apuInfo141", None),
+ ByteField("apuInfo142", None),
+ ByteField("apuInfo143", None),
+ ByteField("apuInfo144", None),
+ ByteField("apuInfo145", None),
+ ByteField("apuInfo146", None),
+ ByteField("apuInfo147", None),
+ ByteField("apuInfo148", None),
+ ByteField("apuInfo149", None),
+ ByteField("apuInfo150", None),
+ ByteField("apuInfo151", None),
+ ByteField("apuInfo152", None),
+ ByteField("apuInfo153", None),
+ ByteField("apuInfo154", None),
+ ByteField("apuInfo155", None),
+ ByteField("apuInfo156", None),
+ ByteField("apuInfo157", None),
+ ByteField("apuInfo158", None),
+ ByteField("apuInfo159", None),
+ ByteField("apuInfo160", None),
+ ByteField("apuInfo161", None),
+ ByteField("apuInfo162", None),
+ ByteField("apuInfo163", None),
+ ByteField("apuInfo164", None),
+ ByteField("apuInfo165", None),
+ ByteField("apuInfo166", None),
+ ByteField("apuInfo167", None),
+ ByteField("apuInfo168", None),
+ ByteField("apuInfo169", None),
+ ByteField("apuInfo170", None),
+ ByteField("apuInfo171", None),
+ ByteField("apuInfo172", None),
+ ByteField("apuInfo173", None),
+ ByteField("apuInfo174", None),
+ ByteField("apuInfo175", None),
+ ByteField("apuInfo176", None),
+ ByteField("apuInfo177", None),
+ ByteField("apuInfo178", None),
+ ByteField("apuInfo179", None),
+ ByteField("apuInfo180", None),
+ ByteField("apuInfo181", None),
+ ByteField("apuInfo182", None),
+ ByteField("apuInfo183", None),
+ ByteField("apuInfo184", None),
+ ByteField("apuInfo185", None),
+ ByteField("apuInfo186", None),
+ ByteField("apuInfo187", None),
+ ByteField("apuInfo188", None),
+ ByteField("apuInfo189", None),
+ ByteField("apuInfo190", None),
+ ByteField("apuInfo191", None),
+ ByteField("apuInfo192", None),
+ ByteField("apuInfo193", None),
+ ByteField("apuInfo194", None),
+ ByteField("apuInfo195", None),
+ ByteField("apuInfo196", None),
+ ByteField("apuInfo197", None),
+ ByteField("apuInfo198", None),
+ ByteField("apuInfo199", None),
+ ByteField("apuInfo200", None),
+ ByteField("apuInfo201", None),
+ ByteField("apuInfo202", None),
+ ByteField("apuInfo203", None),
+ ByteField("apuInfo204", None),
+ ByteField("apuInfo205", None),
+ ByteField("apuInfo206", None),
+ ByteField("apuInfo207", None),
+ ByteField("apuInfo208", None),
+ ByteField("apuInfo209", None),
+ ByteField("apuInfo210", None),
+ ByteField("apuInfo211", None),
+ ByteField("apuInfo212", None),
+ ByteField("apuInfo213", None),
+ ByteField("apuInfo214", None),
+ ByteField("apuInfo215", None),
+ ByteField("apuInfo216", None),
+ ByteField("apuInfo217", None),
+ ByteField("apuInfo218", None),
+ ByteField("apuInfo219", None),
+ ByteField("apuInfo220", None),
+ ByteField("apuInfo221", None),
+ ByteField("apuInfo222", None),
+ ByteField("apuInfo223", None),
+ ByteField("apuInfo224", None),
+ ByteField("apuInfo225", None),
+ ByteField("apuInfo226", None),
+ ByteField("apuInfo227", None),
+ ByteField("apuInfo228", None),
+ ByteField("apuInfo229", None),
+ ByteField("apuInfo230", None),
+ ByteField("apuInfo231", None),
+ ByteField("apuInfo232", None),
+ ByteField("apuInfo233", None),
+ ByteField("apuInfo234", None),
+ ByteField("apuInfo235", None),
+ ByteField("apuInfo236", None),
+ ByteField("apuInfo237", None),
+ ByteField("apuInfo238", None),
+ ByteField("apuInfo239", None),
+ ByteField("apuInfo240", None),
+ ByteField("apuInfo241", None),
+ ByteField("apuInfo242", None),
+ ByteField("apuInfo243", None),
+ ByteField("apuInfo244", None),
+ ByteField("apuInfo245", None),
+ ByteField("apuInfo246", None),
+ ByteField("apuInfo247", None),
+ ByteField("apuInfo248", None),
+ ByteField("apuInfo249", None)
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(1, 250, a, self.fields_desc, 1)
+ if self.lengthAD is None:
+ p = struct.pack(">B", res[1]) + p[1:]
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ return p + pay
+#
+# 10.5.3 Mobility management information elements
+#
+
+
+# len 3 to L3 max (251) (done)
+class NetworkName(Packet):
+ """Network Name Section 10.5.3.5a"""
+ name = "Network Name"
+ fields_desc = [
+
+ XByteField("lengthNN", None),
+
+ BitField("ext", 0x1, 1),
+ BitField("codingScheme", 0x0, 3),
+ BitField("addCi", 0x0, 1),
+ BitField("nbSpare", 0x0, 3),
+ # optional
+ ByteField("txtString1", None),
+ ByteField("txtString2", None),
+ ByteField("txtString3", None),
+ ByteField("txtString4", None),
+ ByteField("txtString5", None),
+ ByteField("txtString6", None),
+ ByteField("txtString7", None),
+ ByteField("txtString8", None),
+ ByteField("txtString9", None),
+ ByteField("txtString10", None),
+ ByteField("txtString11", None),
+ ByteField("txtString12", None),
+ ByteField("txtString13", None),
+ ByteField("txtString14", None),
+ ByteField("txtString15", None),
+ ByteField("txtString16", None),
+ ByteField("txtString17", None),
+ ByteField("txtString18", None),
+ ByteField("txtString19", None),
+ ByteField("txtString20", None),
+ ByteField("txtString21", None),
+ ByteField("txtString22", None),
+ ByteField("txtString23", None),
+ ByteField("txtString24", None),
+ ByteField("txtString25", None),
+ ByteField("txtString26", None),
+ ByteField("txtString27", None),
+ ByteField("txtString28", None),
+ ByteField("txtString29", None),
+ ByteField("txtString30", None),
+ ByteField("txtString31", None),
+ ByteField("txtString32", None),
+ ByteField("txtString33", None),
+ ByteField("txtString34", None),
+ ByteField("txtString35", None),
+ ByteField("txtString36", None),
+ ByteField("txtString37", None),
+ ByteField("txtString38", None),
+ ByteField("txtString39", None),
+ ByteField("txtString40", None),
+ ByteField("txtString41", None),
+ ByteField("txtString42", None),
+ ByteField("txtString43", None),
+ ByteField("txtString44", None),
+ ByteField("txtString45", None),
+ ByteField("txtString46", None),
+ ByteField("txtString47", None),
+ ByteField("txtString48", None),
+ ByteField("txtString49", None),
+ ByteField("txtString50", None),
+ ByteField("txtString51", None),
+ ByteField("txtString52", None),
+ ByteField("txtString53", None),
+ ByteField("txtString54", None),
+ ByteField("txtString55", None),
+ ByteField("txtString56", None),
+ ByteField("txtString57", None),
+ ByteField("txtString58", None),
+ ByteField("txtString59", None),
+ ByteField("txtString60", None),
+ ByteField("txtString61", None),
+ ByteField("txtString62", None),
+ ByteField("txtString63", None),
+ ByteField("txtString64", None),
+ ByteField("txtString65", None),
+ ByteField("txtString66", None),
+ ByteField("txtString67", None),
+ ByteField("txtString68", None),
+ ByteField("txtString69", None),
+ ByteField("txtString70", None),
+ ByteField("txtString71", None),
+ ByteField("txtString72", None),
+ ByteField("txtString73", None),
+ ByteField("txtString74", None),
+ ByteField("txtString75", None),
+ ByteField("txtString76", None),
+ ByteField("txtString77", None),
+ ByteField("txtString78", None),
+ ByteField("txtString79", None),
+ ByteField("txtString80", None),
+ ByteField("txtString81", None),
+ ByteField("txtString82", None),
+ ByteField("txtString83", None),
+ ByteField("txtString84", None),
+ ByteField("txtString85", None),
+ ByteField("txtString86", None),
+ ByteField("txtString87", None),
+ ByteField("txtString88", None),
+ ByteField("txtString89", None),
+ ByteField("txtString90", None),
+ ByteField("txtString91", None),
+ ByteField("txtString92", None),
+ ByteField("txtString93", None),
+ ByteField("txtString94", None),
+ ByteField("txtString95", None),
+ ByteField("txtString96", None),
+ ByteField("txtString97", None),
+ ByteField("txtString98", None),
+ ByteField("txtString99", None),
+ ByteField("txtString100", None),
+ ByteField("txtString101", None),
+ ByteField("txtString102", None),
+ ByteField("txtString103", None),
+ ByteField("txtString104", None),
+ ByteField("txtString105", None),
+ ByteField("txtString106", None),
+ ByteField("txtString107", None),
+ ByteField("txtString108", None),
+ ByteField("txtString109", None),
+ ByteField("txtString110", None),
+ ByteField("txtString111", None),
+ ByteField("txtString112", None),
+ ByteField("txtString113", None),
+ ByteField("txtString114", None),
+ ByteField("txtString115", None),
+ ByteField("txtString116", None),
+ ByteField("txtString117", None),
+ ByteField("txtString118", None),
+ ByteField("txtString119", None),
+ ByteField("txtString120", None),
+ ByteField("txtString121", None),
+ ByteField("txtString122", None),
+ ByteField("txtString123", None),
+ ByteField("txtString124", None),
+ ByteField("txtString125", None),
+ ByteField("txtString126", None),
+ ByteField("txtString127", None),
+ ByteField("txtString128", None),
+ ByteField("txtString129", None),
+ ByteField("txtString130", None),
+ ByteField("txtString131", None),
+ ByteField("txtString132", None),
+ ByteField("txtString133", None),
+ ByteField("txtString134", None),
+ ByteField("txtString135", None),
+ ByteField("txtString136", None),
+ ByteField("txtString137", None),
+ ByteField("txtString138", None),
+ ByteField("txtString139", None),
+ ByteField("txtString140", None),
+ ByteField("txtString141", None),
+ ByteField("txtString142", None),
+ ByteField("txtString143", None),
+ ByteField("txtString144", None),
+ ByteField("txtString145", None),
+ ByteField("txtString146", None),
+ ByteField("txtString147", None),
+ ByteField("txtString148", None),
+ ByteField("txtString149", None),
+ ByteField("txtString150", None),
+ ByteField("txtString151", None),
+ ByteField("txtString152", None),
+ ByteField("txtString153", None),
+ ByteField("txtString154", None),
+ ByteField("txtString155", None),
+ ByteField("txtString156", None),
+ ByteField("txtString157", None),
+ ByteField("txtString158", None),
+ ByteField("txtString159", None),
+ ByteField("txtString160", None),
+ ByteField("txtString161", None),
+ ByteField("txtString162", None),
+ ByteField("txtString163", None),
+ ByteField("txtString164", None),
+ ByteField("txtString165", None),
+ ByteField("txtString166", None),
+ ByteField("txtString167", None),
+ ByteField("txtString168", None),
+ ByteField("txtString169", None),
+ ByteField("txtString170", None),
+ ByteField("txtString171", None),
+ ByteField("txtString172", None),
+ ByteField("txtString173", None),
+ ByteField("txtString174", None),
+ ByteField("txtString175", None),
+ ByteField("txtString176", None),
+ ByteField("txtString177", None),
+ ByteField("txtString178", None),
+ ByteField("txtString179", None),
+ ByteField("txtString180", None),
+ ByteField("txtString181", None),
+ ByteField("txtString182", None),
+ ByteField("txtString183", None),
+ ByteField("txtString184", None),
+ ByteField("txtString185", None),
+ ByteField("txtString186", None),
+ ByteField("txtString187", None),
+ ByteField("txtString188", None),
+ ByteField("txtString189", None),
+ ByteField("txtString190", None),
+ ByteField("txtString191", None),
+ ByteField("txtString192", None),
+ ByteField("txtString193", None),
+ ByteField("txtString194", None),
+ ByteField("txtString195", None),
+ ByteField("txtString196", None),
+ ByteField("txtString197", None),
+ ByteField("txtString198", None),
+ ByteField("txtString199", None),
+ ByteField("txtString200", None),
+ ByteField("txtString201", None),
+ ByteField("txtString202", None),
+ ByteField("txtString203", None),
+ ByteField("txtString204", None),
+ ByteField("txtString205", None),
+ ByteField("txtString206", None),
+ ByteField("txtString207", None),
+ ByteField("txtString208", None),
+ ByteField("txtString209", None),
+ ByteField("txtString210", None),
+ ByteField("txtString211", None),
+ ByteField("txtString212", None),
+ ByteField("txtString213", None),
+ ByteField("txtString214", None),
+ ByteField("txtString215", None),
+ ByteField("txtString216", None),
+ ByteField("txtString217", None),
+ ByteField("txtString218", None),
+ ByteField("txtString219", None),
+ ByteField("txtString220", None),
+ ByteField("txtString221", None),
+ ByteField("txtString222", None),
+ ByteField("txtString223", None),
+ ByteField("txtString224", None),
+ ByteField("txtString225", None),
+ ByteField("txtString226", None),
+ ByteField("txtString227", None),
+ ByteField("txtString228", None),
+ ByteField("txtString229", None),
+ ByteField("txtString230", None),
+ ByteField("txtString231", None),
+ ByteField("txtString232", None),
+ ByteField("txtString233", None),
+ ByteField("txtString234", None),
+ ByteField("txtString235", None),
+ ByteField("txtString236", None),
+ ByteField("txtString237", None),
+ ByteField("txtString238", None),
+ ByteField("txtString239", None),
+ ByteField("txtString240", None),
+ ByteField("txtString241", None),
+ ByteField("txtString242", None),
+ ByteField("txtString243", None),
+ ByteField("txtString244", None),
+ ByteField("txtString245", None),
+ ByteField("txtString246", None),
+ ByteField("txtString247", None),
+ ByteField("txtString248", None)
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ a = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(2, 250, a, self.fields_desc, 1)
+ if self.lengthNN is None:
+ p = struct.pack(">B", res[1]) + p[1:]
+ if res[0] is not 0:
+ p = p[:-res[0]]
+ return p + pay
+
+
+class TimeZone(Packet):
+    """Time Zone Section 10.5.3.8"""
+    name = "Time Zone"
+    # One octet: time zone value.
+    fields_desc = [
+        ByteField("timeZone", 0x0),
+    ]
+
+
+class TimeZoneAndTime(Packet):
+    """Time Zone and Time Section 10.5.3.9"""
+    name = "Time Zone and Time"
+    # Seven octets: date and time components followed by the time zone.
+    fields_desc = [
+        ByteField("year", 0x0),
+        ByteField("month", 0x0),
+        ByteField("day", 0x0),
+        ByteField("hour", 0x0),
+        ByteField("minute", 0x0),
+        ByteField("second", 0x0),
+        ByteField("timeZone", 0x0)
+    ]
+
+
+class CtsPermission(Packet):
+    """CTS permission Section 10.5.3.10"""
+    name = "Cts Permission"
+    # Type-only IE: it carries no value part, hence no fields.
+    fields_desc = [
+    ]
+
+
+class LsaIdentifier(Packet):
+    """LSA Identifier Section 10.5.3.11"""
+    name = "Lsa Identifier"
+    # 24-bit LSA identifier, stored octet-wise.
+    fields_desc = [
+        ByteField("lsaID", 0x0),
+        ByteField("lsaID1", 0x0),
+        ByteField("lsaID2", 0x0)
+    ]
+
+
+#
+# 10.5.4 Call control information elements
+#
+
+#10.5.4.1 Extensions of codesets
+# This is only text and no packet
+
+class LockingShiftProcedure(Packet):
+    """Locking shift procedure Section 10.5.4.2"""
+    name = "Locking Shift Procedure"
+    # Half-octet: shift flag (0 = locking) + 3-bit codeset identifier.
+    fields_desc = [
+        BitField("lockShift", 0x0, 1),
+        BitField("codesetId", 0x0, 3)
+    ]
+
+
+class NonLockingShiftProcedure(Packet):
+    """Non-locking shift procedure Section 10.5.4.3"""
+    name = "Non-locking Shift Procedure"
+    # Half-octet: shift flag (1 = non-locking) + 3-bit codeset identifier.
+    fields_desc = [
+        BitField("nonLockShift", 0x1, 1),
+        BitField("codesetId", 0x0, 3)
+    ]
+
+
+class AuxiliaryStates(Packet):
+    """Auxiliary states Section 10.5.4.4"""
+    name = "Auxiliary States"
+    # Fixed length IE (0x3): extension bit, hold state and MPTY state.
+    fields_desc = [
+        XByteField("lengthAS", 0x3),
+        BitField("ext", 0x1, 1),
+        BitField("spare", 0x0, 3),
+        BitField("holdState", 0x0, 2),
+        BitField("mptyState", 0x0, 2)
+    ]
+
+
# len 3 to 15
class BearerCapability(Packet):
    """Bearer capability Section 10.5.4.5

    Variable-length IE made of a chain of "extension" octets: each
    ``extN == 0`` bit signals that the following optional octet group is
    present (modelled below with ConditionalField).
    """
    name = "Bearer Capability"
    fields_desc = [

        XByteField("lengthBC", None),

        BitField("ext0", 0x1, 1),
        BitField("radioChReq", 0x1, 2),
        BitField("codingStd", 0x0, 1),
        BitField("transMode", 0x0, 1),
        BitField("infoTransCa", 0x0, 3),
        # optional
        ConditionalField(BitField("ext1", 0x1, 1),
                         lambda pkt: pkt.ext0 == 0),
        ConditionalField(BitField("coding", None, 1),
                         lambda pkt: pkt.ext0 == 0),
        ConditionalField(BitField("spare", None, 2),
                         lambda pkt: pkt.ext0 == 0),
        ConditionalField(BitField("speechVers", 0x0, 4),
                         lambda pkt: pkt.ext0 == 0),

        ConditionalField(BitField("ext2", 0x1, 1),
                         lambda pkt: pkt.ext1 == 0),
        ConditionalField(BitField("compress", None, 1),
                         lambda pkt: pkt.ext1 == 0),
        ConditionalField(BitField("structure", None, 2),
                         lambda pkt: pkt.ext1 == 0),
        ConditionalField(BitField("dupMode", None, 1),
                         lambda pkt: pkt.ext1 == 0),
        ConditionalField(BitField("config", None, 1),
                         lambda pkt: pkt.ext1 == 0),
        ConditionalField(BitField("nirr", None, 1),
                         lambda pkt: pkt.ext1 == 0),
        ConditionalField(BitField("establi", 0x0, 1),
                         lambda pkt: pkt.ext1 == 0),

        BitField("ext3", None, 1),
        BitField("accessId", None, 2),
        BitField("rateAda", None, 2),
        BitField("signaling", None, 3),

        ConditionalField(BitField("ext4", None, 1),
                         lambda pkt: pkt.ext3 == 0),
        ConditionalField(BitField("otherITC", None, 2),
                         lambda pkt: pkt.ext3 == 0),
        ConditionalField(BitField("otherRate", None, 2),
                         lambda pkt: pkt.ext3 == 0),
        ConditionalField(BitField("spare1", 0x0, 3),
                         lambda pkt: pkt.ext3 == 0),

        ConditionalField(BitField("ext5", 0x1, 1),
                         lambda pkt: pkt.ext4 == 0),
        ConditionalField(BitField("hdr", None, 1),
                         lambda pkt: pkt.ext4 == 0),
        ConditionalField(BitField("multiFr", None, 1),
                         lambda pkt: pkt.ext4 == 0),
        ConditionalField(BitField("mode", None, 1),
                         lambda pkt: pkt.ext4 == 0),
        ConditionalField(BitField("lli", None, 1),
                         lambda pkt: pkt.ext4 == 0),
        ConditionalField(BitField("assig", None, 1),
                         lambda pkt: pkt.ext4 == 0),
        ConditionalField(BitField("inbNeg", None, 1),
                         lambda pkt: pkt.ext4 == 0),
        ConditionalField(BitField("spare2", 0x0, 1),
                         lambda pkt: pkt.ext4 == 0),

        BitField("ext6", None, 1),
        BitField("layer1Id", None, 2),
        BitField("userInf", None, 4),
        BitField("sync", None, 1),

        ConditionalField(BitField("ext7", None, 1),
                         lambda pkt: pkt.ext6 == 0),
        ConditionalField(BitField("stopBit", None, 1),
                         lambda pkt: pkt.ext6 == 0),
        ConditionalField(BitField("negoc", None, 1),
                         lambda pkt: pkt.ext6 == 0),
        ConditionalField(BitField("nbDataBit", None, 1),
                         lambda pkt: pkt.ext6 == 0),
        ConditionalField(BitField("userRate", None, 4),
                         lambda pkt: pkt.ext6 == 0),

        ConditionalField(BitField("ext8", None, 1),
                         lambda pkt: pkt.ext7 == 0),
        ConditionalField(BitField("interRate", None, 2),
                         lambda pkt: pkt.ext7 == 0),
        ConditionalField(BitField("nicTX", None, 1),
                         lambda pkt: pkt.ext7 == 0),
        ConditionalField(BitField("nicRX", None, 1),
                         lambda pkt: pkt.ext7 == 0),
        ConditionalField(BitField("parity", None, 3),
                         lambda pkt: pkt.ext7 == 0),

        ConditionalField(BitField("ext9", None, 1),
                         lambda pkt: pkt.ext8 == 0),
        ConditionalField(BitField("connEle", None, 2),
                         lambda pkt: pkt.ext8 == 0),
        ConditionalField(BitField("modemType", None, 5),
                         lambda pkt: pkt.ext8 == 0),

        ConditionalField(BitField("ext10", None, 1),
                         lambda pkt: pkt.ext9 == 0),
        ConditionalField(BitField("otherModemType", None, 2),
                         lambda pkt: pkt.ext9 == 0),
        ConditionalField(BitField("netUserRate", None, 5),
                         lambda pkt: pkt.ext9 == 0),

        ConditionalField(BitField("ext11", None, 1),
                         lambda pkt: pkt.ext10 == 0),
        ConditionalField(BitField("chanCoding", None, 4),
                         lambda pkt: pkt.ext10 == 0),
        ConditionalField(BitField("maxTrafficChan", None, 3),
                         lambda pkt: pkt.ext10 == 0),

        ConditionalField(BitField("ext12", None, 1),
                         lambda pkt: pkt.ext11 == 0),
        ConditionalField(BitField("uimi", None, 3),
                         lambda pkt: pkt.ext11 == 0),
        ConditionalField(BitField("airInterfaceUserRate", None, 4),
                         lambda pkt: pkt.ext11 == 0),

        ConditionalField(BitField("ext13", 0x1, 1),
                         lambda pkt: pkt.ext12 == 0),
        ConditionalField(BitField("layer2Ch", None, 2),
                         lambda pkt: pkt.ext12 == 0),
        ConditionalField(BitField("userInfoL2", 0x0, 5),
                         lambda pkt: pkt.ext12 == 0)
    ]

    def post_build(self, p, pay):
        """Strip unset trailing octets and fill in the length octet.

        ``adapt`` (module-level helper) is given every current field value
        in declaration order; res[0] is used here as the number of trailing
        bytes to drop and the length octet is then recomputed from the
        truncated payload.
        """
        vals = [getattr(self, f.name) for f in self.fields_desc]
        res = adapt(2, 15, vals, self.fields_desc, 1)
        # Was `res[0] is not 0`: identity comparison on an int only works by
        # accident of CPython's small-int caching; use a value comparison.
        if res[0] != 0:
            p = p[:-res[0]]
        if self.lengthBC is None:
            p = struct.pack(">B", len(p) - 1) + p[1:]
        return p + pay
+
+
class CallControlCapabilities(Packet):
    """Call Control Capabilities Section 10.5.4.5a"""
    name = "Call Control Capabilities"
    # Fixed-length IE: length octet (defaults to 3) plus one value octet
    # with 6 spare bits and the 1-bit PCP and DTMF capability flags.
    fields_desc = [
        XByteField("lengthCCC", 0x3),
        BitField("spare", 0x0, 6),
        BitField("pcp", 0x0, 1),
        BitField("dtmf", 0x0, 1)
    ]
+
+
class CallState(Packet):
    """Call State Section 10.5.4.6"""
    name = "Call State"
    # Single octet: 2-bit coding standard followed by a 6-bit state value.
    fields_desc = [
        BitField("codingStd", 0x0, 2),
        BitField("stateValue", 0x0, 6)
    ]
+
+
# len 3 to 43
class CalledPartyBcdNumber(Packet):
    """Called party BCD number Section 10.5.4.7

    Length octet, one type-of-number/numbering-plan octet, then up to 80
    optional BCD digits packed two per octet.
    """
    name = "Called Party BCD Number"
    # Each octet carries two BCD digits with the even-numbered digit
    # serialized first, hence the (k + 1, k) ordering: nbDigit2, nbDigit1,
    # nbDigit4, nbDigit3, ... nbDigit80, nbDigit79 — identical to the
    # original hand-written list.
    fields_desc = [
        XByteField("lengthCPBN", None),
        BitField("ext", 0x1, 1),
        BitField("typeNb", 0x0, 3),
        BitField("nbPlanId", 0x0, 4),
        # optional
    ] + [
        BitField("nbDigit%d" % d, None, 4)
        for k in range(1, 81, 2)
        for d in (k + 1, k)
    ]

    def post_build(self, p, pay):
        """Fill in the length octet and strip unset trailing octets."""
        vals = [getattr(self, f.name) for f in self.fields_desc]
        res = adapt(2, 42, vals, self.fields_desc, 1)
        if self.lengthCPBN is None:
            p = struct.pack(">B", res[1]) + p[1:]
        # Was `res[0] is not 0`: int identity comparison, replaced by `!=`.
        if res[0] != 0:
            p = p[:-res[0]]
        return p + pay
+
+
# len 2 to 23
class CalledPartySubaddress(Packet):
    """Called party subaddress Section 10.5.4.8

    Length octet plus an optional type/odd-even octet and up to 20
    subaddress information octets.
    """
    name = "Called Party Subaddress"
    fields_desc = [
        XByteField("lengthCPS", None),
        # optional
        BitField("ext", None, 1),
        BitField("subAddr", None, 3),
        BitField("oddEven", None, 1),
        BitField("spare", None, 3),
    ] + [
        # subInfo0 .. subInfo19, generated instead of spelled out.
        ByteField("subInfo%d" % i, None) for i in range(20)
    ]

    def post_build(self, p, pay):
        """Fill in the length octet and strip unset trailing octets."""
        vals = [getattr(self, f.name) for f in self.fields_desc]
        res = adapt(2, 23, vals, self.fields_desc, 1)
        if self.lengthCPS is None:
            p = struct.pack(">B", res[1]) + p[1:]
        # Was `res[0] is not 0`: int identity comparison, replaced by `!=`.
        if res[0] != 0:
            p = p[:-res[0]]
        return p + pay
+
+
# len 3 to 14
class CallingPartyBcdNumber(Packet):
    """Calling party BCD number Section 10.5.4.9

    NOTE(review): the original docstring and display name said "Called
    Party Subaddress" — a copy-paste slip; this class models the *calling*
    party BCD number IE (10.5.4.9).
    """
    name = "Calling Party BCD Number"
    fields_desc = [
        XByteField("lengthCPBN", None),
        BitField("ext", 0x1, 1),
        BitField("typeNb", 0x0, 3),
        BitField("nbPlanId", 0x0, 4),
        # optional octet 3a, present only when the ext bit of octet 3 is 0
        ConditionalField(BitField("ext1", 0x1, 1),
                         lambda pkt: pkt.ext == 0),
        ConditionalField(BitField("presId", None, 2),
                         lambda pkt: pkt.ext == 0),
        ConditionalField(BitField("spare", None, 3),
                         lambda pkt: pkt.ext == 0),
        ConditionalField(BitField("screenId", 0x0, 2),
                         lambda pkt: pkt.ext == 0),
    ] + [
        # BCD digits two per octet, even digit first: nbDigit2, nbDigit1,
        # ... nbDigit20, nbDigit19 — same order as the hand-written list.
        BitField("nbDigit%d" % d, None, 4)
        for k in range(1, 21, 2)
        for d in (k + 1, k)
    ]

    def post_build(self, p, pay):
        """Strip unset trailing octets and fill in the length octet."""
        vals = [getattr(self, f.name) for f in self.fields_desc]
        res = adapt(2, 13, vals, self.fields_desc, 1)
        # Was `res[0] is not 0`: int identity comparison, replaced by `!=`.
        if res[0] != 0:
            p = p[:-res[0]]
        if self.lengthCPBN is None:
            p = struct.pack(">B", len(p) - 1) + p[1:]
        return p + pay
+
+
# len 2 to 23
class CallingPartySubaddress(Packet):
    """Calling party subaddress Section 10.5.4.10

    Length octet plus an optional type/odd-even octet and up to 20
    subaddress information octets.
    """
    name = "Calling Party Subaddress"
    fields_desc = [
        XByteField("lengthCPS", None),
        # optional
        BitField("ext1", None, 1),
        BitField("typeAddr", None, 3),
        BitField("oddEven", None, 1),
        BitField("spare", None, 3),
    ] + [
        # subInfo0 .. subInfo19, generated instead of spelled out.
        ByteField("subInfo%d" % i, None) for i in range(20)
    ]

    def post_build(self, p, pay):
        """Fill in the length octet and strip unset trailing octets."""
        vals = [getattr(self, f.name) for f in self.fields_desc]
        res = adapt(1, 22, vals, self.fields_desc, 1)
        if self.lengthCPS is None:
            p = struct.pack(">B", res[1]) + p[1:]
        # Was `res[0] is not 0`: int identity comparison, replaced by `!=`.
        if res[0] != 0:
            p = p[:-res[0]]
        return p + pay
+
+
# len 4 to 32
class Cause(Packet):
    """Cause Section 10.5.4.11

    Length octet, coding/location octet, optional recommendation octet
    (present when the first ext bit is 0), cause-value octet and up to 27
    diagnostic octets.
    """
    name = "Cause"
    fields_desc = [

        XByteField("lengthC", None),

        BitField("ext", 0x1, 1),
        BitField("codingStd", 0x0, 2),
        BitField("spare", 0x0, 1),
        BitField("location", 0x0, 4),

        ConditionalField(BitField("ext1", 0x1, 1),
                         lambda pkt: pkt.ext == 0),
        ConditionalField(BitField("recommendation", 0x1, 7),
                         lambda pkt: pkt.ext == 0),
        # optional
        BitField("ext2", None, 1),
        BitField("causeValue", None, 7),
    ] + [
        # diagnositc0 .. diagnositc26 (sic — the misspelling is kept because
        # the field names are part of the public attribute interface).
        ByteField("diagnositc%d" % i, None) for i in range(27)
    ]

    def post_build(self, p, pay):
        """Strip unset trailing octets and fill in the length octet."""
        vals = [getattr(self, f.name) for f in self.fields_desc]
        res = adapt(3, 31, vals, self.fields_desc, 1)
        # Was `res[0] is not 0`: int identity comparison, replaced by `!=`.
        if res[0] != 0:
            p = p[:-res[0]]
        if self.lengthC is None:
            p = struct.pack(">B", len(p) - 1) + p[1:]
        return p + pay
+
+
class ClirSuppression(Packet):
    """CLIR suppression Section 10.5.4.11a"""
    name = "Clir Suppression"
    # Type-only IE: it carries no value part, so the field list is empty.
    fields_desc = [
    ]
+
+
class ClirInvocation(Packet):
    """CLIR invocation Section 10.5.4.11b"""
    name = "Clir Invocation"
    # Type-only IE: it carries no value part, so the field list is empty.
    fields_desc = [
    ]
+
+
class CongestionLevel(Packet):
    """Congestion level Section 10.5.4.12"""
    name = "Congestion Level"
    # Half-octet IE; the 4-bit value itself is not defined by the standard.
    fields_desc = [
        BitField("notDef", 0x0, 4)  # not defined by the std
    ]
+
+
# len 3 to 14
class ConnectedNumber(Packet):
    """Connected number Section 10.5.4.13

    Length octet, type-of-number octet, optional presentation/screening
    octet (when the ext bit is 0) and up to 20 BCD digits.
    """
    name = "Connected Number"
    fields_desc = [

        XByteField("lengthCN", None),

        BitField("ext", 0x1, 1),
        BitField("typeNb", 0x0, 3),
        BitField("typePlanId", 0x0, 4),
        # optional
        ConditionalField(BitField("ext1", 0x1, 1),
                         lambda pkt: pkt.ext == 0),
        ConditionalField(BitField("presId", None, 2),
                         lambda pkt: pkt.ext == 0),
        ConditionalField(BitField("spare", None, 3),
                         lambda pkt: pkt.ext == 0),
        ConditionalField(BitField("screenId", None, 2),
                         lambda pkt: pkt.ext == 0),
    ] + [
        # BCD digits two per octet, even digit first: nbDigit2, nbDigit1,
        # ... nbDigit20, nbDigit19 — same order as the hand-written list.
        BitField("nbDigit%d" % d, None, 4)
        for k in range(1, 21, 2)
        for d in (k + 1, k)
    ]

    def post_build(self, p, pay):
        """Strip unset trailing octets and fill in the length octet."""
        vals = [getattr(self, f.name) for f in self.fields_desc]
        res = adapt(2, 13, vals, self.fields_desc, 1)
        # Was `res[0] is not 0`: int identity comparison, replaced by `!=`.
        if res[0] != 0:
            p = p[:-res[0]]
        if self.lengthCN is None:
            p = struct.pack(">B", len(p) - 1) + p[1:]
        return p + pay
+
+
# len 2 to 23
class ConnectedSubaddress(Packet):
    """Connected subaddress Section 10.5.4.14

    Length octet plus an optional type/odd-even octet and up to 20
    subaddress information octets.
    """
    name = "Connected Subaddress"
    fields_desc = [

        XByteField("lengthCS", None),
        # optional
        BitField("ext", None, 1),
        BitField("typeOfSub", None, 3),
        BitField("oddEven", None, 1),
        BitField("spare", None, 3),
    ] + [
        # subInfo0 .. subInfo19, generated instead of spelled out.
        ByteField("subInfo%d" % i, None) for i in range(20)
    ]

    def post_build(self, p, pay):
        """Fill in the length octet and strip unset trailing octets."""
        vals = [getattr(self, f.name) for f in self.fields_desc]
        res = adapt(1, 22, vals, self.fields_desc, 1)
        if self.lengthCS is None:
            p = struct.pack(">B", res[1]) + p[1:]
        # Was `res[0] is not 0`: int identity comparison, replaced by `!=`.
        if res[0] != 0:
            p = p[:-res[0]]
        return p + pay
+
+
# len 2 to L3 (251) (done)
class Facility(Packet):
    """Facility Section 10.5.4.15

    Length octet followed by up to 249 optional facility information
    octets (facilityInfo1 .. facilityInfo249).
    """
    name = "Facility"
    # The 249 ByteFields are generated instead of being spelled out one per
    # line; names and order match the original hand-written list exactly.
    fields_desc = [
        XByteField("lengthF", None),
        # optional
    ] + [
        ByteField("facilityInfo%d" % i, None) for i in range(1, 250)
    ]

    def post_build(self, p, pay):
        """Fill in the length octet and strip unset trailing octets."""
        vals = [getattr(self, f.name) for f in self.fields_desc]
        res = adapt(7, 250, vals, self.fields_desc, 1)
        if self.lengthF is None:
            p = struct.pack(">B", res[1]) + p[1:]
        # Was `res[0] is not 0`: int identity comparison, replaced by `!=`.
        if res[0] != 0:
            p = p[:-res[0]]
        return p + pay
+
+
# len 2 to 5
class HighLayerCompatibility(Packet):
    """High layer compatibility Section 10.5.4.16"""
    name = "High Layer Compatibility"
    fields_desc = [

        XByteField("lengthHLC", None),
        # optional
        BitField("ext", None, 1),
        BitField("codingStd", None, 2),
        BitField("interpret", None, 3),
        BitField("presMeth", None, 2),

        BitField("ext1", None, 1),
        BitField("highLayerId", None, 7),

        # Extended high layer id octet, present only when ext1 == 0.
        ConditionalField(BitField("ext2", 0x1, 1),
                         lambda pkt: pkt.ext1 == 0),
        ConditionalField(BitField("exHiLayerId", 0x0, 7),
                         lambda pkt: pkt.ext1 == 0),
    ]

    def post_build(self, p, pay):
        """Strip unset trailing octets and fill in the length octet."""
        vals = [getattr(self, f.name) for f in self.fields_desc]
        res = adapt(1, 4, vals, self.fields_desc, 1)
        # Was `res[0] is not 0`: int identity comparison, replaced by `!=`.
        if res[0] != 0:
            p = p[:-res[0]]
        if self.lengthHLC is None:
            p = struct.pack(">B", len(p) - 1) + p[1:]
        return p + pay
+#
+# 10.5.4.16.1 Static conditions for the high layer
+# compatibility IE contents
+#
+
+
class KeypadFacility(Packet):
    """Keypad facility Section 10.5.4.17"""
    name = "Keypad Facility"
    # Single octet: 1 spare bit followed by 7 bits of keypad information.
    fields_desc = [
        BitField("spare", 0x0, 1),
        BitField("keyPadInfo", 0x0, 7)
    ]
+
+
# len 2 to 15
class LowLayerCompatibility(Packet):
    """Low layer compatibility Section 10.5.4.18

    Length octet plus up to 13 opaque content octets (rest0 .. rest12).
    """
    name = "Low Layer Compatibility"
    fields_desc = [
        XByteField("lengthLLC", None),
        # optional
    ] + [
        ByteField("rest%d" % i, None) for i in range(13)
    ]

    def post_build(self, p, pay):
        """Fill in the length octet and strip unset trailing octets."""
        vals = [getattr(self, f.name) for f in self.fields_desc]
        res = adapt(1, 14, vals, self.fields_desc, 1)
        if self.lengthLLC is None:
            p = struct.pack(">B", res[1]) + p[1:]
        # Was `res[0] is not 0`: int identity comparison, replaced by `!=`.
        if res[0] != 0:
            p = p[:-res[0]]
        return p + pay
+
+
class MoreData(Packet):
    """More data Section 10.5.4.19"""
    name = "More Data"
    # Type-only IE: it carries no value part, so the field list is empty.
    fields_desc = [
    ]
+
+
class NotificationIndicator(Packet):
    """Notification indicator Section 10.5.4.20"""
    name = "Notification Indicator"
    # Single octet: ext bit (1) followed by a 7-bit notification description.
    fields_desc = [
        BitField("ext1", 0x1, 1),
        BitField("notifDesc", 0x0, 7)
    ]
+
+
class ProgressIndicator(Packet):
    """Progress indicator Section 10.5.4.21"""
    name = "Progress Indicator"
    # Fixed-length IE: length octet (defaults to 2), a coding/location
    # octet and a progress-description octet.
    fields_desc = [
        XByteField("lengthPI", 0x2),
        BitField("ext", 0x1, 1),
        BitField("codingStd", 0x0, 2),
        BitField("spare", 0x0, 1),
        BitField("location", 0x0, 4),
        BitField("ext1", 0x1, 1),
        BitField("progressDesc", 0x0, 7)
    ]
+
+
class RecallType(Packet):
    """Recall type $(CCBS)$ Section 10.5.4.21a"""
    name = "Recall Type $(CCBS)$"
    # Single octet: 5 spare bits followed by a 3-bit recall type value.
    fields_desc = [
        BitField("spare", 0x0, 5),
        BitField("recallType", 0x0, 3)
    ]
+
+
+# len 3 to 19
+class RedirectingPartyBcdNumber(Packet):
+ """Redirecting party BCD number Section 10.5.4.21b"""
+ name = "Redirecting Party BCD Number"
+ fields_desc = [
+
+ XByteField("lengthRPBN", None),
+
+ BitField("ext", 0x1, 1),
+ BitField("typeNb", 0x0, 3),
+ BitField("numberingPlan", 0x0, 4),
+ # optional
+ ConditionalField(BitField("ext1", 0x1, 1),
+ lambda pkt: pkt.ext == 0),
+ ConditionalField(BitField("presId", 0x0, 2),
+ lambda pkt: pkt.ext == 0),
+ ConditionalField(BitField("spare", 0x0, 3),
+ lambda pkt: pkt.ext == 0),
+ ConditionalField(BitField("screenId", 0x0, 2),
+ lambda pkt: pkt.ext == 0),
+
+ BitField("nbDigit2", None, 4),
+ BitField("nbDigit1", None, 4),
+
+ BitField("nbDigit4", None, 4),
+ BitField("nbDigit3", None, 4),
+
+ BitField("nbDigit6", None, 4),
+ BitField("nbDigit5", None, 4),
+
+ BitField("nbDigit8", None, 4),
+ BitField("nbDigit7", None, 4),
+
+ BitField("nbDigit10", None, 4),
+ BitField("nbDigit9", None, 4),
+
+ BitField("nbDigit12", None, 4),
+ BitField("nbDigit11", None, 4),
+
+ BitField("nbDigit14", None, 4),
+ BitField("nbDigit13", None, 4),
+
+ BitField("nbDigit16", None, 4),
+ BitField("nbDigit15", None, 4),
+
+ BitField("nbDigit18", None, 4),
+ BitField("nbDigit17", None, 4),
+
+ BitField("nbDigit20", None, 4),
+ BitField("nbDigit19", None, 4),
+
+ BitField("nbDigit22", None, 4),
+ BitField("nbDigit21", None, 4),
+
+ BitField("nbDigit24", None, 4),
+ BitField("nbDigit23", None, 4),
+
+ BitField("nbDigit26", None, 4),
+ BitField("nbDigit25", None, 4),
+
+ BitField("nbDigit28", None, 4),
+ BitField("nbDigit27", None, 4),
+
+ BitField("nbDigit30", None, 4),
+ BitField("nbDigit29", None, 4),
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ a = []
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(2, 18, a, self.fields_desc, 1)
+ if res[0] != 0:
+ p = p[:-res[0]]
+ if self.lengthRPBN is None:
+ p = struct.pack(">B", len(p)-1) + p[1:]
+ return p + pay
+
+
+# length 2 to 23
+class RedirectingPartySubaddress(Packet):
+ """Redirecting party subaddress Section 10.5.4.21c"""
+ name = "Redirecting Party Subaddress"
+ fields_desc = [
+
+ XByteField("lengthRPS", None),
+ # optional
+ BitField("ext", None, 1),
+ BitField("typeSub", None, 3),
+ BitField("oddEven", None, 1),
+ BitField("spare", None, 3),
+
+ ByteField("subInfo0", None),
+ ByteField("subInfo1", None),
+ ByteField("subInfo2", None),
+ ByteField("subInfo3", None),
+ ByteField("subInfo4", None),
+ ByteField("subInfo5", None),
+ ByteField("subInfo6", None),
+ ByteField("subInfo7", None),
+ ByteField("subInfo8", None),
+ ByteField("subInfo9", None),
+ ByteField("subInfo10", None),
+ ByteField("subInfo11", None),
+ ByteField("subInfo12", None),
+ ByteField("subInfo13", None),
+ ByteField("subInfo14", None),
+ ByteField("subInfo15", None),
+ ByteField("subInfo16", None),
+ ByteField("subInfo17", None),
+ ByteField("subInfo18", None),
+ ByteField("subInfo19", None)
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ a = []
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(1, 22, a, self.fields_desc, 1)
+ if self.lengthRPS is None:
+ p = struct.pack(">B", res[1]) + p[1:]
+ if res[0] != 0:
+ p = p[:-res[0]]
+ return p + pay
+
+
+class RepeatIndicator(Packet):
+ """Repeat indicator Section 10.5.4.22"""
+ name = "Repeat Indicator"
+ fields_desc = [
+ BitField("repeatIndic", 0x0, 4)
+ ]
+
+
+# no upper length min 2(max for L3) (251)
+class SetupContainer(Packet):
+ """SETUP Container $(CCBS)$ Section 10.5.4.22b"""
+ name = "Setup Container $(CCBS)$"
+ fields_desc = [
+ XByteField("lengthSC", None),
+ # optional
+ ByteField("mess1", None),
+ ByteField("mess2", None),
+ ByteField("mess3", None),
+ ByteField("mess4", None),
+ ByteField("mess5", None),
+ ByteField("mess6", None),
+ ByteField("mess7", None),
+ ByteField("mess8", None),
+ ByteField("mess9", None),
+ ByteField("mess10", None),
+ ByteField("mess11", None),
+ ByteField("mess12", None),
+ ByteField("mess13", None),
+ ByteField("mess14", None),
+ ByteField("mess15", None),
+ ByteField("mess16", None),
+ ByteField("mess17", None),
+ ByteField("mess18", None),
+ ByteField("mess19", None),
+ ByteField("mess20", None),
+ ByteField("mess21", None),
+ ByteField("mess22", None),
+ ByteField("mess23", None),
+ ByteField("mess24", None),
+ ByteField("mess25", None),
+ ByteField("mess26", None),
+ ByteField("mess27", None),
+ ByteField("mess28", None),
+ ByteField("mess29", None),
+ ByteField("mess30", None),
+ ByteField("mess31", None),
+ ByteField("mess32", None),
+ ByteField("mess33", None),
+ ByteField("mess34", None),
+ ByteField("mess35", None),
+ ByteField("mess36", None),
+ ByteField("mess37", None),
+ ByteField("mess38", None),
+ ByteField("mess39", None),
+ ByteField("mess40", None),
+ ByteField("mess41", None),
+ ByteField("mess42", None),
+ ByteField("mess43", None),
+ ByteField("mess44", None),
+ ByteField("mess45", None),
+ ByteField("mess46", None),
+ ByteField("mess47", None),
+ ByteField("mess48", None),
+ ByteField("mess49", None),
+ ByteField("mess50", None),
+ ByteField("mess51", None),
+ ByteField("mess52", None),
+ ByteField("mess53", None),
+ ByteField("mess54", None),
+ ByteField("mess55", None),
+ ByteField("mess56", None),
+ ByteField("mess57", None),
+ ByteField("mess58", None),
+ ByteField("mess59", None),
+ ByteField("mess60", None),
+ ByteField("mess61", None),
+ ByteField("mess62", None),
+ ByteField("mess63", None),
+ ByteField("mess64", None),
+ ByteField("mess65", None),
+ ByteField("mess66", None),
+ ByteField("mess67", None),
+ ByteField("mess68", None),
+ ByteField("mess69", None),
+ ByteField("mess70", None),
+ ByteField("mess71", None),
+ ByteField("mess72", None),
+ ByteField("mess73", None),
+ ByteField("mess74", None),
+ ByteField("mess75", None),
+ ByteField("mess76", None),
+ ByteField("mess77", None),
+ ByteField("mess78", None),
+ ByteField("mess79", None),
+ ByteField("mess80", None),
+ ByteField("mess81", None),
+ ByteField("mess82", None),
+ ByteField("mess83", None),
+ ByteField("mess84", None),
+ ByteField("mess85", None),
+ ByteField("mess86", None),
+ ByteField("mess87", None),
+ ByteField("mess88", None),
+ ByteField("mess89", None),
+ ByteField("mess90", None),
+ ByteField("mess91", None),
+ ByteField("mess92", None),
+ ByteField("mess93", None),
+ ByteField("mess94", None),
+ ByteField("mess95", None),
+ ByteField("mess96", None),
+ ByteField("mess97", None),
+ ByteField("mess98", None),
+ ByteField("mess99", None),
+ ByteField("mess100", None),
+ ByteField("mess101", None),
+ ByteField("mess102", None),
+ ByteField("mess103", None),
+ ByteField("mess104", None),
+ ByteField("mess105", None),
+ ByteField("mess106", None),
+ ByteField("mess107", None),
+ ByteField("mess108", None),
+ ByteField("mess109", None),
+ ByteField("mess110", None),
+ ByteField("mess111", None),
+ ByteField("mess112", None),
+ ByteField("mess113", None),
+ ByteField("mess114", None),
+ ByteField("mess115", None),
+ ByteField("mess116", None),
+ ByteField("mess117", None),
+ ByteField("mess118", None),
+ ByteField("mess119", None),
+ ByteField("mess120", None),
+ ByteField("mess121", None),
+ ByteField("mess122", None),
+ ByteField("mess123", None),
+ ByteField("mess124", None),
+ ByteField("mess125", None),
+ ByteField("mess126", None),
+ ByteField("mess127", None),
+ ByteField("mess128", None),
+ ByteField("mess129", None),
+ ByteField("mess130", None),
+ ByteField("mess131", None),
+ ByteField("mess132", None),
+ ByteField("mess133", None),
+ ByteField("mess134", None),
+ ByteField("mess135", None),
+ ByteField("mess136", None),
+ ByteField("mess137", None),
+ ByteField("mess138", None),
+ ByteField("mess139", None),
+ ByteField("mess140", None),
+ ByteField("mess141", None),
+ ByteField("mess142", None),
+ ByteField("mess143", None),
+ ByteField("mess144", None),
+ ByteField("mess145", None),
+ ByteField("mess146", None),
+ ByteField("mess147", None),
+ ByteField("mess148", None),
+ ByteField("mess149", None),
+ ByteField("mess150", None),
+ ByteField("mess151", None),
+ ByteField("mess152", None),
+ ByteField("mess153", None),
+ ByteField("mess154", None),
+ ByteField("mess155", None),
+ ByteField("mess156", None),
+ ByteField("mess157", None),
+ ByteField("mess158", None),
+ ByteField("mess159", None),
+ ByteField("mess160", None),
+ ByteField("mess161", None),
+ ByteField("mess162", None),
+ ByteField("mess163", None),
+ ByteField("mess164", None),
+ ByteField("mess165", None),
+ ByteField("mess166", None),
+ ByteField("mess167", None),
+ ByteField("mess168", None),
+ ByteField("mess169", None),
+ ByteField("mess170", None),
+ ByteField("mess171", None),
+ ByteField("mess172", None),
+ ByteField("mess173", None),
+ ByteField("mess174", None),
+ ByteField("mess175", None),
+ ByteField("mess176", None),
+ ByteField("mess177", None),
+ ByteField("mess178", None),
+ ByteField("mess179", None),
+ ByteField("mess180", None),
+ ByteField("mess181", None),
+ ByteField("mess182", None),
+ ByteField("mess183", None),
+ ByteField("mess184", None),
+ ByteField("mess185", None),
+ ByteField("mess186", None),
+ ByteField("mess187", None),
+ ByteField("mess188", None),
+ ByteField("mess189", None),
+ ByteField("mess190", None),
+ ByteField("mess191", None),
+ ByteField("mess192", None),
+ ByteField("mess193", None),
+ ByteField("mess194", None),
+ ByteField("mess195", None),
+ ByteField("mess196", None),
+ ByteField("mess197", None),
+ ByteField("mess198", None),
+ ByteField("mess199", None),
+ ByteField("mess200", None),
+ ByteField("mess201", None),
+ ByteField("mess202", None),
+ ByteField("mess203", None),
+ ByteField("mess204", None),
+ ByteField("mess205", None),
+ ByteField("mess206", None),
+ ByteField("mess207", None),
+ ByteField("mess208", None),
+ ByteField("mess209", None),
+ ByteField("mess210", None),
+ ByteField("mess211", None),
+ ByteField("mess212", None),
+ ByteField("mess213", None),
+ ByteField("mess214", None),
+ ByteField("mess215", None),
+ ByteField("mess216", None),
+ ByteField("mess217", None),
+ ByteField("mess218", None),
+ ByteField("mess219", None),
+ ByteField("mess220", None),
+ ByteField("mess221", None),
+ ByteField("mess222", None),
+ ByteField("mess223", None),
+ ByteField("mess224", None),
+ ByteField("mess225", None),
+ ByteField("mess226", None),
+ ByteField("mess227", None),
+ ByteField("mess228", None),
+ ByteField("mess229", None),
+ ByteField("mess230", None),
+ ByteField("mess231", None),
+ ByteField("mess232", None),
+ ByteField("mess233", None),
+ ByteField("mess234", None),
+ ByteField("mess235", None),
+ ByteField("mess236", None),
+ ByteField("mess237", None),
+ ByteField("mess238", None),
+ ByteField("mess239", None),
+ ByteField("mess240", None),
+ ByteField("mess241", None),
+ ByteField("mess242", None),
+ ByteField("mess243", None),
+ ByteField("mess244", None),
+ ByteField("mess245", None),
+ ByteField("mess246", None),
+ ByteField("mess247", None),
+ ByteField("mess248", None),
+ ByteField("mess249", None),
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ a = []
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(1, 250, a, self.fields_desc, 1)
+ if self.lengthSC is None:
+ p = struct.pack(">B", res[1]) + p[1:]
+ if res[0] != 0:
+ p = p[:-res[0]]
+ return p + pay
+
+
+class Signal(Packet):
+ """Signal Section 10.5.4.23"""
+ name = "Signal"
+ fields_desc = [
+ ByteField("sigValue", 0x0)
+ ]
+
+
+# length 2 to max for L3 message (251)
+class SsVersionIndicator(Packet):
+ """SS Version Indicator Section 10.5.4.24"""
+ name = "SS Version Indicator"
+ fields_desc = [
+ XByteField("lengthSVI", None),
+ # optional
+ ByteField("info1", None),
+ ByteField("info2", None),
+ ByteField("info3", None),
+ ByteField("info4", None),
+ ByteField("info5", None),
+ ByteField("info6", None),
+ ByteField("info7", None),
+ ByteField("info8", None),
+ ByteField("info9", None),
+ ByteField("info10", None),
+ ByteField("info11", None),
+ ByteField("info12", None),
+ ByteField("info13", None),
+ ByteField("info14", None),
+ ByteField("info15", None),
+ ByteField("info16", None),
+ ByteField("info17", None),
+ ByteField("info18", None),
+ ByteField("info19", None),
+ ByteField("info20", None),
+ ByteField("info21", None),
+ ByteField("info22", None),
+ ByteField("info23", None),
+ ByteField("info24", None),
+ ByteField("info25", None),
+ ByteField("info26", None),
+ ByteField("info27", None),
+ ByteField("info28", None),
+ ByteField("info29", None),
+ ByteField("info30", None),
+ ByteField("info31", None),
+ ByteField("info32", None),
+ ByteField("info33", None),
+ ByteField("info34", None),
+ ByteField("info35", None),
+ ByteField("info36", None),
+ ByteField("info37", None),
+ ByteField("info38", None),
+ ByteField("info39", None),
+ ByteField("info40", None),
+ ByteField("info41", None),
+ ByteField("info42", None),
+ ByteField("info43", None),
+ ByteField("info44", None),
+ ByteField("info45", None),
+ ByteField("info46", None),
+ ByteField("info47", None),
+ ByteField("info48", None),
+ ByteField("info49", None),
+ ByteField("info50", None),
+ ByteField("info51", None),
+ ByteField("info52", None),
+ ByteField("info53", None),
+ ByteField("info54", None),
+ ByteField("info55", None),
+ ByteField("info56", None),
+ ByteField("info57", None),
+ ByteField("info58", None),
+ ByteField("info59", None),
+ ByteField("info60", None),
+ ByteField("info61", None),
+ ByteField("info62", None),
+ ByteField("info63", None),
+ ByteField("info64", None),
+ ByteField("info65", None),
+ ByteField("info66", None),
+ ByteField("info67", None),
+ ByteField("info68", None),
+ ByteField("info69", None),
+ ByteField("info70", None),
+ ByteField("info71", None),
+ ByteField("info72", None),
+ ByteField("info73", None),
+ ByteField("info74", None),
+ ByteField("info75", None),
+ ByteField("info76", None),
+ ByteField("info77", None),
+ ByteField("info78", None),
+ ByteField("info79", None),
+ ByteField("info80", None),
+ ByteField("info81", None),
+ ByteField("info82", None),
+ ByteField("info83", None),
+ ByteField("info84", None),
+ ByteField("info85", None),
+ ByteField("info86", None),
+ ByteField("info87", None),
+ ByteField("info88", None),
+ ByteField("info89", None),
+ ByteField("info90", None),
+ ByteField("info91", None),
+ ByteField("info92", None),
+ ByteField("info93", None),
+ ByteField("info94", None),
+ ByteField("info95", None),
+ ByteField("info96", None),
+ ByteField("info97", None),
+ ByteField("info98", None),
+ ByteField("info99", None),
+ ByteField("info100", None),
+ ByteField("info101", None),
+ ByteField("info102", None),
+ ByteField("info103", None),
+ ByteField("info104", None),
+ ByteField("info105", None),
+ ByteField("info106", None),
+ ByteField("info107", None),
+ ByteField("info108", None),
+ ByteField("info109", None),
+ ByteField("info110", None),
+ ByteField("info111", None),
+ ByteField("info112", None),
+ ByteField("info113", None),
+ ByteField("info114", None),
+ ByteField("info115", None),
+ ByteField("info116", None),
+ ByteField("info117", None),
+ ByteField("info118", None),
+ ByteField("info119", None),
+ ByteField("info120", None),
+ ByteField("info121", None),
+ ByteField("info122", None),
+ ByteField("info123", None),
+ ByteField("info124", None),
+ ByteField("info125", None),
+ ByteField("info126", None),
+ ByteField("info127", None),
+ ByteField("info128", None),
+ ByteField("info129", None),
+ ByteField("info130", None),
+ ByteField("info131", None),
+ ByteField("info132", None),
+ ByteField("info133", None),
+ ByteField("info134", None),
+ ByteField("info135", None),
+ ByteField("info136", None),
+ ByteField("info137", None),
+ ByteField("info138", None),
+ ByteField("info139", None),
+ ByteField("info140", None),
+ ByteField("info141", None),
+ ByteField("info142", None),
+ ByteField("info143", None),
+ ByteField("info144", None),
+ ByteField("info145", None),
+ ByteField("info146", None),
+ ByteField("info147", None),
+ ByteField("info148", None),
+ ByteField("info149", None),
+ ByteField("info150", None),
+ ByteField("info151", None),
+ ByteField("info152", None),
+ ByteField("info153", None),
+ ByteField("info154", None),
+ ByteField("info155", None),
+ ByteField("info156", None),
+ ByteField("info157", None),
+ ByteField("info158", None),
+ ByteField("info159", None),
+ ByteField("info160", None),
+ ByteField("info161", None),
+ ByteField("info162", None),
+ ByteField("info163", None),
+ ByteField("info164", None),
+ ByteField("info165", None),
+ ByteField("info166", None),
+ ByteField("info167", None),
+ ByteField("info168", None),
+ ByteField("info169", None),
+ ByteField("info170", None),
+ ByteField("info171", None),
+ ByteField("info172", None),
+ ByteField("info173", None),
+ ByteField("info174", None),
+ ByteField("info175", None),
+ ByteField("info176", None),
+ ByteField("info177", None),
+ ByteField("info178", None),
+ ByteField("info179", None),
+ ByteField("info180", None),
+ ByteField("info181", None),
+ ByteField("info182", None),
+ ByteField("info183", None),
+ ByteField("info184", None),
+ ByteField("info185", None),
+ ByteField("info186", None),
+ ByteField("info187", None),
+ ByteField("info188", None),
+ ByteField("info189", None),
+ ByteField("info190", None),
+ ByteField("info191", None),
+ ByteField("info192", None),
+ ByteField("info193", None),
+ ByteField("info194", None),
+ ByteField("info195", None),
+ ByteField("info196", None),
+ ByteField("info197", None),
+ ByteField("info198", None),
+ ByteField("info199", None),
+ ByteField("info200", None),
+ ByteField("info201", None),
+ ByteField("info202", None),
+ ByteField("info203", None),
+ ByteField("info204", None),
+ ByteField("info205", None),
+ ByteField("info206", None),
+ ByteField("info207", None),
+ ByteField("info208", None),
+ ByteField("info209", None),
+ ByteField("info210", None),
+ ByteField("info211", None),
+ ByteField("info212", None),
+ ByteField("info213", None),
+ ByteField("info214", None),
+ ByteField("info215", None),
+ ByteField("info216", None),
+ ByteField("info217", None),
+ ByteField("info218", None),
+ ByteField("info219", None),
+ ByteField("info220", None),
+ ByteField("info221", None),
+ ByteField("info222", None),
+ ByteField("info223", None),
+ ByteField("info224", None),
+ ByteField("info225", None),
+ ByteField("info226", None),
+ ByteField("info227", None),
+ ByteField("info228", None),
+ ByteField("info229", None),
+ ByteField("info230", None),
+ ByteField("info231", None),
+ ByteField("info232", None),
+ ByteField("info233", None),
+ ByteField("info234", None),
+ ByteField("info235", None),
+ ByteField("info236", None),
+ ByteField("info237", None),
+ ByteField("info238", None),
+ ByteField("info239", None),
+ ByteField("info240", None),
+ ByteField("info241", None),
+ ByteField("info242", None),
+ ByteField("info243", None),
+ ByteField("info244", None),
+ ByteField("info245", None),
+ ByteField("info246", None),
+ ByteField("info247", None),
+ ByteField("info248", None),
+ ByteField("info249", None),
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ a = []
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(1, 250, a, self.fields_desc, 1)
+ if self.lengthSVI is None:
+ p = struct.pack(">B", res[1]) + p[1:]
+ if res[0] != 0:
+ p = p[:-res[0]]
+ return p + pay
+
+
+# length 3 to 35 or 131
+class UserUser(Packet):
+ """User-user Section 10.5.4.25"""
+ name = "User-User"
+ fields_desc = [
+
+ XByteField("lengthUU", None), # dynamic length of field depending
+ # of the type of message
+ # let user decide which length he
+ # wants to take
+ # => more fuzzing options
+ ByteField("userUserPD", 0x0),
+ # optional
+ ByteField("userUserInfo1", None),
+ ByteField("userUserInfo2", None),
+ ByteField("userUserInfo3", None),
+ ByteField("userUserInfo4", None),
+ ByteField("userUserInfo5", None),
+ ByteField("userUserInfo6", None),
+ ByteField("userUserInfo7", None),
+ ByteField("userUserInfo8", None),
+ ByteField("userUserInfo9", None),
+ ByteField("userUserInfo10", None),
+ ByteField("userUserInfo11", None),
+ ByteField("userUserInfo12", None),
+ ByteField("userUserInfo13", None),
+ ByteField("userUserInfo14", None),
+ ByteField("userUserInfo15", None),
+ ByteField("userUserInfo16", None),
+ ByteField("userUserInfo17", None),
+ ByteField("userUserInfo18", None),
+ ByteField("userUserInfo19", None),
+ ByteField("userUserInfo20", None),
+ ByteField("userUserInfo21", None),
+ ByteField("userUserInfo22", None),
+ ByteField("userUserInfo23", None),
+ ByteField("userUserInfo24", None),
+ ByteField("userUserInfo25", None),
+ ByteField("userUserInfo26", None),
+ ByteField("userUserInfo27", None),
+ ByteField("userUserInfo28", None),
+ ByteField("userUserInfo29", None),
+ ByteField("userUserInfo30", None),
+ ByteField("userUserInfo31", None),
+ ByteField("userUserInfo32", None),
+ # long packet
+ ByteField("userUserInfo33", None),
+ ByteField("userUserInfo34", None),
+ ByteField("userUserInfo35", None),
+ ByteField("userUserInfo36", None),
+ ByteField("userUserInfo37", None),
+ ByteField("userUserInfo38", None),
+ ByteField("userUserInfo39", None),
+ ByteField("userUserInfo40", None),
+ ByteField("userUserInfo41", None),
+ ByteField("userUserInfo42", None),
+ ByteField("userUserInfo43", None),
+ ByteField("userUserInfo44", None),
+ ByteField("userUserInfo45", None),
+ ByteField("userUserInfo46", None),
+ ByteField("userUserInfo47", None),
+ ByteField("userUserInfo48", None),
+ ByteField("userUserInfo49", None),
+ ByteField("userUserInfo50", None),
+ ByteField("userUserInfo51", None),
+ ByteField("userUserInfo52", None),
+ ByteField("userUserInfo53", None),
+ ByteField("userUserInfo54", None),
+ ByteField("userUserInfo55", None),
+ ByteField("userUserInfo56", None),
+ ByteField("userUserInfo57", None),
+ ByteField("userUserInfo58", None),
+ ByteField("userUserInfo59", None),
+ ByteField("userUserInfo60", None),
+ ByteField("userUserInfo61", None),
+ ByteField("userUserInfo62", None),
+ ByteField("userUserInfo63", None),
+ ByteField("userUserInfo64", None),
+ ByteField("userUserInfo65", None),
+ ByteField("userUserInfo66", None),
+ ByteField("userUserInfo67", None),
+ ByteField("userUserInfo68", None),
+ ByteField("userUserInfo69", None),
+ ByteField("userUserInfo70", None),
+ ByteField("userUserInfo71", None),
+ ByteField("userUserInfo72", None),
+ ByteField("userUserInfo73", None),
+ ByteField("userUserInfo74", None),
+ ByteField("userUserInfo75", None),
+ ByteField("userUserInfo76", None),
+ ByteField("userUserInfo77", None),
+ ByteField("userUserInfo78", None),
+ ByteField("userUserInfo79", None),
+ ByteField("userUserInfo80", None),
+ ByteField("userUserInfo81", None),
+ ByteField("userUserInfo82", None),
+ ByteField("userUserInfo83", None),
+ ByteField("userUserInfo84", None),
+ ByteField("userUserInfo85", None),
+ ByteField("userUserInfo86", None),
+ ByteField("userUserInfo87", None),
+ ByteField("userUserInfo88", None),
+ ByteField("userUserInfo89", None),
+ ByteField("userUserInfo90", None),
+ ByteField("userUserInfo91", None),
+ ByteField("userUserInfo92", None),
+ ByteField("userUserInfo93", None),
+ ByteField("userUserInfo94", None),
+ ByteField("userUserInfo95", None),
+ ByteField("userUserInfo96", None),
+ ByteField("userUserInfo97", None),
+ ByteField("userUserInfo98", None),
+ ByteField("userUserInfo99", None),
+ ByteField("userUserInfo100", None),
+ ByteField("userUserInfo101", None),
+ ByteField("userUserInfo102", None),
+ ByteField("userUserInfo103", None),
+ ByteField("userUserInfo104", None),
+ ByteField("userUserInfo105", None),
+ ByteField("userUserInfo106", None),
+ ByteField("userUserInfo107", None),
+ ByteField("userUserInfo108", None),
+ ByteField("userUserInfo109", None),
+ ByteField("userUserInfo110", None),
+ ByteField("userUserInfo111", None),
+ ByteField("userUserInfo112", None),
+ ByteField("userUserInfo113", None),
+ ByteField("userUserInfo114", None),
+ ByteField("userUserInfo115", None),
+ ByteField("userUserInfo116", None),
+ ByteField("userUserInfo117", None),
+ ByteField("userUserInfo118", None),
+ ByteField("userUserInfo119", None),
+ ByteField("userUserInfo120", None),
+ ByteField("userUserInfo121", None),
+ ByteField("userUserInfo122", None),
+ ByteField("userUserInfo123", None),
+ ByteField("userUserInfo124", None),
+ ByteField("userUserInfo125", None),
+ ByteField("userUserInfo126", None),
+ ByteField("userUserInfo127", None),
+ ByteField("userUserInfo128", None),
+ ByteField("userUserInfo129", None),
+ ByteField("userUserInfo130", None),
+ ByteField("userUserInfo131", None)
+ ]
+
+ def post_build(self, p, pay):
+ aList = []
+ i = 0
+ for i in range(0, len(self.fields_desc)):
+ aList.append(self.fields_desc[i].name)
+ a = []
+ for i in aList:
+ a.append(getattr(self, i))
+ res = adapt(2, 133, a, self.fields_desc, 1)
+ if self.lengthUU is None:
+ p = struct.pack(">B", res[1]) + p[1:]
+ if res[0] != 0:
+ p = p[:-res[0]]
+ return p + pay
+
+
+class AlertingPattern(Packet):
+ """Alerting Pattern 10.5.4.26"""
+ name = "Alerting Pattern"
+ fields_desc = [
+ XByteField("lengthAP", 0x3),
+ BitField("spare", 0x0, 4),
+ BitField("alertingValue", 0x0, 4)
+ ]
+
+
+class AllowedActions(Packet):
+ """Allowed actions $(CCBS)$ Section 10.5.4.26"""
+ name = "Allowed Actions $(CCBS)$"
+ fields_desc = [
+ XByteField("lengthAP", 0x3),
+ BitField("CCBS", 0x0, 1),
+ BitField("spare", 0x0, 7)
+ ]
+
+
+#
+# 10.5.5 GPRS mobility management information elements
+#
+
+
+class AttachType(Packet):
+ """Attach type Section 10.5.5.2"""
+ name = "Attach Type"
+ fields_desc = [
+ BitField("spare", 0x0, 1),
+ BitField("type", 0x1, 3)
+ ]
+
+
+if __name__ == "__main__":
+ interact(mydict=globals(), mybanner="Scapy GSM-UM (Air) Addon")
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/gtp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/gtp.py
new file mode 100644
index 00000000..008a0200
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/gtp.py
@@ -0,0 +1,546 @@
+#! /usr/bin/env python
+
+## Copyright (C) 2014 Guillaume Valadon <guillaume.valadon@ssi.gouv.fr>
+## 2014 Alexis Sultan <alexis.sultan@sfr.com>
+## 2012 ffranz <ffranz@iniqua.com>
+##
+## This program is published under a GPLv2 license
+
+# scapy.contrib.description = GTP
+# scapy.contrib.status = loads
+
+import time
+import logging
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.inet import IP, UDP
+
+# GTP Data types
+
+GTPmessageType = { 1: "echo_request",
+ 2: "echo_response",
+ 16: "create_pdp_context_req",
+ 17: "create_pdp_context_res",
+ 20: "delete_pdp_context_req",
+ 21: "delete_pdp_context_res",
+ 26: "error_indication",
+ 27: "pdu_notification_req",
+ 255: "gtp_u_header" }
+
+IEType = { 1: "Cause",
+ 2: "IMSI",
+ 3: "RAI",
+ 4: "TLLI",
+ 5: "P_TMSI",
+ 14: "Recovery",
+ 15: "SelectionMode",
+ 16: "TEIDI",
+ 17: "TEICP",
+ 19: "TeardownInd",
+ 20: "NSAPI",
+ 26: "ChargingChrt",
+ 27: "TraceReference",
+ 28: "TraceType",
+ 128: "EndUserAddress",
+ 131: "AccessPointName",
+ 132: "ProtocolConfigurationOptions",
+ 133: "GSNAddress",
+ 134: "MSInternationalNumber",
+ 135: "QoS",
+ 148: "CommonFlags",
+ 151: "RatType",
+ 152: "UserLocationInformation",
+ 153: "MSTimeZone",
+ 154: "IMEI" }
+
+CauseValues = { 0: "Request IMSI",
+ 1: "Request IMEI",
+ 2: "Request IMSI and IMEI",
+ 3: "No identity needed",
+ 4: "MS Refuses",
+ 5: "MS is not GPRS Responding",
+ 128: "Request accepted",
+ 129: "New PDP type due to network preference",
+ 130: "New PDP type due to single address bearer only",
+ 192: "Non-existent",
+ 193: "Invalid message format",
+ 194: "IMSI not known",
+ 195: "MS is GPRS Detached",
+ 196: "MS is not GPRS Responding",
+ 197: "MS Refuses",
+ 198: "Version not supported",
+ 199: "No resources available",
+ 200: "Service not supported",
+ 201: "Mandatory IE incorrect",
+ 202: "Mandatory IE missing",
+ 203: "Optional IE incorrect",
+ 204: "System failure",
+ 205: "Roaming restriction",
+ 206: "P-TMSI Signature mismatch",
+ 207: "GPRS connection suspended",
+ 208: "Authentication failure",
+ 209: "User authentication failed",
+ 210: "Context not found",
+ 211: "All dynamic PDP addresses are occupied",
+ 212: "No memory is available",
+ 213: "Reallocation failure",
+ 214: "Unknown mandatory extension header",
+ 215: "Semantic error in the TFT operation",
+ 216: "Syntactic error in TFT operation",
+ 217: "Semantic errors in packet filter(s)",
+ 218: "Syntactic errors in packet filter(s)",
+ 219: "Missing or unknown APN",
+ 220: "Unknown PDP address or PDP type",
+ 221: "PDP context without TFT already activated",
+ 222: "APN access denied : no subscription",
+ 223: "APN Restriction type incompatibility with currently active PDP Contexts",
+ 224: "MS MBMS Capabilities Insufficient",
+ 225: "Invalid Correlation : ID",
+ 226: "MBMS Bearer Context Superseded",
+ 227: "Bearer Control Mode violation",
+ 228: "Collision with network initiated request" }
+
+Selection_Mode = { 0b11111100: "MS or APN",
+ 0b11111101: "MS",
+ 0b11111110: "NET",
+ 0b11111111: "FutureUse" }
+
+TeardownInd_value = { 254: "False",
+ 255: "True" }
+
+class TBCDByteField(StrFixedLenField):
+
+ def i2h(self, pkt, val):
+ ret = []
+ for i in range(len(val)):
+ byte = ord(val[i])
+ left = byte >> 4
+ right = byte & 0xF
+ if left == 0xF:
+ ret += [ "%d" % right ]
+ else:
+ ret += [ "%d" % right, "%d" % left ]
+ return "".join(ret)
+
+ def i2repr(self, pkt, x):
+ return repr(self.i2h(pkt,x))
+
+ def i2m(self, pkt, val):
+ ret_string = ""
+ for i in range(0, len(val), 2):
+ tmp = val[i:i+2]
+ if len(tmp) == 2:
+ ret_string += chr(int(tmp[1] + tmp[0], 16))
+ else:
+ ret_string += chr(int("F" + tmp[0], 16))
+ return ret_string
+
+class GTPHeader(Packet):
+ # 3GPP TS 29.060 V9.1.0 (2009-12)
+ name = "GTP Header"
+ fields_desc=[ BitField("version", 1, 3),
+ BitField("PT", 1, 1),
+ BitField("reserved", 0, 1),
+ BitField("E", 0, 1),
+ BitField("S", 1, 1),
+ BitField("PN", 0, 1),
+ ByteEnumField("gtp_type", None, GTPmessageType),
+ ShortField("length", None),
+ IntField("teid", 0) ]
+
+ def post_build(self, p, pay):
+ p += pay
+ if self.length is None:
+ l = len(p)-8
+ p = p[:2] + struct.pack("!H", l)+ p[4:]
+ return p
+
+ def hashret(self):
+ return struct.pack("B", self.version) + self.payload.hashret()
+
+ def answers(self, other):
+ return (isinstance(other, GTPHeader) and
+ self.version == other.version and
+ self.payload.answers(other.payload))
+
+class GTPEchoRequest(Packet):
+ # 3GPP TS 29.060 V9.1.0 (2009-12)
+ name = "GTP Echo Request"
+ fields_desc = [ XBitField("seq", 0, 16),
+ ByteField("npdu", 0),
+ ByteField("next_ex", 0),]
+
+ def hashret(self):
+ return struct.pack("H", self.seq)
+
+class IE_Cause(Packet):
+ name = "Cause"
+ fields_desc = [ ByteEnumField("ietype", 1, IEType),
+ BitField("Response", None, 1),
+ BitField("Rejection", None, 1),
+ BitEnumField("CauseValue", None, 6, CauseValues) ]
+ def extract_padding(self, pkt):
+ return "",pkt
+
+class IE_IMSI(Packet):
+ name = "IMSI - Subscriber identity of the MS"
+ fields_desc = [ ByteEnumField("ietype", 2, IEType),
+ TBCDByteField("imsi", str(RandNum(0, 999999999999999)), 8) ]
+ def extract_padding(self, pkt):
+ return "",pkt
+
+class IE_Routing(Packet):
+ name = "Routing Area Identity"
+ fields_desc = [ ByteEnumField("ietype", 3, IEType),
+ TBCDByteField("MCC", "", 2),
+ # MNC: if the third digit of MCC is 0xf, then the length of MNC is 1 byte
+ TBCDByteField("MNC", "", 1),
+ ShortField("LAC", None),
+ ByteField("RAC", None) ]
+ def extract_padding(self, pkt):
+ return "",pkt
+
+class IE_Recovery(Packet):
+ name = "Recovery"
+ fields_desc = [ ByteEnumField("ietype", 14, IEType),
+ ByteField("res-counter", 24) ]
+ def extract_padding(self, pkt):
+ return "",pkt
+
+class IE_SelectionMode(Packet):
+ # Indicates the origin of the APN in the message
+ name = "Selection Mode"
+ fields_desc = [ ByteEnumField("ietype", 15, IEType),
+ BitEnumField("SelectionMode", "MS or APN", 8, Selection_Mode) ]
+ def extract_padding(self, pkt):
+ return "",pkt
+
+class IE_TEIDI(Packet):
+ name = "Tunnel Endpoint Identifier Data"
+ fields_desc = [ ByteEnumField("ietype", 16, IEType),
+ XIntField("TEIDI", RandInt()) ]
+ def extract_padding(self, pkt):
+ return "",pkt
+
+class IE_TEICP(Packet):
+ name = "Tunnel Endpoint Identifier Control Plane"
+ fields_desc = [ ByteEnumField("ietype", 17, IEType),
+ XIntField("TEICI", RandInt())]
+ def extract_padding(self, pkt):
+ return "",pkt
+
+class IE_Teardown(Packet):
+ name = "Teardown Indicator"
+ fields_desc = [ ByteEnumField("ietype", 19, IEType),
+ ByteEnumField("indicator", "True", TeardownInd_value) ]
+ def extract_padding(self, pkt):
+ return "",pkt
+
+class IE_NSAPI(Packet):
+ # Identifies a PDP context in a mobility management context specified by TEICP
+ name = "NSAPI"
+ fields_desc = [ ByteEnumField("ietype", 20, IEType),
+ XBitField("sparebits", 0x0000, 4),
+ XBitField("NSAPI", RandNum(0, 15), 4) ]
+ def extract_padding(self, pkt):
+ return "",pkt
+
class IE_ChargingCharacteristics(Packet):
    # Way of informing both the SGSN and GGSN of the rules for
    # producing charging information based on operator configured triggers
    # (GTPv1 IE type 26).  Bit layout of the 16-bit value:
    name = "Charging Characteristics"
    fields_desc = [ ByteEnumField("ietype", 26, IEType),
                    # 0000 .... .... .... : spare
                    # .... 1... .... .... : normal charging
                    # .... .0.. .... .... : prepaid charging
                    # .... ..0. .... .... : flat rate charging
                    # .... ...0 .... .... : hot billing charging
                    # .... .... 0000 0000 : reserved
                    XBitField("Ch_ChSpare", None, 4),
                    XBitField("normal_charging", None, 1),
                    XBitField("prepaid_charging", None, 1),
                    XBitField("flat_rate_charging", None, 1),
                    XBitField("hot_billing_charging", None, 1),
                    XBitField("Ch_ChReserved", 0, 8) ]
    def extract_padding(self, pkt):
        return "",pkt
+
class IE_TraceReference(Packet):
    # Identifies a record or a collection of records for a particular trace.
    # GTPv1 IE type 27 (16-bit reference).
    name = "Trace Reference"
    fields_desc = [ ByteEnumField("ietype", 27, IEType),
                    XBitField("Trace_reference", None, 16) ]
    def extract_padding(self, pkt):
        return "",pkt
+
class IE_TraceType(Packet):
    # Indicates the type of the trace
    # GTPv1 IE type 28 (16-bit value).
    name = "Trace Type"
    fields_desc = [ ByteEnumField("ietype", 28, IEType),
                    XBitField("Trace_type", None, 16) ]
    def extract_padding(self, pkt):
        return "",pkt
+
class IE_EndUserAddress(Packet):
    # Supply protocol specific information of the external packet
    # data network accessed by the GPRS subscriber (GTPv1 IE type 128).
    name = "End User Addresss"
    fields_desc = [ ByteEnumField("ietype", 128, IEType),
                    # - Request
                    #   1     Type (1byte)
                    #   2-3   Length (2bytes) - value 2
                    #   4     Spare + PDP Type Organization
                    #   5     PDP Type Number
                    # - Response
                    #   6-n   PDP Address
                    BitField("EndUserAddressLength", 2, 16),
                    # The spare nibble is all ones.  The previous default,
                    # decimal 1111, does not fit in a 4-bit field; the intent
                    # (binary 1111) is 0xf.
                    BitField("EndUserAddress", 0xf, 4),
                    BitField("PDPTypeOrganization", 1, 4),
                    XByteField("PDPTypeNumber", None) ]
    def extract_padding(self, pkt):
        # Remaining bytes belong to sibling IEs, not to this IE's payload.
        return "",pkt
+
class APNStrLenField(StrLenField):
    # Encodes/decodes an Access Point Name as length-prefixed labels,
    # joined with '.' on the human side.
    # Inspired by DNSStrField
    def m2i(self, pkt, s):
        """Wire -> internal: concatenate length-prefixed labels with dots."""
        ret_s = ""
        tmp_s = s
        while tmp_s:
            # Use a one-byte slice rather than tmp_s[0]: under python3,
            # indexing bytes yields an int and struct.unpack would raise.
            # The slice works for both py2 str and py3 bytes.
            tmp_len = struct.unpack("!B", tmp_s[:1])[0] + 1
            if tmp_len > len(tmp_s):
                # Truncated label: warn and fall through -- the slice below
                # simply consumes whatever bytes remain.
                warning("APN prematured end of character-string (size=%i, remaining bytes=%i)" % (tmp_len, len(tmp_s)))
            ret_s += tmp_s[1:tmp_len]
            tmp_s = tmp_s[tmp_len:]
            if len(tmp_s) :
                ret_s += "."
        s = ret_s
        return s
    def i2m(self, pkt, s):
        """Internal -> wire: prefix each dot-separated label with its length."""
        s = "".join(map(lambda x: chr(len(x))+x, s.split(".")))
        return s
+
+
class IE_AccessPointName(Packet):
    # Sent by SGSN or by GGSN as defined in 3GPP TS 23.060
    # GTPv1 IE type 131 (TLV).
    name = "Access Point Name"
    fields_desc = [ ByteEnumField("ietype", 131, IEType),
                    ShortField("length", None),
                    APNStrLenField("APN", "nternet", length_from=lambda x: x.length) ]
    def extract_padding(self, pkt):
        return "",pkt
    def post_build(self, p, pay):
        # Auto-fill length = bytes after the 3-byte header (type + 2-byte
        # length).  Only the low length byte is rewritten, so this assumes
        # the APN is shorter than 256 bytes -- TODO confirm.
        if self.length is None:
            l = len(p)-3
            p = p[:2] + struct.pack("!B", l)+ p[3:]
        return p
+
class IE_ProtocolConfigurationOptions(Packet):
    # GTPv1 IE type 132 (TLV): opaque protocol configuration options blob.
    name = "Protocol Configuration Options"
    fields_desc = [ ByteEnumField("ietype", 132, IEType),
                    ShortField("length", 4),
                    # NOTE(review): field name contains a space, so it is not
                    # addressable as a keyword argument -- looks intentional
                    # upstream, verify before relying on it.
                    StrLenField("Protocol Configuration", "", length_from=lambda x: x.length) ]
    def extract_padding(self, pkt):
        return "",pkt
+
class IE_GSNAddress(Packet):
    # GTPv1 IE type 133 (TLV): IPv4 address of an SGSN/GGSN.
    # Default is randomized at class-definition time (RandIP()).
    name = "GSN Address"
    fields_desc = [ ByteEnumField("ietype", 133, IEType),
                    ShortField("length", 4),
                    IPField("address", RandIP()) ]
    def extract_padding(self, pkt):
        return "",pkt
+
class IE_MSInternationalNumber(Packet):
    # GTPv1 IE type 134 (TLV): MSISDN as TBCD digits behind a flags octet.
    name = "MS International Number"
    fields_desc = [ ByteEnumField("ietype", 134, IEType),
                    ShortField("length", None),
                    # 0x91 = extension bit + international number + ISDN plan.
                    FlagsField("flags", 0x91, 8, ["Extension","","","International Number","","","","ISDN numbering"]),
                    # Digits occupy length-1 bytes (the flags octet is counted
                    # in the IE length).
                    TBCDByteField("digits", "33607080910", length_from=lambda x: x.length-1) ]
    def extract_padding(self, pkt):
        return "",pkt
+
class IE_UserLocationInformation(Packet):
    # GTPv1 IE type 152 (TLV): geographic location (MCC/MNC + LAC + SAC).
    name = "User Location Information"
    fields_desc = [ ByteEnumField("ietype", 152, IEType),
                    ShortField("length", None),
                    ByteField("type", 1),
                    # Only type 1 is currently supported
                    TBCDByteField("MCC", "", 2),
                    # MNC: if the third digit of MCC is 0xf, then the length of MNC is 1 byte
                    TBCDByteField("MNC", "", 1),
                    ShortField("LAC", None),
                    ShortField("SAC", None) ]
    def extract_padding(self, pkt):
        return "",pkt
+
class IE_IMEI(Packet):
    # GTPv1 IE type 154 (TLV): IMEI(SV) as TBCD-encoded digits.
    name = "IMEI"
    fields_desc = [ ByteEnumField("ietype", 154, IEType),
                    ShortField("length", None),
                    TBCDByteField("IMEI", "", length_from=lambda x: x.length) ]
    def extract_padding(self, pkt):
        return "",pkt
+
class IE_NotImplementedTLV(Packet):
    # Generic fallback for TLV IEs (type >= 128) with no dedicated class:
    # keeps the raw value so the rest of the IE list still dissects.
    name = "IE not implemented"
    fields_desc = [ ByteEnumField("ietype", 0, IEType),
                    ShortField("length", None),
                    StrLenField("data", "", length_from=lambda x: x.length) ]
    def extract_padding(self, pkt):
        return "",pkt
+
# Dispatch table: IE type value -> dissector class.  Types < 128 are fixed
# size (TV); types >= 128 are TLVs.  IE_Cause/IE_IMSI/IE_Routing/IE_Recovery
# are defined earlier in this file.
ietypecls = { 1: IE_Cause, 2: IE_IMSI, 3: IE_Routing, 14: IE_Recovery, 15: IE_SelectionMode, 16: IE_TEIDI,
              17: IE_TEICP, 19: IE_Teardown, 20: IE_NSAPI, 26: IE_ChargingCharacteristics,
              27: IE_TraceReference, 28: IE_TraceType,
              128: IE_EndUserAddress, 131: IE_AccessPointName, 132: IE_ProtocolConfigurationOptions,
              133: IE_GSNAddress, 134: IE_MSInternationalNumber, 152: IE_UserLocationInformation, 154: IE_IMEI }
+
def IE_Dispatcher(s):
    """Choose the correct Information Element class for the leading byte of s.

    Falls back to Raw for unknown fixed-size IEs and to IE_NotImplementedTLV
    for unknown TLV IEs (type >= 128), which carry an explicit length and can
    therefore be skipped generically.
    """
    if len(s) < 1:
        return Raw(s)

    # Get the IE type.  Under python3, indexing bytes already yields an int;
    # under py2 str it yields a one-character string that needs ord().
    first = s[0]
    ietype = first if isinstance(first, int) else ord(first)
    cls = ietypecls.get(ietype, Raw)

    # ietypes greater-or-equal to 128 are TLVs
    if cls == Raw and ietype & 128 == 128:
        cls = IE_NotImplementedTLV

    return cls(s)
+
class GTPEchoResponse(Packet):
    # 3GPP TS 29.060 V9.1.0 (2009-12)
    # GTP-C Echo Response (message type 2); matched to requests by sequence.
    name = "GTP Echo Response"
    fields_desc = [ XBitField("seq", 0, 16),
                    ByteField("npdu", 0),
                    ByteField("next_ex", 0),
                    PacketListField("IE_list", [], IE_Dispatcher) ]

    def hashret(self):
        # NOTE(review): native-order "H" (not "!H") -- only needs to be
        # consistent within one host for scapy's answer matching.
        return struct.pack("H", self.seq)

    def answers(self, other):
        # A response answers the request carrying the same sequence number.
        return self.seq == other.seq
+
+
class GTPCreatePDPContextRequest(Packet):
    # 3GPP TS 29.060 V9.1.0 (2009-12)
    # GTP-C Create PDP Context Request (message type 16).  Default IE list
    # carries the minimum set plus one random not-implemented TLV (type 135).
    name = "GTP Create PDP Context Request"
    fields_desc = [ ShortField("seq", RandShort()),
                    ByteField("npdu", 0),
                    ByteField("next_ex", 0),
                    PacketListField("IE_list", [ IE_TEIDI(), IE_NSAPI(), IE_GSNAddress(),
                                                 IE_GSNAddress(),
                                                 IE_NotImplementedTLV(ietype=135, length=15,data=RandString(15)) ],
                                    IE_Dispatcher) ]
    def hashret(self):
        # Native-order pack of the sequence, used for request/response pairing.
        return struct.pack("H", self.seq)
+
class GTPCreatePDPContextResponse(Packet):
    # 3GPP TS 29.060 V9.1.0 (2009-12)
    # GTP-C Create PDP Context Response (message type 17).
    name = "GTP Create PDP Context Response"
    fields_desc = [ ShortField("seq", RandShort()),
                    ByteField("npdu", 0),
                    ByteField("next_ex", 0),
                    PacketListField("IE_list", [], IE_Dispatcher) ]

    def hashret(self):
        return struct.pack("H", self.seq)

    def answers(self, other):
        # A response answers the request carrying the same sequence number.
        return self.seq == other.seq
+
class GTPErrorIndication(Packet):
    # 3GPP TS 29.060 V9.1.0 (2009-12)
    # GTP Error Indication: common tail (seq/npdu/next-ext) plus an IE list.
    name = "GTP Error Indication"
    fields_desc = [ XBitField("seq", 0, 16),
                    ByteField("npdu", 0),
                    ByteField("next_ex",0),
                    PacketListField("IE_list", [], IE_Dispatcher) ]
+
class GTPDeletePDPContextRequest(Packet):
    # 3GPP TS 29.060 V9.1.0 (2009-12)
    # GTP-C Delete PDP Context Request (message type 20).
    name = "GTP Delete PDP Context Request"
    fields_desc = [ XBitField("seq", 0, 16),
                    ByteField("npdu", 0),
                    ByteField("next_ex", 0),
                    PacketListField("IE_list", [], IE_Dispatcher) ]
+
class GTPDeletePDPContextResponse(Packet):
    # 3GPP TS 29.060 V9.1.0 (2009-12)
    # GTP-C Delete PDP Context Response (message type 21).
    name = "GTP Delete PDP Context Response"
    fields_desc = [ XBitField("seq", 0, 16),
                    ByteField("npdu", 0),
                    ByteField("next_ex",0),
                    PacketListField("IE_list", [], IE_Dispatcher) ]
+
class GTPPDUNotificationRequest(Packet):
    # 3GPP TS 29.060 V9.1.0 (2009-12)
    # GTP-C PDU Notification Request (message type 27), pre-populated with a
    # plausible default IE list (IMSI, TEICP, end-user address, APN, GSN addr).
    name = "GTP PDU Notification Request"
    fields_desc = [ XBitField("seq", 0, 16),
                    ByteField("npdu", 0),
                    ByteField("next_ex", 0),
                    PacketListField("IE_list", [ IE_IMSI(),
                                                 IE_TEICP(TEICI=RandInt()),
                                                 IE_EndUserAddress(PDPTypeNumber=0x21),
                                                 IE_AccessPointName(),
                                                 IE_GSNAddress(address="127.0.0.1"),
                                                 ], IE_Dispatcher) ]
+
class GTP_U_Header(Packet):
    # 3GPP TS 29.060 V9.1.0 (2009-12)
    name = "GTP-U Header"
    # GTP-U protocol is used to transmit T-PDUs between GSN pairs (or between an SGSN and an RNC in UMTS),
    # encapsulated in G-PDUs. A G-PDU is a packet including a GTP-U header and a T-PDU. The Path Protocol
    # defines the path and the GTP-U header defines the tunnel. Several tunnels may be multiplexed on a single path.
    fields_desc = [ BitField("version", 1,3),
                    BitField("PT", 1, 1),
                    BitField("Reserved", 0, 1),
                    BitField("E", 0,1),
                    BitField("S", 0, 1),
                    BitField("PN", 0, 1),
                    ByteEnumField("gtp_type", None, GTPmessageType),
                    BitField("length", None, 16),
                    XBitField("TEID", 0, 32),
                    # Per the spec, if ANY of E/S/PN is set, all three optional
                    # fields (seq, npdu, next-ext type) are present on the wire.
                    ConditionalField(XBitField("seq", 0, 16), lambda pkt:pkt.E==1 or pkt.S==1 or pkt.PN==1),
                    ConditionalField(ByteField("npdu", 0), lambda pkt:pkt.E==1 or pkt.S==1 or pkt.PN==1),
                    ConditionalField(ByteField("next_ex", 0), lambda pkt:pkt.E==1 or pkt.S==1 or pkt.PN==1),
                    ]

    def post_build(self, p, pay):
        # Auto-fill the 16-bit length at offset 2: payload length excluding
        # the 8 mandatory header bytes.
        p += pay
        if self.length is None:
            l = len(p)-8
            p = p[:2] + struct.pack("!H", l)+ p[4:]
        return p
+
class GTPmorethan1500(Packet):
    # 3GPP TS 29.060 V9.1.0 (2009-12)
    # Synthetic oversized message: a 12000-bit (1500-byte) filler field used
    # to exercise handling of GTP messages larger than one Ethernet frame.
    name = "GTP More than 1500"
    fields_desc = [ ByteEnumField("IE_Cause", "Cause", IEType),
                    BitField("IE", 1, 12000),]
+
# Bind GTP-C (control plane) on its well-known UDP port 2123, in both
# directions, then attach message bodies by gtp_type.
bind_layers(UDP, GTPHeader, dport = 2123)
bind_layers(UDP, GTPHeader, sport = 2123)
bind_layers(GTPHeader, GTPEchoRequest, gtp_type = 1)
bind_layers(GTPHeader, GTPEchoResponse, gtp_type = 2)
bind_layers(GTPHeader, GTPCreatePDPContextRequest, gtp_type = 16)
bind_layers(GTPHeader, GTPCreatePDPContextResponse, gtp_type = 17)
bind_layers(GTPHeader, GTPDeletePDPContextRequest, gtp_type = 20)
bind_layers(GTPHeader, GTPDeletePDPContextResponse, gtp_type = 21)
bind_layers(GTPHeader, GTPPDUNotificationRequest, gtp_type = 27)

# Bind GTP-U (user plane) on UDP port 2152; type 255 (G-PDU) carries an
# encapsulated IP packet.
bind_layers(UDP, GTP_U_Header, dport = 2152)
bind_layers(UDP, GTP_U_Header, sport = 2152)
bind_layers(GTP_U_Header, IP, gtp_type = 255)
+
# Running this module directly drops into an interactive scapy shell with
# the GTP classes preloaded.
if __name__ == "__main__":
    from scapy.all import *
    interact(mydict=globals(), mybanner="GTPv1 add-on")
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/igmp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/igmp.py
new file mode 100644
index 00000000..b3505810
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/igmp.py
@@ -0,0 +1,171 @@
+#! /usr/bin/env python
+
+# scapy.contrib.description = IGMP/IGMPv2
+# scapy.contrib.status = loads
+
+
+# TODO: scapy 2 has function getmacbyip, maybe it can replace igmpize
+# at least from the MAC layer
+
+from scapy.all import *
+
+#--------------------------------------------------------------------------
def isValidMCAddr(ip):
    """Return True when dotted-quad string `ip` is an IPv4 multicast
    address, i.e. its first octet falls in the class-D range 224..239."""
    top_octet = (atol(ip) >> 24) & 0xFF
    return 224 <= top_octet <= 239
+
+#--------------------------------------------------------------------------
+
class IGMP(Packet):
    """IGMP Message Class for v1 and v2.

This class is derived from class Packet. You need to "igmpize"
the IP and Ethernet layers before a full packet is sent.
a=Ether(src="00:01:02:03:04:05")
b=IP(src="1.2.3.4")
c=IGMP(type=0x12, gaddr="224.2.3.4")
c.igmpize(b, a)
print("Joining IP " + c.gaddr + " MAC " + a.dst)
sendp(a/b/c, iface="en0")

    Parameters:
      type    IGMP type field, 0x11, 0x12, 0x16 or 0x17
      mrtime  Maximum Response time (zero for v1)
      gaddr   Multicast Group Address 224.x.x.x/4

See RFC2236, Section 2. Introduction for definitions of proper
IGMPv2 message format   http://www.faqs.org/rfcs/rfc2236.html

    """
    name = "IGMP"

    igmptypes = { 0x11 : "Group Membership Query",
                  0x12 : "Version 1 - Membership Report",
                  0x16 : "Version 2 - Membership Report",
                  0x17 : "Leave Group"}

    fields_desc = [ ByteEnumField("type", 0x11, igmptypes),
                    ByteField("mrtime",20),
                    XShortField("chksum", None),
                    IPField("gaddr", "0.0.0.0")]

#--------------------------------------------------------------------------
    def post_build(self, p, pay):
        """Called implicitly before a packet is sent to compute and place IGMP checksum.

        Parameters:
          self    The instantiation of an IGMP class
          p       The IGMP message in hex in network byte order
          pay     Additional payload for the IGMP message
        """
        p += pay
        if self.chksum is None:
            ck = checksum(p)
            # NOTE(review): chr() concatenation assumes py2 str packets; under
            # python3 bytes this raises TypeError.  struct.pack("!H", ck)
            # would be portable -- confirm which runtime this tree targets.
            p = p[:2]+chr(ck>>8)+chr(ck&0xff)+p[4:]
        return p

#--------------------------------------------------------------------------
    def mysummary(self):
        """Display a summary of the IGMP object."""

        if isinstance(self.underlayer, IP):
            return self.underlayer.sprintf("IGMP: %IP.src% > %IP.dst% %IGMP.type% %IGMP.gaddr%")
        else:
            return self.sprintf("IGMP %IGMP.type% %IGMP.gaddr%")

#--------------------------------------------------------------------------
    def igmpize(self, ip=None, ether=None):
        """Called to explicitely fixup associated IP and Ethernet headers

        Parameters:
          self    The instantiation of an IGMP class.
          ip      The instantiation of the associated IP class.
          ether   The instantiation of the associated Ethernet.

        Returns:
          True    The tuple ether/ip/self passed all check and represents
                  a proper IGMP packet.
          False   One of more validation checks failed and no fields
                  were adjusted.

        The function will examine the IGMP message to assure proper format.
        Corrections will be attempted if possible. The IP header is then properly
        adjusted to ensure correct formatting and assignment. The Ethernet header
        is then adjusted to the proper IGMP packet format.
        """

# The rules are:
#   1.  the Max Response time is meaningful only in Membership Queries and should be zero
#       otherwise (RFC 2236, section 2.2)

        if (self.type != 0x11):         #rule 1
            self.mrtime = 0

        if (self.adjust_ip(ip) == True):
            if (self.adjust_ether(ip, ether) == True): return True
        return False

#--------------------------------------------------------------------------
    def adjust_ether (self, ip=None, ether=None):
        """Called to explicitely fixup an associated Ethernet header

        The function adjusts the ethernet header destination MAC address based on
        the destination IP address.
        """
# The rules are:
#   1. send to the group mac address address corresponding to the IP.dst
        if ip != None and ip.haslayer(IP) and ether != None and ether.haslayer(Ether):
            # Map the low 23 bits of the group IP into the 01:00:5e multicast OUI.
            iplong = atol(ip.dst)
            ether.dst = "01:00:5e:%02x:%02x:%02x" % ( (iplong>>16)&0x7F, (iplong>>8)&0xFF, (iplong)&0xFF )
            # print "igmpize ip " + ip.dst + " as mac " + ether.dst
            return True
        else:
            return False

#--------------------------------------------------------------------------
    def adjust_ip (self, ip=None):
        """Called to explicitely fixup an associated IP header

        The function adjusts the IP header based on conformance rules
        and the group address encoded in the IGMP message.
        The rules are:
        1. Send General Group Query to 224.0.0.1 (all systems)
        2. Send Leave Group to 224.0.0.2 (all routers)
        3a.Otherwise send the packet to the group address
        3b.Send reports/joins to the group address
        4. ttl = 1 (RFC 2236, section 2)
        5. send the packet with the router alert IP option (RFC 2236, section 2)
        """
        if ip != None and ip.haslayer(IP):
            if (self.type == 0x11):
                if (self.gaddr == "0.0.0.0"):
                    ip.dst = "224.0.0.1"                   # IP rule 1
                    retCode = True
                elif isValidMCAddr(self.gaddr):
                    ip.dst = self.gaddr                    # IP rule 3a
                    retCode = True
                else:
                    print("Warning: Using invalid Group Address")
                    retCode = False
            elif ((self.type == 0x17) and isValidMCAddr(self.gaddr)):
                ip.dst = "224.0.0.2"                       # IP rule 2
                retCode = True
            elif ((self.type == 0x12) or (self.type == 0x16)) and (isValidMCAddr(self.gaddr)):
                ip.dst = self.gaddr                        # IP rule 3b
                retCode = True
            else:
                print("Warning: Using invalid IGMP Type")
                retCode = False
        else:
            print("Warning: No IGMP Group Address set")
            retCode = False
        if retCode == True:
            ip.ttl=1                                       # IP Rule 4
            ip.options=[IPOption_Router_Alert()]           # IP rule 5
        return retCode
+
+
+bind_layers( IP, IGMP, frag=0, proto=2)
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/igmpv3.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/igmpv3.py
new file mode 100644
index 00000000..8322c7a3
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/igmpv3.py
@@ -0,0 +1,270 @@
+#! /usr/bin/env python
+
+# http://trac.secdev.org/scapy/ticket/31
+
+# scapy.contrib.description = IGMPv3
+# scapy.contrib.status = loads
+
+from scapy.packet import *
+
+""" Based on the following references
+ http://www.iana.org/assignments/igmp-type-numbers
+ http://www.rfc-editor.org/rfc/pdfrfc/rfc3376.txt.pdf
+
+"""
+
+# TODO: Merge IGMPv3 packet Bindlayers correct for
+# membership source/Group records
+# ConditionalField parameters for IGMPv3 commented out
+#
+# See RFC3376, Section 4. Message Formats for definitions of proper IGMPv3 message format
+# http://www.faqs.org/rfcs/rfc3376.html
+#
+# See RFC4286, For definitions of proper messages for Multicast Router Discovery.
+# http://www.faqs.org/rfcs/rfc4286.html
+#
+
+#import sys, socket, struct, time
+from scapy.all import *
+print("IGMPv3 is still under development - Nov 2010")
+
+
class IGMPv3gr(Packet):
    """IGMP Group Record for IGMPv3 Membership Report

    This class is derived from class Packet and should be concatenated to an
    instantiation of class IGMPv3. Within the IGMPv3 instantiation, the numgrp
    element will need to be manipulated to indicate the proper number of
    group records.
    """
    name = "IGMPv3gr"
    igmpv3grtypes = { 1 : "Mode Is Include",
                      2 : "Mode Is Exclude",
                      3 : "Change To Include Mode",
                      4 : "Change To Exclude Mode",
                      5 : "Allow New Sources",
                      6 : "Block Old Sources"}

    fields_desc = [ ByteEnumField("rtype", 1, igmpv3grtypes),
                    ByteField("auxdlen",0),
                    FieldLenField("numsrc", None, "srcaddrs"),
                    IPField("maddr", "0.0.0.0"),
                    FieldListField("srcaddrs", None, IPField("sa", "0.0.0.0"), "numsrc") ]
    #show_indent=0
#--------------------------------------------------------------------------
    def post_build(self, p, pay):
        """Called implicitly before a packet is sent.
        """
        p += pay
        if self.auxdlen != 0:
            print("NOTICE: A properly formatted and complaint V3 Group Record should have an Auxiliary Data length of zero (0).")
            print("        Subsequent Group Records are lost!")
        return p
#--------------------------------------------------------------------------
    def mysummary(self):
        """Display a summary of the IGMPv3 group record."""
        # The record-type field is named "rtype" (see fields_desc); the
        # previous format string referenced a nonexistent "type" field.
        return self.sprintf("IGMPv3 Group Record %IGMPv3gr.rtype% %IGMPv3gr.maddr%")
+
+
class IGMPv3(Packet):
    """IGMP Message Class for v3.

    This class is derived from class Packet.
    The fields defined below are a
    direct interpretation of the v3 Membership Query Message.
    Fields 'type' through 'qqic' are directly assignable.
    For 'numsrc', do not assign a value.
    Instead add to the 'srcaddrs' list to auto-set 'numsrc'. To
    assign values to 'srcaddrs', use the following methods:
      c = IGMPv3()
      c.srcaddrs = ['1.2.3.4', '5.6.7.8']
      c.srcaddrs += ['192.168.10.24']
    At this point, 'c.numsrc' is three (3)

    'chksum' is automagically calculated before the packet is sent.

    'mrcode' is also the Advertisement Interval field

    """
    name = "IGMPv3"
    igmpv3types = { 0x11 : "Membership Query",
                    0x22 : "Version 3 Membership Report",
                    0x30 : "Multicast Router Advertisement",
                    0x31 : "Multicast Router Solicitation",
                    0x32 : "Multicast Router Termination"}

    fields_desc = [ ByteEnumField("type", 0x11, igmpv3types),
                    ByteField("mrcode",0),
                    XShortField("chksum", None),
                    IPField("gaddr", "0.0.0.0")
                    ]
    # use float_encode()

    # if type = 0x11 (Membership Query), the next field is group address
    #   ConditionalField(IPField("gaddr", "0.0.0.0"), "type", lambda x:x==0x11),
    # else if type = 0x22 (Membership Report), the next fields are
    # reserved and number of group records
    #ConditionalField(ShortField("rsvd2", 0), "type", lambda x:x==0x22),
    #ConditionalField(ShortField("numgrp", 0), "type", lambda x:x==0x22),
#     FieldLenField("numgrp", None, "grprecs")]
    # else if type = 0x30 (Multicast Router Advertisement), the next fields are
    # query interval and robustness
    #ConditionalField(ShortField("qryIntvl", 0), "type", lambda x:x==0x30),
    #ConditionalField(ShortField("robust", 0), "type", lambda x:x==0x30),
# The following are only present for membership queries
    #  ConditionalField(BitField("resv", 0, 4), "type", lambda x:x==0x11),
    #  ConditionalField(BitField("s", 0, 1), "type", lambda x:x==0x11),
    #  ConditionalField(BitField("qrv", 0, 3), "type", lambda x:x==0x11),
    #  ConditionalField(ByteField("qqic",0), "type", lambda x:x==0x11),
    #  ConditionalField(FieldLenField("numsrc", None, "srcaddrs"), "type", lambda x:x==0x11),
    #  ConditionalField(FieldListField("srcaddrs", None, IPField("sa", "0.0.0.0"), "numsrc"), "type", lambda x:x==0x11),

#--------------------------------------------------------------------------
    def float_encode(self, value):
        """Convert the integer value to its IGMPv3 encoded time value if needed.

        If value < 128, return the value specified. If >= 128, encode as a floating
        point value. Value can be 0 - 31744.
        """
        if value < 128:
            code = value
        elif value > 31743:
            code = 255
        else:
            exp=0
            value>>=3
            while(value>31):
                exp+=1
                value>>=1
            exp<<=4
            code = 0x80 | exp | (value & 0x0F)
        return code

#--------------------------------------------------------------------------
    def post_build(self, p, pay):
        """Called implicitly before a packet is sent to compute and place IGMPv3 checksum.

        Parameters:
          self    The instantiation of an IGMPv3 class
          p       The IGMPv3 message in hex in network byte order
          pay     Additional payload for the IGMPv3 message
        """
        p += pay
        if self.type in [0, 0x31, 0x32, 0x22]:   # for these, field is reserved (0)
            # NOTE(review): chr() concatenation assumes py2 str packets; under
            # python3 bytes these two rewrites raise TypeError -- confirm
            # which runtime this tree targets.
            p = p[:1]+chr(0)+p[2:]
        if self.chksum is None:
            ck = checksum(p)
            p = p[:2]+chr(ck>>8)+chr(ck&0xff)+p[4:]
        return p

#--------------------------------------------------------------------------
    def mysummary(self):
        """Display a summary of the IGMPv3 object."""

        if isinstance(self.underlayer, IP):
            return self.underlayer.sprintf("IGMPv3: %IP.src% > %IP.dst% %IGMPv3.type% %IGMPv3.gaddr%")
        else:
            return self.sprintf("IGMPv3 %IGMPv3.type% %IGMPv3.gaddr%")

#--------------------------------------------------------------------------
    def igmpize(self, ip=None, ether=None):
        """Called to explicitely fixup associated IP and Ethernet headers

        Parameters:
          self    The instantiation of an IGMP class.
          ip      The instantiation of the associated IP class.
          ether   The instantiation of the associated Ethernet.

        Returns:
          True    The tuple ether/ip/self passed all check and represents
                  a proper IGMP packet.
          False   One of more validation checks failed and no fields
                  were adjusted.

        The function will examine the IGMP message to assure proper format.
        Corrections will be attempted if possible. The IP header is then properly
        adjusted to ensure correct formatting and assignment. The Ethernet header
        is then adjusted to the proper IGMP packet format.
        """

# The rules are:
#   1. ttl = 1 (RFC 2236, section 2)
#  igmp_binds = [ (IP, IGMP,   { "proto": 2 , "ttl": 1 }),
#   2. tos = 0xC0 (RFC 3376, section 4)
#               (IP, IGMPv3, { "proto": 2 , "ttl": 1, "tos":0xc0 }),
#               (IGMPv3, IGMPv3gr, { }) ]
# The rules are:
#   1.  the Max Response time is meaningful only in Membership Queries and should be zero
#       otherwise (RFC 2236, section 2.2)

        if (self.type != 0x11):         #rule 1
            # This class has no "mrtime" field (unlike v1/v2 IGMP); the max
            # response code field here is "mrcode".  The previous assignment
            # to self.mrtime only created a stray attribute.
            self.mrcode = 0

        if (self.adjust_ip(ip) == True):
            if (self.adjust_ether(ip, ether) == True): return True
        return False

#--------------------------------------------------------------------------
    def adjust_ether (self, ip=None, ether=None):
        """Called to explicitely fixup an associated Ethernet header

        The function adjusts the ethernet header destination MAC address based on
        the destination IP address.
        """
# The rules are:
#   1. send to the group mac address address corresponding to the IP.dst
        if ip != None and ip.haslayer(IP) and ether != None and ether.haslayer(Ether):
            # Map the low 23 bits of the group IP into the 01:00:5e multicast OUI.
            iplong = atol(ip.dst)
            ether.dst = "01:00:5e:%02x:%02x:%02x" % ( (iplong>>16)&0x7F, (iplong>>8)&0xFF, (iplong)&0xFF )
            # print "igmpize ip " + ip.dst + " as mac " + ether.dst
            return True
        else:
            return False

#--------------------------------------------------------------------------
    def adjust_ip (self, ip=None):
        """Called to explicitely fixup an associated IP header

        The function adjusts the IP header based on conformance rules
        and the group address encoded in the IGMP message.
        The rules are:
        1. Send General Group Query to 224.0.0.1 (all systems)
        2. Send Leave Group to 224.0.0.2 (all routers)
        3a.Otherwise send the packet to the group address
        3b.Send reports/joins to the group address
        4. ttl = 1 (RFC 2236, section 2)
        5. send the packet with the router alert IP option (RFC 2236, section 2)

        NOTE(review): the type values tested below (0x17, 0x12, 0x16) are
        IGMPv1/v2 types, not the v3 report type 0x22 -- preserved as-is from
        upstream; verify against RFC 3376 before relying on this path.
        Also, isValidMCAddr is defined in scapy.contrib.igmp and must be in
        scope for this to work.
        """
        if ip != None and ip.haslayer(IP):
            if (self.type == 0x11):
                if (self.gaddr == "0.0.0.0"):
                    ip.dst = "224.0.0.1"                   # IP rule 1
                    retCode = True
                elif isValidMCAddr(self.gaddr):
                    ip.dst = self.gaddr                    # IP rule 3a
                    retCode = True
                else:
                    print("Warning: Using invalid Group Address")
                    retCode = False
            elif ((self.type == 0x17) and isValidMCAddr(self.gaddr)):
                ip.dst = "224.0.0.2"                       # IP rule 2
                retCode = True
            elif ((self.type == 0x12) or (self.type == 0x16)) and (isValidMCAddr(self.gaddr)):
                ip.dst = self.gaddr                        # IP rule 3b
                retCode = True
            else:
                print("Warning: Using invalid IGMP Type")
                retCode = False
        else:
            print("Warning: No IGMP Group Address set")
            retCode = False
        if retCode == True:
            ip.ttl=1                                       # IP Rule 4
            ip.options=[IPOption_Router_Alert()]           # IP rule 5
        return retCode
+
+
+bind_layers( IP, IGMPv3, frag=0, proto=2, ttl=1, tos=0xc0)
+bind_layers( IGMPv3, IGMPv3gr, frag=0, proto=2)
+bind_layers( IGMPv3gr, IGMPv3gr, frag=0, proto=2)
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ikev2.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ikev2.py
new file mode 100644
index 00000000..fd38b80c
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ikev2.py
@@ -0,0 +1,362 @@
+#!/usr/bin/env python
+
+# http://trac.secdev.org/scapy/ticket/353
+
+# scapy.contrib.description = IKEv2
+# scapy.contrib.status = loads
+
+from scapy.all import *
+import logging
+
+
+## Modified from the original ISAKMP code by Yaron Sheffer <yaronf.ietf@gmail.com>, June 2010.
+
+import struct
+from scapy.packet import *
+from scapy.fields import *
+from scapy.ansmachine import *
+from scapy.layers.inet import IP,UDP
+from scapy.sendrecv import sr
+
+# see http://www.iana.org/assignments/ikev2-parameters for details
# IANA IKEv2 transform/attribute registries: maps a human category name to
# (transform-type number, {algorithm name: transform ID}, flags).
# see http://www.iana.org/assignments/ikev2-parameters for details
IKEv2AttributeTypes= { "Encryption":    (1, { "DES-IV64"  : 1,
                                                "DES" : 2,
                                                "3DES" : 3,
                                                "RC5" : 4,
                                                "IDEA" : 5,
                                                "CAST" : 6,
                                                "Blowfish" : 7,
                                                "3IDEA" : 8,
                                                "DES-IV32" : 9,
                                                "AES-CBC" : 12,
                                                "AES-CTR" : 13,
                                                "AES-CCM-8" : 14,
                                                "AES-CCM-12" : 15,
                                                "AES-CCM-16" : 16,
                                                "AES-GCM-8ICV" : 18,
                                                "AES-GCM-12ICV" : 19,
                                                "AES-GCM-16ICV" : 20,
                                                "Camellia-CBC" : 23,
                                                "Camellia-CTR" : 24,
                                                "Camellia-CCM-8ICV" : 25,
                                                "Camellia-CCM-12ICV" : 26,
                                                "Camellia-CCM-16ICV" : 27,
                                        }, 0),
                         "PRF": (2, {"PRF_HMAC_MD5":1,
                                     "PRF_HMAC_SHA1":2,
                                     "PRF_HMAC_TIGER":3,
                                     "PRF_AES128_XCBC":4,
                                     "PRF_HMAC_SHA2_256":5,
                                     "PRF_HMAC_SHA2_384":6,
                                     "PRF_HMAC_SHA2_512":7,
                                     "PRF_AES128_CMAC":8,
                                 }, 0),
                         "Integrity":    (3, { "HMAC-MD5-96": 1,
                                               "HMAC-SHA1-96": 2,
                                               "DES-MAC": 3,
                                               "KPDK-MD5": 4,
                                               "AES-XCBC-96": 5,
                                               "HMAC-MD5-128": 6,
                                               "HMAC-SHA1-160": 7,
                                               "AES-CMAC-96": 8,
                                               "AES-128-GMAC": 9,
                                               "AES-192-GMAC": 10,
                                               "AES-256-GMAC": 11,
                                               "SHA2-256-128": 12,
                                               "SHA2-384-192": 13,
                                               "SHA2-512-256": 14,
                                       }, 0),
                         "GroupDesc":    (4, { "768MODPgr"  : 1,
                                               "1024MODPgr" : 2,
                                               "1536MODPgr" : 5,
                                               "2048MODPgr" : 14,
                                               "3072MODPgr" : 15,
                                               "4096MODPgr" : 16,
                                               "6144MODPgr" : 17,
                                               "8192MODPgr" : 18,
                                               "256randECPgr" : 19,
                                               "384randECPgr" : 20,
                                               "521randECPgr" : 21,
                                               "1024MODP160POSgr"  : 22,
                                               "2048MODP224POSgr"  : 23,
                                               "2048MODP256POSgr"  : 24,
                                               "192randECPgr" : 25,
                                               "224randECPgr" : 26,
                                       }, 0),
                         "Extended Sequence Number": (5, {"No ESN":     0,
                                                          "ESN":   1,  }, 0),
                         }
+
# the name 'IKEv2TransformTypes' is actually a misnomer (since the table
# holds info for all IKEv2 Attribute types, not just transforms, but we'll
# keep it for backwards compatibility... for now at least
IKEv2TransformTypes = IKEv2AttributeTypes

# Derive the reverse lookup: transform-type number -> (name, {ID: algo name}, flags).
IKEv2TransformNum = {}
for n in IKEv2TransformTypes:
    val = IKEv2TransformTypes[n]
    tmp = {}
    for e in val[1]:
        tmp[val[1][e]] = e
    IKEv2TransformNum[val[0]] = (n,tmp, val[2])

# And: transform-type number -> category name, used by ByteEnumField.
IKEv2Transforms = {}
for n in IKEv2TransformTypes:
    IKEv2Transforms[IKEv2TransformTypes[n][0]]=n

# Remove the loop variables so they don't leak into the module namespace.
del(n)
del(e)
del(tmp)
del(val)
+
# Note: Transform and Proposal can only be used inside the SA payload
# Payload-type table indexed by the numeric next-payload value: 0-3 are
# special, 4-32 are unassigned (padded with ""), then RFC 4306 types 33+.
IKEv2_payload_type = ["None", "", "Proposal", "Transform"]

IKEv2_payload_type.extend([""] * 29)
IKEv2_payload_type.extend(["SA","KE","IDi","IDr", "CERT","CERTREQ","AUTH","Nonce","Notify","Delete",
                           "VendorID","TSi","TSr","Encrypted","CP","EAP"])

# Exchange types start at 34 (IKE_SA_INIT) per RFC 4306.
IKEv2_exchange_type = [""] * 34
IKEv2_exchange_type.extend(["IKE_SA_INIT","IKE_AUTH","CREATE_CHILD_SA",
                            "INFORMATIONAL", "IKE_SESSION_RESUME"])
+
+
class IKEv2_class(Packet):
    # Common base for all IKEv2 payloads: dissects the next payload by
    # looking up "IKEv2_payload_<name>" in the module globals, falling back
    # to the generic IKEv2_payload.
    def guess_payload_class(self, payload):
        np = self.next_payload
        logging.debug("For IKEv2_class np=%d" % np)
        if np == 0:
            # next_payload 0 means "no next payload": keep the rest raw.
            return conf.raw_layer
        elif np < len(IKEv2_payload_type):
            pt = IKEv2_payload_type[np]
            logging.debug(globals().get("IKEv2_payload_%s" % pt, IKEv2_payload))
            return globals().get("IKEv2_payload_%s" % pt, IKEv2_payload)
        else:
            return IKEv2_payload
+
+
class IKEv2(IKEv2_class): # rfc4306
    # IKEv2 message header: SPI pair, next payload, version 2.0, exchange
    # type, flags, message ID and total length (auto-filled on build).
    name = "IKEv2"
    fields_desc = [
        StrFixedLenField("init_SPI","",8),
        StrFixedLenField("resp_SPI","",8),
        ByteEnumField("next_payload",0,IKEv2_payload_type),
        XByteField("version",0x20), # IKEv2, right?
        ByteEnumField("exch_type",0,IKEv2_exchange_type),
        FlagsField("flags",0, 8, ["res0","res1","res2","Initiator","Version","Response","res6","res7"]),
        IntField("id",0),
        IntField("length",None)
        ]

    def guess_payload_class(self, payload):
        # NOTE(review): this tests reserved bit 0, not a defined RFC flag --
        # preserved as-is from upstream; verify intent before changing.
        if self.flags & 1:
            return conf.raw_layer
        return IKEv2_class.guess_payload_class(self, payload)

    def answers(self, other):
        # Messages belong to the same exchange when the initiator SPI matches.
        if isinstance(other, IKEv2):
            if other.init_SPI == self.init_SPI:
                return 1
        return 0
    def post_build(self, p, pay):
        # Auto-fill the 32-bit total length at offset 24 (end of header).
        p += pay
        if self.length is None:
            p = p[:24]+struct.pack("!I",len(p))+p[28:]
        return p
+
+
class IKEv2_Key_Length_Attribute(IntField):
    # We only support the fixed-length Key Length attribute (the only one currently defined)
    # Wire form is a 32-bit TV attribute: high 16 bits are the attribute type
    # (0x800E = Key Length, AF bit set), low 16 bits are the key length.
    name="key length"
    def __init__(self, name):
        # The default must be an int: IntField packs it with struct, and the
        # previous string default "0x800E0000" could never be packed.
        IntField.__init__(self, name, 0x800E0000)

    def i2h(self, pkt, x):
        # Expose only the key-length half to the user.
        return IntField.i2h(self, pkt, x & 0xFFFF)

    def h2i(self, pkt, x):
        # NOTE(review): upstream passes packed bytes into IntField.h2i --
        # looks suspicious but is preserved; verify against scapy field
        # semantics before changing.
        return IntField.h2i(self, pkt, struct.pack("!I", 0x800E0000 | int(x, 0)))
+
+
class IKEv2_Transform_ID(ShortField):
    # ShortField whose human representation is the algorithm name, looked up
    # in the per-transform-type table IKEv2TransformNum.
    def i2h(self, pkt, x):
        """Numeric transform ID -> algorithm name for this packet's transform type."""
        if pkt == None:
            return None
        else:
            map = IKEv2TransformNum[pkt.transform_type][1]
            return map[x]

    def h2i(self, pkt, x):
        """Algorithm name -> numeric transform ID (None when unknown)."""
        if pkt == None:
            return None
        else:
            map = IKEv2TransformNum[pkt.transform_type][1]
            # Iterate the dict directly; the previous `keys(map)` call was a
            # NameError (no such builtin).
            for k in map:
                if map[k] == x:
                    return k
            return None
+
class IKEv2_payload_Transform(IKEv2_class):
    # Transform substructure (inside a Proposal): type, ID, and an optional
    # Key Length attribute present whenever length exceeds the fixed 8 bytes.
    name = "IKE Transform"
    fields_desc = [
        ByteEnumField("next_payload",None,{0:"last", 3:"Transform"}),
        ByteField("res",0),
        ShortField("length",8),
        ByteEnumField("transform_type",None,IKEv2Transforms),
        ByteField("res2",0),
        IKEv2_Transform_ID("transform_id", 0),
        ConditionalField(IKEv2_Key_Length_Attribute("key_length"), lambda pkt: pkt.length > 8),
        ]
+
class IKEv2_payload_Proposal(IKEv2_class):
    # Proposal substructure (inside the SA payload): protocol, optional SPI,
    # and a chain of Transform substructures; length auto-adjusts by +8 for
    # the fixed part.
    name = "IKEv2 Proposal"
    fields_desc = [
        ByteEnumField("next_payload",None,{0:"last", 2:"Proposal"}),
        ByteField("res",0),
        FieldLenField("length",None,"trans","H", adjust=lambda pkt,x:x+8),
        ByteField("proposal",1),
        ByteEnumField("proto",1,{1:"IKEv2"}),
        FieldLenField("SPIsize",None,"SPI","B"),
        ByteField("trans_nb",None),
        StrLenField("SPI","",length_from=lambda x:x.SPIsize),
        PacketLenField("trans",conf.raw_layer(),IKEv2_payload_Transform,length_from=lambda x:x.length-8),
        ]
+
+
class IKEv2_payload(IKEv2_class):
    # Generic payload: 4-byte header (next payload, critical flag, length)
    # plus an opaque body.  Used when no dedicated payload class exists.
    name = "IKEv2 Payload"
    fields_desc = [
        ByteEnumField("next_payload",None,IKEv2_payload_type),
        FlagsField("flags",0, 8, ["critical","res1","res2","res3","res4","res5","res6","res7"]),
        FieldLenField("length",None,"load","H", adjust=lambda pkt,x:x+4),
        StrLenField("load","",length_from=lambda x:x.length-4),
        ]
+
+
class IKEv2_payload_VendorID(IKEv2_class):
    # Vendor ID payload (type 43); overloads the parent header's
    # next_payload when stacked under IKEv2.
    name = "IKEv2 Vendor ID"
    overload_fields = { IKEv2: { "next_payload":43 }}
    fields_desc = [
        ByteEnumField("next_payload",None,IKEv2_payload_type),
        ByteField("res",0),
        FieldLenField("length",None,"vendorID","H", adjust=lambda pkt,x:x+4),
        StrLenField("vendorID","",length_from=lambda x:x.length-4),
        ]
+
+class IKEv2_payload_Delete(IKEv2_class):
+ name = "IKEv2 Vendor ID"
+ overload_fields = { IKEv2: { "next_payload":42 }}
+ fields_desc = [
+ ByteEnumField("next_payload",None,IKEv2_payload_type),
+ ByteField("res",0),
+ FieldLenField("length",None,"vendorID","H", adjust=lambda pkt,x:x+4),
+ StrLenField("vendorID","",length_from=lambda x:x.length-4),
+ ]
+
+class IKEv2_payload_SA(IKEv2_class):
+ name = "IKEv2 SA"
+ overload_fields = { IKEv2: { "next_payload":33 }}
+ fields_desc = [
+ ByteEnumField("next_payload",None,IKEv2_payload_type),
+ ByteField("res",0),
+ FieldLenField("length",None,"prop","H", adjust=lambda pkt,x:x+4),
+ PacketLenField("prop",conf.raw_layer(),IKEv2_payload_Proposal,length_from=lambda x:x.length-4),
+ ]
+
+class IKEv2_payload_Nonce(IKEv2_class):
+ name = "IKEv2 Nonce"
+ overload_fields = { IKEv2: { "next_payload":40 }}
+ fields_desc = [
+ ByteEnumField("next_payload",None,IKEv2_payload_type),
+ ByteField("res",0),
+ FieldLenField("length",None,"load","H", adjust=lambda pkt,x:x+4),
+ StrLenField("load","",length_from=lambda x:x.length-4),
+ ]
+
+class IKEv2_payload_Notify(IKEv2_class):
+ name = "IKEv2 Notify"
+ overload_fields = { IKEv2: { "next_payload":41 }}
+ fields_desc = [
+ ByteEnumField("next_payload",None,IKEv2_payload_type),
+ ByteField("res",0),
+ FieldLenField("length",None,"load","H", adjust=lambda pkt,x:x+4),
+ StrLenField("load","",length_from=lambda x:x.length-4),
+ ]
+
+class IKEv2_payload_KE(IKEv2_class):
+ name = "IKEv2 Key Exchange"
+ overload_fields = { IKEv2: { "next_payload":34 }}
+ fields_desc = [
+ ByteEnumField("next_payload",None,IKEv2_payload_type),
+ ByteField("res",0),
+ FieldLenField("length",None,"load","H", adjust=lambda pkt,x:x+6),
+ ShortEnumField("group", 0, IKEv2TransformTypes['GroupDesc'][1]),
+ StrLenField("load","",length_from=lambda x:x.length-6),
+ ]
+
+class IKEv2_payload_IDi(IKEv2_class):
+ name = "IKEv2 Identification - Initiator"
+ overload_fields = { IKEv2: { "next_payload":35 }}
+ fields_desc = [
+ ByteEnumField("next_payload",None,IKEv2_payload_type),
+ ByteField("res",0),
+ FieldLenField("length",None,"load","H",adjust=lambda pkt,x:x+8),
+ ByteEnumField("IDtype",1,{1:"IPv4_addr", 11:"Key"}),
+ ByteEnumField("ProtoID",0,{0:"Unused"}),
+ ShortEnumField("Port",0,{0:"Unused"}),
+# IPField("IdentData","127.0.0.1"),
+ StrLenField("load","",length_from=lambda x:x.length-8),
+ ]
+
+class IKEv2_payload_IDr(IKEv2_class):
+ name = "IKEv2 Identification - Responder"
+ overload_fields = { IKEv2: { "next_payload":36 }}
+ fields_desc = [
+ ByteEnumField("next_payload",None,IKEv2_payload_type),
+ ByteField("res",0),
+ FieldLenField("length",None,"load","H",adjust=lambda pkt,x:x+8),
+ ByteEnumField("IDtype",1,{1:"IPv4_addr", 11:"Key"}),
+ ByteEnumField("ProtoID",0,{0:"Unused"}),
+ ShortEnumField("Port",0,{0:"Unused"}),
+# IPField("IdentData","127.0.0.1"),
+ StrLenField("load","",length_from=lambda x:x.length-8),
+ ]
+
+
+
+class IKEv2_payload_Encrypted(IKEv2_class):
+ name = "IKEv2 Encrypted and Authenticated"
+ overload_fields = { IKEv2: { "next_payload":46 }}
+ fields_desc = [
+ ByteEnumField("next_payload",None,IKEv2_payload_type),
+ ByteField("res",0),
+ FieldLenField("length",None,"load","H",adjust=lambda pkt,x:x+4),
+ StrLenField("load","",length_from=lambda x:x.length-4),
+ ]
+
+
+
+IKEv2_payload_type_overload = {}
+for i in range(len(IKEv2_payload_type)):
+ name = "IKEv2_payload_%s" % IKEv2_payload_type[i]
+ if name in globals():
+ IKEv2_payload_type_overload[globals()[name]] = {"next_payload":i}
+
+del(i)
+del(name)
+IKEv2_class.overload_fields = IKEv2_payload_type_overload.copy()
+
+split_layers(UDP, ISAKMP, sport=500)
+split_layers(UDP, ISAKMP, dport=500)
+
+bind_layers( UDP, IKEv2, dport=500, sport=500) # TODO: distinguish IKEv1/IKEv2
+bind_layers( UDP, IKEv2, dport=4500, sport=4500)
+
+def ikev2scan(ip):
+ return sr(IP(dst=ip)/UDP()/IKEv2(init_SPI=RandString(8),
+ exch_type=34)/IKEv2_payload_SA(prop=IKEv2_payload_Proposal()))
+
+# conf.debug_dissector = 1
+
+if __name__ == "__main__":
+ interact(mydict=globals(), mybanner="IKEv2 alpha-level protocol implementation")
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ldp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ldp.py
new file mode 100644
index 00000000..bc2464ab
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ldp.py
@@ -0,0 +1,475 @@
+# scapy.contrib.description = Label Distribution Protocol (LDP)
+# scapy.contrib.status = loads
+
+# http://git.savannah.gnu.org/cgit/ldpscapy.git/snapshot/ldpscapy-5285b81d6e628043df2a83301b292f24a95f0ba1.tar.gz
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Copyright (C) 2010 Florian Duraffourg
+
+import struct
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.ansmachine import *
+from scapy.layers.inet import UDP
+from scapy.layers.inet import TCP
+from scapy.base_classes import Net
+
+
+# Guess payload
+def guess_payload(p):
+ LDPTypes = {
+ 0x0001: LDPNotification,
+ 0x0100: LDPHello,
+ 0x0200: LDPInit,
+ 0x0201: LDPKeepAlive,
+ 0x0300: LDPAddress,
+ 0x0301: LDPAddressWM,
+ 0x0400: LDPLabelMM,
+ 0x0401: LDPLabelReqM,
+ 0x0404: LDPLabelARM,
+ 0x0402: LDPLabelWM,
+ 0x0403: LDPLabelRelM,
+ }
+ type = struct.unpack("!H",p[0:2])[0]
+ type = type & 0x7fff
+ if type == 0x0001 and struct.unpack("!H",p[2:4])[0] > 20:
+ return LDP
+ if type in LDPTypes:
+ return LDPTypes[type]
+ else:
+ return conf.raw_layer
+
+## Fields ##
+
+# 3.4.1. FEC TLV
+
+class FecTLVField(StrField):
+ islist=1
+ def m2i(self, pkt, x):
+ nbr = struct.unpack("!H",x[2:4])[0]
+ used = 0
+ x=x[4:]
+ list=[]
+ while x:
+ #if x[0] == 1:
+ # list.append('Wildcard')
+ #else:
+ #mask=ord(x[8*i+3])
+ #add=inet_ntoa(x[8*i+4:8*i+8])
+ mask=ord(x[3])
+ nbroctets = mask / 8
+ if mask % 8:
+ nbroctets += 1
+ add=inet_ntoa(x[4:4+nbroctets]+"\x00"*(4-nbroctets))
+ list.append( (add, mask) )
+ used += 4 + nbroctets
+ x=x[4+nbroctets:]
+ return list
+ def i2m(self, pkt, x):
+ if type(x) is str:
+ return x
+ s = "\x01\x00"
+ l = 0
+ fec = ""
+ for o in x:
+ fec += "\x02\x00\x01"
+ # mask length
+ fec += struct.pack("!B",o[1])
+ # Prefix
+ fec += inet_aton(o[0])
+ l += 8
+ s += struct.pack("!H",l)
+ s += fec
+ return s
+ def size(self, s):
+ """Get the size of this field"""
+ l = 4 + struct.unpack("!H",s[2:4])[0]
+ return l
+ def getfield(self, pkt, s):
+ l = self.size(s)
+ return s[l:],self.m2i(pkt, s[:l])
+
+
+# 3.4.2.1. Generic Label TLV
+
+class LabelTLVField(StrField):
+ def m2i(self, pkt, x):
+ return struct.unpack("!I",x[4:8])[0]
+ def i2m(self, pkt, x):
+ if type(x) is str:
+ return x
+ s = "\x02\x00\x00\x04"
+ s += struct.pack("!I",x)
+ return s
+ def size(self, s):
+ """Get the size of this field"""
+ l = 4 + struct.unpack("!H",s[2:4])[0]
+ return l
+ def getfield(self, pkt, s):
+ l = self.size(s)
+ return s[l:],self.m2i(pkt, s[:l])
+
+
+# 3.4.3. Address List TLV
+
+class AddressTLVField(StrField):
+ islist=1
+ def m2i(self, pkt, x):
+ nbr = struct.unpack("!H",x[2:4])[0] - 2
+ nbr /= 4
+ x=x[6:]
+ list=[]
+ for i in range(0,nbr):
+ add = x[4*i:4*i+4]
+ list.append(inet_ntoa(add))
+ return list
+ def i2m(self, pkt, x):
+ if type(x) is str:
+ return x
+ l=2+len(x)*4
+ s = "\x01\x01"+struct.pack("!H",l)+"\x00\x01"
+ for o in x:
+ s += inet_aton(o)
+ return s
+ def size(self, s):
+ """Get the size of this field"""
+ l = 4 + struct.unpack("!H",s[2:4])[0]
+ return l
+ def getfield(self, pkt, s):
+ l = self.size(s)
+ return s[l:],self.m2i(pkt, s[:l])
+
+
+# 3.4.6. Status TLV
+
+class StatusTLVField(StrField):
+ islist=1
+ def m2i(self, pkt, x):
+ l = []
+ statuscode = struct.unpack("!I",x[4:8])[0]
+ l.append( (statuscode & 2**31) >> 31)
+ l.append( (statuscode & 2**30) >> 30)
+ l.append( statuscode & 0x3FFFFFFF )
+ l.append( struct.unpack("!I", x[8:12])[0] )
+ l.append( struct.unpack("!H", x[12:14])[0] )
+ return l
+ def i2m(self, pkt, x):
+ if type(x) is str:
+ return x
+ s = "\x03\x00" + struct.pack("!H",10)
+ statuscode = 0
+ if x[0] != 0:
+ statuscode += 2**31
+ if x[1] != 0:
+ statuscode += 2**30
+ statuscode += x[2]
+ s += struct.pack("!I",statuscode)
+ if len(x) > 3:
+ s += struct.pack("!I",x[3])
+ else:
+ s += "\x00\x00\x00\x00"
+ if len(x) > 4:
+ s += struct.pack("!H",x[4])
+ else:
+ s += "\x00\x00"
+ return s
+ def getfield(self, pkt, s):
+ l = 14
+ return s[l:],self.m2i(pkt, s[:l])
+
+
+# 3.5.2 Common Hello Parameters TLV
+class CommonHelloTLVField(StrField):
+ islist = 1
+ def m2i(self, pkt, x):
+ list = []
+ v = struct.unpack("!H",x[4:6])[0]
+ list.append(v)
+ flags = struct.unpack("B",x[6])[0]
+ v = ( flags & 0x80 ) >> 7
+ list.append(v)
+ v = ( flags & 0x40 ) >> 7
+ list.append(v)
+ return list
+ def i2m(self, pkt, x):
+ if type(x) is str:
+ return x
+ s = "\x04\x00\x00\x04"
+ s += struct.pack("!H",x[0])
+ byte = 0
+ if x[1] == 1:
+ byte += 0x80
+ if x[2] == 1:
+ byte += 0x40
+ s += struct.pack("!B",byte)
+ s += "\x00"
+ return s
+ def getfield(self, pkt, s):
+ l = 8
+ return s[l:],self.m2i(pkt, s[:l])
+
+
+# 3.5.3 Common Session Parameters TLV
+class CommonSessionTLVField(StrField):
+ islist = 1
+ def m2i(self, pkt, x):
+ l = []
+ l.append(struct.unpack("!H",x[6:8])[0])
+ octet = struct.unpack("B",x[8:9])[0]
+ l.append( (octet & 2**7 ) >> 7 )
+ l.append( (octet & 2**6 ) >> 6 )
+ l.append( struct.unpack("B",x[9:10])[0] )
+ l.append( struct.unpack("!H",x[10:12])[0] )
+ l.append( inet_ntoa(x[12:16]) )
+ l.append( struct.unpack("!H",x[16:18])[0] )
+ return l
+ def i2m(self, pkt, x):
+ if type(x) is str:
+ return x
+ s = "\x05\x00\x00\x0E\x00\x01"
+ s += struct.pack("!H",x[0])
+ octet = 0
+ if x[1] != 0:
+ octet += 2**7
+ if x[2] != 0:
+ octet += 2**6
+ s += struct.pack("!B",octet)
+ s += struct.pack("!B",x[3])
+ s += struct.pack("!H",x[4])
+ s += inet_aton(x[5])
+ s += struct.pack("!H",x[6])
+ return s
+ def getfield(self, pkt, s):
+ l = 18
+ return s[l:],self.m2i(pkt, s[:l])
+
+
+
+## Messages ##
+
+# 3.5.1. Notification Message
+class LDPNotification(Packet):
+ name = "LDPNotification"
+ fields_desc = [ BitField("u",0,1),
+ BitField("type", 0x0001, 15),
+ ShortField("len", None),
+ IntField("id", 0) ,
+ StatusTLVField("status",(0,0,0,0,0)) ]
+ def post_build(self, p, pay):
+ if self.len is None:
+ l = len(p) - 4
+ p = p[:2]+struct.pack("!H", l)+p[4:]
+ return p+pay
+ def guess_payload_class(self, p):
+ return guess_payload(p)
+
+
+# 3.5.2. Hello Message
+class LDPHello(Packet):
+ name = "LDPHello"
+ fields_desc = [ BitField("u",0,1),
+ BitField("type", 0x0100, 15),
+ ShortField("len", None),
+ IntField("id", 0) ,
+ CommonHelloTLVField("params",[180,0,0]) ]
+ def post_build(self, p, pay):
+ if self.len is None:
+ l = len(p) - 4
+ p = p[:2]+struct.pack("!H", l)+p[4:]
+ return p+pay
+ def guess_payload_class(self, p):
+ return guess_payload(p)
+
+
+# 3.5.3. Initialization Message
+class LDPInit(Packet):
+ name = "LDPInit"
+ fields_desc = [ BitField("u",0,1),
+ XBitField("type", 0x0200, 15),
+ ShortField("len", None),
+ IntField("id", 0),
+ CommonSessionTLVField("params",None)]
+ def post_build(self, p, pay):
+ if self.len is None:
+ l = len(p) - 4
+ p = p[:2]+struct.pack("!H", l)+p[4:]
+ return p+pay
+ def guess_payload_class(self, p):
+ return guess_payload(p)
+
+
+# 3.5.4. KeepAlive Message
+class LDPKeepAlive(Packet):
+ name = "LDPKeepAlive"
+ fields_desc = [ BitField("u",0,1),
+ XBitField("type", 0x0201, 15),
+ ShortField("len", None),
+ IntField("id", 0)]
+ def post_build(self, p, pay):
+ if self.len is None:
+ l = len(p) - 4
+ p = p[:2]+struct.pack("!H", l)+p[4:]
+ return p+pay
+ def guess_payload_class(self, p):
+ return guess_payload(p)
+
+
+# 3.5.5. Address Message
+
+class LDPAddress(Packet):
+ name = "LDPAddress"
+ fields_desc = [ BitField("u",0,1),
+ XBitField("type", 0x0300, 15),
+ ShortField("len", None),
+ IntField("id", 0),
+ AddressTLVField("address",None) ]
+ def post_build(self, p, pay):
+ if self.len is None:
+ l = len(p) - 4
+ p = p[:2]+struct.pack("!H", l)+p[4:]
+ return p+pay
+ def guess_payload_class(self, p):
+ return guess_payload(p)
+
+
+# 3.5.6. Address Withdraw Message
+
+class LDPAddressWM(Packet):
+ name = "LDPAddressWM"
+ fields_desc = [ BitField("u",0,1),
+ XBitField("type", 0x0301, 15),
+ ShortField("len", None),
+ IntField("id", 0),
+ AddressTLVField("address",None) ]
+ def post_build(self, p, pay):
+ if self.len is None:
+ l = len(p) - 4
+ p = p[:2]+struct.pack("!H", l)+p[4:]
+ return p+pay
+ def guess_payload_class(self, p):
+ return guess_payload(p)
+
+
+# 3.5.7. Label Mapping Message
+
+class LDPLabelMM(Packet):
+ name = "LDPLabelMM"
+ fields_desc = [ BitField("u",0,1),
+ XBitField("type", 0x0400, 15),
+ ShortField("len", None),
+ IntField("id", 0),
+ FecTLVField("fec",None),
+ LabelTLVField("label",0)]
+ def post_build(self, p, pay):
+ if self.len is None:
+ l = len(p) - 4
+ p = p[:2]+struct.pack("!H", l)+p[4:]
+ return p+pay
+ def guess_payload_class(self, p):
+ return guess_payload(p)
+
+# 3.5.8. Label Request Message
+
+class LDPLabelReqM(Packet):
+ name = "LDPLabelReqM"
+ fields_desc = [ BitField("u",0,1),
+ XBitField("type", 0x0401, 15),
+ ShortField("len", None),
+ IntField("id", 0),
+ FecTLVField("fec",None)]
+ def post_build(self, p, pay):
+ if self.len is None:
+ l = len(p) - 4
+ p = p[:2]+struct.pack("!H", l)+p[4:]
+ return p+pay
+ def guess_payload_class(self, p):
+ return guess_payload(p)
+
+
+# 3.5.9. Label Abort Request Message
+
+class LDPLabelARM(Packet):
+ name = "LDPLabelARM"
+ fields_desc = [ BitField("u",0,1),
+ XBitField("type", 0x0404, 15),
+ ShortField("len", None),
+ IntField("id", 0),
+ FecTLVField("fec",None),
+ IntField("labelRMid",0)]
+ def post_build(self, p, pay):
+ if self.len is None:
+ l = len(p) - 4
+ p = p[:2]+struct.pack("!H", l)+p[4:]
+ return p+pay
+ def guess_payload_class(self, p):
+ return guess_payload(p)
+
+
+# 3.5.10. Label Withdraw Message
+
+class LDPLabelWM(Packet):
+ name = "LDPLabelWM"
+ fields_desc = [ BitField("u",0,1),
+ XBitField("type", 0x0402, 15),
+ ShortField("len", None),
+ IntField("id", 0),
+ FecTLVField("fec",None),
+ LabelTLVField("label",0)]
+ def post_build(self, p, pay):
+ if self.len is None:
+ l = len(p) - 4
+ p = p[:2]+struct.pack("!H", l)+p[4:]
+ return p+pay
+ def guess_payload_class(self, p):
+ return guess_payload(p)
+
+
+# 3.5.11. Label Release Message
+
+class LDPLabelRelM(Packet):
+ name = "LDPLabelRelM"
+ fields_desc = [ BitField("u",0,1),
+ XBitField("type", 0x0403, 15),
+ ShortField("len", None),
+ IntField("id", 0),
+ FecTLVField("fec",None),
+ LabelTLVField("label",0)]
+ def post_build(self, p, pay):
+ if self.len is None:
+ l = len(p) - 4
+ p = p[:2]+struct.pack("!H", l)+p[4:]
+ return p+pay
+ def guess_payload_class(self, p):
+ return guess_payload(p)
+
+
+# 3.1. LDP PDUs
+class LDP(Packet):
+ name = "LDP"
+ fields_desc = [ ShortField("version",1),
+ ShortField("len", None),
+ IPField("id","127.0.0.1"),
+ ShortField("space",0) ]
+ def post_build(self, p, pay):
+ if self.len is None:
+ l = len(p)+len(pay)-4
+ p = p[:2]+struct.pack("!H", l)+p[4:]
+ return p+pay
+ def guess_payload_class(self, p):
+ return guess_payload(p)
+
+bind_layers( TCP, LDP, sport=646, dport=646 )
+bind_layers( UDP, LDP, sport=646, dport=646 )
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/mpls.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/mpls.py
new file mode 100644
index 00000000..037278c5
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/mpls.py
@@ -0,0 +1,17 @@
+# http://trac.secdev.org/scapy/ticket/31
+
+# scapy.contrib.description = MPLS
+# scapy.contrib.status = loads
+
+from scapy.packet import Packet,bind_layers
+from scapy.fields import BitField,ByteField
+from scapy.layers.l2 import Ether
+
+class MPLS(Packet):
+ name = "MPLS"
+ fields_desc = [ BitField("label", 3, 20),
+ BitField("cos", 0, 3),
+ BitField("s", 1, 1),
+ ByteField("ttl", 0) ]
+
+bind_layers(Ether, MPLS, type=0x8847)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ospf.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ospf.py
new file mode 100644
index 00000000..a6422bd8
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ospf.py
@@ -0,0 +1,833 @@
+#!/usr/bin/env python
+
+# scapy.contrib.description = OSPF
+# scapy.contrib.status = loads
+
+"""
+OSPF extension for Scapy <http://www.secdev.org/scapy>
+
+This module provides Scapy layers for the Open Shortest Path First
+routing protocol as defined in RFC 2328 and RFC 5340.
+
+Copyright (c) 2008 Dirk Loss : mail dirk-loss de
+Copyright (c) 2010 Jochen Bartl : jochen.bartl gmail com
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation; either version 2
+of the License, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+"""
+
+
+from scapy.all import *
+
+EXT_VERSION = "v0.9.2"
+
+
+class OSPFOptionsField(FlagsField):
+
+ def __init__(self, name="options", default=0, size=8,
+ names=["MT", "E", "MC", "NP", "L", "DC", "O", "DN"]):
+ FlagsField.__init__(self, name, default, size, names)
+
+
+_OSPF_types = {1: "Hello",
+ 2: "DBDesc",
+ 3: "LSReq",
+ 4: "LSUpd",
+ 5: "LSAck"}
+
+
+class OSPF_Hdr(Packet):
+ name = "OSPF Header"
+ fields_desc = [
+ ByteField("version", 2),
+ ByteEnumField("type", 1, _OSPF_types),
+ ShortField("len", None),
+ IPField("src", "1.1.1.1"),
+ IPField("area", "0.0.0.0"), # default: backbone
+ XShortField("chksum", None),
+ ShortEnumField("authtype", 0, {0:"Null", 1:"Simple", 2:"Crypto"}),
+ # Null or Simple Authentication
+ ConditionalField(XLongField("authdata", 0), lambda pkt:pkt.authtype != 2),
+ # Crypto Authentication
+ ConditionalField(XShortField("reserved", 0), lambda pkt:pkt.authtype == 2),
+ ConditionalField(ByteField("keyid", 1), lambda pkt:pkt.authtype == 2),
+ ConditionalField(ByteField("authdatalen", 0), lambda pkt:pkt.authtype == 2),
+ ConditionalField(XIntField("seq", 0), lambda pkt:pkt.authtype == 2),
+ # TODO: Support authdata (which is appended to the packets as if it were padding)
+ ]
+
+ def post_build(self, p, pay):
+ # TODO: Remove LLS data from pay
+ # LLS data blocks may be attached to OSPF Hello and DD packets
+ # The length of the LLS block shall not be included into the length of OSPF packet
+ # See <http://tools.ietf.org/html/rfc5613>
+ p += pay
+ l = self.len
+ if l is None:
+ l = len(p)
+ p = p[:2] + struct.pack("!H", l) + p[4:]
+ if self.chksum is None:
+ if self.authtype == 2:
+ ck = 0 # Crypto, see RFC 2328, D.4.3
+ else:
+ # Checksum is calculated without authentication data
+ # Algorithm is the same as in IP()
+ ck = checksum(p[:16] + p[24:])
+ p = p[:12] + chr(ck >> 8) + chr(ck & 0xff) + p[14:]
+ # TODO: Handle Crypto: Add message digest (RFC 2328, D.4.3)
+ return p
+
+ def hashret(self):
+ return struct.pack("H", self.area) + self.payload.hashret()
+
+ def answers(self, other):
+ if (isinstance(other, OSPF_Hdr) and
+ self.area == other.area and
+ self.type == 5): # Only acknowledgements answer other packets
+ return self.payload.answers(other.payload)
+ return 0
+
+
+class OSPF_Hello(Packet):
+ name = "OSPF Hello"
+ fields_desc = [IPField("mask", "255.255.255.0"),
+ ShortField("hellointerval", 10),
+ OSPFOptionsField(),
+ ByteField("prio", 1),
+ IntField("deadinterval", 40),
+ IPField("router", "0.0.0.0"),
+ IPField("backup", "0.0.0.0"),
+ FieldListField("neighbors", [], IPField("", "0.0.0.0"), length_from=lambda pkt: (pkt.underlayer.len - 44))]
+
+ def guess_payload_class(self, payload):
+ # check presence of LLS data block flag
+ if self.options & 0x10 == 0x10:
+ return OSPF_LLS_Hdr
+ else:
+ return Packet.guess_payload_class(self, payload)
+
+
+class LLS_Generic_TLV(Packet):
+ name = "LLS Generic"
+ fields_desc = [ShortField("type", 1),
+ FieldLenField("len", None, length_of=lambda x: x.val),
+ StrLenField("val", "", length_from=lambda x: x.len)]
+
+ def guess_payload_class(self, p):
+ return conf.padding_layer
+
+
+class LLS_ExtendedOptionsField(FlagsField):
+
+ def __init__(self, name="options", default=0, size=32,
+ names=["LR", "RS"]):
+ FlagsField.__init__(self, name, default, size, names)
+
+
+class LLS_Extended_Options(LLS_Generic_TLV):
+ name = "LLS Extended Options and Flags"
+ fields_desc = [ShortField("type", 1),
+ ShortField("len", 4),
+ LLS_ExtendedOptionsField()]
+
+
+class LLS_Crypto_Auth(LLS_Generic_TLV):
+ name = "LLS Cryptographic Authentication"
+ fields_desc = [ShortField("type", 2),
+ FieldLenField("len", 20, fmt="B", length_of=lambda x: x.authdata),
+ XIntField("sequence", "\x00\x00\x00\x00"),
+ StrLenField("authdata", "\x00" * 16, length_from=lambda x: x.len)]
+
+ def post_build(self, p, pay):
+ p += pay
+ l = self.len
+
+ if l is None:
+ # length = len(sequence) + len(authdata) + len(payload)
+ l = len(p[3:])
+ p = p[:2] + struct.pack("!H", l) + p[3:]
+
+ return p
+
+_OSPF_LLSclasses = {1: "LLS_Extended_Options",
+ 2: "LLS_Crypto_Auth"}
+
+
+def _LLSGuessPayloadClass(p, **kargs):
+ """ Guess the correct LLS class for a given payload """
+
+ cls = conf.raw_layer
+ if len(p) >= 4:
+ typ = struct.unpack("!H", p[0:2])[0]
+ clsname = _OSPF_LLSclasses.get(typ, "LLS_Generic_TLV")
+ cls = globals()[clsname]
+ return cls(p, **kargs)
+
+
+class OSPF_LLS_Hdr(Packet):
+ name = "OSPF Link-local signaling"
+ fields_desc = [XShortField("chksum", None),
+ # FIXME Length should be displayed in 32-bit words
+ ShortField("len", None),
+ PacketListField("llstlv", [], _LLSGuessPayloadClass)]
+
+ def post_build(self, p, pay):
+ p += pay
+ l = self.len
+ if l is None:
+ # Length in 32-bit words
+ l = len(p) / 4
+ p = p[:2] + struct.pack("!H", l) + p[4:]
+ if self.chksum is None:
+ c = checksum(p)
+ p = chr((c >> 8) & 0xff) + chr(c & 0xff) + p[2:]
+ return p
+
+_OSPF_LStypes = {1: "router",
+ 2: "network",
+ 3: "summaryIP",
+ 4: "summaryASBR",
+ 5: "external",
+ 7: "NSSAexternal"}
+
+_OSPF_LSclasses = {1: "OSPF_Router_LSA",
+ 2: "OSPF_Network_LSA",
+ 3: "OSPF_SummaryIP_LSA",
+ 4: "OSPF_SummaryASBR_LSA",
+ 5: "OSPF_External_LSA",
+ 7: "OSPF_NSSA_External_LSA"}
+
+
+def ospf_lsa_checksum(lsa):
+ """ Fletcher checksum for OSPF LSAs, returned as a 2 byte string.
+
+ Give the whole LSA packet as argument.
+ For details on the algorithm, see RFC 2328 chapter 12.1.7 and RFC 905 Annex B.
+ """
+ # This is based on the GPLed C implementation in Zebra <http://www.zebra.org/>
+
+ CHKSUM_OFFSET = 16
+
+ if len(lsa) < CHKSUM_OFFSET:
+ raise Exception("LSA Packet too short (%s bytes)" % len(lsa))
+
+ c0 = c1 = 0
+ # Calculation is done with checksum set to zero
+ lsa = lsa[:CHKSUM_OFFSET] + "\x00\x00" + lsa[CHKSUM_OFFSET + 2:]
+ for char in lsa[2:]: # leave out age
+ c0 += ord(char)
+ c1 += c0
+
+ c0 %= 255
+ c1 %= 255
+
+ x = ((len(lsa) - CHKSUM_OFFSET - 1) * c0 - c1) % 255
+
+ if (x <= 0):
+ x += 255
+
+ y = 510 - c0 - x
+
+ if (y > 255):
+ y -= 255
+ #checksum = (x << 8) + y
+
+ return chr(x) + chr(y)
+
+
+class OSPF_LSA_Hdr(Packet):
+ name = "OSPF LSA Header"
+ fields_desc = [ShortField("age", 1),
+ OSPFOptionsField(),
+ ByteEnumField("type", 1, _OSPF_LStypes),
+ IPField("id", "192.168.0.0"),
+ IPField("adrouter", "1.1.1.1"),
+ XIntField("seq", 0x80000001),
+ XShortField("chksum", 0),
+ ShortField("len", 36)]
+
+ def extract_padding(self, s):
+ return "", s
+
+
+_OSPF_Router_LSA_types = {1: "p2p",
+ 2: "transit",
+ 3: "stub",
+ 4: "virtual"}
+
+
+class OSPF_Link(Packet):
+ name = "OSPF Link"
+ fields_desc = [IPField("id", "192.168.0.0"),
+ IPField("data", "255.255.255.0"),
+ ByteEnumField("type", 3, _OSPF_Router_LSA_types),
+ ByteField("toscount", 0),
+ ShortField("metric", 10),
+ # TODO: define correct conditions
+ ConditionalField(ByteField("tos", 0), lambda pkt: False),
+ ConditionalField(ByteField("reserved", 0), lambda pkt: False),
+ ConditionalField(ShortField("tosmetric", 0), lambda pkt: False)]
+
+ def extract_padding(self, s):
+ return "", s
+
+
+def _LSAGuessPayloadClass(p, **kargs):
+ """ Guess the correct LSA class for a given payload """
+ # This is heavily based on scapy-cdp.py by Nicolas Bareil and Arnaud Ebalard
+ # XXX: This only works if all payload
+ cls = conf.raw_layer
+ if len(p) >= 4:
+ typ = struct.unpack("!B", p[3])[0]
+ clsname = _OSPF_LSclasses.get(typ, "Raw")
+ cls = globals()[clsname]
+ return cls(p, **kargs)
+
+
+class OSPF_BaseLSA(Packet):
+ """ An abstract base class for Link State Advertisements """
+
+ def post_build(self, p, pay):
+ length = self.len
+ if length is None:
+ length = len(p)
+ p = p[:18] + struct.pack("!H", length) + p[20:]
+ if self.chksum is None:
+ chksum = ospf_lsa_checksum(p)
+ p = p[:16] + chksum + p[18:]
+ return p # p+pay?
+
+ def extract_padding(self, s):
+ length = self.len
+ return "", s
+
+
+class OSPF_Router_LSA(OSPF_BaseLSA):
+ name = "OSPF Router LSA"
+ fields_desc = [ShortField("age", 1),
+ OSPFOptionsField(),
+ ByteField("type", 1),
+ IPField("id", "1.1.1.1"),
+ IPField("adrouter", "1.1.1.1"),
+ XIntField("seq", 0x80000001),
+ XShortField("chksum", None),
+ ShortField("len", None),
+ FlagsField("flags", 0, 8, ["B", "E", "V", "W", "Nt"]),
+ ByteField("reserved", 0),
+ FieldLenField("linkcount", None, count_of="linklist"),
+ PacketListField("linklist", [], OSPF_Link,
+ count_from=lambda pkt: pkt.linkcount,
+ length_from=lambda pkt: pkt.linkcount * 12)]
+
+
+class OSPF_Network_LSA(OSPF_BaseLSA):
+ name = "OSPF Network LSA"
+ fields_desc = [ShortField("age", 1),
+ OSPFOptionsField(),
+ ByteField("type", 2),
+ IPField("id", "192.168.0.0"),
+ IPField("adrouter", "1.1.1.1"),
+ XIntField("seq", 0x80000001),
+ XShortField("chksum", None),
+ ShortField("len", None),
+ IPField("mask", "255.255.255.0"),
+ FieldListField("routerlist", [], IPField("", "1.1.1.1"),
+ length_from=lambda pkt: pkt.len - 24)]
+
+
+class OSPF_SummaryIP_LSA(OSPF_BaseLSA):
+ name = "OSPF Summary LSA (IP Network)"
+ fields_desc = [ShortField("age", 1),
+ OSPFOptionsField(),
+ ByteField("type", 3),
+ IPField("id", "192.168.0.0"),
+ IPField("adrouter", "1.1.1.1"),
+ XIntField("seq", 0x80000001),
+ XShortField("chksum", None),
+ ShortField("len", None),
+ IPField("mask", "255.255.255.0"),
+ ByteField("reserved", 0),
+ X3BytesField("metric", 10),
+ # TODO: Define correct conditions
+ ConditionalField(ByteField("tos", 0), lambda pkt:False),
+ ConditionalField(X3BytesField("tosmetric", 0), lambda pkt:False)]
+
+
+class OSPF_SummaryASBR_LSA(OSPF_SummaryIP_LSA):
+ name = "OSPF Summary LSA (AS Boundary Router)"
+ type = 4
+ id = "2.2.2.2"
+ mask = "0.0.0.0"
+ metric = 20
+
+
+class OSPF_External_LSA(OSPF_BaseLSA):
+ name = "OSPF External LSA (ASBR)"
+ fields_desc = [ShortField("age", 1),
+ OSPFOptionsField(),
+ ByteField("type", 5),
+ IPField("id", "192.168.0.0"),
+ IPField("adrouter", "2.2.2.2"),
+ XIntField("seq", 0x80000001),
+ XShortField("chksum", None),
+ ShortField("len", None),
+ IPField("mask", "255.255.255.0"),
+ FlagsField("ebit", 0, 1, ["E"]),
+ BitField("reserved", 0, 7),
+ X3BytesField("metric", 20),
+ IPField("fwdaddr", "0.0.0.0"),
+ XIntField("tag", 0),
+ # TODO: Define correct conditions
+ ConditionalField(ByteField("tos", 0), lambda pkt:False),
+ ConditionalField(X3BytesField("tosmetric", 0), lambda pkt:False)]
+
+
+class OSPF_NSSA_External_LSA(OSPF_External_LSA):
+ name = "OSPF NSSA External LSA"
+ type = 7
+
+
+class OSPF_DBDesc(Packet):
+ name = "OSPF Database Description"
+ fields_desc = [ShortField("mtu", 1500),
+ OSPFOptionsField(),
+ FlagsField("dbdescr", 0, 8, ["MS", "M", "I", "R", "4", "3", "2", "1"]),
+ IntField("ddseq", 1),
+ PacketListField("lsaheaders", None, OSPF_LSA_Hdr,
+ count_from = lambda pkt: None,
+ length_from = lambda pkt: pkt.underlayer.len - 24 - 8)]
+
+ def guess_payload_class(self, payload):
+ # check presence of LLS data block flag
+ if self.options & 0x10 == 0x10:
+ return OSPF_LLS_Hdr
+ else:
+ return Packet.guess_payload_class(self, payload)
+
+
+class OSPF_LSReq_Item(Packet):
+ name = "OSPF Link State Request (item)"
+ fields_desc = [IntEnumField("type", 1, _OSPF_LStypes),
+ IPField("id", "1.1.1.1"),
+ IPField("adrouter", "1.1.1.1")]
+
+ def extract_padding(self, s):
+ return "", s
+
+
+class OSPF_LSReq(Packet):
+ name = "OSPF Link State Request (container)"
+ fields_desc = [PacketListField("requests", None, OSPF_LSReq_Item,
+ count_from = lambda pkt:None,
+ length_from = lambda pkt:pkt.underlayer.len - 24)]
+
+
+class OSPF_LSUpd(Packet):
+ name = "OSPF Link State Update"
+ fields_desc = [FieldLenField("lsacount", None, fmt="!I", count_of="lsalist"),
+ PacketListField("lsalist", [], _LSAGuessPayloadClass,
+ count_from = lambda pkt: pkt.lsacount,
+ length_from = lambda pkt: pkt.underlayer.len - 24)]
+
+
+class OSPF_LSAck(Packet):
+ name = "OSPF Link State Acknowledgement"
+ fields_desc = [PacketListField("lsaheaders", None, OSPF_LSA_Hdr,
+ count_from = lambda pkt: None,
+ length_from = lambda pkt: pkt.underlayer.len - 24)]
+
+ def answers(self, other):
+ if isinstance(other, OSPF_LSUpd):
+ for reqLSA in other.lsalist:
+ for ackLSA in self.lsaheaders:
+ if (reqLSA.type == ackLSA.type and
+ reqLSA.seq == ackLSA.seq):
+ return 1
+ return 0
+
+
+#------------------------------------------------------------------------------
+# OSPFv3
+#------------------------------------------------------------------------------
+# TODO: Add length_from / adjust functionality to IP6Field and remove this class
+class OspfIP6Field(StrField, IP6Field):
+ """
+ Special IP6Field for prefix fields in OSPFv3 LSAs
+ """
+
+ def __init__(self, name, default, length=None, length_from=None):
+ StrField.__init__(self, name, default)
+ self.length_from = length_from
+ if length is not None:
+ self.length_from = lambda pkt, length = length: length
+
+ def any2i(self, pkt, x):
+ return IP6Field.any2i(self, pkt, x)
+
+ def i2repr(self, pkt, x):
+ return IP6Field.i2repr(self, pkt, x)
+
+ def h2i(self, pkt, x):
+ return IP6Field.h2i(self, pkt, x)
+
+ def i2m(self, pkt, x):
+ x = inet_pton(socket.AF_INET6, x)
+ l = self.length_from(pkt)
+ l = self.prefixlen_to_bytelen(l)
+
+ return x[:l]
+
+ def m2i(self, pkt, x):
+ l = self.length_from(pkt)
+
+ prefixlen = self.prefixlen_to_bytelen(l)
+ if l > 128:
+ warning("OspfIP6Field: Prefix length is > 128. Dissection of this packet will fail")
+ else:
+ pad = "\x00" * (16 - prefixlen)
+ x += pad
+
+ return inet_ntop(socket.AF_INET6, x)
+
+ def prefixlen_to_bytelen(self, l):
+ if l <= 32:
+ return 4
+ elif l <= 64:
+ return 8
+ elif l <= 96:
+ return 12
+ else:
+ return 16
+
+ def i2len(self, pkt, x):
+ l = self.length_from(pkt)
+ l = self.prefixlen_to_bytelen(l)
+
+ return l
+
+ def getfield(self, pkt, s):
+ l = self.length_from(pkt)
+ l = self.prefixlen_to_bytelen(l)
+
+ return s[l:], self.m2i(pkt, s[:l])
+
+
+class OSPFv3_Hdr(Packet):
+ name = "OSPFv3 Header"
+ fields_desc = [ByteField("version", 3),
+ ByteEnumField("type", 1, _OSPF_types),
+ ShortField("len", None),
+ IPField("src", "1.1.1.1"),
+ IPField("area", "0.0.0.0"),
+ XShortField("chksum", None),
+ ByteField("instance", 0),
+ ByteField("reserved", 0)]
+
+ def post_build(self, p, pay):
+ p += pay
+ l = self.len
+
+ if l is None:
+ l = len(p)
+ p = p[:2] + struct.pack("!H", l) + p[4:]
+
+ if self.chksum is None:
+ chksum = in6_chksum(89, self.underlayer, p)
+ p = p[:12] + chr(chksum >> 8) + chr(chksum & 0xff) + p[14:]
+
+ return p
+
+
+class OSPFv3OptionsField(FlagsField):
+
+ def __init__(self, name="options", default=0, size=24,
+ names=["V6", "E", "MC", "N", "R", "DC", "AF", "L", "I", "F"]):
+ FlagsField.__init__(self, name, default, size, names)
+
+
+class OSPFv3_Hello(Packet):
+ name = "OSPFv3 Hello"
+ fields_desc = [IntField("intid", 0),
+ ByteField("prio", 1),
+ OSPFv3OptionsField(),
+ ShortField("hellointerval", 10),
+ ShortField("deadinterval", 40),
+ IPField("router", "0.0.0.0"),
+ IPField("backup", "0.0.0.0"),
+ FieldListField("neighbors", [], IPField("", "0.0.0.0"),
+ length_from=lambda pkt: (pkt.underlayer.len - 36))]
+
+
+_OSPFv3_LStypes = {0x2001: "router",
+ 0x2002: "network",
+ 0x2003: "interAreaPrefix",
+ 0x2004: "interAreaRouter",
+ 0x4005: "asExternal",
+ 0x2007: "type7",
+ 0x0008: "link",
+ 0x2009: "intraAreaPrefix"}
+
+_OSPFv3_LSclasses = {0x2001: "OSPFv3_Router_LSA",
+ 0x2002: "OSPFv3_Network_LSA",
+ 0x2003: "OSPFv3_Inter_Area_Prefix_LSA",
+ 0x2004: "OSPFv3_Inter_Area_Router_LSA",
+ 0x4005: "OSPFv3_AS_External_LSA",
+ 0x2007: "OSPFv3_Type_7_LSA",
+ 0x0008: "OSPFv3_Link_LSA",
+ 0x2009: "OSPFv3_Intra_Area_Prefix_LSA"}
+
+
+class OSPFv3_LSA_Hdr(Packet):
+ name = "OSPFv3 LSA Header"
+ fields_desc = [ShortField("age", 1),
+ ShortEnumField("type", 0x2001, _OSPFv3_LStypes),
+ IPField("id", "0.0.0.0"),
+ IPField("adrouter", "1.1.1.1"),
+ XIntField("seq", 0x80000001),
+ XShortField("chksum", 0),
+ ShortField("len", 36)]
+
+ def extract_padding(self, s):
+ return "", s
+
+
+def _OSPFv3_LSAGuessPayloadClass(p, **kargs):
+ """ Guess the correct OSPFv3 LSA class for a given payload """
+
+ cls = conf.raw_layer
+
+ if len(p) >= 6:
+ typ = struct.unpack("!H", p[2:4])[0]
+ clsname = _OSPFv3_LSclasses.get(typ, "Raw")
+ cls = globals()[clsname]
+
+ return cls(p, **kargs)
+
+
+_OSPFv3_Router_LSA_types = {1: "p2p",
+ 2: "transit",
+ 3: "reserved",
+ 4: "virtual"}
+
+
+class OSPFv3_Link(Packet):
+ name = "OSPFv3 Link"
+ fields_desc = [ByteEnumField("type", 1, _OSPFv3_Router_LSA_types),
+ ByteField("reserved", 0),
+ ShortField("metric", 10),
+ IntField("intid", 0),
+ IntField("neighintid", 0),
+ IPField("neighbor", "2.2.2.2")]
+
+ def extract_padding(self, s):
+ return "", s
+
+
+class OSPFv3_Router_LSA(OSPF_BaseLSA):
+ name = "OSPFv3 Router LSA"
+ fields_desc = [ShortField("age", 1),
+ ShortEnumField("type", 0x2001, _OSPFv3_LStypes),
+ IPField("id", "0.0.0.0"),
+ IPField("adrouter", "1.1.1.1"),
+ XIntField("seq", 0x80000001),
+ XShortField("chksum", None),
+ ShortField("len", None),
+ FlagsField("flags", 0, 8, ["B", "E", "V", "W"]),
+ OSPFv3OptionsField(),
+ PacketListField("linklist", [], OSPFv3_Link,
+ length_from=lambda pkt:pkt.len - 24)]
+
+
+class OSPFv3_Network_LSA(OSPF_BaseLSA):
+ name = "OSPFv3 Network LSA"
+ fields_desc = [ShortField("age", 1),
+ ShortEnumField("type", 0x2002, _OSPFv3_LStypes),
+ IPField("id", "0.0.0.0"),
+ IPField("adrouter", "1.1.1.1"),
+ XIntField("seq", 0x80000001),
+ XShortField("chksum", None),
+ ShortField("len", None),
+ ByteField("reserved", 0),
+ OSPFv3OptionsField(),
+ FieldListField("routerlist", [], IPField("", "0.0.0.1"),
+ length_from=lambda pkt: pkt.len - 24)]
+
+
+class OSPFv3PrefixOptionsField(FlagsField):
+
+ def __init__(self, name="prefixoptions", default=0, size=8,
+ names=["NU", "LA", "MC", "P"]):
+ FlagsField.__init__(self, name, default, size, names)
+
+
+class OSPFv3_Inter_Area_Prefix_LSA(OSPF_BaseLSA):
+ name = "OSPFv3 Inter Area Prefix LSA"
+ fields_desc = [ShortField("age", 1),
+ ShortEnumField("type", 0x2003, _OSPFv3_LStypes),
+ IPField("id", "0.0.0.0"),
+ IPField("adrouter", "1.1.1.1"),
+ XIntField("seq", 0x80000001),
+ XShortField("chksum", None),
+ ShortField("len", None),
+ ByteField("reserved", 0),
+ X3BytesField("metric", 10),
+ ByteField("prefixlen", 64),
+ OSPFv3PrefixOptionsField(),
+ ShortField("reserved2", 0),
+ OspfIP6Field("prefix", "2001:db8:0:42::", length_from=lambda pkt: pkt.prefixlen)]
+
+
+class OSPFv3_Inter_Area_Router_LSA(OSPF_BaseLSA):
+ name = "OSPFv3 Inter Area Router LSA"
+ fields_desc = [ShortField("age", 1),
+ ShortEnumField("type", 0x2004, _OSPFv3_LStypes),
+ IPField("id", "0.0.0.0"),
+ IPField("adrouter", "1.1.1.1"),
+ XIntField("seq", 0x80000001),
+ XShortField("chksum", None),
+ ShortField("len", None),
+ ByteField("reserved", 0),
+ X3BytesField("metric", 1),
+ IPField("router", "2.2.2.2")]
+
+
+class OSPFv3_AS_External_LSA(OSPF_BaseLSA):
+ name = "OSPFv3 AS External LSA"
+ fields_desc = [ShortField("age", 1),
+ ShortEnumField("type", 0x4005, _OSPFv3_LStypes),
+ IPField("id", "0.0.0.0"),
+ IPField("adrouter", "1.1.1.1"),
+ XIntField("seq", 0x80000001),
+ XShortField("chksum", None),
+ ShortField("len", None),
+ FlagsField("flags", 0, 8, ["T", "F", "E"]),
+ X3BytesField("metric", 20),
+ ByteField("prefixlen", 64),
+ OSPFv3PrefixOptionsField(),
+ ShortEnumField("reflstype", 0, _OSPFv3_LStypes),
+ OspfIP6Field("prefix", "2001:db8:0:42::", length_from=lambda pkt: pkt.prefixlen),
+ ConditionalField(IP6Field("fwaddr", "::"), lambda pkt: pkt.flags & 0x02 == 0x02),
+ ConditionalField(IntField("tag", 0), lambda pkt: pkt.flags & 0x01 == 0x01),
+ ConditionalField(IPField("reflsid", 0), lambda pkt: pkt.reflstype != 0)]
+
+
+class OSPFv3_Type_7_LSA(OSPFv3_AS_External_LSA):
+ name = "OSPFv3 Type 7 LSA"
+ type = 0x2007
+
+
+class OSPFv3_Prefix_Item(Packet):
+ name = "OSPFv3 Link Prefix Item"
+ fields_desc = [ByteField("prefixlen", 64),
+ OSPFv3PrefixOptionsField(),
+ ShortField("metric", 10),
+ OspfIP6Field("prefix", "2001:db8:0:42::", length_from=lambda pkt: pkt.prefixlen)]
+
+ def extract_padding(self, s):
+ return "", s
+
+
+class OSPFv3_Link_LSA(OSPF_BaseLSA):
+ name = "OSPFv3 Link LSA"
+ fields_desc = [ShortField("age", 1),
+ ShortEnumField("type", 0x0008, _OSPFv3_LStypes),
+ IPField("id", "0.0.0.0"),
+ IPField("adrouter", "1.1.1.1"),
+ XIntField("seq", 0x80000001),
+ XShortField("chksum", None),
+ ShortField("len", None),
+ ByteField("prio", 1),
+ OSPFv3OptionsField(),
+ IP6Field("lladdr", "fe80::"),
+ IntField("prefixes", 0),
+ PacketListField("prefixlist", None, OSPFv3_Prefix_Item,
+ count_from = lambda pkt: pkt.prefixes)]
+
+
+class OSPFv3_Intra_Area_Prefix_LSA(OSPF_BaseLSA):
+ name = "OSPFv3 Intra Area Prefix LSA"
+ fields_desc = [ShortField("age", 1),
+ ShortEnumField("type", 0x2009, _OSPFv3_LStypes),
+ IPField("id", "0.0.0.0"),
+ IPField("adrouter", "1.1.1.1"),
+ XIntField("seq", 0x80000001),
+ XShortField("chksum", None),
+ ShortField("len", None),
+ ShortField("prefixes", 0),
+ ShortEnumField("reflstype", 0, _OSPFv3_LStypes),
+ IPField("reflsid", "0.0.0.0"),
+ IPField("refadrouter", "0.0.0.0"),
+ PacketListField("prefixlist", None, OSPFv3_Prefix_Item,
+ count_from = lambda pkt: pkt.prefixes)]
+
+
+class OSPFv3_DBDesc(Packet):
+ name = "OSPFv3 Database Description"
+ fields_desc = [ByteField("reserved", 0),
+ OSPFv3OptionsField(),
+ ShortField("mtu", 1500),
+ ByteField("reserved2", 0),
+ FlagsField("dbdescr", 0, 8, ["MS", "M", "I", "R"]),
+ IntField("ddseq", 1),
+ PacketListField("lsaheaders", None, OSPFv3_LSA_Hdr,
+ count_from = lambda pkt:None,
+ length_from = lambda pkt:pkt.underlayer.len - 28)]
+
+
+class OSPFv3_LSReq_Item(Packet):
+ name = "OSPFv3 Link State Request (item)"
+ fields_desc = [ShortField("reserved", 0),
+ ShortEnumField("type", 0x2001, _OSPFv3_LStypes),
+ IPField("id", "1.1.1.1"),
+ IPField("adrouter", "1.1.1.1")]
+
+ def extract_padding(self, s):
+ return "", s
+
+
+class OSPFv3_LSReq(Packet):
+ name = "OSPFv3 Link State Request (container)"
+ fields_desc = [PacketListField("requests", None, OSPFv3_LSReq_Item,
+ count_from = lambda pkt:None,
+ length_from = lambda pkt:pkt.underlayer.len - 16)]
+
+
+class OSPFv3_LSUpd(Packet):
+ name = "OSPFv3 Link State Update"
+ fields_desc = [FieldLenField("lsacount", None, fmt="!I", count_of="lsalist"),
+ PacketListField("lsalist", [], _OSPFv3_LSAGuessPayloadClass,
+ count_from = lambda pkt:pkt.lsacount,
+ length_from = lambda pkt:pkt.underlayer.len - 16)]
+
+
+class OSPFv3_LSAck(Packet):
+ name = "OSPFv3 Link State Acknowledgement"
+ fields_desc = [PacketListField("lsaheaders", None, OSPFv3_LSA_Hdr,
+ count_from = lambda pkt:None,
+ length_from = lambda pkt:pkt.underlayer.len - 16)]
+
+
+bind_layers(IP, OSPF_Hdr, proto=89)
+bind_layers(OSPF_Hdr, OSPF_Hello, type=1)
+bind_layers(OSPF_Hdr, OSPF_DBDesc, type=2)
+bind_layers(OSPF_Hdr, OSPF_LSReq, type=3)
+bind_layers(OSPF_Hdr, OSPF_LSUpd, type=4)
+bind_layers(OSPF_Hdr, OSPF_LSAck, type=5)
+
+bind_layers(IPv6, OSPFv3_Hdr, nh=89)
+bind_layers(OSPFv3_Hdr, OSPFv3_Hello, type=1)
+bind_layers(OSPFv3_Hdr, OSPFv3_DBDesc, type=2)
+bind_layers(OSPFv3_Hdr, OSPFv3_LSReq, type=3)
+bind_layers(OSPFv3_Hdr, OSPFv3_LSUpd, type=4)
+bind_layers(OSPFv3_Hdr, OSPFv3_LSAck, type=5)
+
+
+if __name__ == "__main__":
+ interact(mydict=globals(), mybanner="OSPF extension %s" % EXT_VERSION)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ppi.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ppi.py
new file mode 100644
index 00000000..f4364096
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ppi.py
@@ -0,0 +1,86 @@
+## This file is (hopefully) part of Scapy
+## See http://www.secdev.org/projects/scapy for more information
+## <jellch@harris.com>
+## This program is published under a GPLv2 license
+
+# scapy.contrib.description = PPI
+# scapy.contrib.status = loads
+
+
+"""
+PPI (Per-Packet Information).
+"""
+import logging,struct
+from scapy.config import conf
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.l2 import Ether
+from scapy.layers.dot11 import Dot11
+
+# Dictionary to map the TLV type to the class name of a sub-packet
+_ppi_types = {}
+def addPPIType(id, value):
+ _ppi_types[id] = value
+def getPPIType(id, default="default"):
+ return _ppi_types.get(id, _ppi_types.get(default, None))
+
+
+# Default PPI Field Header
+class PPIGenericFldHdr(Packet):
+ name = "PPI Field Header"
+ fields_desc = [ LEShortField('pfh_type', 0),
+ FieldLenField('pfh_length', None, length_of="value", fmt='<H', adjust=lambda p,x:x+4),
+ StrLenField("value", "", length_from=lambda p:p.pfh_length) ]
+
+ def extract_padding(self, p):
+ return "",p
+
+def _PPIGuessPayloadClass(p, **kargs):
+ """ This function tells the PacketListField how it should extract the
+ TLVs from the payload. We pass cls only the length string
+ pfh_len says it needs. If a payload is returned, that means
+ part of the sting was unused. This converts to a Raw layer, and
+ the remainder of p is added as Raw's payload. If there is no
+ payload, the remainder of p is added as out's payload.
+ """
+ if len(p) >= 4:
+ t,pfh_len = struct.unpack("<HH", p[:4])
+ # Find out if the value t is in the dict _ppi_types.
+ # If not, return the default TLV class
+ cls = getPPIType(t, "default")
+ pfh_len += 4
+ out = cls(p[:pfh_len], **kargs)
+ if (out.payload):
+ out.payload = conf.raw_layer(out.payload.load)
+ if (len(p) > pfh_len):
+ out.payload.payload = conf.padding_layer(p[pfh_len:])
+ elif (len(p) > pfh_len):
+ out.payload = conf.padding_layer(p[pfh_len:])
+
+ else:
+ out = conf.raw_layer(p, **kargs)
+ return out
+
+
+
+
+class PPI(Packet):
+ name = "PPI Packet Header"
+ fields_desc = [ ByteField('pph_version', 0),
+ ByteField('pph_flags', 0),
+ FieldLenField('pph_len', None, length_of="PPIFieldHeaders", fmt="<H", adjust=lambda p,x:x+8 ),
+ LEIntField('dlt', None),
+ PacketListField("PPIFieldHeaders", [], _PPIGuessPayloadClass, length_from=lambda p:p.pph_len-8,) ]
+ def guess_payload_class(self,payload):
+ return conf.l2types.get(self.dlt, Packet.guess_payload_class(self, payload))
+
+#Register PPI
+addPPIType("default", PPIGenericFldHdr)
+
+conf.l2types.register(192, PPI)
+conf.l2types.register_num2layer(192, PPI)
+
+bind_layers(PPI, Dot11, dlt=conf.l2types.get(Dot11))
+bind_layers(Dot11, PPI)
+bind_layers(PPI, Ether, dlt=conf.l2types.get(Ether))
+bind_layers(Dot11, Ether)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ppi_cace.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ppi_cace.py
new file mode 100644
index 00000000..ba2c4abf
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ppi_cace.py
@@ -0,0 +1,87 @@
+## This file is (hopefully) part of Scapy
+## See http://www.secdev.org/projects/scapy for more information
+## <jellch@harris.com>
+## This program is published under a GPLv2 license
+
+# scapy.contrib.description = PPI CACE
+# scapy.contrib.status = loads
+
+"""
+CACE PPI types
+"""
+import logging,struct
+from scapy.config import conf
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.l2 import Ether
+from scapy.layers.dot11 import Dot11
+from scapy.contrib.ppi import *
+
+PPI_DOT11COMMON = 2
+PPI_DOT11NMAC = 3
+PPI_DOT11NMACPHY = 4
+PPI_SPECTRUMMAP = 5
+PPI_PROCESSINFO = 6
+PPI_CAPTUREINFO = 7
+PPI_AGGREGATION = 8
+PPI_DOT3 = 9
+
+# PPI 802.11 Common Field Header Fields
+class dBmByteField(Field):
+ def __init__(self, name, default):
+ Field.__init__(self, name, default, "b")
+ def i2repr(self, pkt, val):
+ if (val != None):
+ val = "%4d dBm" % val
+ return val
+
+class PPITSFTField(LELongField):
+ def i2h(self, pkt, val):
+ flags = 0
+ if (pkt):
+ flags = pkt.getfieldval("Pkt_Flags")
+ if not flags:
+ flags = 0
+ if (flags & 0x02):
+ scale = 1e-3
+ else:
+ scale = 1e-6
+ tout = scale * float(val)
+ return tout
+ def h2i(self, pkt, val):
+ scale = 1e6
+ if pkt:
+ flags = pkt.getfieldval("Pkt_Flags")
+ if flags:
+ if (flags & 0x02):
+ scale = 1e3
+ tout = int((scale * val) + 0.5)
+ return tout
+
+_PPIDot11CommonChFlags = ['','','','','Turbo','CCK','OFDM','2GHz','5GHz',
+ 'PassiveOnly','Dynamic CCK-OFDM','GSFK']
+
+_PPIDot11CommonPktFlags = ['FCS','TSFT_ms','FCS_Invalid','PHY_Error']
+
+# PPI 802.11 Common Field Header
+class Dot11Common(Packet):
+ name = "PPI 802.11-Common"
+ fields_desc = [ LEShortField('pfh_type',PPI_DOT11COMMON),
+ LEShortField('pfh_length', 20),
+ PPITSFTField('TSF_Timer', 0),
+ FlagsField('Pkt_Flags',0, -16, _PPIDot11CommonPktFlags),
+ LEShortField('Rate',0),
+ LEShortField('Ch_Freq',0),
+ FlagsField('Ch_Flags', 0, -16, _PPIDot11CommonChFlags),
+ ByteField('FHSS_Hop',0),
+ ByteField('FHSS_Pat',0),
+ dBmByteField('Antsignal',-128),
+ dBmByteField('Antnoise',-128)]
+
+ def extract_padding(self, p):
+ return "",p
+#Hopefully other CACE defined types will be added here.
+
+#Add the dot11common layer to the PPI array
+addPPIType(PPI_DOT11COMMON, Dot11Common)
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ppi_geotag.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ppi_geotag.py
new file mode 100644
index 00000000..19371512
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ppi_geotag.py
@@ -0,0 +1,464 @@
+## This file is (hopefully) part of Scapy
+## See http://www.secdev.org/projects/scapy for more information
+## <jellch@harris.com>
+## This program is published under a GPLv2 license
+
+# scapy.contrib.description = PPI GEOLOCATION
+# scapy.contrib.status = loads
+
+
+"""
+PPI-GEOLOCATION tags
+"""
+import struct
+from scapy.packet import *
+from scapy.fields import *
+from scapy.contrib.ppi import PPIGenericFldHdr,addPPIType
+
+CURR_GEOTAG_VER = 2 #Major revision of specification
+
+PPI_GPS = 30002
+PPI_VECTOR = 30003
+PPI_SENSOR = 30004
+PPI_ANTENNA = 30005
+#The FixedX_Y Fields are used to store fixed point numbers in a variety of fields in the GEOLOCATION-TAGS specification
+class Fixed3_6Field(LEIntField):
+ def i2h(self, pkt, x):
+ if x is not None:
+ if (x < 0):
+ warning("Fixed3_6: Internal value too negative: %d" % x)
+ x = 0
+ elif (x > 999999999):
+ warning("Fixed3_6: Internal value too positive: %d" % x)
+ x = 999999999
+ x = x * 1e-6
+ return x
+ def h2i(self, pkt, x):
+ if x is not None:
+ if (x <= -0.5e-6):
+ warning("Fixed3_6: Input value too negative: %.7f" % x)
+ x = 0
+ elif (x >= 999.9999995):
+ warning("Fixed3_6: Input value too positive: %.7f" % x)
+ x = 999.999999
+ x = int(round(x * 1e6))
+ return x
+ def i2m(self, pkt, x):
+ """Convert internal value to machine value"""
+ if x is None:
+ #Try to return zero if undefined
+ x = self.h2i(pkt, 0)
+ return x
+
+ def i2repr(self,pkt,x):
+ if x is None:
+ y=0
+ else:
+ y=self.i2h(pkt,x)
+ return "%3.6f"%(y)
+class Fixed3_7Field(LEIntField):
+ def i2h(self, pkt, x):
+ if x is not None:
+ if (x < 0):
+ warning("Fixed3_7: Internal value too negative: %d" % x)
+ x = 0
+ elif (x > 3600000000):
+ warning("Fixed3_7: Internal value too positive: %d" % x)
+ x = 3600000000
+ x = (x - 1800000000) * 1e-7
+ return x
+ def h2i(self, pkt, x):
+ if x is not None:
+ if (x <= -180.00000005):
+ warning("Fixed3_7: Input value too negative: %.8f" % x)
+ x = -180.0
+ elif (x >= 180.00000005):
+ warning("Fixed3_7: Input value too positive: %.8f" % x)
+ x = 180.0
+ x = int(round((x + 180.0) * 1e7))
+ return x
+ def i2m(self, pkt, x):
+ """Convert internal value to machine value"""
+ if x is None:
+ #Try to return zero if undefined
+ x = self.h2i(pkt, 0)
+ return x
+ def i2repr(self,pkt,x):
+ if x is None:
+ y=0
+ else:
+ y=self.i2h(pkt,x)
+ return "%3.7f"%(y)
+
+class Fixed6_4Field(LEIntField):
+ def i2h(self, pkt, x):
+ if x is not None:
+ if (x < 0):
+ warning("Fixed6_4: Internal value too negative: %d" % x)
+ x = 0
+ elif (x > 3600000000):
+ warning("Fixed6_4: Internal value too positive: %d" % x)
+ x = 3600000000
+ x = (x - 1800000000) * 1e-4
+ return x
+ def h2i(self, pkt, x):
+ if x is not None:
+ if (x <= -180000.00005):
+ warning("Fixed6_4: Input value too negative: %.5f" % x)
+ x = -180000.0
+ elif (x >= 180000.00005):
+ warning("Fixed6_4: Input value too positive: %.5f" % x)
+ x = 180000.0
+ x = int(round((x + 180000.0) * 1e4))
+ return x
+ def i2m(self, pkt, x):
+ """Convert internal value to machine value"""
+ if x is None:
+ #Try to return zero if undefined
+ x = self.h2i(pkt, 0)
+ return x
+ def i2repr(self,pkt,x):
+ if x is None:
+ y=0
+ else:
+ y=self.i2h(pkt,x)
+ return "%6.4f"%(y)
+#The GPS timestamp's fractional time counter is stored in a 32-bit unsigned ns counter.
+#The ept field is stored the same way.
+class NSCounter_Field(LEIntField):
+ def i2h(self, pkt, x): #converts nano-seconds to seconds for output
+ if x is not None:
+ if (x < 0):
+ warning("NSCounter_Field: Internal value too negative: %d" % x)
+ x = 0
+ elif (x >= 2**32):
+ warning("NSCounter_Field: Internal value too positive: %d" % x)
+ x = 2**32-1
+ x = (x / 1e9)
+ return x
+ def h2i(self, pkt, x): #converts input in seconds into nano-seconds for storage
+ if x is not None:
+ if (x < 0):
+ warning("NSCounter_Field: Input value too negative: %.10f" % x)
+ x = 0
+ elif (x >= (2**32) / 1e9):
+ warning("NSCounter_Field: Input value too positive: %.10f" % x)
+ x = (2**32-1) / 1e9
+ x = int(round((x * 1e9)))
+ return x
+ def i2repr(self,pkt,x):
+ if x is None:
+ y=0
+ else:
+ y=self.i2h(pkt,x)
+ return "%1.9f"%(y)
+
+class UTCTimeField(IntField):
+ def __init__(self, name, default, epoch=time.gmtime(0), strf="%a, %d %b %Y %H:%M:%S +0000"):
+ IntField.__init__(self, name, default)
+ self.epoch = epoch
+ self.delta = time.mktime(epoch) - time.mktime(time.gmtime(0))
+ self.strf = strf
+ def i2repr(self, pkt, x):
+ if x is None:
+ x = 0
+ x = int(x) + self.delta
+ t = time.strftime(self.strf, time.gmtime(x))
+ return "%s (%d)" % (t, x)
+
+class LETimeField(UTCTimeField,LEIntField):
+ def __init__(self, name, default, epoch=time.gmtime(0), strf="%a, %d %b %Y %H:%M:%S +0000"):
+ LEIntField.__init__(self, name, default)
+ self.epoch = epoch
+ self.delta = time.mktime(epoch) - time.mktime(time.gmtime(0))
+ self.strf = strf
+
+class SignedByteField(Field):
+ def __init__(self, name, default):
+ Field.__init__(self, name, default, "b")
+ def randval(self):
+ return RandSByte()
+
+class XLEShortField(LEShortField,XShortField):
+ def i2repr(self, pkt, x):
+ return XShortField.i2repr(self, pkt, x)
+
+class XLEIntField(LEIntField,XIntField):
+ def i2repr(self, pkt, x):
+ return XIntField.i2repr(self, pkt, x)
+
+class GPSTime_Field(LETimeField):
+ def __init__(self, name, default):
+ return LETimeField.__init__(self, name, default, strf="%a, %d %b %Y %H:%M:%S UTC")
+
+class VectorFlags_Field(XLEIntField):
+    """Represents the VectorFlags field. Handles the RelativeTo sub-field"""
+ _fwdstr = "DefinesForward"
+ _resmask = 0xfffffff8
+ _relmask = 0x6
+ _relnames = ["RelativeToForward", "RelativeToEarth", "RelativeToCurrent", "RelativeToReserved"]
+ _relvals = [0x00, 0x02, 0x04, 0x06]
+ def i2repr(self, pkt, x):
+ if x is None:
+ return str(x)
+ r = []
+ if (x & 0x1):
+ r.append(self._fwdstr)
+ i = (x & self._relmask) >> 1
+ r.append(self._relnames[i])
+ i = x & self._resmask
+ if (i):
+ r.append("ReservedBits:%08X" % i)
+ sout = "+".join(r)
+ return sout
+ def any2i(self, pkt, x):
+ if type(x) is str:
+ r = x.split("+")
+ y = 0
+ for value in r:
+ if (value == self._fwdstr):
+ y |= 0x1
+ elif (value in self._relnames):
+ i = self._relnames.index(value)
+ y &= (~self._relmask)
+ y |= self._relvals[i]
+ else:
+ #logging.warning("Unknown VectorFlags Argument: %s" % value)
+ pass
+ else:
+ y = x
+ #print "any2i: %s --> %s" % (str(x), str(y))
+ return y
+
+class HCSIFlagsField(FlagsField):
+ """ A FlagsField where each bit/flag turns a conditional field on or off.
+ If the value is None when building a packet, i2m() will check the value of
+ every field in self.names. If the field's value is not None, the corresponding
+ flag will be set. """
+ def i2m(self, pkt, val):
+ if val is None:
+ val = 0
+ if (pkt):
+ for i in range(len(self.names)):
+ name = self.names[i][0]
+ value = pkt.getfieldval(name)
+ if value is not None:
+ val |= 1 << i
+ return val
+
+class HCSINullField(StrFixedLenField):
+ def __init__(self, name, default):
+ return StrFixedLenField.__init__(self, name, default, length=0)
+
+class HCSIDescField(StrFixedLenField):
+ def __init__(self, name, default):
+ return StrFixedLenField.__init__(self, name, default, length=32)
+
+class HCSIAppField(StrFixedLenField):
+ def __init__(self, name, default):
+ return StrFixedLenField.__init__(self, name, default, length=60)
+
+def _FlagsList(myfields):
+ flags = []
+ for i in range(32):
+ flags.append("Reserved%02d" % i)
+ for i in myfields.keys():
+ flags[i] = myfields[i]
+ return flags
+
+# Define all geolocation-tag flags lists
+_hcsi_gps_flags = _FlagsList({0:"No Fix Available", 1:"GPS", 2:"Differential GPS",
+ 3:"Pulse Per Second", 4:"Real Time Kinematic",
+ 5:"Float Real Time Kinematic", 6:"Estimated (Dead Reckoning)",
+ 7:"Manual Input", 8:"Simulation"})
+
+#_hcsi_vector_flags = _FlagsList({0:"ForwardFrame", 1:"RotationsAbsoluteXYZ", 5:"OffsetFromGPS_XYZ"})
+#This has been replaced with the VectorFlags_Field class, in order to handle the RelativeTo:subfield
+
+_hcsi_vector_char_flags = _FlagsList({0:"Antenna", 1:"Direction of Travel",
+ 2:"Front of Vehicle", 3:"Angle of Arrival", 4:"Transmitter Position",
+ 8:"GPS Derived", 9:"INS Derived", 10:"Compass Derived",
+ 11:"Acclerometer Derived", 12:"Human Derived"})
+
+_hcsi_antenna_flags = _FlagsList({ 1:"Horizontal Polarization", 2:"Vertical Polarization",
+ 3:"Circular Polarization Left", 4:"Circular Polarization Right",
+ 16:"Electronically Steerable", 17:"Mechanically Steerable"})
+
+""" HCSI PPI Fields are similar to RadioTap. A mask field called "present" specifies if each field
+is present. All other fields are conditional. When dissecting a packet, each field is present if
+"present" has the corresponding bit set. When building a packet, if "present" is None, the mask is
+set to include every field that does not have a value of None. Otherwise, if the mask field is
+not None, only the fields specified by "present" will be added to the packet.
+
+To build each Packet type, build a list of the fields normally, excluding the present bitmask field.
+The code will then construct conditional versions of each field and add the present field.
+See GPS_Fields as an example. """
+
+# Conditional test for all HCSI Fields
+def _HCSITest(pkt, ibit, name):
+ if pkt.present is None:
+ return (pkt.getfieldval(name) is not None)
+ return pkt.present & ibit
+
+# Wrap optional fields in ConditionalField, add HCSIFlagsField
+def _HCSIBuildFields(fields):
+ names = [f.name for f in fields]
+ cond_fields = [ HCSIFlagsField('present', None, -len(names), names)]
+ for i in range(len(names)):
+ ibit = 1 << i
+ seval = "lambda pkt:_HCSITest(pkt,%s,'%s')" % (ibit, names[i])
+ test = eval(seval)
+ cond_fields.append(ConditionalField(fields[i], test))
+ return cond_fields
+
+class HCSIPacket(Packet):
+ name = "PPI HCSI"
+ fields_desc = [ LEShortField('pfh_type', None),
+ LEShortField('pfh_length', None),
+ ByteField('geotag_ver', CURR_GEOTAG_VER),
+ ByteField('geotag_pad', 0),
+ LEShortField('geotag_len', None)]
+ def post_build(self, p, pay):
+ if self.pfh_length is None:
+ l = len(p) - 4
+ sl = struct.pack('<H',l)
+ p = p[:2] + sl + p[4:]
+ if self.geotag_len is None:
+ l_g = len(p) - 4
+ sl_g = struct.pack('<H',l_g)
+ p = p[:6] + sl_g + p[8:]
+ p += pay
+ return p
+ def extract_padding(self, p):
+ return "",p
+
+#GPS Fields
+GPS_Fields = [FlagsField("GPSFlags", None, -32, _hcsi_gps_flags),
+ Fixed3_7Field("Latitude", None),
+ Fixed3_7Field("Longitude", None), Fixed6_4Field("Altitude", None),
+ Fixed6_4Field("Altitude_g", None), GPSTime_Field("GPSTime", None),
+ NSCounter_Field("FractionalTime", None), Fixed3_6Field("eph", None),
+ Fixed3_6Field("epv", None), NSCounter_Field("ept", None),
+ HCSINullField("Reserved10", None), HCSINullField("Reserved11", None),
+ HCSINullField("Reserved12", None), HCSINullField("Reserved13", None),
+ HCSINullField("Reserved14", None), HCSINullField("Reserved15", None),
+ HCSINullField("Reserved16", None), HCSINullField("Reserved17", None),
+ HCSINullField("Reserved18", None), HCSINullField("Reserved19", None),
+ HCSINullField("Reserved20", None), HCSINullField("Reserved21", None),
+ HCSINullField("Reserved22", None), HCSINullField("Reserved23", None),
+ HCSINullField("Reserved24", None), HCSINullField("Reserved25", None),
+ HCSINullField("Reserved26", None), HCSINullField("Reserved27", None),
+ HCSIDescField("DescString", None), XLEIntField("AppId", None),
+ HCSIAppField("AppData", None), HCSINullField("Extended", None)]
+
+class GPS(HCSIPacket):
+ name = "PPI GPS"
+ fields_desc = [ LEShortField('pfh_type', PPI_GPS), #pfh_type
+ LEShortField('pfh_length', None), #pfh_len
+ ByteField('geotag_ver', CURR_GEOTAG_VER), #base_geotag_header.ver
+ ByteField('geotag_pad', 0), #base_geotag_header.pad
+ LEShortField('geotag_len', None)] + _HCSIBuildFields(GPS_Fields)
+
+
+#Vector Fields
+VEC_Fields = [VectorFlags_Field("VectorFlags", None),
+ FlagsField("VectorChars", None, -32, _hcsi_vector_char_flags),
+ Fixed3_6Field("Pitch", None), Fixed3_6Field("Roll", None),
+ Fixed3_6Field("Heading", None), Fixed6_4Field("Off_X", None),
+ Fixed6_4Field("Off_Y", None), Fixed6_4Field("Off_Z", None),
+ HCSINullField("Reserved08", None), HCSINullField("Reserved09", None),
+ HCSINullField("Reserved10", None), HCSINullField("Reserved11", None),
+ HCSINullField("Reserved12", None), HCSINullField("Reserved13", None),
+ HCSINullField("Reserved14", None), HCSINullField("Reserved15", None),
+ Fixed3_6Field("Err_Rot", None), Fixed6_4Field("Err_Off", None),
+ HCSINullField("Reserved18", None), HCSINullField("Reserved19", None),
+ HCSINullField("Reserved20", None), HCSINullField("Reserved21", None),
+ HCSINullField("Reserved22", None), HCSINullField("Reserved23", None),
+ HCSINullField("Reserved24", None), HCSINullField("Reserved25", None),
+ HCSINullField("Reserved26", None), HCSINullField("Reserved27", None),
+ HCSIDescField("DescString", None), XLEIntField("AppId", None),
+ HCSIAppField("AppData", None), HCSINullField("Extended", None)]
+
+class Vector(HCSIPacket):
+ name = "PPI Vector"
+ fields_desc = [ LEShortField('pfh_type', PPI_VECTOR), #pfh_type
+ LEShortField('pfh_length', None), #pfh_len
+ ByteField('geotag_ver', CURR_GEOTAG_VER), #base_geotag_header.ver
+ ByteField('geotag_pad', 0), #base_geotag_header.pad
+ LEShortField('geotag_len', None)] + _HCSIBuildFields(VEC_Fields)
+
+#Sensor Fields
+# Sensor type codes defined by the PPI-GEOLOCATION specification
+sensor_types= { 1 : "Velocity",
+ 2 : "Acceleration",
+ 3 : "Jerk",
+ 100 : "Rotation",
+ 101 : "Magnetic",
+ 1000: "Temperature",
+ 1001: "Barometer",
+ 1002: "Humidity",
+ 2000: "TDOA_Clock",
+ 2001: "Phase"
+ }
+SENS_Fields = [ LEShortEnumField('SensorType', None, sensor_types),
+ SignedByteField('ScaleFactor', None),
+ Fixed6_4Field('Val_X', None),
+ Fixed6_4Field('Val_Y', None),
+ Fixed6_4Field('Val_Z', None),
+ Fixed6_4Field('Val_T', None),
+ Fixed6_4Field('Val_E', None),
+ HCSINullField("Reserved07", None), HCSINullField("Reserved08", None),
+ HCSINullField("Reserved09", None), HCSINullField("Reserved10", None),
+ HCSINullField("Reserved11", None), HCSINullField("Reserved12", None),
+ HCSINullField("Reserved13", None), HCSINullField("Reserved14", None),
+ HCSINullField("Reserved15", None), HCSINullField("Reserved16", None),
+ HCSINullField("Reserved17", None), HCSINullField("Reserved18", None),
+ HCSINullField("Reserved19", None), HCSINullField("Reserved20", None),
+ HCSINullField("Reserved21", None), HCSINullField("Reserved22", None),
+ HCSINullField("Reserved23", None), HCSINullField("Reserved24", None),
+ HCSINullField("Reserved25", None), HCSINullField("Reserved26", None),
+ HCSINullField("Reserved27", None),
+ HCSIDescField("DescString", None), XLEIntField("AppId", None),
+ HCSIAppField("AppData", None), HCSINullField("Extended", None)]
+
+
+
+class Sensor(HCSIPacket):
+ name = "PPI Sensor"
+ fields_desc = [ LEShortField('pfh_type', PPI_SENSOR), #pfh_type
+ LEShortField('pfh_length', None), #pfh_len
+ ByteField('geotag_ver', CURR_GEOTAG_VER ), #base_geotag_header.ver
+ ByteField('geotag_pad', 0), #base_geotag_header.pad
+ LEShortField('geotag_len', None)] + _HCSIBuildFields(SENS_Fields)
+
+# HCSIAntenna Fields
+ANT_Fields = [FlagsField("AntennaFlags", None, -32, _hcsi_antenna_flags),
+ ByteField("Gain", None),
+ Fixed3_6Field("HorizBw", None), Fixed3_6Field("VertBw", None),
+ Fixed3_6Field("PrecisionGain",None), XLEShortField("BeamID", None),
+ HCSINullField("Reserved06", None), HCSINullField("Reserved07", None),
+ HCSINullField("Reserved08", None), HCSINullField("Reserved09", None),
+ HCSINullField("Reserved10", None), HCSINullField("Reserved11", None),
+ HCSINullField("Reserved12", None), HCSINullField("Reserved13", None),
+ HCSINullField("Reserved14", None), HCSINullField("Reserved15", None),
+ HCSINullField("Reserved16", None), HCSINullField("Reserved17", None),
+ HCSINullField("Reserved18", None), HCSINullField("Reserved19", None),
+ HCSINullField("Reserved20", None), HCSINullField("Reserved21", None),
+ HCSINullField("Reserved22", None), HCSINullField("Reserved23", None),
+ HCSINullField("Reserved24", None), HCSINullField("Reserved25", None),
+ HCSIDescField("SerialNumber", None), HCSIDescField("ModelName", None),
+ HCSIDescField("DescString", None), XLEIntField("AppId", None),
+ HCSIAppField("AppData", None), HCSINullField("Extended", None)]
+
+class Antenna(HCSIPacket):
+ name = "PPI Antenna"
+ fields_desc = [ LEShortField('pfh_type', PPI_ANTENNA), #pfh_type
+ LEShortField('pfh_length', None), #pfh_len
+ ByteField('geotag_ver', CURR_GEOTAG_VER), #base_geotag_header.ver
+ ByteField('geotag_pad', 0), #base_geotag_header.pad
+ LEShortField('geotag_len', None)] + _HCSIBuildFields(ANT_Fields)
+
+addPPIType(PPI_GPS, GPS)
+addPPIType(PPI_VECTOR, Vector)
+addPPIType(PPI_SENSOR, Sensor)
+addPPIType(PPI_ANTENNA,Antenna)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ripng.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ripng.py
new file mode 100644
index 00000000..47e17bc4
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ripng.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# http://trac.secdev.org/scapy/ticket/301
+
+# scapy.contrib.description = RIPng
+# scapy.contrib.status = loads
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.inet import UDP
+from scapy.layers.inet6 import *
+
+class RIPng(Packet):
+ name = "RIPng header"
+ fields_desc = [
+ ByteEnumField("cmd", 1, {1 : "req", 2 : "resp"}),
+ ByteField("ver", 1),
+ ShortField("null", 0),
+ ]
+
+class RIPngEntry(Packet):
+ name = "RIPng entry"
+ fields_desc = [
+ ConditionalField(IP6Field("prefix", "::"),
+ lambda pkt: pkt.metric != 255),
+ ConditionalField(IP6Field("nexthop", "::"),
+ lambda pkt: pkt.metric == 255),
+ ShortField("routetag", 0),
+ ByteField("prefixlen", 0),
+ ByteEnumField("metric", 1, {16 : "Unreach",
+ 255 : "next-hop entry"})
+ ]
+
+bind_layers(UDP, RIPng, sport=521, dport=521)
+bind_layers(RIPng, RIPngEntry)
+bind_layers(RIPngEntry, RIPngEntry)
+
+if __name__ == "__main__":
+ from scapy.main import interact
+ interact(mydict=globals(), mybanner="RIPng")
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/rsvp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/rsvp.py
new file mode 100644
index 00000000..c9d4ebee
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/rsvp.py
@@ -0,0 +1,188 @@
+## RSVP layer
+
+# http://trac.secdev.org/scapy/ticket/197
+
+# scapy.contrib.description = RSVP
+# scapy.contrib.status = loads
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.inet import IP
+
+rsvpmsgtypes = { 0x01 : "Path",
+ 0x02 : "Reservation request",
+ 0x03 : "Path error",
+ 0x04 : "Reservation request error",
+ 0x05 : "Path teardown",
+ 0x06 : "Reservation teardown",
+ 0x07 : "Reservation request acknowledgment"
+}
+
+class RSVP(Packet):
+ name = "RSVP"
+ fields_desc = [ BitField("Version",1,4),
+ BitField("Flags",1,4),
+ ByteEnumField("Class",0x01, rsvpmsgtypes),
+ XShortField("chksum", None),
+ ByteField("TTL",1),
+ XByteField("dataofs", 0),
+ ShortField("Length",None)]
+ def post_build(self, p, pay):
+ p += pay
+ if self.Length is None:
+ l = len(p)
+ p = p[:6]+chr((l>>8)&0xff)+chr(l&0xff)+p[8:]
+ if self.chksum is None:
+ ck = checksum(p)
+ p = p[:2]+chr(ck>>8)+chr(ck&0xff)+p[4:]
+ return p
+
+rsvptypes = { 0x01 : "Session",
+ 0x03 : "HOP",
+ 0x04 : "INTEGRITY",
+ 0x05 : "TIME_VALUES",
+ 0x06 : "ERROR_SPEC",
+ 0x07 : "SCOPE",
+ 0x08 : "STYLE",
+ 0x09 : "FLOWSPEC",
+ 0x0A : "FILTER_SPEC",
+ 0x0B : "SENDER_TEMPLATE",
+ 0x0C : "SENDER_TSPEC",
+ 0x0D : "ADSPEC",
+ 0x0E : "POLICY_DATA",
+ 0x0F : "RESV_CONFIRM",
+ 0x10 : "RSVP_LABEL",
+ 0x11 : "HOP_COUNT",
+ 0x12 : "STRICT_SOURCE_ROUTE",
+ 0x13 : "LABEL_REQUEST",
+ 0x14 : "EXPLICIT_ROUTE",
+ 0x15 : "ROUTE_RECORD",
+ 0x16 : "HELLO",
+ 0x17 : "MESSAGE_ID",
+ 0x18 : "MESSAGE_ID_ACK",
+ 0x19 : "MESSAGE_ID_LIST",
+ 0x1E : "DIAGNOSTIC",
+ 0x1F : "ROUTE",
+ 0x20 : "DIAG_RESPONSE",
+ 0x21 : "DIAG_SELECT",
+ 0x22 : "RECOVERY_LABEL",
+ 0x23 : "UPSTREAM_LABEL",
+ 0x24 : "LABEL_SET",
+ 0x25 : "PROTECTION",
+ 0x26 : "PRIMARY PATH ROUTE",
+ 0x2A : "DSBM IP ADDRESS",
+ 0x2B : "SBM_PRIORITY",
+ 0x2C : "DSBM TIMER INTERVALS",
+ 0x2D : "SBM_INFO",
+ 0x32 : "S2L_SUB_LSP",
+ 0x3F : "DETOUR",
+ 0x40 : "CHALLENGE",
+ 0x41 : "DIFF-SERV",
+ 0x42 : "CLASSTYPE",
+ 0x43 : "LSP_REQUIRED_ATTRIBUTES",
+ 0x80 : "NODE_CHAR",
+ 0x81 : "SUGGESTED_LABEL",
+ 0x82 : "ACCEPTABLE_LABEL_SET",
+ 0x83 : "RESTART_CA",
+ 0x84 : "SESSION-OF-INTEREST",
+ 0x85 : "LINK_CAPABILITY",
+ 0x86 : "Capability Object",
+ 0xA1 : "RSVP_HOP_L2",
+ 0xA2 : "LAN_NHOP_L2",
+ 0xA3 : "LAN_NHOP_L3",
+ 0xA4 : "LAN_LOOPBACK",
+ 0xA5 : "TCLASS",
+ 0xC0 : "TUNNEL",
+ 0xC1 : "LSP_TUNNEL_INTERFACE_ID",
+ 0xC2 : "USER_ERROR_SPEC",
+ 0xC3 : "NOTIFY_REQUEST",
+ 0xC4 : "ADMIN-STATUS",
+ 0xC5 : "LSP_ATTRIBUTES",
+ 0xC6 : "ALARM_SPEC",
+ 0xC7 : "ASSOCIATION",
+ 0xC8 : "SECONDARY_EXPLICIT_ROUTE",
+ 0xC9 : "SECONDARY_RECORD_ROUTE",
+ 0xCD : "FAST_REROUTE",
+ 0xCF : "SESSION_ATTRIBUTE",
+ 0xE1 : "DCLASS",
+ 0xE2 : "PACKETCABLE EXTENSIONS",
+ 0xE3 : "ATM_SERVICECLASS",
+ 0xE4 : "CALL_OPS (ASON)",
+ 0xE5 : "GENERALIZED_UNI",
+ 0xE6 : "CALL_ID",
+ 0xE7 : "3GPP2_Object",
+ 0xE8 : "EXCLUDE_ROUTE"
+}
+
+class RSVP_Object(Packet):
+ name = "RSVP_Object"
+ fields_desc = [ ShortField("Length",4),
+ ByteEnumField("Class",0x01, rsvptypes),
+ ByteField("C-Type",1)]
+ def guess_payload_class(self, payload):
+ if self.Class == 0x03:
+ return RSVP_HOP
+ elif self.Class == 0x05:
+ return RSVP_Time
+ elif self.Class == 0x0c:
+ return RSVP_SenderTSPEC
+ elif self.Class == 0x13:
+ return RSVP_LabelReq
+ elif self.Class == 0xCF:
+ return RSVP_SessionAttrb
+ else:
+ return RSVP_Data
+
+
+
+class RSVP_Data(Packet):
+ name = "Data"
+ fields_desc = [StrLenField("Data","",length_from= lambda pkt:pkt.underlayer.Length - 4)]
+ def default_payload_class(self, payload):
+ return RSVP_Object
+
+class RSVP_HOP(Packet):
+ name = "HOP"
+ fields_desc = [ IPField("neighbor","0.0.0.0"),
+ BitField("inface",1,32)]
+ def default_payload_class(self, payload):
+ return RSVP_Object
+
+class RSVP_Time(Packet):
+ name = "Time Val"
+ fields_desc = [ BitField("refresh",1,32)]
+ def default_payload_class(self, payload):
+ return RSVP_Object
+
+class RSVP_SenderTSPEC(Packet):
+ name = "Sender_TSPEC"
+ fields_desc = [ ByteField("Msg_Format",0),
+ ByteField("reserve",0),
+ ShortField("Data_Length",4),
+ ByteField("Srv_hdr",1),
+ ByteField("reserve2",0),
+ ShortField("Srv_Length",4),
+ StrLenField("Tokens","",length_from= lambda pkt:pkt.underlayer.Length - 12) ]
+ def default_payload_class(self, payload):
+ return RSVP_Object
+
+class RSVP_LabelReq(Packet):
+ name = "Lable Req"
+ fields_desc = [ ShortField("reserve",1),
+ ShortField("L3PID",1)]
+ def default_payload_class(self, payload):
+ return RSVP_Object
+
+class RSVP_SessionAttrb(Packet):
+ name = "Session_Attribute"
+ fields_desc = [ ByteField("Setup_priority",1),
+ ByteField("Hold_priority",1),
+ ByteField("flags",1),
+ ByteField("Name_length",1),
+ StrLenField("Name","",length_from= lambda pkt:pkt.underlayer.Length - 8),
+ ]
+ def default_payload_class(self, payload):
+ return RSVP_Object
+
+bind_layers( IP, RSVP, { "proto" : 46} )
+bind_layers( RSVP, RSVP_Object, {})
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/skinny.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/skinny.py
new file mode 100644
index 00000000..47935c9e
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/skinny.py
@@ -0,0 +1,499 @@
+#! /usr/bin/env python
+
+# scapy.contrib.description = Skinny Call Control Protocol (SCCP)
+# scapy.contrib.status = loads
+
+
+#############################################################################
+## ##
+## scapy-skinny.py --- Skinny Call Control Protocol (SCCP) extension ##
+## ##
+## Copyright (C) 2006 Nicolas Bareil <nicolas.bareil@ eads.net> ##
+## EADS/CRC security team ##
+## ##
+## This program is free software; you can redistribute it and/or modify it ##
+## under the terms of the GNU General Public License version 2 as ##
+## published by the Free Software Foundation; version 2. ##
+## ##
+## This program is distributed in the hope that it will be useful, but ##
+## WITHOUT ANY WARRANTY; without even the implied warranty of ##
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ##
+## General Public License for more details. ##
+## ##
+#############################################################################
+
+from scapy.all import *
+import builtins
+
+#####################################################################
+# Helpers and constants
+#####################################################################
+
+skinny_messages_cls = {
+# Station -> Callmanager
+ 0x0000: "SkinnyMessageKeepAlive",
+ 0x0001: "SkinnyMessageRegister",
+ 0x0002: "SkinnyMessageIpPort",
+ 0x0003: "SkinnyMessageKeypadButton",
+ 0x0004: "SkinnyMessageEnblocCall",
+ 0x0005: "SkinnyMessageStimulus",
+ 0x0006: "SkinnyMessageOffHook",
+ 0x0007: "SkinnyMessageOnHook",
+ 0x0008: "SkinnyMessageHookFlash",
+ 0x0009: "SkinnyMessageForwardStatReq",
+ 0x000A: "SkinnyMessageSpeedDialStatReq",
+ 0x000B: "SkinnyMessageLineStatReq",
+ 0x000C: "SkinnyMessageConfigStatReq",
+ 0x000D: "SkinnyMessageTimeDateReq",
+ 0x000E: "SkinnyMessageButtonTemplateReq",
+ 0x000F: "SkinnyMessageVersionReq",
+ 0x0010: "SkinnyMessageCapabilitiesRes",
+ 0x0011: "SkinnyMessageMediaPortList",
+ 0x0012: "SkinnyMessageServerReq",
+ 0x0020: "SkinnyMessageAlarm",
+ 0x0021: "SkinnyMessageMulticastMediaReceptionAck",
+ 0x0022: "SkinnyMessageOpenReceiveChannelAck",
+ 0x0023: "SkinnyMessageConnectionStatisticsRes",
+ 0x0024: "SkinnyMessageOffHookWithCgpn",
+ 0x0025: "SkinnyMessageSoftKeySetReq",
+ 0x0026: "SkinnyMessageSoftKeyEvent",
+ 0x0027: "SkinnyMessageUnregister",
+ 0x0028: "SkinnyMessageSoftKeyTemplateReq",
+ 0x0029: "SkinnyMessageRegisterTokenReq",
+ 0x002A: "SkinnyMessageMediaTransmissionFailure",
+ 0x002B: "SkinnyMessageHeadsetStatus",
+ 0x002C: "SkinnyMessageMediaResourceNotification",
+ 0x002D: "SkinnyMessageRegisterAvailableLines",
+ 0x002E: "SkinnyMessageDeviceToUserData",
+ 0x002F: "SkinnyMessageDeviceToUserDataResponse",
+ 0x0030: "SkinnyMessageUpdateCapabilities",
+ 0x0031: "SkinnyMessageOpenMultiMediaReceiveChannelAck",
+ 0x0032: "SkinnyMessageClearConference",
+ 0x0033: "SkinnyMessageServiceURLStatReq",
+ 0x0034: "SkinnyMessageFeatureStatReq",
+ 0x0035: "SkinnyMessageCreateConferenceRes",
+ 0x0036: "SkinnyMessageDeleteConferenceRes",
+ 0x0037: "SkinnyMessageModifyConferenceRes",
+ 0x0038: "SkinnyMessageAddParticipantRes",
+ 0x0039: "SkinnyMessageAuditConferenceRes",
+ 0x0040: "SkinnyMessageAuditParticipantRes",
+ 0x0041: "SkinnyMessageDeviceToUserDataVersion1",
+# Callmanager -> Station */
+ 0x0081: "SkinnyMessageRegisterAck",
+ 0x0082: "SkinnyMessageStartTone",
+ 0x0083: "SkinnyMessageStopTone",
+ 0x0085: "SkinnyMessageSetRinger",
+ 0x0086: "SkinnyMessageSetLamp",
+ 0x0087: "SkinnyMessageSetHkFDetect",
+ 0x0088: "SkinnyMessageSpeakerMode",
+ 0x0089: "SkinnyMessageSetMicroMode",
+ 0x008A: "SkinnyMessageStartMediaTransmission",
+ 0x008B: "SkinnyMessageStopMediaTransmission",
+ 0x008C: "SkinnyMessageStartMediaReception",
+ 0x008D: "SkinnyMessageStopMediaReception",
+ 0x008F: "SkinnyMessageCallInfo",
+ 0x0090: "SkinnyMessageForwardStat",
+ 0x0091: "SkinnyMessageSpeedDialStat",
+ 0x0092: "SkinnyMessageLineStat",
+ 0x0093: "SkinnyMessageConfigStat",
+ 0x0094: "SkinnyMessageTimeDate",
+ 0x0095: "SkinnyMessageStartSessionTransmission",
+ 0x0096: "SkinnyMessageStopSessionTransmission",
+ 0x0097: "SkinnyMessageButtonTemplate",
+ 0x0098: "SkinnyMessageVersion",
+ 0x0099: "SkinnyMessageDisplayText",
+ 0x009A: "SkinnyMessageClearDisplay",
+ 0x009B: "SkinnyMessageCapabilitiesReq",
+ 0x009C: "SkinnyMessageEnunciatorCommand",
+ 0x009D: "SkinnyMessageRegisterReject",
+ 0x009E: "SkinnyMessageServerRes",
+ 0x009F: "SkinnyMessageReset",
+ 0x0100: "SkinnyMessageKeepAliveAck",
+ 0x0101: "SkinnyMessageStartMulticastMediaReception",
+ 0x0102: "SkinnyMessageStartMulticastMediaTransmission",
+ 0x0103: "SkinnyMessageStopMulticastMediaReception",
+ 0x0104: "SkinnyMessageStopMulticastMediaTransmission",
+ 0x0105: "SkinnyMessageOpenReceiveChannel",
+ 0x0106: "SkinnyMessageCloseReceiveChannel",
+ 0x0107: "SkinnyMessageConnectionStatisticsReq",
+ 0x0108: "SkinnyMessageSoftKeyTemplateRes",
+ 0x0109: "SkinnyMessageSoftKeySetRes",
+ 0x0110: "SkinnyMessageSoftKeyEvent",
+ 0x0111: "SkinnyMessageCallState",
+ 0x0112: "SkinnyMessagePromptStatus",
+ 0x0113: "SkinnyMessageClearPromptStatus",
+ 0x0114: "SkinnyMessageDisplayNotify",
+ 0x0115: "SkinnyMessageClearNotify",
+ 0x0116: "SkinnyMessageCallPlane",
+ 0x0117: "SkinnyMessageCallPlane",
+ 0x0118: "SkinnyMessageUnregisterAck",
+ 0x0119: "SkinnyMessageBackSpaceReq",
+ 0x011A: "SkinnyMessageRegisterTokenAck",
+ 0x011B: "SkinnyMessageRegisterTokenReject",
+ 0x0042: "SkinnyMessageDeviceToUserDataResponseVersion1",
+ 0x011C: "SkinnyMessageStartMediaFailureDetection",
+ 0x011D: "SkinnyMessageDialedNumber",
+ 0x011E: "SkinnyMessageUserToDeviceData",
+ 0x011F: "SkinnyMessageFeatureStat",
+ 0x0120: "SkinnyMessageDisplayPriNotify",
+ 0x0121: "SkinnyMessageClearPriNotify",
+ 0x0122: "SkinnyMessageStartAnnouncement",
+ 0x0123: "SkinnyMessageStopAnnouncement",
+ 0x0124: "SkinnyMessageAnnouncementFinish",
+ 0x0127: "SkinnyMessageNotifyDtmfTone",
+ 0x0128: "SkinnyMessageSendDtmfTone",
+ 0x0129: "SkinnyMessageSubscribeDtmfPayloadReq",
+ 0x012A: "SkinnyMessageSubscribeDtmfPayloadRes",
+ 0x012B: "SkinnyMessageSubscribeDtmfPayloadErr",
+ 0x012C: "SkinnyMessageUnSubscribeDtmfPayloadReq",
+ 0x012D: "SkinnyMessageUnSubscribeDtmfPayloadRes",
+ 0x012E: "SkinnyMessageUnSubscribeDtmfPayloadErr",
+ 0x012F: "SkinnyMessageServiceURLStat",
+ 0x0130: "SkinnyMessageCallSelectStat",
+ 0x0131: "SkinnyMessageOpenMultiMediaChannel",
+ 0x0132: "SkinnyMessageStartMultiMediaTransmission",
+ 0x0133: "SkinnyMessageStopMultiMediaTransmission",
+ 0x0134: "SkinnyMessageMiscellaneousCommand",
+ 0x0135: "SkinnyMessageFlowControlCommand",
+ 0x0136: "SkinnyMessageCloseMultiMediaReceiveChannel",
+ 0x0137: "SkinnyMessageCreateConferenceReq",
+ 0x0138: "SkinnyMessageDeleteConferenceReq",
+ 0x0139: "SkinnyMessageModifyConferenceReq",
+ 0x013A: "SkinnyMessageAddParticipantReq",
+ 0x013B: "SkinnyMessageDropParticipantReq",
+ 0x013C: "SkinnyMessageAuditConferenceReq",
+ 0x013D: "SkinnyMessageAuditParticipantReq",
+ 0x013F: "SkinnyMessageUserToDeviceDataVersion1",
+ }
+
+skinny_callstates = {
+ 0x1: "Off Hook",
+ 0x2: "On Hook",
+ 0x3: "Ring out",
+ 0xc: "Proceeding",
+}
+
+
+skinny_ring_type = {
+ 0x1: "Ring off"
+}
+
+skinny_speaker_modes = {
+ 0x1: "Speaker on",
+ 0x2: "Speaker off"
+}
+
+skinny_lamp_mode = {
+ 0x1: "Off (?)",
+ 0x2: "On",
+}
+
+skinny_stimulus = {
+ 0x9: "Line"
+}
+
+
+############
+## Fields ##
+############
+
+class SkinnyDateTimeField(StrFixedLenField):
+ def __init__(self, name, default):
+ StrFixedLenField.__init__(self, name, default, 32)
+
+ def m2i(self, pkt, s):
+ year,month,dow,day,hour,min,sec,milisecond=struct.unpack('<8I', s)
+ return (year, month, day, hour, min, sec)
+
+ def i2m(self, pkt, val):
+ if type(val) is str:
+ val = self.h2i(pkt, val)
+ l= val[:2] + (0,) + val[2:7] + (0,)
+ return struct.pack('<8I', *l)
+
+ def i2h(self, pkt, x):
+ if type(x) is str:
+ return x
+ else:
+ return time.ctime(time.mktime(x+(0,0,0)))
+
+ def i2repr(self, pkt, x):
+ return self.i2h(pkt, x)
+
+ def h2i(self, pkt, s):
+ t = ()
+ if type(s) is str:
+ t = time.strptime(s)
+ t = t[:2] + t[2:-3]
+ else:
+ if not s:
+ y,m,d,h,min,sec,rest,rest,rest = time.gmtime(time.time())
+ t = (y,m,d,h,min,sec)
+ else:
+ t=s
+ return t
+
+
+###########################
+## Packet abstract class ##
+###########################
+
+class SkinnyMessageGeneric(Packet):
+ name='Generic message'
+
+class SkinnyMessageKeepAlive(Packet):
+ name='keep alive'
+
+class SkinnyMessageKeepAliveAck(Packet):
+ name='keep alive ack'
+
+class SkinnyMessageOffHook(Packet):
+ name = 'Off Hook'
+ fields_desc = [ LEIntField("unknown1", 0),
+ LEIntField("unknown2", 0),]
+
+class SkinnyMessageOnHook(SkinnyMessageOffHook):
+ name = 'On Hook'
+
+class SkinnyMessageCallState(Packet):
+ name='Skinny Call state message'
+ fields_desc = [ LEIntEnumField("state", 1, skinny_callstates),
+ LEIntField("instance", 1),
+ LEIntField("callid", 0),
+ LEIntField("unknown1", 4),
+ LEIntField("unknown2", 0),
+ LEIntField("unknown3", 0) ]
+
+class SkinnyMessageSoftKeyEvent(Packet):
+ name='Soft Key Event'
+ fields_desc = [ LEIntField("key", 0),
+ LEIntField("instance", 1),
+ LEIntField("callid", 0)]
+
+class SkinnyMessageSetRinger(Packet):
+ name='Ring message'
+ fields_desc = [ LEIntEnumField("ring", 0x1, skinny_ring_type),
+ LEIntField("unknown1", 0),
+ LEIntField("unknown2", 0),
+ LEIntField("unknown3", 0) ]
+
+_skinny_tones = {
+ 0x21: 'Inside dial tone',
+ 0x22: 'xxx',
+ 0x23: 'xxx',
+ 0x24: 'Alerting tone',
+ 0x25: 'Reorder Tone'
+ }
+
+class SkinnyMessageStartTone(Packet):
+ name='Start tone'
+ fields_desc = [ LEIntEnumField("tone", 0x21, _skinny_tones),
+ LEIntField("unknown1", 0),
+ LEIntField("instance", 1),
+ LEIntField("callid", 0)]
+
+class SkinnyMessageStopTone(SkinnyMessageGeneric):
+ name='stop tone'
+ fields_desc = [ LEIntField("instance", 1),
+ LEIntField("callid", 0)]
+
+
+class SkinnyMessageSpeakerMode(Packet):
+ name='Speaker mdoe'
+ fields_desc = [ LEIntEnumField("ring", 0x1, skinny_speaker_modes) ]
+
+class SkinnyMessageSetLamp(Packet):
+ name='Lamp message (light of the phone)'
+ fields_desc = [ LEIntEnumField("stimulus", 0x5, skinny_stimulus),
+ LEIntField("instance", 1),
+ LEIntEnumField("mode", 2, skinny_lamp_mode) ]
+
+class SkinnyMessageSoftKeyEvent(Packet):
+ name=' Call state message'
+ fields_desc = [ LEIntField("instance", 1),
+ LEIntField("callid", 0),
+ LEIntField("set", 0),
+ LEIntField("map", 0xffff)]
+
+class SkinnyMessagePromptStatus(Packet):
+ name='Prompt status'
+ fields_desc = [ LEIntField("timeout", 0),
+ StrFixedLenField("text", "\0"*32, 32),
+ LEIntField("instance", 1),
+ LEIntField("callid", 0)]
+
+class SkinnyMessageCallPlane(Packet):
+ name='Activate/Desactivate Call Plane Message'
+ fields_desc = [ LEIntField("instance", 1)]
+
+class SkinnyMessageTimeDate(Packet):
+ name='Setting date and time'
+ fields_desc = [ SkinnyDateTimeField("settime", None),
+ LEIntField("timestamp", 0) ]
+
+class SkinnyMessageClearPromptStatus(Packet):
+ name='clear prompt status'
+ fields_desc = [ LEIntField("instance", 1),
+ LEIntField("callid", 0)]
+
+class SkinnyMessageKeypadButton(Packet):
+ name='keypad button'
+ fields_desc = [ LEIntField("key", 0),
+ LEIntField("instance", 1),
+ LEIntField("callid", 0)]
+
+class SkinnyMessageDialedNumber(Packet):
+ name='dialed number'
+ fields_desc = [ StrFixedLenField("number", "1337", 24),
+ LEIntField("instance", 1),
+ LEIntField("callid", 0)]
+
+_skinny_message_callinfo_restrictions = ['CallerName'
+ , 'CallerNumber'
+ , 'CalledName'
+ , 'CalledNumber'
+ , 'OriginalCalledName'
+ , 'OriginalCalledNumber'
+ , 'LastRedirectName'
+ , 'LastRedirectNumber'] + ['Bit%d' % i for i in range(8,15)]
+class SkinnyMessageCallInfo(Packet):
+ name='call information'
+ fields_desc = [ StrFixedLenField("callername", "Jean Valjean", 40),
+ StrFixedLenField("callernum", "1337", 24),
+ StrFixedLenField("calledname", "Causette", 40),
+ StrFixedLenField("callednum", "1034", 24),
+ LEIntField("lineinstance", 1),
+ LEIntField("callid", 0),
+ StrFixedLenField("originalcalledname", "Causette", 40),
+ StrFixedLenField("originalcallednum", "1034", 24),
+ StrFixedLenField("lastredirectingname", "Causette", 40),
+ StrFixedLenField("lastredirectingnum", "1034", 24),
+ LEIntField("originalredirectreason", 0),
+ LEIntField("lastredirectreason", 0),
+ StrFixedLenField('voicemailboxG', '\0'*24, 24),
+ StrFixedLenField('voicemailboxD', '\0'*24, 24),
+ StrFixedLenField('originalvoicemailboxD', '\0'*24, 24),
+ StrFixedLenField('lastvoicemailboxD', '\0'*24, 24),
+ LEIntField('security', 0),
+ FlagsField('restriction', 0, 16, _skinny_message_callinfo_restrictions),
+ LEIntField('unknown', 0)]
+
+
+class SkinnyRateField(LEIntField):
+ def i2repr(self, pkt, x):
+ if x is None:
+ x=0
+ return '%d ms/pkt' % x
+
+_skinny_codecs = {
+ 0x0: 'xxx',
+ 0x1: 'xxx',
+ 0x2: 'xxx',
+ 0x3: 'xxx',
+ 0x4: 'G711 ulaw 64k'
+ }
+
+_skinny_echo = {
+ 0x0: 'echo cancelation off',
+ 0x1: 'echo cancelation on'
+ }
+
+class SkinnyMessageOpenReceiveChannel(Packet):
+ name='open receive channel'
+ fields_desc = [LEIntField('conference', 0),
+ LEIntField('passthru', 0),
+ SkinnyRateField('rate', 20),
+ LEIntEnumField('codec', 4, _skinny_codecs),
+ LEIntEnumField('echo', 0, _skinny_echo),
+ LEIntField('unknown1', 0),
+ LEIntField('callid', 0)]
+
+ def guess_payload_class(self, p):
+ return conf.padding_layer
+
+_skinny_receive_channel_status = {
+ 0x0: 'ok',
+ 0x1: 'ko'
+ }
+
+class SkinnyMessageOpenReceiveChannelAck(Packet):
+ name='open receive channel'
+ fields_desc = [LEIntEnumField('status', 0, _skinny_receive_channel_status),
+ IPField('remote', '0.0.0.0'),
+ LEIntField('port', RandShort()),
+ LEIntField('passthru', 0),
+ LEIntField('callid', 0)]
+
+_skinny_silence = {
+ 0x0: 'silence suppression off',
+ 0x1: 'silence suppression on',
+ }
+
+class SkinnyFramePerPacketField(LEIntField):
+ def i2repr(self, pkt, x):
+ if x is None:
+ x=0
+ return '%d frames/pkt' % x
+
+class SkinnyMessageStartMediaTransmission(Packet):
+ name='start multimedia transmission'
+ fields_desc = [LEIntField('conference', 0),
+ LEIntField('passthru', 0),
+ IPField('remote', '0.0.0.0'),
+ LEIntField('port', RandShort()),
+ SkinnyRateField('rate', 20),
+ LEIntEnumField('codec', 4, _skinny_codecs),
+ LEIntField('precedence', 200),
+ LEIntEnumField('silence', 0, _skinny_silence),
+ SkinnyFramePerPacketField('maxframes', 0),
+ LEIntField('unknown1', 0),
+ LEIntField('callid', 0)]
+
+ def guess_payload_class(self, p):
+ return conf.padding_layer
+
+class SkinnyMessageCloseReceiveChannel(Packet):
+ name='close receive channel'
+ fields_desc = [LEIntField('conference', 0),
+ LEIntField('passthru', 0),
+ IPField('remote', '0.0.0.0'),
+ LEIntField('port', RandShort()),
+ SkinnyRateField('rate', 20),
+ LEIntEnumField('codec', 4, _skinny_codecs),
+ LEIntField('precedence', 200),
+ LEIntEnumField('silence', 0, _skinny_silence),
+ LEIntField('callid', 0)]
+
+class SkinnyMessageStopMultiMediaTransmission(Packet):
+ name='stop multimedia transmission'
+ fields_desc = [LEIntField('conference', 0),
+ LEIntField('passthru', 0),
+ LEIntField('callid', 0)]
+
+class Skinny(Packet):
+ name="Skinny"
+ fields_desc = [ LEIntField("len", None),
+ LEIntField("res",0),
+ LEIntEnumField("msg",0, skinny_messages) ]
+
+ def post_build(self, pkt, p):
+ if self.len is None:
+ l=len(p)+len(pkt)-8 # on compte pas les headers len et reserved
+ pkt=struct.pack('@I', l)+pkt[4:]
+ return pkt+p
+
+# An helper
+def get_cls(name, fallback_cls):
+ return globals().get(name, fallback_cls)
+ #return builtins.__dict__.get(name, fallback_cls)
+
+for msgid,strcls in skinny_messages_cls.items():
+ cls=get_cls(strcls, SkinnyMessageGeneric)
+ bind_layers(Skinny, cls, {"msg": msgid})
+
+bind_layers(TCP, Skinny, { "dport": 2000 } )
+bind_layers(TCP, Skinny, { "sport": 2000 } )
+
+if __name__ == "__main__":
+ interact(mydict=globals(),mybanner="Welcome to Skinny add-on")
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ubberlogger.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ubberlogger.py
new file mode 100644
index 00000000..1c01db2f
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/ubberlogger.py
@@ -0,0 +1,101 @@
+# Author: Sylvain SARMEJEANNE
+# http://trac.secdev.org/scapy/ticket/1
+
+# scapy.contrib.description = Ubberlogger dissectors
+# scapy.contrib.status = untested
+
+from scapy.packet import *
+from scapy.fields import *
+
+# Syscalls known by Uberlogger
+uberlogger_sys_calls = {0:"READ_ID",
+ 1:"OPEN_ID",
+ 2:"WRITE_ID",
+ 3:"CHMOD_ID",
+ 4:"CHOWN_ID",
+ 5:"SETUID_ID",
+ 6:"CHROOT_ID",
+ 7:"CREATE_MODULE_ID",
+ 8:"INIT_MODULE_ID",
+ 9:"DELETE_MODULE_ID",
+ 10:"CAPSET_ID",
+ 11:"CAPGET_ID",
+ 12:"FORK_ID",
+ 13:"EXECVE_ID"}
+
+# First part of the header
+class Uberlogger_honeypot_caract(Packet):
+ name = "Uberlogger honeypot_caract"
+ fields_desc = [ByteField("honeypot_id", 0),
+ ByteField("reserved", 0),
+ ByteField("os_type_and_version", 0)]
+
+# Second part of the header
+class Uberlogger_uber_h(Packet):
+ name = "Uberlogger uber_h"
+ fields_desc = [ByteEnumField("syscall_type", 0, uberlogger_sys_calls),
+ IntField("time_sec", 0),
+ IntField("time_usec", 0),
+ IntField("pid", 0),
+ IntField("uid", 0),
+ IntField("euid", 0),
+ IntField("cap_effective", 0),
+ IntField("cap_inheritable", 0),
+ IntField("cap_permitted", 0),
+ IntField("res", 0),
+ IntField("length", 0)]
+
+# The 9 following classes are options depending on the syscall type
+class Uberlogger_capget_data(Packet):
+ name = "Uberlogger capget_data"
+ fields_desc = [IntField("target_pid", 0)]
+
+class Uberlogger_capset_data(Packet):
+ name = "Uberlogger capset_data"
+ fields_desc = [IntField("target_pid", 0),
+ IntField("effective_cap", 0),
+ IntField("permitted_cap", 0),
+ IntField("inheritable_cap", 0)]
+
+class Uberlogger_chmod_data(Packet):
+ name = "Uberlogger chmod_data"
+ fields_desc = [ShortField("mode", 0)]
+
+class Uberlogger_chown_data(Packet):
+ name = "Uberlogger chown_data"
+ fields_desc = [IntField("uid", 0),
+ IntField("gid", 0)]
+
+class Uberlogger_open_data(Packet):
+ name = "Uberlogger open_data"
+ fields_desc = [IntField("flags", 0),
+ IntField("mode", 0)]
+
+class Uberlogger_read_data(Packet):
+ name = "Uberlogger read_data"
+ fields_desc = [IntField("fd", 0),
+ IntField("count", 0)]
+
+class Uberlogger_setuid_data(Packet):
+ name = "Uberlogger setuid_data"
+ fields_desc = [IntField("uid", 0)]
+
+class Uberlogger_create_module_data(Packet):
+ name = "Uberlogger create_module_data"
+ fields_desc = [IntField("size", 0)]
+
+class Uberlogger_execve_data(Packet):
+ name = "Uberlogger execve_data"
+ fields_desc = [IntField("nbarg", 0)]
+
+# Layer bounds for Uberlogger
+bind_layers(Uberlogger_honeypot_caract,Uberlogger_uber_h)
+bind_layers(Uberlogger_uber_h,Uberlogger_capget_data)
+bind_layers(Uberlogger_uber_h,Uberlogger_capset_data)
+bind_layers(Uberlogger_uber_h,Uberlogger_chmod_data)
+bind_layers(Uberlogger_uber_h,Uberlogger_chown_data)
+bind_layers(Uberlogger_uber_h,Uberlogger_open_data)
+bind_layers(Uberlogger_uber_h,Uberlogger_read_data)
+bind_layers(Uberlogger_uber_h,Uberlogger_setuid_data)
+bind_layers(Uberlogger_uber_h,Uberlogger_create_module_data)
+bind_layers(Uberlogger_uber_h,Uberlogger_execve_data)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/vqp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/vqp.py
new file mode 100644
index 00000000..9328cea4
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/vqp.py
@@ -0,0 +1,58 @@
+
+# http://trac.secdev.org/scapy/ticket/147
+
+# scapy.contrib.description = VLAN Query Protocol
+# scapy.contrib.status = loads
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.inet import UDP
+
+class VQP(Packet):
+ name = "VQP"
+ fields_desc = [
+ ByteField("const", 1),
+ ByteEnumField("type", 1, {
+ 1:"requestPort", 2:"responseVLAN",
+ 3:"requestReconfirm", 4:"responseReconfirm"
+ }),
+ ByteEnumField("errorcodeaction", 0, {
+ 0:"none",3:"accessDenied",
+ 4:"shutdownPort", 5:"wrongDomain"
+ }),
+ ByteEnumField("unknown", 2, {
+ 2:"inGoodResponse", 6:"inRequests"
+ }),
+ IntField("seq",0),
+ ]
+
+class VQPEntry(Packet):
+ name = "VQPEntry"
+ fields_desc = [
+ IntEnumField("datatype", 0, {
+ 3073:"clientIPAddress", 3074:"portName",
+ 3075:"VLANName", 3076:"Domain", 3077:"ethernetPacket",
+ 3078:"ReqMACAddress", 3079:"unknown",
+ 3080:"ResMACAddress"
+ }),
+ FieldLenField("len", None),
+ ConditionalField(IPField("datatom", "0.0.0.0"),
+ lambda p:p.datatype==3073),
+ ConditionalField(MACField("data", "00:00:00:00:00:00"),
+ lambda p:p.datatype==3078),
+ ConditionalField(MACField("data", "00:00:00:00:00:00"),
+ lambda p:p.datatype==3080),
+ ConditionalField(StrLenField("data", None,
+ length_from=lambda p:p.len),
+ lambda p:p.datatype not in [3073, 3078, 3080]),
+ ]
+ def post_build(self, p, pay):
+ if self.len is None:
+ l = len(p.data)
+ p = p[:2]+struct.pack("!H",l)+p[4:]
+ return p
+
+bind_layers(UDP, VQP, sport=1589)
+bind_layers(UDP, VQP, dport=1589)
+bind_layers(VQP, VQPEntry, )
+bind_layers(VQPEntry, VQPEntry, )
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/vtp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/vtp.py
new file mode 100644
index 00000000..af5c2823
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/vtp.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env python
+
+# scapy.contrib.description = VLAN Trunking Protocol (VTP)
+# scapy.contrib.status = loads
+
+"""
+ VTP Scapy Extension
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ :version: 2009-02-15
+ :copyright: 2009 by Jochen Bartl
+ :e-mail: lobo@c3a.de / jochen.bartl@gmail.com
+ :license: GPL v2
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ :TODO
+
+ - Join messages
+ - RE MD5 hash calculation
+ - Have a closer look at 8 byte padding in summary adv.
+ "debug sw-vlan vtp packets" sais the TLV length is invalid,
+ when I change the values
+ '\x00\x00\x00\x01\x06\x01\x00\x02'
+ * \x00\x00 ?
+ * \x00\x01 tlvtype?
+ * \x06 length?
+ * \x00\x02 value?
+ - h2i function for VTPTimeStampField
+
+ :References:
+
+ - Understanding VLAN Trunk Protocol (VTP)
+ http://www.cisco.com/en/US/tech/tk389/tk689/technologies_tech_note09186a0080094c52.shtml
+"""
+
+from scapy.all import *
+
+_VTP_VLAN_TYPE = {
+ 1 : 'Ethernet',
+ 2 : 'FDDI',
+ 3 : 'TrCRF',
+ 4 : 'FDDI-net',
+ 5 : 'TrBRF'
+ }
+
+_VTP_VLANINFO_TLV_TYPE = {
+ 0x01 : 'Source-Routing Ring Number',
+ 0x02 : 'Source-Routing Bridge Number',
+ 0x03 : 'Spanning-Tree Protocol Type',
+ 0x04 : 'Parent VLAN',
+ 0x05 : 'Translationally Bridged VLANs',
+ 0x06 : 'Pruning',
+ 0x07 : 'Bridge Type',
+ 0x08 : 'Max ARE Hop Count',
+ 0x09 : 'Max STE Hop Count',
+ 0x0A : 'Backup CRF Mode'
+ }
+
+
+class VTPVlanInfoTlv(Packet):
+ name = "VTP VLAN Info TLV"
+ fields_desc = [
+ ByteEnumField("type", 0, _VTP_VLANINFO_TLV_TYPE),
+ ByteField("length", 0),
+ StrLenField("value", None, length_from=lambda pkt : pkt.length + 1)
+ ]
+
+ def guess_payload_class(self, p):
+ return conf.padding_layer
+
+class VTPVlanInfo(Packet):
+ name = "VTP VLAN Info"
+ fields_desc = [
+ ByteField("len", None), # FIXME: compute length
+ ByteEnumField("status", 0, {0 : "active", 1 : "suspended"}),
+ ByteEnumField("type", 1, _VTP_VLAN_TYPE),
+ FieldLenField("vlannamelen", None, "vlanname", "B"),
+ ShortField("vlanid", 1),
+ ShortField("mtu", 1500),
+ XIntField("dot10index", None),
+ StrLenField("vlanname", "default", length_from=lambda pkt:4 * ((pkt.vlannamelen + 3) / 4)),
+ ConditionalField(PacketListField("tlvlist", [], VTPVlanInfoTlv,
+ length_from=lambda pkt:pkt.len - 12 - (4 * ((pkt.vlannamelen + 3) / 4))),
+ lambda pkt:pkt.type not in [1, 2])
+ ]
+
+ def post_build(self, p, pay):
+ vlannamelen = 4 * ((len(self.vlanname) + 3) / 4)
+
+ if self.len == None:
+ l = vlannamelen + 12
+ p = chr(l & 0xff) + p[1:]
+
+ # Pad vlan name with zeros if vlannamelen > len(vlanname)
+ l = vlannamelen - len(self.vlanname)
+ if l != 0:
+ p += "\x00" * l
+
+ p += pay
+
+ return p
+
+ def guess_payload_class(self, p):
+ return conf.padding_layer
+
+_VTP_Types = {
+ 1 : 'Summary Advertisement',
+ 2 : 'Subset Advertisements',
+ 3 : 'Advertisement Request',
+ 4 : 'Join'
+ }
+
+class VTPTimeStampField(StrFixedLenField):
+ def __init__(self, name, default):
+ StrFixedLenField.__init__(self, name, default, 12)
+
+ def i2repr(self, pkt, x):
+ return "%s-%s-%s %s:%s:%s" % (x[:2], x[2:4], x[4:6], x[6:8], x[8:10], x[10:12])
+
+class VTP(Packet):
+ name = "VTP"
+ fields_desc = [
+ ByteField("ver", 2),
+ ByteEnumField("code", 1, _VTP_Types),
+ ConditionalField(ByteField("followers", 1),
+ lambda pkt:pkt.code == 1),
+ ConditionalField(ByteField("seq", 1),
+ lambda pkt:pkt.code == 2),
+ ConditionalField(ByteField("reserved", 0),
+ lambda pkt:pkt.code == 3),
+ ByteField("domnamelen", None),
+ StrFixedLenField("domname", "manbearpig", 32),
+ ConditionalField(SignedIntField("rev", 0),
+ lambda pkt:pkt.code == 1 or
+ pkt.code == 2),
+ # updater identity
+ ConditionalField(IPField("uid", "192.168.0.1"),
+ lambda pkt:pkt.code == 1),
+ ConditionalField(VTPTimeStampField("timestamp", '930301000000'),
+ lambda pkt:pkt.code == 1),
+ ConditionalField(StrFixedLenField("md5", "\x00" * 16, 16),
+ lambda pkt:pkt.code == 1),
+ ConditionalField(
+ PacketListField("vlaninfo", [], VTPVlanInfo),
+ lambda pkt: pkt.code == 2),
+ ConditionalField(ShortField("startvalue", 0),
+ lambda pkt:pkt.code == 3)
+ ]
+
+ def post_build(self, p, pay):
+ if self.domnamelen == None:
+ domnamelen = len(self.domname.strip("\x00"))
+ p = p[:3] + chr(domnamelen & 0xff) + p[4:]
+
+ p += pay
+
+ return p
+
+bind_layers(SNAP, VTP, code=0x2003)
+
+if __name__ == '__main__':
+ interact(mydict=globals(), mybanner="VTP")
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/wpa_eapol.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/wpa_eapol.py
new file mode 100644
index 00000000..084eedd8
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/contrib/wpa_eapol.py
@@ -0,0 +1,35 @@
+
+# http://trac.secdev.org/scapy/ticket/104
+
+# scapy.contrib.description = WPA EAPOL dissector
+# scapy.contrib.status = loads
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.l2 import *
+
+class WPA_key(Packet):
+ name = "WPA_key"
+ fields_desc = [ ByteField("descriptor_type", 1),
+ ShortField("key_info",0),
+ LenField("len", None, "H"),
+ StrFixedLenField("replay_counter", "", 8),
+ StrFixedLenField("nonce", "", 32),
+ StrFixedLenField("key_iv", "", 16),
+ StrFixedLenField("wpa_key_rsc", "", 8),
+ StrFixedLenField("wpa_key_id", "", 8),
+ StrFixedLenField("wpa_key_mic", "", 16),
+ LenField("wpa_key_length", None, "H"),
+ StrLenField("wpa_key", "", length_from=lambda pkt:pkt.wpa_key_length) ]
+ def extract_padding(self, s):
+ l = self.len
+ return s[:l],s[l:]
+ def hashret(self):
+ return chr(self.type)+self.payload.hashret()
+ def answers(self, other):
+ if isinstance(other,WPA_key):
+ return 1
+ return 0
+
+
+bind_layers( EAPOL, WPA_key, type=3)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/crypto/__init__.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/crypto/__init__.py
new file mode 100644
index 00000000..b441863e
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/crypto/__init__.py
@@ -0,0 +1,17 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more information
+## Copyright (C) Arnaud Ebalard <arno@natisbad.org>
+## This program is published under a GPLv2 license
+
+"""
+Tools for handling with digital certificates.
+"""
+
+try:
+ import Crypto
+except ImportError:
+ import logging
+ log_loading = logging.getLogger("scapy.loading")
+ log_loading.info("Can't import python Crypto lib. Disabled certificate manipulation tools")
+else:
+ from scapy.crypto.cert import *
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/crypto/cert.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/crypto/cert.py
new file mode 100644
index 00000000..c4291059
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/crypto/cert.py
@@ -0,0 +1,2486 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more information
+## Copyright (C) Arnaud Ebalard <arno@natisbad.org>
+## This program is published under a GPLv2 license
+
+"""
+Cryptographic certificates.
+"""
+
+import os, sys, math, socket, struct, hmac, string, time, random, tempfile
+from subprocess import Popen, PIPE
+from scapy.utils import strxor
+try:
+ HAS_HASHLIB=True
+ import hashlib
+except:
+ HAS_HASHLIB=False
+
+from Crypto.PublicKey import *
+from Crypto.Cipher import *
+from Crypto.Hash import *
+from Crypto.Util import number
+
+# Maximum allowed size in bytes for a certificate file, to avoid
+# loading huge file when importing a cert
+MAX_KEY_SIZE=50*1024
+MAX_CERT_SIZE=50*1024
+MAX_CRL_SIZE=10*1024*1024 # some are that big
+
+#####################################################################
+# Some helpers
+#####################################################################
+
+def popen3(cmd):
+ p = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=PIPE,
+ close_fds=True)
+ return p.stdout, p.stdin, p.stderr
+
+def warning(m):
+ print("WARNING: %s" % m)
+
+def randstring(l):
+ """
+ Returns a random string of length l (l >= 0)
+ """
+ tmp = map(lambda x: struct.pack("B", random.randrange(0, 256, 1)), [""]*l)
+ return "".join(tmp)
+
+def zerofree_randstring(l):
+ """
+ Returns a random string of length l (l >= 0) without zero in it.
+ """
+ tmp = map(lambda x: struct.pack("B", random.randrange(1, 256, 1)), [""]*l)
+ return "".join(tmp)
+
+def strand(s1, s2):
+ """
+ Returns the binary AND of the 2 provided strings s1 and s2. s1 and s2
+ must be of same length.
+ """
+ return "".join(map(lambda x,y:chr(ord(x)&ord(y)), s1, s2))
+
+# OS2IP function defined in RFC 3447 for octet string to integer conversion
+def pkcs_os2ip(x):
+ """
+ Accepts a byte string as input parameter and return the associated long
+ value:
+
+ Input : x octet string to be converted
+
+ Output: x corresponding nonnegative integer
+
+ Reverse function is pkcs_i2osp()
+ """
+ return number.bytes_to_long(x)
+
+# IP2OS function defined in RFC 3447 for octet string to integer conversion
+def pkcs_i2osp(x,xLen):
+ """
+ Converts a long (the first parameter) to the associated byte string
+ representation of length l (second parameter). Basically, the length
+ parameters allow the function to perform the associated padding.
+
+ Input : x nonnegative integer to be converted
+ xLen intended length of the resulting octet string
+
+ Output: x corresponding nonnegative integer
+
+ Reverse function is pkcs_os2ip().
+ """
+ z = number.long_to_bytes(x)
+ padlen = max(0, xLen-len(z))
+ return '\x00'*padlen + z
+
+# for every hash function a tuple is provided, giving access to
+# - hash output length in byte
+# - associated hash function that take data to be hashed as parameter
+# XXX I do not provide update() at the moment.
+# - DER encoding of the leading bits of digestInfo (the hash value
+# will be concatenated to create the complete digestInfo).
+#
+# Notes:
+# - MD4 asn.1 value should be verified. Also, as stated in
+# PKCS#1 v2.1, MD4 should not be used.
+# - hashlib is available from http://code.krypto.org/python/hashlib/
+# - 'tls' one is the concatenation of both md5 and sha1 hashes used
+# by SSL/TLS when signing/verifying things
+_hashFuncParams = {
+ "md2" : (16,
+ lambda x: MD2.new(x).digest(),
+ '\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x02\x05\x00\x04\x10'),
+ "md4" : (16,
+ lambda x: MD4.new(x).digest(),
+ '\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x04\x05\x00\x04\x10'), # is that right ?
+ "md5" : (16,
+ lambda x: MD5.new(x).digest(),
+ '\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x05\x05\x00\x04\x10'),
+ "sha1" : (20,
+ lambda x: SHA.new(x).digest(),
+ '\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'),
+ "tls" : (36,
+ lambda x: MD5.new(x).digest() + SHA.new(x).digest(),
+ '') }
+
+if HAS_HASHLIB:
+ _hashFuncParams["sha224"] = (28,
+ lambda x: hashlib.sha224(x).digest(),
+ '\x30\x2d\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x04\x05\x00\x04\x1c')
+ _hashFuncParams["sha256"] = (32,
+ lambda x: hashlib.sha256(x).digest(),
+ '\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')
+ _hashFuncParams["sha384"] = (48,
+ lambda x: hashlib.sha384(x).digest(),
+ '\x30\x41\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02\x05\x00\x04\x30')
+ _hashFuncParams["sha512"] = (64,
+ lambda x: hashlib.sha512(x).digest(),
+ '\x30\x51\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03\x05\x00\x04\x40')
+else:
+ warning("hashlib support is not available. Consider installing it")
+ warning("if you need sha224, sha256, sha384 and sha512 algs.")
+
+def pkcs_mgf1(mgfSeed, maskLen, h):
+ """
+ Implements generic MGF1 Mask Generation function as described in
+ Appendix B.2.1 of RFC 3447. The hash function is passed by name.
+ valid values are 'md2', 'md4', 'md5', 'sha1', 'tls, 'sha256',
+ 'sha384' and 'sha512'. Returns None on error.
+
+ Input:
+ mgfSeed: seed from which mask is generated, an octet string
+ maskLen: intended length in octets of the mask, at most 2^32 * hLen
+ hLen (see below)
+ h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
+ 'sha256', 'sha384'). hLen denotes the length in octets of
+ the hash function output.
+
+ Output:
+ an octet string of length maskLen
+ """
+
+ # steps are those of Appendix B.2.1
+ if not h in _hashFuncParams:
+ warning("pkcs_mgf1: invalid hash (%s) provided")
+ return None
+ hLen = _hashFuncParams[h][0]
+ hFunc = _hashFuncParams[h][1]
+ if maskLen > 2**32 * hLen: # 1)
+ warning("pkcs_mgf1: maskLen > 2**32 * hLen")
+ return None
+ T = "" # 2)
+ maxCounter = math.ceil(float(maskLen) / float(hLen)) # 3)
+ counter = 0
+ while counter < maxCounter:
+ C = pkcs_i2osp(counter, 4)
+ T += hFunc(mgfSeed + C)
+ counter += 1
+ return T[:maskLen]
+
+
+def pkcs_emsa_pss_encode(M, emBits, h, mgf, sLen):
+ """
+ Implements EMSA-PSS-ENCODE() function described in Sect. 9.1.1 of RFC 3447
+
+ Input:
+ M : message to be encoded, an octet string
+ emBits: maximal bit length of the integer resulting of pkcs_os2ip(EM),
+ where EM is the encoded message, output of the function.
+ h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
+ 'sha256', 'sha384'). hLen denotes the length in octets of
+ the hash function output.
+ mgf : the mask generation function f : seed, maskLen -> mask
+ sLen : intended length in octets of the salt
+
+ Output:
+ encoded message, an octet string of length emLen = ceil(emBits/8)
+
+ On error, None is returned.
+ """
+
+ # 1) is not done
+ hLen = _hashFuncParams[h][0] # 2)
+ hFunc = _hashFuncParams[h][1]
+ mHash = hFunc(M)
+ emLen = int(math.ceil(emBits/8.))
+ if emLen < hLen + sLen + 2: # 3)
+ warning("encoding error (emLen < hLen + sLen + 2)")
+ return None
+ salt = randstring(sLen) # 4)
+ MPrime = '\x00'*8 + mHash + salt # 5)
+ H = hFunc(MPrime) # 6)
+ PS = '\x00'*(emLen - sLen - hLen - 2) # 7)
+ DB = PS + '\x01' + salt # 8)
+ dbMask = mgf(H, emLen - hLen - 1) # 9)
+ maskedDB = strxor(DB, dbMask) # 10)
+ l = (8*emLen - emBits)/8 # 11)
+    rem = 8*emLen - emBits - 8*l # additional bits
+ andMask = l*'\x00'
+ if rem:
+ j = chr(reduce(lambda x,y: x+y, map(lambda x: 1<<x, range(8-rem))))
+ andMask += j
+ l += 1
+ maskedDB = strand(maskedDB[:l], andMask) + maskedDB[l:]
+ EM = maskedDB + H + '\xbc' # 12)
+ return EM # 13)
+
+
+def pkcs_emsa_pss_verify(M, EM, emBits, h, mgf, sLen):
+ """
+ Implements EMSA-PSS-VERIFY() function described in Sect. 9.1.2 of RFC 3447
+
+ Input:
+ M : message to be encoded, an octet string
+ EM : encoded message, an octet string of length emLen = ceil(emBits/8)
+ emBits: maximal bit length of the integer resulting of pkcs_os2ip(EM)
+ h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
+ 'sha256', 'sha384'). hLen denotes the length in octets of
+ the hash function output.
+ mgf : the mask generation function f : seed, maskLen -> mask
+ sLen : intended length in octets of the salt
+
+ Output:
+ True if the verification is ok, False otherwise.
+ """
+
+ # 1) is not done
+ hLen = _hashFuncParams[h][0] # 2)
+ hFunc = _hashFuncParams[h][1]
+ mHash = hFunc(M)
+ emLen = int(math.ceil(emBits/8.)) # 3)
+ if emLen < hLen + sLen + 2:
+ return False
+ if EM[-1] != '\xbc': # 4)
+ return False
+ l = emLen - hLen - 1 # 5)
+ maskedDB = EM[:l]
+ H = EM[l:l+hLen]
+ l = (8*emLen - emBits)/8 # 6)
+    rem = 8*emLen - emBits - 8*l # additional bits
+ andMask = l*'\xff'
+ if rem:
+ val = reduce(lambda x,y: x+y, map(lambda x: 1<<x, range(8-rem)))
+ j = chr(~val & 0xff)
+ andMask += j
+ l += 1
+ if strand(maskedDB[:l], andMask) != '\x00'*l:
+ return False
+ dbMask = mgf(H, emLen - hLen - 1) # 7)
+ DB = strxor(maskedDB, dbMask) # 8)
+ l = (8*emLen - emBits)/8 # 9)
+    rem = 8*emLen - emBits - 8*l # additional bits
+ andMask = l*'\x00'
+ if rem:
+ j = chr(reduce(lambda x,y: x+y, map(lambda x: 1<<x, range(8-rem))))
+ andMask += j
+ l += 1
+ DB = strand(DB[:l], andMask) + DB[l:]
+ l = emLen - hLen - sLen - 1 # 10)
+ if DB[:l] != '\x00'*(l-1) + '\x01':
+ return False
+ salt = DB[-sLen:] # 11)
+ MPrime = '\x00'*8 + mHash + salt # 12)
+ HPrime = hFunc(MPrime) # 13)
+ return H == HPrime # 14)
+
+
+def pkcs_emsa_pkcs1_v1_5_encode(M, emLen, h): # section 9.2 of RFC 3447
+ """
+ Implements EMSA-PKCS1-V1_5-ENCODE() function described in Sect.
+ 9.2 of RFC 3447.
+
+ Input:
+ M : message to be encode, an octet string
+ emLen: intended length in octets of the encoded message, at least
+ tLen + 11, where tLen is the octet length of the DER encoding
+ T of a certain value computed during the encoding operation.
+ h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
+ 'sha256', 'sha384'). hLen denotes the length in octets of
+ the hash function output.
+
+ Output:
+ encoded message, an octet string of length emLen
+
+ On error, None is returned.
+ """
+ hLen = _hashFuncParams[h][0] # 1)
+ hFunc = _hashFuncParams[h][1]
+ H = hFunc(M)
+ hLeadingDigestInfo = _hashFuncParams[h][2] # 2)
+ T = hLeadingDigestInfo + H
+ tLen = len(T)
+ if emLen < tLen + 11: # 3)
+ warning("pkcs_emsa_pkcs1_v1_5_encode: intended encoded message length too short")
+ return None
+ PS = '\xff'*(emLen - tLen - 3) # 4)
+ EM = '\x00' + '\x01' + PS + '\x00' + T # 5)
+ return EM # 6)
+
+
+# XXX should add other pgf1 instance in a better fashion.
+
+def create_ca_file(anchor_list, filename):
+ """
+ Concatenate all the certificates (PEM format for the export) in
+ 'anchor_list' and write the result to file 'filename'. On success
+ 'filename' is returned, None otherwise.
+
+ If you are used to OpenSSL tools, this function builds a CAfile
+ that can be used for certificate and CRL check.
+
+ Also see create_temporary_ca_file().
+ """
+ try:
+ f = open(filename, "w")
+ for a in anchor_list:
+ s = a.output(fmt="PEM")
+ f.write(s)
+ f.close()
+ except:
+ return None
+ return filename
+
+def create_temporary_ca_file(anchor_list):
+ """
+ Concatenate all the certificates (PEM format for the export) in
+ 'anchor_list' and write the result to file to a temporary file
+ using mkstemp() from tempfile module. On success 'filename' is
+ returned, None otherwise.
+
+ If you are used to OpenSSL tools, this function builds a CAfile
+ that can be used for certificate and CRL check.
+
+ Also see create_temporary_ca_file().
+ """
+ try:
+ f, fname = tempfile.mkstemp()
+ for a in anchor_list:
+ s = a.output(fmt="PEM")
+ l = os.write(f, s)
+ os.close(f)
+ except:
+ return None
+ return fname
+
+def create_temporary_ca_path(anchor_list, folder):
+ """
+ Create a CA path folder as defined in OpenSSL terminology, by
+ storing all certificates in 'anchor_list' list in PEM format
+ under provided 'folder' and then creating the associated links
+ using the hash as usually done by c_rehash.
+
+ Note that you can also include CRL in 'anchor_list'. In that
+ case, they will also be stored under 'folder' and associated
+ links will be created.
+
+ In folder, the files are created with names of the form
+ 0...ZZ.pem. If you provide an empty list, folder will be created
+ if it does not already exist, but that's all.
+
+ The number of certificates written to folder is returned on
+ success, None on error.
+ """
+ # We should probably avoid writing duplicate anchors and also
+ # check if they are all certs.
+ try:
+ if not os.path.isdir(folder):
+ os.makedirs(folder)
+ except:
+ return None
+
+ l = len(anchor_list)
+ if l == 0:
+ return None
+ fmtstr = "%%0%sd.pem" % math.ceil(math.log(l, 10))
+ i = 0
+ try:
+ for a in anchor_list:
+ fname = os.path.join(folder, fmtstr % i)
+ f = open(fname, "w")
+ s = a.output(fmt="PEM")
+ f.write(s)
+ f.close()
+ i += 1
+ except:
+ return None
+
+ r,w,e=popen3(["c_rehash", folder])
+ r.close(); w.close(); e.close()
+
+ return l
+
+
+#####################################################################
+# Public Key Cryptography related stuff
+#####################################################################
+
+class OSSLHelper:
+ def _apply_ossl_cmd(self, osslcmd, rawdata):
+ r,w,e=popen3(osslcmd)
+ w.write(rawdata)
+ w.close()
+ res = r.read()
+ r.close()
+ e.close()
+ return res
+
+class _EncryptAndVerify:
+ ### Below are encryption methods
+
+ def _rsaep(self, m):
+ """
+ Internal method providing raw RSA encryption, i.e. simple modular
+ exponentiation of the given message representative 'm', a long
+ between 0 and n-1.
+
+ This is the encryption primitive RSAEP described in PKCS#1 v2.1,
+ i.e. RFC 3447 Sect. 5.1.1.
+
+ Input:
+ m: message representative, a long between 0 and n-1, where
+ n is the key modulus.
+
+ Output:
+ ciphertext representative, a long between 0 and n-1
+
+ Not intended to be used directly. Please, see encrypt() method.
+ """
+
+ n = self.modulus
+ if type(m) is int:
+ m = long(m)
+ if type(m) is not long or m > n-1:
+ warning("Key._rsaep() expects a long between 0 and n-1")
+ return None
+
+ return self.key.encrypt(m, "")[0]
+
+
+ def _rsaes_pkcs1_v1_5_encrypt(self, M):
+ """
+ Implements RSAES-PKCS1-V1_5-ENCRYPT() function described in section
+ 7.2.1 of RFC 3447.
+
+ Input:
+ M: message to be encrypted, an octet string of length mLen, where
+ mLen <= k - 11 (k denotes the length in octets of the key modulus)
+
+ Output:
+ ciphertext, an octet string of length k
+
+ On error, None is returned.
+ """
+
+ # 1) Length checking
+ mLen = len(M)
+ k = self.modulusLen / 8
+ if mLen > k - 11:
+ warning("Key._rsaes_pkcs1_v1_5_encrypt(): message too "
+ "long (%d > %d - 11)" % (mLen, k))
+ return None
+
+ # 2) EME-PKCS1-v1_5 encoding
+ PS = zerofree_randstring(k - mLen - 3) # 2.a)
+ EM = '\x00' + '\x02' + PS + '\x00' + M # 2.b)
+
+ # 3) RSA encryption
+ m = pkcs_os2ip(EM) # 3.a)
+ c = self._rsaep(m) # 3.b)
+ C = pkcs_i2osp(c, k) # 3.c)
+
+ return C # 4)
+
+
+ def _rsaes_oaep_encrypt(self, M, h=None, mgf=None, L=None):
+ """
+ Internal method providing RSAES-OAEP-ENCRYPT as defined in Sect.
+ 7.1.1 of RFC 3447. Not intended to be used directly. Please, see
+ encrypt() method for type "OAEP".
+
+
+ Input:
+ M : message to be encrypted, an octet string of length mLen
+ where mLen <= k - 2*hLen - 2 (k denotes the length in octets
+ of the RSA modulus and hLen the length in octets of the hash
+ function output)
+ h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
+ 'sha256', 'sha384'). hLen denotes the length in octets of
+ the hash function output. 'sha1' is used by default if not
+ provided.
+ mgf: the mask generation function f : seed, maskLen -> mask
+ L : optional label to be associated with the message; the default
+ value for L, if not provided is the empty string
+
+ Output:
+ ciphertext, an octet string of length k
+
+ On error, None is returned.
+ """
+ # The steps below are the one described in Sect. 7.1.1 of RFC 3447.
+ # 1) Length Checking
+ # 1.a) is not done
+ mLen = len(M)
+ if h is None:
+ h = "sha1"
+ if not h in _hashFuncParams:
+ warning("Key._rsaes_oaep_encrypt(): unknown hash function %s.", h)
+ return None
+ hLen = _hashFuncParams[h][0]
+ hFun = _hashFuncParams[h][1]
+ k = self.modulusLen / 8
+ if mLen > k - 2*hLen - 2: # 1.b)
+ warning("Key._rsaes_oaep_encrypt(): message too long.")
+ return None
+
+ # 2) EME-OAEP encoding
+ if L is None: # 2.a)
+ L = ""
+ lHash = hFun(L)
+ PS = '\x00'*(k - mLen - 2*hLen - 2) # 2.b)
+ DB = lHash + PS + '\x01' + M # 2.c)
+ seed = randstring(hLen) # 2.d)
+ if mgf is None: # 2.e)
+ mgf = lambda x,y: pkcs_mgf1(x,y,h)
+ dbMask = mgf(seed, k - hLen - 1)
+ maskedDB = strxor(DB, dbMask) # 2.f)
+ seedMask = mgf(maskedDB, hLen) # 2.g)
+ maskedSeed = strxor(seed, seedMask) # 2.h)
+ EM = '\x00' + maskedSeed + maskedDB # 2.i)
+
+ # 3) RSA Encryption
+ m = pkcs_os2ip(EM) # 3.a)
+ c = self._rsaep(m) # 3.b)
+ C = pkcs_i2osp(c, k) # 3.c)
+
+ return C # 4)
+
+
+ def encrypt(self, m, t=None, h=None, mgf=None, L=None):
+ """
+ Encrypt message 'm' using 't' encryption scheme where 't' can be:
+
+ - None: the message 'm' is directly applied the RSAEP encryption
+ primitive, as described in PKCS#1 v2.1, i.e. RFC 3447
+ Sect 5.1.1. Simply put, the message undergo a modular
+ exponentiation using the public key. Additionnal method
+ parameters are just ignored.
+
+ - 'pkcs': the message 'm' is applied RSAES-PKCS1-V1_5-ENCRYPT encryption
+ scheme as described in section 7.2.1 of RFC 3447. In that
+ context, other parameters ('h', 'mgf', 'l') are not used.
+
+ - 'oaep': the message 'm' is applied the RSAES-OAEP-ENCRYPT encryption
+ scheme, as described in PKCS#1 v2.1, i.e. RFC 3447 Sect
+ 7.1.1. In that context,
+
+ o 'h' parameter provides the name of the hash method to use.
+ Possible values are "md2", "md4", "md5", "sha1", "tls",
+ "sha224", "sha256", "sha384" and "sha512". if none is provided,
+ sha1 is used.
+
+ o 'mgf' is the mask generation function. By default, mgf
+ is derived from the provided hash function using the
+ generic MGF1 (see pkcs_mgf1() for details).
+
+ o 'L' is the optional label to be associated with the
+ message. If not provided, the default value is used, i.e
+ the empty string. No check is done on the input limitation
+ of the hash function regarding the size of 'L' (for
+ instance, 2^61 - 1 for SHA-1). You have been warned.
+ """
+
+ if t is None: # Raw encryption
+ m = pkcs_os2ip(m)
+ c = self._rsaep(m)
+ return pkcs_i2osp(c, self.modulusLen/8)
+
+ elif t == "pkcs":
+ return self._rsaes_pkcs1_v1_5_encrypt(m)
+
+ elif t == "oaep":
+ return self._rsaes_oaep_encrypt(m, h, mgf, L)
+
+ else:
+ warning("Key.encrypt(): Unknown encryption type (%s) provided" % t)
+ return None
+
+ ### Below are verification related methods
+
+ def _rsavp1(self, s):
+ """
+ Internal method providing raw RSA verification, i.e. simple modular
+ exponentiation of the given signature representative 'c', an integer
+ between 0 and n-1.
+
+ This is the signature verification primitive RSAVP1 described in
+ PKCS#1 v2.1, i.e. RFC 3447 Sect. 5.2.2.
+
+ Input:
+ s: signature representative, an integer between 0 and n-1,
+ where n is the key modulus.
+
+ Output:
+ message representative, an integer between 0 and n-1
+
+ Not intended to be used directly. Please, see verify() method.
+ """
+ return self._rsaep(s)
+
+ def _rsassa_pss_verify(self, M, S, h=None, mgf=None, sLen=None):
+ """
+ Implements RSASSA-PSS-VERIFY() function described in Sect 8.1.2
+ of RFC 3447
+
+ Input:
+ M: message whose signature is to be verified
+ S: signature to be verified, an octet string of length k, where k
+ is the length in octets of the RSA modulus n.
+
+ Output:
+ True is the signature is valid. False otherwise.
+ """
+
+ # Set default parameters if not provided
+ if h is None: # By default, sha1
+ h = "sha1"
+ if not h in _hashFuncParams:
+ warning("Key._rsassa_pss_verify(): unknown hash function "
+ "provided (%s)" % h)
+ return False
+ if mgf is None: # use mgf1 with underlying hash function
+ mgf = lambda x,y: pkcs_mgf1(x, y, h)
+ if sLen is None: # use Hash output length (A.2.3 of RFC 3447)
+ hLen = _hashFuncParams[h][0]
+ sLen = hLen
+
+ # 1) Length checking
+ modBits = self.modulusLen
+ k = modBits / 8
+ if len(S) != k:
+ return False
+
+ # 2) RSA verification
+ s = pkcs_os2ip(S) # 2.a)
+ m = self._rsavp1(s) # 2.b)
+ emLen = math.ceil((modBits - 1) / 8.) # 2.c)
+ EM = pkcs_i2osp(m, emLen)
+
+ # 3) EMSA-PSS verification
+ Result = pkcs_emsa_pss_verify(M, EM, modBits - 1, h, mgf, sLen)
+
+ return Result # 4)
+
+
+ def _rsassa_pkcs1_v1_5_verify(self, M, S, h):
+ """
+ Implements RSASSA-PKCS1-v1_5-VERIFY() function as described in
+ Sect. 8.2.2 of RFC 3447.
+
+ Input:
+ M: message whose signature is to be verified, an octet string
+ S: signature to be verified, an octet string of length k, where
+ k is the length in octets of the RSA modulus n
+ h: hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
+ 'sha256', 'sha384').
+
+ Output:
+ True if the signature is valid. False otherwise.
+ """
+
+ # 1) Length checking
+ k = self.modulusLen / 8
+ if len(S) != k:
+ warning("invalid signature (len(S) != k)")
+ return False
+
+ # 2) RSA verification
+ s = pkcs_os2ip(S) # 2.a)
+ m = self._rsavp1(s) # 2.b)
+ EM = pkcs_i2osp(m, k) # 2.c)
+
+ # 3) EMSA-PKCS1-v1_5 encoding
+ EMPrime = pkcs_emsa_pkcs1_v1_5_encode(M, k, h)
+ if EMPrime is None:
+ warning("Key._rsassa_pkcs1_v1_5_verify(): unable to encode.")
+ return False
+
+ # 4) Comparison
+ return EM == EMPrime
+
+
+ def verify(self, M, S, t=None, h=None, mgf=None, sLen=None):
+ """
+ Verify alleged signature 'S' is indeed the signature of message 'M' using
+ 't' signature scheme where 't' can be:
+
+ - None: the alleged signature 'S' is directly applied the RSAVP1 signature
+ primitive, as described in PKCS#1 v2.1, i.e. RFC 3447 Sect
+ 5.2.1. Simply put, the provided signature is applied a moular
+ exponentiation using the public key. Then, a comparison of the
+ result is done against 'M'. On match, True is returned.
+ Additionnal method parameters are just ignored.
+
+ - 'pkcs': the alleged signature 'S' and message 'M' are applied
+ RSASSA-PKCS1-v1_5-VERIFY signature verification scheme as
+ described in Sect. 8.2.2 of RFC 3447. In that context,
+ the hash function name is passed using 'h'. Possible values are
+ "md2", "md4", "md5", "sha1", "tls", "sha224", "sha256", "sha384"
+ and "sha512". If none is provided, sha1 is used. Other additionnal
+ parameters are ignored.
+
+ - 'pss': the alleged signature 'S' and message 'M' are applied
+ RSASSA-PSS-VERIFY signature scheme as described in Sect. 8.1.2.
+ of RFC 3447. In that context,
+
+ o 'h' parameter provides the name of the hash method to use.
+ Possible values are "md2", "md4", "md5", "sha1", "tls", "sha224",
+ "sha256", "sha384" and "sha512". if none is provided, sha1
+ is used.
+
+ o 'mgf' is the mask generation function. By default, mgf
+ is derived from the provided hash function using the
+ generic MGF1 (see pkcs_mgf1() for details).
+
+ o 'sLen' is the length in octet of the salt. You can overload the
+ default value (the octet length of the hash value for provided
+ algorithm) by providing another one with that parameter.
+ """
+ if t is None: # RSAVP1
+ S = pkcs_os2ip(S)
+ n = self.modulus
+ if S > n-1:
+ warning("Signature to be verified is too long for key modulus")
+ return False
+ m = self._rsavp1(S)
+ if m is None:
+ return False
+ l = int(math.ceil(math.log(m, 2) / 8.)) # Hack
+ m = pkcs_i2osp(m, l)
+ return M == m
+
+ elif t == "pkcs": # RSASSA-PKCS1-v1_5-VERIFY
+ if h is None:
+ h = "sha1"
+ return self._rsassa_pkcs1_v1_5_verify(M, S, h)
+
+ elif t == "pss": # RSASSA-PSS-VERIFY
+ return self._rsassa_pss_verify(M, S, h, mgf, sLen)
+
+ else:
+ warning("Key.verify(): Unknown signature type (%s) provided" % t)
+ return None
+
+class _DecryptAndSignMethods(OSSLHelper):
+ ### Below are decryption related methods. Encryption ones are inherited
+ ### from PubKey
+
+ def _rsadp(self, c):
+ """
+ Internal method providing raw RSA decryption, i.e. simple modular
+ exponentiation of the given ciphertext representative 'c', a long
+ between 0 and n-1.
+
+ This is the decryption primitive RSADP described in PKCS#1 v2.1,
+ i.e. RFC 3447 Sect. 5.1.2.
+
+ Input:
+ c: ciphertest representative, a long between 0 and n-1, where
+ n is the key modulus.
+
+ Output:
+ ciphertext representative, a long between 0 and n-1
+
+ Not intended to be used directly. Please, see encrypt() method.
+ """
+
+ n = self.modulus
+ if type(c) is int:
+ c = long(c)
+ if type(c) is not long or c > n-1:
+ warning("Key._rsaep() expects a long between 0 and n-1")
+ return None
+
+ return self.key.decrypt(c)
+
+
+ def _rsaes_pkcs1_v1_5_decrypt(self, C):
+ """
+ Implements RSAES-PKCS1-V1_5-DECRYPT() function described in section
+ 7.2.2 of RFC 3447.
+
+ Input:
+ C: ciphertext to be decrypted, an octet string of length k, where
+ k is the length in octets of the RSA modulus n.
+
+ Output:
+ an octet string of length k at most k - 11
+
+ on error, None is returned.
+ """
+
+ # 1) Length checking
+ cLen = len(C)
+ k = self.modulusLen / 8
+ if cLen != k or k < 11:
+ warning("Key._rsaes_pkcs1_v1_5_decrypt() decryption error "
+ "(cLen != k or k < 11)")
+ return None
+
+ # 2) RSA decryption
+ c = pkcs_os2ip(C) # 2.a)
+ m = self._rsadp(c) # 2.b)
+ EM = pkcs_i2osp(m, k) # 2.c)
+
+ # 3) EME-PKCS1-v1_5 decoding
+
+ # I am aware of the note at the end of 7.2.2 regarding error
+ # conditions reporting but the one provided below are for _local_
+ # debugging purposes. --arno
+
+ if EM[0] != '\x00':
+ warning("Key._rsaes_pkcs1_v1_5_decrypt(): decryption error "
+ "(first byte is not 0x00)")
+ return None
+
+ if EM[1] != '\x02':
+ warning("Key._rsaes_pkcs1_v1_5_decrypt(): decryption error "
+ "(second byte is not 0x02)")
+ return None
+
+ tmp = EM[2:].split('\x00', 1)
+ if len(tmp) != 2:
+ warning("Key._rsaes_pkcs1_v1_5_decrypt(): decryption error "
+ "(no 0x00 to separate PS from M)")
+ return None
+
+ PS, M = tmp
+ if len(PS) < 8:
+ warning("Key._rsaes_pkcs1_v1_5_decrypt(): decryption error "
+ "(PS is less than 8 byte long)")
+ return None
+
+ return M # 4)
+
+
+ def _rsaes_oaep_decrypt(self, C, h=None, mgf=None, L=None):
+ """
+ Internal method providing RSAES-OAEP-DECRYPT as defined in Sect.
+ 7.1.2 of RFC 3447. Not intended to be used directly. Please, see
+ encrypt() method for type "OAEP".
+
+
+ Input:
+ C : ciphertext to be decrypted, an octet string of length k, where
+ k = 2*hLen + 2 (k denotes the length in octets of the RSA modulus
+ and hLen the length in octets of the hash function output)
+ h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
+ 'sha256', 'sha384'). 'sha1' is used if none is provided.
+ mgf: the mask generation function f : seed, maskLen -> mask
+ L : optional label whose association with the message is to be
+ verified; the default value for L, if not provided is the empty
+ string.
+
+ Output:
+ message, an octet string of length k mLen, where mLen <= k - 2*hLen - 2
+
+ On error, None is returned.
+ """
+ # The steps below are the one described in Sect. 7.1.2 of RFC 3447.
+
+ # 1) Length Checking
+ # 1.a) is not done
+ if h is None:
+ h = "sha1"
+ if not h in _hashFuncParams:
+ warning("Key._rsaes_oaep_decrypt(): unknown hash function %s.", h)
+ return None
+ hLen = _hashFuncParams[h][0]
+ hFun = _hashFuncParams[h][1]
+ k = self.modulusLen / 8
+ cLen = len(C)
+ if cLen != k: # 1.b)
+ warning("Key._rsaes_oaep_decrypt(): decryption error. "
+ "(cLen != k)")
+ return None
+ if k < 2*hLen + 2:
+ warning("Key._rsaes_oaep_decrypt(): decryption error. "
+ "(k < 2*hLen + 2)")
+ return None
+
+ # 2) RSA decryption
+ c = pkcs_os2ip(C) # 2.a)
+ m = self._rsadp(c) # 2.b)
+ EM = pkcs_i2osp(m, k) # 2.c)
+
+ # 3) EME-OAEP decoding
+ if L is None: # 3.a)
+ L = ""
+ lHash = hFun(L)
+ Y = EM[:1] # 3.b)
+ if Y != '\x00':
+ warning("Key._rsaes_oaep_decrypt(): decryption error. "
+ "(Y is not zero)")
+ return None
+ maskedSeed = EM[1:1+hLen]
+ maskedDB = EM[1+hLen:]
+ if mgf is None:
+ mgf = lambda x,y: pkcs_mgf1(x, y, h)
+ seedMask = mgf(maskedDB, hLen) # 3.c)
+ seed = strxor(maskedSeed, seedMask) # 3.d)
+ dbMask = mgf(seed, k - hLen - 1) # 3.e)
+ DB = strxor(maskedDB, dbMask) # 3.f)
+
+ # I am aware of the note at the end of 7.1.2 regarding error
+ # conditions reporting but the one provided below are for _local_
+ # debugging purposes. --arno
+
+ lHashPrime = DB[:hLen] # 3.g)
+ tmp = DB[hLen:].split('\x01', 1)
+ if len(tmp) != 2:
+ warning("Key._rsaes_oaep_decrypt(): decryption error. "
+ "(0x01 separator not found)")
+ return None
+ PS, M = tmp
+ if PS != '\x00'*len(PS):
+ warning("Key._rsaes_oaep_decrypt(): decryption error. "
+ "(invalid padding string)")
+ return None
+ if lHash != lHashPrime:
+ warning("Key._rsaes_oaep_decrypt(): decryption error. "
+ "(invalid hash)")
+ return None
+ return M # 4)
+
+
    def decrypt(self, C, t=None, h=None, mgf=None, L=None):
        """
        Decrypt ciphertext 'C' using 't' decryption scheme where 't' can be:

        - None: the ciphertext 'C' is directly applied the RSADP decryption
                primitive, as described in PKCS#1 v2.1, i.e. RFC 3447
                Sect 5.1.2. Simply put, the message undergoes a modular
                exponentiation using the private key. Additional method
                parameters are just ignored.

        - 'pkcs': the ciphertext 'C' is applied RSAES-PKCS1-V1_5-DECRYPT
                decryption scheme as described in section 7.2.2 of RFC 3447.
                In that context, other parameters ('h', 'mgf', 'L') are not
                used.

        - 'oaep': the ciphertext 'C' is applied the RSAES-OAEP-DECRYPT decryption
                scheme, as described in PKCS#1 v2.1, i.e. RFC 3447 Sect
                7.1.2. In that context,

                o 'h' parameter provides the name of the hash method to use.
                  Possible values are "md2", "md4", "md5", "sha1", "tls",
                  "sha224", "sha256", "sha384" and "sha512". If none is
                  provided, sha1 is used by default.

                o 'mgf' is the mask generation function. By default, mgf
                  is derived from the provided hash function using the
                  generic MGF1 (see pkcs_mgf1() for details).

                o 'L' is the optional label to be associated with the
                  message. If not provided, the default value is used, i.e
                  the empty string. No check is done on the input limitation
                  of the hash function regarding the size of 'L' (for
                  instance, 2^61 - 1 for SHA-1). You have been warned.

        Returns the decrypted octet string, or None on error (an unknown
        't' or a failure inside the selected scheme).
        """
        if t is None:
            C = pkcs_os2ip(C)
            c = self._rsadp(C)
            # Octet length derived via math.log — acknowledged hack: it
            # raises for c == 0 and may be one byte short for values that
            # are exact powers of 2.
            l = int(math.ceil(math.log(c, 2) / 8.)) # Hack
            return pkcs_i2osp(c, l)

        elif t == "pkcs":
            return self._rsaes_pkcs1_v1_5_decrypt(C)

        elif t == "oaep":
            return self._rsaes_oaep_decrypt(C, h, mgf, L)

        else:
            warning("Key.decrypt(): Unknown decryption type (%s) provided" % t)
            return None
+
+ ### Below are signature related methods. Verification ones are inherited from
+ ### PubKey
+
    def _rsasp1(self, m):
        """
        Internal method providing raw RSA signature, i.e. simple modular
        exponentiation of the given message representative 'm', an integer
        between 0 and n-1.

        This is the signature primitive RSASP1 described in PKCS#1 v2.1,
        i.e. RFC 3447 Sect. 5.2.1.

        Input:
           m: message representative, an integer between 0 and n-1, where
              n is the key modulus.

        Output:
           signature representative, an integer between 0 and n-1

        Not intended to be used directly. Please, see sign() method.
        """
        # RSASP1 is mathematically the same private-key modular
        # exponentiation as RSADP, so we simply delegate to it.
        return self._rsadp(m)
+
+
+ def _rsassa_pss_sign(self, M, h=None, mgf=None, sLen=None):
+ """
+ Implements RSASSA-PSS-SIGN() function described in Sect. 8.1.1 of
+ RFC 3447.
+
+ Input:
+ M: message to be signed, an octet string
+
+ Output:
+ signature, an octet string of length k, where k is the length in
+ octets of the RSA modulus n.
+
+ On error, None is returned.
+ """
+
+ # Set default parameters if not provided
+ if h is None: # By default, sha1
+ h = "sha1"
+ if not h in _hashFuncParams:
+ warning("Key._rsassa_pss_sign(): unknown hash function "
+ "provided (%s)" % h)
+ return None
+ if mgf is None: # use mgf1 with underlying hash function
+ mgf = lambda x,y: pkcs_mgf1(x, y, h)
+ if sLen is None: # use Hash output length (A.2.3 of RFC 3447)
+ hLen = _hashFuncParams[h][0]
+ sLen = hLen
+
+ # 1) EMSA-PSS encoding
+ modBits = self.modulusLen
+ k = modBits / 8
+ EM = pkcs_emsa_pss_encode(M, modBits - 1, h, mgf, sLen)
+ if EM is None:
+ warning("Key._rsassa_pss_sign(): unable to encode")
+ return None
+
+ # 2) RSA signature
+ m = pkcs_os2ip(EM) # 2.a)
+ s = self._rsasp1(m) # 2.b)
+ S = pkcs_i2osp(s, k) # 2.c)
+
+ return S # 3)
+
+
+ def _rsassa_pkcs1_v1_5_sign(self, M, h):
+ """
+ Implements RSASSA-PKCS1-v1_5-SIGN() function as described in
+ Sect. 8.2.1 of RFC 3447.
+
+ Input:
+ M: message to be signed, an octet string
+ h: hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls'
+ 'sha256', 'sha384').
+
+ Output:
+ the signature, an octet string.
+ """
+
+ # 1) EMSA-PKCS1-v1_5 encoding
+ k = self.modulusLen / 8
+ EM = pkcs_emsa_pkcs1_v1_5_encode(M, k, h)
+ if EM is None:
+ warning("Key._rsassa_pkcs1_v1_5_sign(): unable to encode")
+ return None
+
+ # 2) RSA signature
+ m = pkcs_os2ip(EM) # 2.a)
+ s = self._rsasp1(m) # 2.b)
+ S = pkcs_i2osp(s, k) # 2.c)
+
+ return S # 3)
+
+
    def sign(self, M, t=None, h=None, mgf=None, sLen=None):
        """
        Sign message 'M' using 't' signature scheme where 't' can be:

        - None: the message 'M' is directly applied the RSASP1 signature
                primitive, as described in PKCS#1 v2.1, i.e. RFC 3447 Sect
                5.2.1. Simply put, the message undergoes a modular
                exponentiation using the private key. Additional method
                parameters are just ignored.

        - 'pkcs': the message 'M' is applied RSASSA-PKCS1-v1_5-SIGN signature
                scheme as described in Sect. 8.2.1 of RFC 3447. In that context,
                the hash function name is passed using 'h'. Possible values are
                "md2", "md4", "md5", "sha1", "tls", "sha224", "sha256", "sha384"
                and "sha512". If none is provided, sha1 is used. Other additional
                parameters are ignored.

        - 'pss' : the message 'M' is applied RSASSA-PSS-SIGN signature scheme as
                described in Sect. 8.1.1. of RFC 3447. In that context,

                o 'h' parameter provides the name of the hash method to use.
                  Possible values are "md2", "md4", "md5", "sha1", "tls", "sha224",
                  "sha256", "sha384" and "sha512". If none is provided, sha1
                  is used.

                o 'mgf' is the mask generation function. By default, mgf
                  is derived from the provided hash function using the
                  generic MGF1 (see pkcs_mgf1() for details).

                o 'sLen' is the length in octet of the salt. You can overload the
                  default value (the octet length of the hash value for provided
                  algorithm) by providing another one with that parameter.

        Returns the signature as an octet string, or None on error.
        """

        if t is None: # RSASP1
            M = pkcs_os2ip(M)
            n = self.modulus
            if M > n-1:
                warning("Message to be signed is too long for key modulus")
                return None
            s = self._rsasp1(M)
            if s is None:
                return None
            return pkcs_i2osp(s, self.modulusLen/8)

        elif t == "pkcs": # RSASSA-PKCS1-v1_5-SIGN
            if h is None:
                h = "sha1"
            return self._rsassa_pkcs1_v1_5_sign(M, h)

        elif t == "pss": # RSASSA-PSS-SIGN
            return self._rsassa_pss_sign(M, h, mgf, sLen)

        else:
            warning("Key.sign(): Unknown signature type (%s) provided" % t)
            return None
+
+
def openssl_parse_RSA(fmt="PEM"):
    """Spawn 'openssl rsa -text' on a public key in format 'fmt' and return
    the popen3() pipe triple (stdout, stdin, stderr)."""
    return popen3(['openssl', 'rsa', '-text', '-pubin', '-inform', fmt, '-noout'])

def openssl_convert_RSA(infmt="PEM", outfmt="DER"):
    """Return the openssl argv list converting a public RSA key from
    'infmt' to 'outfmt' (suitable for _apply_ossl_cmd())."""
    return ['openssl', 'rsa', '-pubin', '-inform', infmt, '-outform', outfmt]

# Bug fix: PubKey.__init__ calls openssl_convert_RSA_cmd(), a name that was
# never defined (only openssl_convert_RSA existed), causing a NameError on
# every PEM/DER public key import. Provide the expected name as an alias.
openssl_convert_RSA_cmd = openssl_convert_RSA
+
class PubKey(OSSLHelper, _EncryptAndVerify):
    """
    RSA public key imported from a PEM or DER file (or raw string) by
    driving the openssl command line tool. Encryption and verification
    methods come from the _EncryptAndVerify mixin; they rely on the
    modulus / modulusLen / pubExp / key attributes set up here.
    """
    # Below are the fields we recognize in the -text output of openssl
    # and from which we extract information. We expect them in that
    # order. Number of spaces does matter.
    possible_fields = [ "Modulus (",
                        "Exponent:" ]
    possible_fields_count = len(possible_fields)

    def __init__(self, keypath):
        # 'keypath' may be a file path or the raw key content itself;
        # a '\x00' byte in the string rules out a file path.
        error_msg = "Unable to import key."

        # XXX Temporary hack to use PubKey inside Cert: a (e, m, mLen)
        # tuple bypasses all parsing.
        if type(keypath) is tuple:
            e, m, mLen = keypath
            self.modulus = m
            self.modulusLen = mLen
            self.pubExp = e
            return

        fields_dict = {}
        for k in self.possible_fields:
            fields_dict[k] = None

        self.keypath = None
        rawkey = None

        if (not '\x00' in keypath) and os.path.isfile(keypath): # file
            self.keypath = keypath
            key_size = os.path.getsize(keypath)
            if key_size > MAX_KEY_SIZE:   # refuse suspiciously large files
                raise Exception(error_msg)
            try:
                f = open(keypath)
                rawkey = f.read()
                f.close()
            except:
                raise Exception(error_msg)
        else:
            rawkey = keypath

        if rawkey is None:
            raise Exception(error_msg)

        self.rawkey = rawkey

        # Let's try to get file format : PEM or DER.
        key_header = "-----BEGIN PUBLIC KEY-----"
        key_footer = "-----END PUBLIC KEY-----"
        l = rawkey.split(key_header, 1)
        if len(l) == 2: # looks like PEM
            tmp = l[1]
            l = tmp.split(key_footer, 1)
            if len(l) == 2:
                tmp = l[0]
                rawkey = "%s%s%s\n" % (key_header, tmp, key_footer)
            else:
                raise Exception(error_msg)
            r,w,e = openssl_parse_RSA("PEM")
            w.write(rawkey)
            w.close()
            textkey = r.read()
            r.close()
            res = e.read()
            e.close()
            if res == '':   # empty stderr: openssl accepted the input
                self.format = "PEM"
                self.pemkey = rawkey
                self.textkey = textkey
                # Bug fix: original called openssl_convert_RSA_cmd(), a
                # name that is not defined anywhere (the helper above is
                # openssl_convert_RSA), raising NameError on key import.
                cmd = openssl_convert_RSA("PEM", "DER")
                self.derkey = self._apply_ossl_cmd(cmd, rawkey)
            else:
                raise Exception(error_msg)
        else: # not PEM, try DER
            r,w,e = openssl_parse_RSA("DER")
            w.write(rawkey)
            w.close()
            textkey = r.read()
            r.close()
            res = e.read()
            if res == '':
                self.format = "DER"
                self.derkey = rawkey
                self.textkey = textkey
                cmd = openssl_convert_RSA("DER", "PEM")
                self.pemkey = self._apply_ossl_cmd(cmd, rawkey)
                cmd = openssl_convert_RSA("DER", "DER")
                self.derkey = self._apply_ossl_cmd(cmd, rawkey)
            else:
                try: # Perhaps it is a cert
                    c = Cert(keypath)
                except:
                    raise Exception(error_msg)
                # TODO:
                # Reconstruct a key (der and pem) and provide:
                #  self.format
                #  self.derkey
                #  self.pemkey
                #  self.textkey
                #  self.keypath
                # NOTE(review): until that TODO is done, this branch falls
                # through with self.format unset and the line below raises
                # AttributeError — confirm intended behavior.

        self.osslcmdbase = ['openssl', 'rsa', '-pubin', '-inform', self.format]

        self.keypath = keypath

        # Parse the -text output of openssl to make things available.
        # The loop walks the output line by line, accumulating the text
        # that follows each recognized field into fields_dict.
        l = self.textkey.split('\n', 1)
        if len(l) != 2:
            raise Exception(error_msg)
        cur, tmp = l
        i = 0
        k = self.possible_fields[i] # Modulus (
        cur = cur[len(k):] + '\n'
        while k:
            l = tmp.split('\n', 1)
            if len(l) != 2: # Over
                fields_dict[k] = cur
                break
            l, tmp = l

            newkey = 0
            # skip fields we have already seen, this is the purpose of 'i'
            for j in range(i, self.possible_fields_count):
                f = self.possible_fields[j]
                if l.startswith(f):
                    fields_dict[k] = cur
                    cur = l[len(f):] + '\n'
                    k = f
                    newkey = 1
                    i = j+1
                    break
            if newkey == 1:
                continue
            cur += l + '\n'

        # modulus and modulus length
        v = fields_dict["Modulus ("]
        self.modulusLen = None
        if v:
            v, rem = v.split(' bit):', 1)
            self.modulusLen = int(v)
            rem = rem.replace('\n','').replace(' ','').replace(':','')
            self.modulus = long(rem, 16)
        if self.modulus is None:
            raise Exception(error_msg)

        # public exponent
        v = fields_dict["Exponent:"]
        self.pubExp = None
        if v:
            self.pubExp = long(v.split('(', 1)[0])
        if self.pubExp is None:
            raise Exception(error_msg)

        self.key = RSA.construct((self.modulus, self.pubExp, ))

    def __str__(self):
        return self.derkey
+
+
class Key(_DecryptAndSignMethods, _EncryptAndVerify):
    """
    RSA private key imported from a PEM or DER file (or raw string) by
    driving the openssl command line tool. Decryption/signature come from
    _DecryptAndSignMethods and encryption/verification from
    _EncryptAndVerify; both rely on the attributes parsed here.
    """
    # Below are the fields we recognize in the -text output of openssl
    # and from which we extract information. We expect them in that
    # order. Number of spaces does matter.
    possible_fields = [ "Private-Key: (",
                        "modulus:",
                        "publicExponent:",
                        "privateExponent:",
                        "prime1:",
                        "prime2:",
                        "exponent1:",
                        "exponent2:",
                        "coefficient:" ]
    possible_fields_count = len(possible_fields)

    def __init__(self, keypath):
        # 'keypath' may be a file path or the raw key content itself;
        # a '\x00' byte in the string rules out a file path.
        error_msg = "Unable to import key."

        fields_dict = {}
        for k in self.possible_fields:
            fields_dict[k] = None

        self.keypath = None
        rawkey = None

        if (not '\x00' in keypath) and os.path.isfile(keypath):
            self.keypath = keypath
            key_size = os.path.getsize(keypath)
            if key_size > MAX_KEY_SIZE:   # refuse suspiciously large files
                raise Exception(error_msg)
            try:
                f = open(keypath)
                rawkey = f.read()
                f.close()
            except:
                raise Exception(error_msg)
        else:
            rawkey = keypath

        if rawkey is None:
            raise Exception(error_msg)

        self.rawkey = rawkey

        # Let's try to get file format : PEM or DER.
        fmtstr = 'openssl rsa -text -inform %s -noout'
        convertstr = 'openssl rsa -inform %s -outform %s'
        key_header = "-----BEGIN RSA PRIVATE KEY-----"
        key_footer = "-----END RSA PRIVATE KEY-----"
        l = rawkey.split(key_header, 1)
        if len(l) == 2: # looks like PEM
            tmp = l[1]
            l = tmp.split(key_footer, 1)
            if len(l) == 2:
                tmp = l[0]
                rawkey = "%s%s%s\n" % (key_header, tmp, key_footer)
            else:
                raise Exception(error_msg)
            r,w,e = popen3((fmtstr % "PEM").split(" "))
            w.write(rawkey)
            w.close()
            textkey = r.read()
            r.close()
            res = e.read()
            e.close()
            if res == '':   # empty stderr: openssl accepted the PEM input
                self.format = "PEM"
                self.pemkey = rawkey
                self.textkey = textkey
                cmd = (convertstr % ("PEM", "DER")).split(" ")
                self.derkey = self._apply_ossl_cmd(cmd, rawkey)
            else:
                raise Exception(error_msg)
        else: # not PEM, try DER
            r,w,e = popen3((fmtstr % "DER").split(" "))
            w.write(rawkey)
            w.close()
            textkey = r.read()
            r.close()
            res = e.read()
            if res == '':
                self.format = "DER"
                self.derkey = rawkey
                self.textkey = textkey
                cmd = (convertstr % ("DER", "PEM")).split(" ")
                self.pemkey = self._apply_ossl_cmd(cmd, rawkey)
                cmd = (convertstr % ("DER", "DER")).split(" ")
                self.derkey = self._apply_ossl_cmd(cmd, rawkey)
            else:
                raise Exception(error_msg)

        self.osslcmdbase = ['openssl', 'rsa', '-inform', self.format]

        # Sanity check: the DER blob must be parsable ASN.1 (non-empty
        # stderr from asn1parse means it is not). The dump is kept around.
        r,w,e = popen3(["openssl", "asn1parse", "-inform", "DER"])
        w.write(self.derkey)
        w.close()
        self.asn1parsekey = r.read()
        r.close()
        res = e.read()
        e.close()
        if res != '':
            raise Exception(error_msg)

        self.keypath = keypath

        # Parse the -text output of openssl to make things available.
        # The loop walks the output line by line, accumulating the text
        # that follows each recognized field into fields_dict.
        l = self.textkey.split('\n', 1)
        if len(l) != 2:
            raise Exception(error_msg)
        cur, tmp = l
        i = 0
        k = self.possible_fields[i] # Private-Key: (
        cur = cur[len(k):] + '\n'
        while k:
            l = tmp.split('\n', 1)
            if len(l) != 2: # Over
                fields_dict[k] = cur
                break
            l, tmp = l

            newkey = 0
            # skip fields we have already seen, this is the purpose of 'i'
            for j in range(i, self.possible_fields_count):
                f = self.possible_fields[j]
                if l.startswith(f):
                    fields_dict[k] = cur
                    cur = l[len(f):] + '\n'
                    k = f
                    newkey = 1
                    i = j+1
                    break
            if newkey == 1:
                continue
            cur += l + '\n'

        # modulus length
        v = fields_dict["Private-Key: ("]
        self.modulusLen = None
        if v:
            self.modulusLen = int(v.split(' bit', 1)[0])
        if self.modulusLen is None:
            raise Exception(error_msg)

        # public exponent
        v = fields_dict["publicExponent:"]
        self.pubExp = None
        if v:
            self.pubExp = long(v.split('(', 1)[0])
        if self.pubExp is None:
            raise Exception(error_msg)

        # All remaining numeric fields are hex dumps: strip the layout
        # characters and convert.
        tmp = {}
        for k in ["modulus:", "privateExponent:", "prime1:", "prime2:",
                  "exponent1:", "exponent2:", "coefficient:"]:
            v = fields_dict[k]
            if v:
                s = v.replace('\n', '').replace(' ', '').replace(':', '')
                tmp[k] = long(s, 16)
            else:
                raise Exception(error_msg)

        self.modulus = tmp["modulus:"]
        self.privExp = tmp["privateExponent:"]
        self.prime1 = tmp["prime1:"]
        self.prime2 = tmp["prime2:"]
        self.exponent1 = tmp["exponent1:"]
        self.exponent2 = tmp["exponent2:"]
        self.coefficient = tmp["coefficient:"]

        self.key = RSA.construct((self.modulus, self.pubExp, self.privExp))

    def __str__(self):
        return self.derkey
+
+
# Cert provides the same encryption and verification methods as PubKey
# (through the shared _EncryptAndVerify mixin). To have that working, we
# simply need Cert to provide modulusLen and key attributes.
# XXX Yes, it is a hack.
class Cert(OSSLHelper, _EncryptAndVerify):
    """
    X.509 certificate imported from a PEM or DER file (or raw string) by
    driving the openssl command line tool. Exposes the parsed fields
    (issuer, subject, validity, modulus, ...) plus chain construction,
    signature/chain verification and revocation-check helpers.
    """
    # Below are the fields we recognize in the -text output of openssl
    # and from which we extract information. We expect them in that
    # order. Number of spaces does matter.
    possible_fields = [ " Version:",
                        " Serial Number:",
                        " Signature Algorithm:",
                        " Issuer:",
                        " Not Before:",
                        " Not After :",
                        " Subject:",
                        " Public Key Algorithm:",
                        " Modulus (",
                        " Exponent:",
                        " X509v3 Subject Key Identifier:",
                        " X509v3 Authority Key Identifier:",
                        " keyid:",
                        " DirName:",
                        " serial:",
                        " X509v3 Basic Constraints:",
                        " X509v3 Key Usage:",
                        " X509v3 Extended Key Usage:",
                        " X509v3 CRL Distribution Points:",
                        " Authority Information Access:",
                        " Signature Algorithm:" ]
    possible_fields_count = len(possible_fields)

    def __init__(self, certpath):
        # 'certpath' may be a file path or the raw certificate content
        # itself; a '\x00' byte in the string rules out a file path.
        error_msg = "Unable to import certificate."

        fields_dict = {}
        for k in self.possible_fields:
            fields_dict[k] = None

        self.certpath = None
        rawcert = None

        if (not '\x00' in certpath) and os.path.isfile(certpath): # file
            self.certpath = certpath
            cert_size = os.path.getsize(certpath)
            if cert_size > MAX_CERT_SIZE:   # refuse suspiciously large files
                raise Exception(error_msg)
            try:
                f = open(certpath)
                rawcert = f.read()
                f.close()
            except:
                raise Exception(error_msg)
        else:
            rawcert = certpath

        if rawcert is None:
            raise Exception(error_msg)

        self.rawcert = rawcert

        # Let's try to get file format : PEM or DER.
        fmtstr = 'openssl x509 -text -inform %s -noout'
        convertstr = 'openssl x509 -inform %s -outform %s'
        cert_header = "-----BEGIN CERTIFICATE-----"
        cert_footer = "-----END CERTIFICATE-----"
        l = rawcert.split(cert_header, 1)
        if len(l) == 2: # looks like PEM
            tmp = l[1]
            l = tmp.split(cert_footer, 1)
            if len(l) == 2:
                tmp = l[0]
                rawcert = "%s%s%s\n" % (cert_header, tmp, cert_footer)
            else:
                raise Exception(error_msg)
            r,w,e = popen3((fmtstr % "PEM").split(" "))
            w.write(rawcert)
            w.close()
            textcert = r.read()
            r.close()
            res = e.read()
            e.close()
            if res == '':   # empty stderr: openssl accepted the PEM input
                self.format = "PEM"
                self.pemcert = rawcert
                self.textcert = textcert
                cmd = (convertstr % ("PEM", "DER")).split(" ")
                self.dercert = self._apply_ossl_cmd(cmd, rawcert)
            else:
                raise Exception(error_msg)
        else: # not PEM, try DER
            r,w,e = popen3((fmtstr % "DER").split(" "))
            w.write(rawcert)
            w.close()
            textcert = r.read()
            r.close()
            res = e.read()
            if res == '':
                self.format = "DER"
                self.dercert = rawcert
                self.textcert = textcert
                cmd = (convertstr % ("DER", "PEM")).split(" ")
                self.pemcert = self._apply_ossl_cmd(cmd, rawcert)
                cmd = (convertstr % ("DER", "DER")).split(" ")
                self.dercert = self._apply_ossl_cmd(cmd, rawcert)
            else:
                raise Exception(error_msg)

        self.osslcmdbase = ['openssl', 'x509', '-inform', self.format]

        # Sanity check: the DER blob must be parsable ASN.1 (non-empty
        # stderr from asn1parse means it is not). The dump is kept around.
        r,w,e = popen3('openssl asn1parse -inform DER'.split(' '))
        w.write(self.dercert)
        w.close()
        self.asn1parsecert = r.read()
        r.close()
        res = e.read()
        e.close()
        if res != '':
            raise Exception(error_msg)

        # Grab _raw_ X509v3 Authority Key Identifier, if any.
        tmp = self.asn1parsecert.split(":X509v3 Authority Key Identifier", 1)
        self.authorityKeyID = None
        if len(tmp) == 2:
            tmp = tmp[1]
            tmp = tmp.split("[HEX DUMP]:", 1)[1]
            self.authorityKeyID=tmp.split('\n',1)[0]

        # Grab _raw_ X509v3 Subject Key Identifier, if any.
        tmp = self.asn1parsecert.split(":X509v3 Subject Key Identifier", 1)
        self.subjectKeyID = None
        if len(tmp) == 2:
            tmp = tmp[1]
            tmp = tmp.split("[HEX DUMP]:", 1)[1]
            self.subjectKeyID=tmp.split('\n',1)[0]

        # Get tbsCertificate using the worst hack. output of asn1parse
        # looks like that:
        #
        # 0:d=0  hl=4 l=1298 cons: SEQUENCE
        # 4:d=1  hl=4 l=1018 cons: SEQUENCE
        # ...
        #
        l1,l2 = self.asn1parsecert.split('\n', 2)[:2]
        hl1 = int(l1.split("hl=",1)[1].split("l=",1)[0])
        rem = l2.split("hl=",1)[1]
        hl2, rem = rem.split("l=",1)
        hl2 = int(hl2)
        l = int(rem.split("cons",1)[0])
        self.tbsCertificate = self.dercert[hl1:hl1+hl2+l]

        # Parse the -text output of openssl to make things available.
        # The loop walks the output line by line, accumulating the text
        # that follows each recognized field into fields_dict. Note that
        # " Signature Algorithm:" appears twice in possible_fields; the
        # value kept is the one from the trailing signature section (its
        # first line is the algorithm, the rest is the signature hexdump).
        tmp = self.textcert.split('\n', 2)[2]
        l = tmp.split('\n', 1)
        if len(l) != 2:
            raise Exception(error_msg)
        cur, tmp = l
        i = 0
        k = self.possible_fields[i] # Version:
        cur = cur[len(k):] + '\n'
        while k:
            l = tmp.split('\n', 1)
            if len(l) != 2: # Over
                fields_dict[k] = cur
                break
            l, tmp = l

            newkey = 0
            # skip fields we have already seen, this is the purpose of 'i'
            for j in range(i, self.possible_fields_count):
                f = self.possible_fields[j]
                if l.startswith(f):
                    fields_dict[k] = cur
                    cur = l[len(f):] + '\n'
                    k = f
                    newkey = 1
                    i = j+1
                    break
            if newkey == 1:
                continue
            cur += l + '\n'

        # version
        v = fields_dict[" Version:"]
        self.version = None
        if v:
            self.version = int(v[1:2])
        if self.version is None:
            raise Exception(error_msg)

        # serial number
        v = fields_dict[" Serial Number:"]
        self.serial = None
        if v:
            v = v.replace('\n', '').strip()
            if "0x" in v:
                v = v.split("0x", 1)[1].split(')', 1)[0]
            v = v.replace(':', '').upper()
            if len(v) % 2:   # normalize to an even number of hex digits
                v = '0' + v
            self.serial = v
        if self.serial is None:
            raise Exception(error_msg)

        # Signature Algorithm
        v = fields_dict[" Signature Algorithm:"]
        self.sigAlg = None
        if v:
            v = v.split('\n',1)[0]
            v = v.strip()
            self.sigAlg = v
        if self.sigAlg is None:
            raise Exception(error_msg)

        # issuer
        v = fields_dict[" Issuer:"]
        self.issuer = None
        if v:
            v = v.split('\n',1)[0]
            v = v.strip()
            self.issuer = v
        if self.issuer is None:
            raise Exception(error_msg)

        # not before
        v = fields_dict[" Not Before:"]
        self.notBefore_str = None
        if v:
            v = v.split('\n',1)[0]
            v = v.strip()
            self.notBefore_str = v
        if self.notBefore_str is None:
            raise Exception(error_msg)
        try:
            # openssl may or may not print the timezone suffix
            self.notBefore = time.strptime(self.notBefore_str,
                                           "%b %d %H:%M:%S %Y %Z")
        except:
            self.notBefore = time.strptime(self.notBefore_str,
                                           "%b %d %H:%M:%S %Y")
        self.notBefore_str_simple = time.strftime("%x", self.notBefore)

        # not after
        v = fields_dict[" Not After :"]
        self.notAfter_str = None
        if v:
            v = v.split('\n',1)[0]
            v = v.strip()
            self.notAfter_str = v
        if self.notAfter_str is None:
            raise Exception(error_msg)
        try:
            self.notAfter = time.strptime(self.notAfter_str,
                                          "%b %d %H:%M:%S %Y %Z")
        except:
            self.notAfter = time.strptime(self.notAfter_str,
                                          "%b %d %H:%M:%S %Y")
        self.notAfter_str_simple = time.strftime("%x", self.notAfter)

        # subject
        v = fields_dict[" Subject:"]
        self.subject = None
        if v:
            v = v.split('\n',1)[0]
            v = v.strip()
            self.subject = v
        if self.subject is None:
            raise Exception(error_msg)

        # Public Key Algorithm
        v = fields_dict[" Public Key Algorithm:"]
        self.pubKeyAlg = None
        if v:
            v = v.split('\n',1)[0]
            v = v.strip()
            self.pubKeyAlg = v
        if self.pubKeyAlg is None:
            raise Exception(error_msg)

        # Modulus
        v = fields_dict[" Modulus ("]
        self.modulus = None
        if v:
            v,t = v.split(' bit):',1)
            self.modulusLen = int(v)
            t = t.replace(' ', '').replace('\n', ''). replace(':', '')
            self.modulus_hexdump = t
            self.modulus = long(t, 16)
        if self.modulus is None:
            raise Exception(error_msg)

        # Exponent
        v = fields_dict[" Exponent:"]
        self.exponent = None
        if v:
            v = v.split('(',1)[0]
            self.exponent = long(v)
        if self.exponent is None:
            raise Exception(error_msg)

        # Public Key instance
        self.key = RSA.construct((self.modulus, self.exponent, ))

        # Subject Key Identifier

        # Authority Key Identifier: keyid, dirname and serial
        self.authorityKeyID_keyid    = None
        self.authorityKeyID_dirname  = None
        self.authorityKeyID_serial   = None
        if self.authorityKeyID: # (hex version already done using asn1parse)
            v = fields_dict[" keyid:"]
            if v:
                v = v.split('\n',1)[0]
                v = v.strip().replace(':', '')
                self.authorityKeyID_keyid = v
            v = fields_dict[" DirName:"]
            if v:
                v = v.split('\n',1)[0]
                self.authorityKeyID_dirname = v
            v = fields_dict[" serial:"]
            if v:
                v = v.split('\n',1)[0]
                v = v.strip().replace(':', '')
                self.authorityKeyID_serial = v

        # Basic constraints
        self.basicConstraintsCritical = False
        self.basicConstraints=None
        v = fields_dict[" X509v3 Basic Constraints:"]
        if v:
            self.basicConstraints = {}
            v,t = v.split('\n',2)[:2]
            if "critical" in v:
                self.basicConstraintsCritical = True
            if "CA:" in t:
                self.basicConstraints["CA"] = t.split('CA:')[1][:4] == "TRUE"
            if "pathlen:" in t:
                self.basicConstraints["pathlen"] = int(t.split('pathlen:')[1])

        # X509v3 Key Usage
        self.keyUsage = []
        v = fields_dict[" X509v3 Key Usage:"]
        if v:
            # man 5 x509v3_config
            ku_mapping = {"Digital Signature": "digitalSignature",
                          "Non Repudiation": "nonRepudiation",
                          "Key Encipherment": "keyEncipherment",
                          "Data Encipherment": "dataEncipherment",
                          "Key Agreement": "keyAgreement",
                          "Certificate Sign": "keyCertSign",
                          "CRL Sign": "cRLSign",
                          "Encipher Only": "encipherOnly",
                          "Decipher Only": "decipherOnly"}
            v = v.split('\n',2)[1]
            l = map(lambda x: x.strip(), v.split(','))
            while l:
                c = l.pop()
                if c in ku_mapping:
                    self.keyUsage.append(ku_mapping[c])
                else:
                    self.keyUsage.append(c) # Add it anyway
                    print("Found unknown X509v3 Key Usage: '%s'" % c)
                    print("Report it to arno (at) natisbad.org for addition")

        # X509v3 Extended Key Usage
        self.extKeyUsage = []
        v = fields_dict[" X509v3 Extended Key Usage:"]
        if v:
            # man 5 x509v3_config:
            eku_mapping = {"TLS Web Server Authentication": "serverAuth",
                           "TLS Web Client Authentication": "clientAuth",
                           "Code Signing": "codeSigning",
                           "E-mail Protection": "emailProtection",
                           "Time Stamping": "timeStamping",
                           "Microsoft Individual Code Signing": "msCodeInd",
                           "Microsoft Commercial Code Signing": "msCodeCom",
                           "Microsoft Trust List Signing": "msCTLSign",
                           "Microsoft Encrypted File System": "msEFS",
                           "Microsoft Server Gated Crypto": "msSGC",
                           "Netscape Server Gated Crypto": "nsSGC",
                           "IPSec End System": "iPsecEndSystem",
                           "IPSec Tunnel": "iPsecTunnel",
                           "IPSec User": "iPsecUser"}
            v = v.split('\n',2)[1]
            l = map(lambda x: x.strip(), v.split(','))
            while l:
                c = l.pop()
                if c in eku_mapping:
                    self.extKeyUsage.append(eku_mapping[c])
                else:
                    self.extKeyUsage.append(c) # Add it anyway
                    print("Found unknown X509v3 Extended Key Usage: '%s'" % c)
                    print("Report it to arno (at) natisbad.org for addition")

        # CRL Distribution points
        self.cRLDistributionPoints = []
        v = fields_dict[" X509v3 CRL Distribution Points:"]
        if v:
            v = v.split("\n\n", 1)[0]
            v = v.split("URI:")[1:]
            # Bug fix: the parsed URIs were stored in a differently-cased
            # attribute (CRLDistributionPoints), leaving the attribute
            # initialized above permanently empty.
            self.cRLDistributionPoints = map(lambda x: x.strip(), v)

        # Authority Information Access: list of tuples ("method", "location")
        self.authorityInfoAccess = []
        v = fields_dict[" Authority Information Access:"]
        if v:
            v = v.split("\n\n", 1)[0]
            v = v.split("\n")[1:]
            for e in v:
                method, location = map(lambda x: x.strip(), e.split(" - ", 1))
                self.authorityInfoAccess.append((method, location))

        # signature field
        v = fields_dict[" Signature Algorithm:" ]
        self.sig = None
        if v:
            v = v.split('\n',1)[1]
            v = v.replace(' ', '').replace('\n', '')
            self.sig = "".join(map(lambda x: chr(int(x, 16)), v.split(':')))
            self.sigLen = len(self.sig)
        if self.sig is None:
            raise Exception(error_msg)

    def isIssuerCert(self, other):
        """
        True if 'other' issued 'self', i.e.:
          - self.issuer == other.subject
          - self is signed by other
        """
        # XXX should be done on raw values, instead of their textual repr
        if self.issuer != other.subject:
            return False

        # Sanity check regarding modulus length and the
        # signature length
        keyLen = (other.modulusLen + 7)/8
        if keyLen != self.sigLen:
            return False

        unenc = other.encrypt(self.sig) # public key encryption, i.e. decrypt

        # XXX Check block type (00 or 01 and type of padding)
        unenc = unenc[1:]
        if not '\x00' in unenc:
            return False
        pos = unenc.index('\x00')
        unenc = unenc[pos+1:]

        # Find the hash function from the signature algorithm name.
        found = None
        for k in _hashFuncParams.keys():
            if self.sigAlg.startswith(k):
                found = k
                break
        if not found:
            return False
        hlen, hfunc, digestInfo =  _hashFuncParams[k]

        if len(unenc) != (hlen+len(digestInfo)):
            return False

        if not unenc.startswith(digestInfo):
            return False

        h = unenc[-hlen:]
        myh = hfunc(self.tbsCertificate)

        return h == myh

    def chain(self, certlist):
        """
        Construct the chain of certificates leading from 'self' to the
        self signed root using the certificates in 'certlist'. If the
        list does not provide all the required certs to go to the root
        the function returns an incomplete chain starting with the
        certificate. This fact can be tested by checking if the last
        certificate of the returned chain is self signed (if c is the
        result, c[-1].isSelfSigned())
        """
        d = {}
        for c in certlist:
            # XXX we should check if we have duplicate
            d[c.subject] = c
        res = [self]
        cur = self
        while not cur.isSelfSigned():
            if cur.issuer in d:
                possible_issuer = d[cur.issuer]
                if cur.isIssuerCert(possible_issuer):
                    res.append(possible_issuer)
                    cur = possible_issuer
                else:
                    break
        return res

    def remainingDays(self, now=None):
        """
        Based on the value of notAfter field, returns the number of
        days the certificate will still be valid. The date used for the
        comparison is the current and local date, as returned by
        time.localtime(), except if 'now' argument is provided another
        one. 'now' argument can be given as either a time tuple or a string
        representing the date. Accepted format for the string version
        are:

         - '%b %d %H:%M:%S %Y %Z' e.g. 'Jan 30 07:38:59 2008 GMT'
         - '%m/%d/%y' e.g. '01/30/08' (less precise)

        If the certificate is no longer valid at the date considered, then
        a negative value is returned representing the number of days
        since it has expired.

        The number of days is returned as a float to deal with the unlikely
        case of certificates that are still just valid.
        """
        if now is None:
            now = time.localtime()
        elif type(now) is str:
            try:
                if '/' in now:
                    now = time.strptime(now, '%m/%d/%y')
                else:
                    now = time.strptime(now, '%b %d %H:%M:%S %Y %Z')
            except:
                warning("Bad time string provided '%s'. Using current time" % now)
                now = time.localtime()

        now = time.mktime(now)
        nft = time.mktime(self.notAfter)
        diff = (nft - now)/(24.*3600)
        return diff

    # return SHA-1 hash of cert embedded public key
    # !! At the moment, the trailing 0 is in the hashed string if any
    def keyHash(self):
        m = self.modulus_hexdump
        res = []
        i = 0
        l = len(m)
        while i<l: # get a string version of modulus
            res.append(struct.pack("B", int(m[i:i+2], 16)))
            i += 2
        return sha.new("".join(res)).digest()

    def output(self, fmt="DER"):
        """Return the certificate in 'fmt' format: DER, PEM or TXT."""
        if fmt == "DER":
            return self.dercert
        elif fmt == "PEM":
            return self.pemcert
        elif fmt == "TXT":
            return self.textcert

    def export(self, filename, fmt="DER"):
        """
        Export certificate in 'fmt' format (PEM, DER or TXT) to file 'filename'
        """
        f = open(filename, "wb")
        f.write(self.output(fmt))
        f.close()

    def isSelfSigned(self):
        """
        Return True if the certificate is self signed:
          - issuer and subject are the same
          - the signature of the certificate is valid.
        """
        if self.issuer == self.subject:
            return self.isIssuerCert(self)
        return False

    # Print main information stored in certificate
    def show(self):
        print("Serial: %s" % self.serial)
        print("Issuer: " + self.issuer)
        print("Subject: " + self.subject)
        print("Validity: %s to %s" % (self.notBefore_str_simple,
                                      self.notAfter_str_simple))

    def __repr__(self):
        return "[X.509 Cert. Subject:%s, Issuer:%s]" % (self.subject, self.issuer)

    def __str__(self):
        return self.dercert

    def verifychain(self, anchors, untrusted=None):
        """
        Perform verification of certificate chains for that certificate. The
        behavior of verifychain method is mapped (and also based) on openssl
        verify userland tool (man 1 verify).
        A list of anchors is required. untrusted parameter can be provided
        a list of untrusted certificates that can be used to reconstruct the
        chain.

        If you have a lot of certificates to verify against the same
        list of anchor, consider constructing this list as a cafile
        and use .verifychain_from_cafile() instead.
        """
        cafile = create_temporary_ca_file(anchors)
        if not cafile:
            return False
        untrusted_file = None
        if untrusted:
            untrusted_file = create_temporary_ca_file(untrusted) # hack
            if not untrusted_file:
                os.unlink(cafile)
                return False
        res = self.verifychain_from_cafile(cafile,
                                           untrusted_file=untrusted_file)
        os.unlink(cafile)
        if untrusted_file:
            os.unlink(untrusted_file)
        return res

    def verifychain_from_cafile(self, cafile, untrusted_file=None):
        """
        Does the same job as .verifychain() but using the list of anchors
        from the cafile. This is useful (because more efficient) if
        you have a lot of certificates to verify do it that way: it
        avoids the creation of a cafile from anchors at each call.

        As for .verifychain(), a list of untrusted certificates can be
        passed (as a file, this time)
        """
        cmd = ["openssl", "verify", "-CAfile", cafile]
        if untrusted_file:
            cmd += ["-untrusted", untrusted_file]
        try:
            pemcert = self.output(fmt="PEM")
            cmdres = self._apply_ossl_cmd(cmd, pemcert)
        except:
            return False
        # openssl's output ends with "OK" when verification succeeded
        return cmdres.endswith("\nOK\n") or cmdres.endswith(": OK\n")

    def verifychain_from_capath(self, capath, untrusted_file=None):
        """
        Does the same job as .verifychain_from_cafile() but using the list
        of anchors in capath directory. The directory should contain
        certificates files in PEM format with associated links as
        created using c_rehash utility (man c_rehash).

        As for .verifychain_from_cafile(), a list of untrusted certificates
        can be passed as a file (concatenation of the certificates in
        PEM format)
        """
        cmd = ["openssl", "verify", "-CApath", capath]
        if untrusted_file:
            cmd += ["-untrusted", untrusted_file]
        try:
            pemcert = self.output(fmt="PEM")
            cmdres = self._apply_ossl_cmd(cmd, pemcert)
        except:
            return False
        return cmdres.endswith("\nOK\n") or cmdres.endswith(": OK\n")

    def is_revoked(self, crl_list):
        """
        Given a list of trusted CRL (their signature has already been
        verified with trusted anchors), this function returns True if
        the certificate is marked as revoked by one of those CRL.

        Note that if the Certificate was on hold in a previous CRL and
        is now valid again in a new CRL and both are in the list, it
        will be considered revoked: this is because _all_ CRLs are
        checked (not only the freshest) and revocation status is not
        handled.

        Also note that the check on the issuer is performed on the
        Authority Key Identifier if available in _both_ the CRL and the
        Cert. Otherwise, the issuers are simply compared.
        """
        for c in crl_list:
            if (self.authorityKeyID is not None and
                c.authorityKeyID is not None and
                self.authorityKeyID == c.authorityKeyID):
                return self.serial in map(lambda x: x[0], c.revoked_cert_serials)
            elif (self.issuer == c.issuer):
                return self.serial in map(lambda x: x[0], c.revoked_cert_serials)
        return False
+
+def print_chain(l):
+ llen = len(l) - 1
+ if llen < 0:
+ return ""
+ c = l[llen]
+ llen -= 1
+ s = "_ "
+ if not c.isSelfSigned():
+ s = "_ ... [Missing Root]\n"
+ else:
+ s += "%s [Self Signed]\n" % c.subject
+ i = 1
+ while (llen != -1):
+ c = l[llen]
+ s += "%s\_ %s" % (" "*i, c.subject)
+ if llen != 0:
+ s += "\n"
+ i += 2
+ llen -= 1
+ print(s)
+
+# import popen2
+# a=popen3("openssl crl -text -inform DER -noout ", capturestderr=True)
+# a.tochild.write(open("samples/klasa1.crl").read())
+# a.tochild.close()
+# a.poll()
+
+class CRL(OSSLHelper):
+ # Below are the fields we recognize in the -text output of openssl
+ # and from which we extract information. We expect them in that
+ # order. Number of spaces does matter.
+ possible_fields = [ " Version",
+ " Signature Algorithm:",
+ " Issuer:",
+ " Last Update:",
+ " Next Update:",
+ " CRL extensions:",
+ " X509v3 Issuer Alternative Name:",
+ " X509v3 Authority Key Identifier:",
+ " keyid:",
+ " DirName:",
+ " serial:",
+ " X509v3 CRL Number:",
+ "Revoked Certificates:",
+ "No Revoked Certificates.",
+ " Signature Algorithm:" ]
+ possible_fields_count = len(possible_fields)
+
+ def __init__(self, crlpath):
+ error_msg = "Unable to import CRL."
+
+ fields_dict = {}
+ for k in self.possible_fields:
+ fields_dict[k] = None
+
+ self.crlpath = None
+ rawcrl = None
+
+ if (not '\x00' in crlpath) and os.path.isfile(crlpath):
+ self.crlpath = crlpath
+ cert_size = os.path.getsize(crlpath)
+ if cert_size > MAX_CRL_SIZE:
+ raise Exception(error_msg)
+ try:
+ f = open(crlpath)
+ rawcrl = f.read()
+ f.close()
+ except:
+ raise Exception(error_msg)
+ else:
+ rawcrl = crlpath
+
+ if rawcrl is None:
+ raise Exception(error_msg)
+
+ self.rawcrl = rawcrl
+
+ # Let's try to get file format : PEM or DER.
+ fmtstr = 'openssl crl -text -inform %s -noout'
+ convertstr = 'openssl crl -inform %s -outform %s'
+ crl_header = "-----BEGIN X509 CRL-----"
+ crl_footer = "-----END X509 CRL-----"
+ l = rawcrl.split(crl_header, 1)
+ if len(l) == 2: # looks like PEM
+ tmp = l[1]
+ l = tmp.split(crl_footer, 1)
+ if len(l) == 2:
+ tmp = l[0]
+ rawcrl = "%s%s%s\n" % (crl_header, tmp, crl_footer)
+ else:
+ raise Exception(error_msg)
+ r,w,e = popen3((fmtstr % "PEM").split(" "))
+ w.write(rawcrl)
+ w.close()
+ textcrl = r.read()
+ r.close()
+ res = e.read()
+ e.close()
+ if res == '':
+ self.format = "PEM"
+ self.pemcrl = rawcrl
+ self.textcrl = textcrl
+ cmd = (convertstr % ("PEM", "DER")).split(" ")
+ self.dercrl = self._apply_ossl_cmd(cmd, rawcrl)
+ else:
+ raise Exception(error_msg)
+ else: # not PEM, try DER
+ r,w,e = popen3((fmtstr % "DER").split(' '))
+ w.write(rawcrl)
+ w.close()
+ textcrl = r.read()
+ r.close()
+ res = e.read()
+ if res == '':
+ self.format = "DER"
+ self.dercrl = rawcrl
+ self.textcrl = textcrl
+ cmd = (convertstr % ("DER", "PEM")).split(" ")
+ self.pemcrl = self._apply_ossl_cmd(cmd, rawcrl)
+ cmd = (convertstr % ("DER", "DER")).split(" ")
+ self.dercrl = self._apply_ossl_cmd(cmd, rawcrl)
+ else:
+ raise Exception(error_msg)
+
+ self.osslcmdbase = ['openssl', 'crl', '-inform', self.format]
+
+ r,w,e = popen3(('openssl asn1parse -inform DER').split(" "))
+ w.write(self.dercrl)
+ w.close()
+ self.asn1parsecrl = r.read()
+ r.close()
+ res = e.read()
+ e.close()
+ if res != '':
+ raise Exception(error_msg)
+
+ # Grab _raw_ X509v3 Authority Key Identifier, if any.
+ tmp = self.asn1parsecrl.split(":X509v3 Authority Key Identifier", 1)
+ self.authorityKeyID = None
+ if len(tmp) == 2:
+ tmp = tmp[1]
+ tmp = tmp.split("[HEX DUMP]:", 1)[1]
+ self.authorityKeyID=tmp.split('\n',1)[0]
+
+ # Parse the -text output of openssl to make things available
+ tmp = self.textcrl.split('\n', 1)[1]
+ l = tmp.split('\n', 1)
+ if len(l) != 2:
+ raise Exception(error_msg)
+ cur, tmp = l
+ i = 0
+ k = self.possible_fields[i] # Version
+ cur = cur[len(k):] + '\n'
+ while k:
+ l = tmp.split('\n', 1)
+ if len(l) != 2: # Over
+ fields_dict[k] = cur
+ break
+ l, tmp = l
+
+ newkey = 0
+ # skip fields we have already seen, this is the purpose of 'i'
+ for j in range(i, self.possible_fields_count):
+ f = self.possible_fields[j]
+ if l.startswith(f):
+ fields_dict[k] = cur
+ cur = l[len(f):] + '\n'
+ k = f
+ newkey = 1
+ i = j+1
+ break
+ if newkey == 1:
+ continue
+ cur += l + '\n'
+
+ # version
+ v = fields_dict[" Version"]
+ self.version = None
+ if v:
+ self.version = int(v[1:2])
+ if self.version is None:
+ raise Exception(error_msg)
+
+ # signature algorithm
+ v = fields_dict[" Signature Algorithm:"]
+ self.sigAlg = None
+ if v:
+ v = v.split('\n',1)[0]
+ v = v.strip()
+ self.sigAlg = v
+ if self.sigAlg is None:
+ raise Exception(error_msg)
+
+ # issuer
+ v = fields_dict[" Issuer:"]
+ self.issuer = None
+ if v:
+ v = v.split('\n',1)[0]
+ v = v.strip()
+ self.issuer = v
+ if self.issuer is None:
+ raise Exception(error_msg)
+
+ # last update
+ v = fields_dict[" Last Update:"]
+ self.lastUpdate_str = None
+ if v:
+ v = v.split('\n',1)[0]
+ v = v.strip()
+ self.lastUpdate_str = v
+ if self.lastUpdate_str is None:
+ raise Exception(error_msg)
+ self.lastUpdate = time.strptime(self.lastUpdate_str,
+ "%b %d %H:%M:%S %Y %Z")
+ self.lastUpdate_str_simple = time.strftime("%x", self.lastUpdate)
+
+ # next update
+ v = fields_dict[" Next Update:"]
+ self.nextUpdate_str = None
+ if v:
+ v = v.split('\n',1)[0]
+ v = v.strip()
+ self.nextUpdate_str = v
+ if self.nextUpdate_str is None:
+ raise Exception(error_msg)
+ self.nextUpdate = time.strptime(self.nextUpdate_str,
+ "%b %d %H:%M:%S %Y %Z")
+ self.nextUpdate_str_simple = time.strftime("%x", self.nextUpdate)
+
+ # XXX Do something for Issuer Alternative Name
+
+ # Authority Key Identifier: keyid, dirname and serial
+ self.authorityKeyID_keyid = None
+ self.authorityKeyID_dirname = None
+ self.authorityKeyID_serial = None
+ if self.authorityKeyID: # (hex version already done using asn1parse)
+ v = fields_dict[" keyid:"]
+ if v:
+ v = v.split('\n',1)[0]
+ v = v.strip().replace(':', '')
+ self.authorityKeyID_keyid = v
+ v = fields_dict[" DirName:"]
+ if v:
+ v = v.split('\n',1)[0]
+ self.authorityKeyID_dirname = v
+ v = fields_dict[" serial:"]
+ if v:
+ v = v.split('\n',1)[0]
+ v = v.strip().replace(':', '')
+ self.authorityKeyID_serial = v
+
+ # number
+ v = fields_dict[" X509v3 CRL Number:"]
+ self.number = None
+ if v:
+ v = v.split('\n',2)[1]
+ v = v.strip()
+ self.number = int(v)
+
+ # Get the list of serial numbers of revoked certificates
+ self.revoked_cert_serials = []
+ v = fields_dict["Revoked Certificates:"]
+ t = fields_dict["No Revoked Certificates."]
+ if (t is None and v is not None):
+ v = v.split("Serial Number: ")[1:]
+ for r in v:
+ s,d = r.split('\n', 1)
+ s = s.split('\n', 1)[0]
+ d = d.split("Revocation Date:", 1)[1]
+ d = time.strptime(d.strip(), "%b %d %H:%M:%S %Y %Z")
+ self.revoked_cert_serials.append((s,d))
+
+ # signature field
+ v = fields_dict[" Signature Algorithm:" ]
+ self.sig = None
+ if v:
+ v = v.split('\n',1)[1]
+ v = v.replace(' ', '').replace('\n', '')
+ self.sig = "".join(map(lambda x: chr(int(x, 16)), v.split(':')))
+ self.sigLen = len(self.sig)
+ if self.sig is None:
+ raise Exception(error_msg)
+
+ def __str__(self):
+ return self.dercrl
+
+    # Print the main information stored in the CRL (parsed in __init__).
+    def show(self):
+        print("Version: %d" % self.version)
+        print("sigAlg: " + self.sigAlg)
+        print("Issuer: " + self.issuer)
+        print("lastUpdate: %s" % self.lastUpdate_str_simple)
+        print("nextUpdate: %s" % self.nextUpdate_str_simple)
+
+    def verify(self, anchors):
+        """
+        Return True if the CRL is signed by one of the provided
+        anchors. False on error (invalid signature, missing anchor, ...)
+        """
+        cafile = create_temporary_ca_file(anchors)
+        if cafile is None:
+            return False
+        try:
+            cmd = self.osslcmdbase + ["-noout", "-CAfile", cafile]
+            cmdres = self._apply_ossl_cmd(cmd, self.rawcrl)
+        except:
+            # Remove the temporary CA file on failure too.
+            os.unlink(cafile)
+            return False
+        os.unlink(cafile)
+        # openssl prints "verify OK" on successful signature verification.
+        return "verify OK" in cmdres
+
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/dadict.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/dadict.py
new file mode 100644
index 00000000..0fdcc135
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/dadict.py
@@ -0,0 +1,91 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Direct Access dictionary.
+"""
+
+from .error import Scapy_Exception
+
+###############################
+## Direct Access dictionnary ##
+###############################
+
+# Make an arbitrary string usable as a Python attribute name: prefix a
+# leading digit with "n_", then map every character that is not an ASCII
+# letter or digit to "_". The 256-character string below is used as a
+# translate table indexed by code point (chars past index 255 would be
+# left unchanged via LookupError).
+def fixname(x):
+    if x and x[0] in "0123456789":
+        x = "n_"+x
+    return x.translate("________________________________________________0123456789_______ABCDEFGHIJKLMNOPQRSTUVWXYZ______abcdefghijklmnopqrstuvwxyz_____________________________________________________________________________________________________________________________________")
+
+
+class DADict_Exception(Scapy_Exception):
+ pass
+
+class DADict:
+ def __init__(self, _name="DADict", **kargs):
+ self._name=_name
+ self.__dict__.update(kargs)
+ def fixname(self,val):
+ return fixname(val)
+ def __contains__(self, val):
+ return val in self.__dict__
+ def __getitem__(self, attr):
+ return getattr(self, attr)
+ def __setitem__(self, attr, val):
+ return setattr(self, self.fixname(attr), val)
+ def __iter__(self):
+ #return iter(map(lambda (x,y):y,filter(lambda (x,y):x and x[0]!="_", self.__dict__.items())))
+ #return iter(map(lambda a:a[1],filter(lambda a:a[0] and a[0][0]!="_", self.__dict__.items())))
+ return iter([a[1] for a in self.__dict__.items() if a[0] and a[0][0]!=" "])
+ def _show(self):
+ for k in self.__dict__.keys():
+ if k and k[0] != "_":
+ print("%10s = %r" % (k,getattr(self,k)))
+ def __repr__(self):
+ #return "<%s/ %s>" % (self._name," ".join(filter(lambda x:x and x[0]!="_",self.__dict__.keys())))
+ return "<%s/ %s>" % (self._name," ".join([ x for x in self.__dict__.keys() if x and x[0]!="_"]))
+
+ def _branch(self, br, uniq=0):
+ if uniq and br._name in self:
+ raise DADict_Exception("DADict: [%s] already branched in [%s]" % (br._name, self._name))
+ self[br._name] = br
+
+ def _my_find(self, *args, **kargs):
+ if args and self._name not in args:
+ return False
+ for k in kargs:
+ if k not in self or self[k] != kargs[k]:
+ return False
+ return True
+
+ def _find(self, *args, **kargs):
+ return self._recurs_find((), *args, **kargs)
+ def _recurs_find(self, path, *args, **kargs):
+ if self in path:
+ return None
+ if self._my_find(*args, **kargs):
+ return self
+ for o in self:
+ if isinstance(o, DADict):
+ p = o._recurs_find(path+(self,), *args, **kargs)
+ if p is not None:
+ return p
+ return None
+ def _find_all(self, *args, **kargs):
+ return self._recurs_find_all((), *args, **kargs)
+ def _recurs_find_all(self, path, *args, **kargs):
+ r = []
+ if self in path:
+ return r
+ if self._my_find(*args, **kargs):
+ r.append(self)
+ for o in self:
+ if isinstance(o, DADict):
+ p = o._recurs_find_all(path+(self,), *args, **kargs)
+ r += p
+ return r
+ def keys(self):
+ #return filter(lambda x:x and x[0]!="_", self.__dict__.keys())
+ return [ x for x in self.__dict__.keys() if x and x[0]!="_" ]
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/data.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/data.py
new file mode 100644
index 00000000..fc92ebe2
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/data.py
@@ -0,0 +1,215 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Global variables and functions for handling external data sets.
+"""
+
+import os,sys,re
+from .dadict import DADict
+from .error import log_loading
+
+############
+## Consts ##
+############
+
+ETHER_ANY = b"\x00"*6
+ETHER_BROADCAST = b"\xff"*6
+
+ETH_P_ALL = 3
+ETH_P_IP = 0x800
+ETH_P_ARP = 0x806
+ETH_P_IPV6 = 0x86dd
+
+# From net/if_arp.h
+ARPHDR_ETHER = 1
+ARPHDR_METRICOM = 23
+ARPHDR_PPP = 512
+ARPHDR_LOOPBACK = 772
+ARPHDR_TUN = 65534
+
+
+# From net/ipv6.h on Linux (+ Additions)
+IPV6_ADDR_UNICAST = 0x01
+IPV6_ADDR_MULTICAST = 0x02
+IPV6_ADDR_CAST_MASK = 0x0F
+IPV6_ADDR_LOOPBACK = 0x10
+IPV6_ADDR_GLOBAL = 0x00
+IPV6_ADDR_LINKLOCAL = 0x20
+IPV6_ADDR_SITELOCAL = 0x40 # deprecated since Sept. 2004 by RFC 3879
+IPV6_ADDR_SCOPE_MASK = 0xF0
+#IPV6_ADDR_COMPATv4 = 0x80 # deprecated; i.e. ::/96
+#IPV6_ADDR_MAPPED = 0x1000 # i.e.; ::ffff:0.0.0.0/96
+IPV6_ADDR_6TO4 = 0x0100 # Added to have more specific info (should be 0x0101 ?)
+IPV6_ADDR_UNSPECIFIED = 0x10000
+
+
+
+
+MTU = 0x7fff # a.k.a give me all you have
+
+WINDOWS=sys.platform.startswith("win")
+
+
+# file parsing to get some values :
+
+def load_protocols(filename):
+    """
+    Parse an /etc/protocols-style file and return a DADict mapping
+    protocol name -> protocol number (decimal). Unparsable lines are
+    logged and skipped; a missing file yields an empty DADict.
+    """
+    spaces = re.compile("[ \t]+|\n")
+    dct = DADict(_name=filename)
+    try:
+        for l in open(filename):
+            try:
+                # Strip trailing comment and surrounding whitespace.
+                shrp = l.find("#")
+                if shrp >= 0:
+                    l = l[:shrp]
+                l = l.strip()
+                if not l:
+                    continue
+                lt = tuple(re.split(spaces, l))
+                if len(lt) < 2 or not lt[0]:
+                    continue
+                dct[lt[0]] = int(lt[1])
+            except Exception as e:
+                log_loading.info("Couldn't parse file [%s]: line [%r] (%s)" % (filename,l,e))
+    except IOError:
+        log_loading.info("Can't open %s file" % filename)
+    return dct
+
+def load_ethertypes(filename):
+    """
+    Parse an /etc/ethertypes-style file and return a DADict mapping
+    protocol name -> ethertype number (hexadecimal in the file).
+    """
+    spaces = re.compile("[ \t]+|\n")
+    dct = DADict(_name=filename)
+    try:
+        f=open(filename)
+        for l in f:
+            try:
+                # Strip trailing comment and surrounding whitespace.
+                shrp = l.find("#")
+                if shrp >= 0:
+                    l = l[:shrp]
+                l = l.strip()
+                if not l:
+                    continue
+                lt = tuple(re.split(spaces, l))
+                if len(lt) < 2 or not lt[0]:
+                    continue
+                dct[lt[0]] = int(lt[1], 16)
+            except Exception as e:
+                log_loading.info("Couldn't parse file [%s]: line [%r] (%s)" % (filename,l,e))
+        f.close()
+    except IOError as msg:
+        # A missing ethertypes file is deliberately silent (the file is
+        # optional on many systems), unlike load_protocols().
+        pass
+    return dct
+
+def load_services(filename):
+ spaces = re.compile("[ \t]+|\n")
+ tdct=DADict(_name="%s-tcp"%filename)
+ udct=DADict(_name="%s-udp"%filename)
+ try:
+ f=open(filename)
+ for l in f:
+ try:
+ shrp = l.find("#")
+ if shrp >= 0:
+ l = l[:shrp]
+ l = l.strip()
+ if not l:
+ continue
+ lt = tuple(re.split(spaces, l))
+ if len(lt) < 2 or not lt[0]:
+ continue
+ if lt[1].endswith("/tcp"):
+ tdct[lt[0]] = int(lt[1].split('/')[0])
+ elif lt[1].endswith("/udp"):
+ udct[lt[0]] = int(lt[1].split('/')[0])
+ except Exception as e:
+ log_loading.warning("Couldn't file [%s]: line [%r] (%s)" % (filename,l,e))
+ f.close()
+ except IOError:
+ log_loading.info("Can't open /etc/services file")
+ return tdct,udct
+
+
+class ManufDA(DADict):
+    """
+    DADict specialization for the Wireshark 'manuf' database: maps an
+    OUI string ("AA:BB:CC") to a (short_name, long_name) tuple.
+    """
+    def fixname(self, val):
+        # OUI keys contain ":" which fixname() would mangle; keep keys as-is.
+        return val
+    def _get_manuf_couple(self, mac):
+        oui = ":".join(mac.split(":")[:3]).upper()
+        # Unknown OUIs fall back to (mac, mac).
+        return self.__dict__.get(oui,(mac,mac))
+    def _get_manuf(self, mac):
+        return self._get_manuf_couple(mac)[1]
+    def _get_short_manuf(self, mac):
+        return self._get_manuf_couple(mac)[0]
+    def _resolve_MAC(self, mac):
+        # Replace the OUI part of a MAC with the vendor short name, if known.
+        oui = ":".join(mac.split(":")[:3]).upper()
+        if oui in self:
+            return ":".join([self[oui][0]]+ mac.split(":")[3:])
+        return mac
+
+
+
+
+def load_manuf(filename):
+    """
+    Parse a Wireshark 'manuf' file and return a ManufDA mapping
+    OUI -> (short name, long name). A missing file yields an empty
+    database (manufdb is created before open() so the IOError path
+    still returns it).
+    """
+    try:
+        manufdb=ManufDA(_name=filename)
+        for l in open(filename, "r", encoding = 'utf-8'):
+            try:
+                l = l.strip()
+                if not l or l.startswith("#"):
+                    continue
+                oui,shrt=l.split()[:2]
+                i = l.find("#")
+                if i < 0:
+                    # No trailing comment: reuse the short name as long name.
+                    lng=shrt
+                else:
+                    # Long name follows "# " on the same line.
+                    lng = l[i+2:]
+                manufdb[oui] = shrt,lng
+            except Exception as e:
+                log_loading.warning("Couldn't parse one line from [%s] [%r] (%s)" % (filename, l, e))
+    except IOError:
+        #log_loading.warning("Couldn't open [%s] file" % filename)
+        pass
+    return manufdb
+
+
+
+if WINDOWS:
+ ETHER_TYPES=load_ethertypes("ethertypes")
+ IP_PROTOS=load_protocols(os.environ["SystemRoot"]+"\system32\drivers\etc\protocol")
+ TCP_SERVICES,UDP_SERVICES=load_services(os.environ["SystemRoot"] + "\system32\drivers\etc\services")
+ MANUFDB = load_manuf(os.environ["ProgramFiles"] + "\\wireshark\\manuf")
+else:
+ IP_PROTOS=load_protocols("/etc/protocols")
+ ETHER_TYPES=load_ethertypes("/etc/ethertypes")
+ TCP_SERVICES,UDP_SERVICES=load_services("/etc/services")
+ MANUFDB = load_manuf("/usr/share/wireshark/manuf")
+
+
+
+#####################
+## knowledge bases ##
+#####################
+
+class KnowledgeBase:
+    """
+    Base class for lazily-loaded external databases. Subclasses override
+    lazy_init() to fill self.base from self.filename; loading is deferred
+    until get_base() is first called.
+    """
+    def __init__(self, filename):
+        self.filename = filename
+        self.base = None
+
+    def lazy_init(self):
+        # Overridden by subclasses; the default base is an empty string.
+        self.base = ""
+
+    def reload(self, filename = None):
+        # Re-run lazy_init(); if it fails to set a base, restore the old one.
+        if filename is not None:
+            self.filename = filename
+        oldbase = self.base
+        self.base = None
+        self.lazy_init()
+        if self.base is None:
+            self.base = oldbase
+
+    def get_base(self):
+        if self.base is None:
+            self.lazy_init()
+        return self.base
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/error.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/error.py
new file mode 100644
index 00000000..1753d523
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/error.py
@@ -0,0 +1,60 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Logging subsystem and basic exception class.
+"""
+
+#############################
+##### Logging subsystem #####
+#############################
+
+class Scapy_Exception(Exception):
+ pass
+
+import logging,traceback,time
+
+class ScapyFreqFilter(logging.Filter):
+    """
+    Logging filter that rate-limits repeated warnings per call site.
+    When conf.warning_threshold > 0, at most two messages per caller
+    are emitted inside a threshold window (the second prefixed with
+    "more "); further ones are dropped until the window expires.
+    """
+    def __init__(self):
+        logging.Filter.__init__(self)
+        # caller line number -> (last emit time, messages in window)
+        self.warning_table = {}
+    def filter(self, record):
+        # Imported here to avoid a circular import at module load time.
+        from .config import conf
+        wt = conf.warning_threshold
+        if wt > 0:
+            stk = traceback.extract_stack()
+            caller=None
+            # Identify the call site: the frame just below warning().
+            for f,l,n,c in stk:
+                if n == 'warning':
+                    break
+                caller = l
+            tm,nb = self.warning_table.get(caller, (0,0))
+            ltm = time.time()
+            if ltm-tm > wt:
+                tm = ltm
+                nb = 0
+            else:
+                if nb < 2:
+                    nb += 1
+                    if nb == 2:
+                        record.msg = "more "+record.msg
+                else:
+                    # Drop the record (0 = do not log).
+                    return 0
+            self.warning_table[caller] = (tm,nb)
+        return 1
+
+log_scapy = logging.getLogger("scapy")
+console_handler = logging.StreamHandler()
+console_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
+log_scapy.addHandler(console_handler)
+log_runtime = logging.getLogger("scapy.runtime") # logs at runtime
+log_runtime.addFilter(ScapyFreqFilter())
+log_interactive = logging.getLogger("scapy.interactive") # logs in interactive functions
+log_loading = logging.getLogger("scapy.loading") # logs when loading scapy
+
+
+def warning(x):
+ log_runtime.warning(x)
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/fields.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/fields.py
new file mode 100644
index 00000000..5482ce87
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/fields.py
@@ -0,0 +1,935 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Fields: basic data structures that make up parts of packets.
+"""
+
+import struct,copy,socket
+from .config import conf
+from .volatile import *
+from .data import *
+from .utils import *
+from .base_classes import BasePacket,Gen,Net
+
+
+############
+## Fields ##
+############
+
+class Field:
+ """For more informations on how this work, please refer to
+ http://www.secdev.org/projects/scapy/files/scapydoc.pdf
+ chapter ``Adding a New Field''"""
+ islist=0
+ holds_packets=0
+ def __init__(self, name, default, fmt="H"):
+ self.name = name
+ if fmt[0] in "@=<>!":
+ self.fmt = fmt
+ else:
+ self.fmt = "!"+fmt
+ self.default = self.any2i(None,default)
+ self.sz = struct.calcsize(self.fmt)
+ self.owners = []
+ self.offset =0;
+
+
+ def get_size_bytes (self):
+ if hasattr(self, 'size'):
+ return 0; # bitfield
+ else:
+ return self.sz
+
+ def register_owner(self, cls):
+ self.owners.append(cls)
+
+ def i2len(self, pkt, x):
+ """Convert internal value to a length usable by a FieldLenField"""
+ return self.sz
+ def i2count(self, pkt, x):
+ """Convert internal value to a number of elements usable by a FieldLenField.
+ Always 1 except for list fields"""
+ return 1
+ def i2b(self, pkt, x):
+ """Convert internal value to internal value"""
+ if type(x) is str:
+ x = bytes([ ord(i) for i in x ])
+ return x
+ def h2i(self, pkt, x):
+ """Convert human value to internal value"""
+ if type(x) is str:
+ x = bytes([ ord(i) for i in x ])
+ return x
+ def i2h(self, pkt, x):
+ """Convert internal value to human value"""
+ return x
+ def m2i(self, pkt, x):
+ """Convert machine value to internal value"""
+ return x
+ def i2m(self, pkt, x):
+ """Convert internal value to machine value"""
+ if x is None:
+ x = 0
+ return x
+ def any2i(self, pkt, x):
+ """Try to understand the most input values possible and make an internal value from them"""
+ return self.h2i(pkt, x)
+ def i2repr(self, pkt, x):
+ """Convert internal value to a nice representation"""
+ return repr(self.i2h(pkt,x))
+ def addfield(self, pkt, s, val):
+ """Add an internal value to a string"""
+ return s+struct.pack(self.fmt, self.i2m(pkt,val))
+ def getfield(self, pkt, s):
+ """Extract an internal value from a string"""
+ return s[self.sz:], self.m2i(pkt, struct.unpack(self.fmt, s[:self.sz])[0])
+ def do_copy(self, x):
+ if hasattr(x, "copy"):
+ return x.copy()
+ if type(x) is list:
+ x = x[:]
+ for i in range(len(x)):
+ if isinstance(x[i], BasePacket):
+ x[i] = x[i].copy()
+ return x
+ def __repr__(self):
+ return "<Field (%s).%s>" % (",".join(x.__name__ for x in self.owners),self.name)
+ def copy(self):
+ return copy.deepcopy(self)
+ def randval(self):
+ """Return a volatile object whose value is both random and suitable for this field"""
+ fmtt = self.fmt[-1]
+ if fmtt in "BHIQ":
+ return {"B":RandByte,"H":RandShort,"I":RandInt, "Q":RandLong}[fmtt]()
+ elif fmtt == "s":
+ if self.fmt[0] in "0123456789":
+ l = int(self.fmt[:-1])
+ else:
+ l = int(self.fmt[1:-1])
+ return RandBin(l)
+ else:
+ warning("no random class for [%s] (fmt=%s)." % (self.name, self.fmt))
+
+
+
+
+class Emph:
+ fld = b""
+ def __init__(self, fld):
+ self.fld = fld
+ def __getattr__(self, attr):
+ return getattr(self.fld,attr)
+ def __hash__(self):
+ return hash(self.fld)
+ def __eq__(self, other):
+ return self.fld == other
+
+
+class ActionField:
+ _fld = None
+ def __init__(self, fld, action_method, **kargs):
+ self._fld = fld
+ self._action_method = action_method
+ self._privdata = kargs
+ def any2i(self, pkt, val):
+ getattr(pkt, self._action_method)(val, self._fld, **self._privdata)
+ return getattr(self._fld, "any2i")(pkt, val)
+ def __getattr__(self, attr):
+ return getattr(self._fld,attr)
+
+
+class ConditionalField:
+ fld = None
+ def __init__(self, fld, cond):
+ self.fld = fld
+ self.cond = cond
+ def _evalcond(self,pkt):
+ return self.cond(pkt)
+
+ def getfield(self, pkt, s):
+ if self._evalcond(pkt):
+ return self.fld.getfield(pkt,s)
+ else:
+ return s,None
+
+ def addfield(self, pkt, s, val):
+ if self._evalcond(pkt):
+ return self.fld.addfield(pkt,s,val)
+ else:
+ return s
+ def __getattr__(self, attr):
+ return getattr(self.fld,attr)
+
+
+class PadField:
+ """Add bytes after the proxified field so that it ends at the specified
+ alignment from its begining"""
+ _fld = None
+ def __init__(self, fld, align, padwith=None):
+ self._fld = fld
+ self._align = align
+ self._padwith = padwith or b""
+
+ def padlen(self, flen):
+ return -flen%self._align
+
+ def getfield(self, pkt, s):
+ remain,val = self._fld.getfield(pkt,s)
+ padlen = self.padlen(len(s)-len(remain))
+ return remain[padlen:], val
+
+ def addfield(self, pkt, s, val):
+ sval = self._fld.addfield(pkt, b"", val)
+ return s+sval+struct.pack("%is" % (self.padlen(len(sval))), self._padwith)
+
+ def __getattr__(self, attr):
+ return getattr(self._fld,attr)
+
+
+class MACField(Field):
+ def __init__(self, name, default):
+ Field.__init__(self, name, default, "6s")
+ def i2m(self, pkt, x):
+ if x is None:
+ return b"\0\0\0\0\0\0"
+ return mac2str(x)
+ def m2i(self, pkt, x):
+ return str2mac(x)
+ def any2i(self, pkt, x):
+ if type(x) is bytes and len(x) is 6:
+ x = self.m2i(pkt, x)
+ return x
+ def i2repr(self, pkt, x):
+ x = self.i2h(pkt, x)
+ if self in conf.resolve:
+ x = conf.manufdb._resolve_MAC(x)
+ return x
+ def randval(self):
+ return RandMAC()
+
+
+class IPField(Field):
+ def __init__(self, name, default):
+ Field.__init__(self, name, default, "4s")
+ def h2i(self, pkt, x):
+ if type(x) is str:
+ try:
+ inet_aton(x)
+ except socket.error:
+ x = Net(x)
+ elif type(x) is list:
+ x = [self.h2i(pkt, n) for n in x]
+ return x
+ def resolve(self, x):
+ if self in conf.resolve:
+ try:
+ ret = socket.gethostbyaddr(x)[0]
+ except:
+ pass
+ else:
+ if ret:
+ return ret
+ return x
+ def i2m(self, pkt, x):
+ return inet_aton(x)
+ def m2i(self, pkt, x):
+ return inet_ntoa(x)
+ def any2i(self, pkt, x):
+ return self.h2i(pkt,x)
+ def i2repr(self, pkt, x):
+ return self.resolve(self.i2h(pkt, x))
+ def randval(self):
+ return RandIP()
+
+class SourceIPField(IPField):
+ def __init__(self, name, dstname):
+ IPField.__init__(self, name, None)
+ self.dstname = dstname
+ def i2m(self, pkt, x):
+ if x is None:
+ iff,x,gw = pkt.route()
+ if x is None:
+ x = "0.0.0.0"
+ return IPField.i2m(self, pkt, x)
+ def i2h(self, pkt, x):
+ if x is None:
+ dst=getattr(pkt,self.dstname)
+ if isinstance(dst,Gen):
+ #r = map(conf.route.route, dst)
+ r = [ conf.route.route(i) for i in dst ]
+ r.sort()
+ if r[0] != r[-1]:
+ warning("More than one possible route for %s"%repr(dst))
+ iff,x,gw = r[0]
+ else:
+ iff,x,gw = conf.route.route(dst)
+ return IPField.i2h(self, pkt, x)
+
+
+
+
+class ByteField(Field):
+ def __init__(self, name, default):
+ Field.__init__(self, name, default, "B")
+
+class XByteField(ByteField):
+ def i2repr(self, pkt, x):
+ return lhex(self.i2h(pkt, x))
+
+class OByteField(ByteField):
+ def i2repr(self, pkt, x):
+ return "%03o"%self.i2h(pkt, x)
+
+class X3BytesField(XByteField):
+ def __init__(self, name, default):
+ Field.__init__(self, name, default, "!I")
+ def addfield(self, pkt, s, val):
+ return s+struct.pack(self.fmt, self.i2m(pkt,val))[1:4]
+ def getfield(self, pkt, s):
+ return s[3:], self.m2i(pkt, struct.unpack(self.fmt, b"\x00"+s[:3])[0])
+
+
+class ThreeBytesField(X3BytesField, ByteField):
+ def i2repr(self, pkt, x):
+ return ByteField.i2repr(self, pkt, x)
+
+
+class ShortField(Field):
+ def __init__(self, name, default):
+ Field.__init__(self, name, default, "H")
+
+class LEShortField(Field):
+ def __init__(self, name, default):
+ Field.__init__(self, name, default, "<H")
+
+class XShortField(ShortField):
+ def i2repr(self, pkt, x):
+ return lhex(self.i2h(pkt, x))
+
+
+class IntField(Field):
+ def __init__(self, name, default):
+ Field.__init__(self, name, default, "I")
+
+class SignedIntField(Field):
+ def __init__(self, name, default):
+ Field.__init__(self, name, default, "i")
+ def randval(self):
+ return RandSInt()
+
+class LEIntField(Field):
+ def __init__(self, name, default):
+ Field.__init__(self, name, default, "<I")
+
+class LESignedIntField(Field):
+ def __init__(self, name, default):
+ Field.__init__(self, name, default, "<i")
+ def randval(self):
+ return RandSInt()
+
+class XIntField(IntField):
+ def i2repr(self, pkt, x):
+ return lhex(self.i2h(pkt, x))
+
+
+class LongField(Field):
+ def __init__(self, name, default):
+ Field.__init__(self, name, default, "Q")
+
+class XLongField(LongField):
+ def i2repr(self, pkt, x):
+ return lhex(self.i2h(pkt, x))
+
+class IEEEFloatField(Field):
+ def __init__(self, name, default):
+ Field.__init__(self, name, default, "f")
+
+class IEEEDoubleField(Field):
+ def __init__(self, name, default):
+ Field.__init__(self, name, default, "d")
+
+
+class StrField(Field):
+ def __init__(self, name, default, fmt="H", remain=0):
+ Field.__init__(self,name,default,fmt)
+ self.remain = remain
+ #def i2h(self, pkt, x):
+ def i2repr(self, pkt, x):
+ try:
+ if type(x) is bytes:
+ x = x.decode('ascii')
+ except UnicodeDecodeError:
+ pass
+ return repr(x)
+ #def i2repr(self, pkt, x):
+ # return repr(self.i2h(pkt,x))
+ def i2len(self, pkt, i):
+ return len(i)
+ def i2m(self, pkt, x):
+ if x is None:
+ x = b""
+ elif type(x) is not bytes:
+ x=str(x).encode('ascii')
+ return x
+ def addfield(self, pkt, s, val):
+ return s+self.i2m(pkt, val)
+ def getfield(self, pkt, s):
+ if self.remain == 0:
+ return b"",self.m2i(pkt, s)
+ else:
+ return s[-self.remain:],self.m2i(pkt, s[:-self.remain])
+ def randval(self):
+ return RandBin(RandNum(0,1200))
+
+class PacketField(StrField):
+ holds_packets=1
+ def __init__(self, name, default, cls, remain=0): #is remain used somewhere?
+ StrField.__init__(self, name, default, remain=remain)
+ self.cls = cls
+ def i2m(self, pkt, i):
+ return bytes(i)
+ def m2i(self, pkt, m):
+ return self.cls(m)
+ def getfield(self, pkt, s):
+ i = self.m2i(pkt, s)
+ remain = b""
+ if conf.padding_layer in i:
+ r = i[conf.padding_layer]
+ del(r.underlayer.payload)
+ remain = r.load
+ return remain,i
+
+class PacketLenField(PacketField):
+ holds_packets=1
+ def __init__(self, name, default, cls, length_from=None):
+ PacketField.__init__(self, name, default, cls)
+ self.length_from = length_from
+ def getfield(self, pkt, s):
+ l = self.length_from(pkt)
+ try:
+ i = self.m2i(pkt, s[:l])
+ except Exception:
+ if conf.debug_dissector:
+ raise
+ i = conf.raw_layer(load=s[:l])
+ return s[l:],i
+
+
+class PacketListField(PacketField):
+ islist = 1
+ holds_packets=1
+ def __init__(self, name, default, cls, count_from=None, length_from=None):
+ if default is None:
+ default = [] # Create a new list for each instance
+ PacketField.__init__(self, name, default, cls)
+ self.count_from = count_from
+ self.length_from = length_from
+
+
+ def any2i(self, pkt, x):
+ if type(x) is not list:
+ return [x]
+ else:
+ return x
+ def i2count(self, pkt, val):
+ if type(val) is list:
+ return len(val)
+ return 1
+ def i2len(self, pkt, val):
+ return sum( len(p) for p in val )
+ def do_copy(self, x):
+ #return map(lambda p:p.copy(), x)
+ return [ i.copy() for i in x ]
+ def getfield(self, pkt, s):
+ c = l = None
+ if self.length_from is not None:
+ l = self.length_from(pkt)
+ elif self.count_from is not None:
+ c = self.count_from(pkt)
+
+ lst = []
+ ret = b""
+ remain = s
+ if l is not None:
+ remain,ret = s[:l],s[l:]
+ while remain:
+ if c is not None:
+ if c <= 0:
+ break
+ c -= 1
+ try:
+ p = self.m2i(pkt,remain)
+ except Exception:
+ if conf.debug_dissector:
+ raise
+ p = conf.raw_layer(load=remain)
+ remain = b""
+ else:
+ if conf.padding_layer in p:
+ pad = p[conf.padding_layer]
+ remain = pad.load
+ del(pad.underlayer.payload)
+ else:
+ remain = b""
+ lst.append(p)
+ return remain+ret,lst
+ def addfield(self, pkt, s, val):
+ return s+b"".join([ bytes(i) for i in val ])
+
+
+class StrFixedLenField(StrField):
+ def __init__(self, name, default, length=None, length_from=None):
+ StrField.__init__(self, name, default)
+ self.length_from = length_from
+ if length is not None:
+ self.length_from = lambda pkt,length=length: length
+ def i2repr(self, pkt, v):
+ if type(v) is bytes:
+ v = v.rstrip(b"\0")
+ return repr(v)
+ def getfield(self, pkt, s):
+ l = self.length_from(pkt)
+ return s[l:], self.m2i(pkt,s[:l])
+ def addfield(self, pkt, s, val):
+ l = self.length_from(pkt)
+ return s+struct.pack("%is"%l,self.i2m(pkt, val))
+ def randval(self):
+ try:
+ l = self.length_from(None)
+ except:
+ l = RandNum(0,200)
+ return RandBin(l)
+
+class StrFixedLenEnumField(StrFixedLenField):
+ def __init__(self, name, default, length=None, enum=None, length_from=None):
+ StrFixedLenField.__init__(self, name, default, length=length, length_from=length_from)
+ self.enum = enum
+ def i2repr(self, pkt, v):
+ r = v.rstrip("\0")
+ rr = repr(r)
+ if v in self.enum:
+ rr = "%s (%s)" % (rr, self.enum[v])
+ elif r in self.enum:
+ rr = "%s (%s)" % (rr, self.enum[r])
+ return rr
+
+class NetBIOSNameField(StrFixedLenField):
+ def __init__(self, name, default, length=31):
+ StrFixedLenField.__init__(self, name, default, length)
+ def i2m(self, pkt, x):
+ l = self.length_from(pkt)//2
+ if x is None:
+ x = b""
+ x += b" "*(l)
+ x = x[:l]
+ #x = b"".join(map(lambda x: chr(0x41+(ord(x)>>4))+chr(0x41+(ord(x)&0xf)), x))
+ x = b"".join([ bytes([0x41+(i>>4),0x41+(i&0xf)]) for i in x ])
+ x = b" "+x
+ return x
+ def m2i(self, pkt, x):
+ x = x.strip(b"\x00").strip(b" ")
+ #return b"".join(map(lambda x,y: chr((((ord(x)-1)&0xf)<<4)+((ord(y)-1)&0xf)), x[::2],x[1::2]))
+ return b"".join(map(lambda x,y: bytes([(((x-1)&0xf)<<4)+((y-1)&0xf)]), x[::2],x[1::2]))
+
+class StrLenField(StrField):
+ def __init__(self, name, default, fld=None, length_from=None):
+ StrField.__init__(self, name, default)
+ self.length_from = length_from
+ def getfield(self, pkt, s):
+ l = self.length_from(pkt)
+ return s[l:], self.m2i(pkt,s[:l])
+
+class FieldListField(Field):
+ islist=1
+ def __init__(self, name, default, field, length_from=None, count_from=None):
+ if default is None:
+ default = [] # Create a new list for each instance
+ Field.__init__(self, name, default)
+ self.count_from = count_from
+ self.length_from = length_from
+ self.field = field
+
+ def i2count(self, pkt, val):
+ if type(val) is list:
+ return len(val)
+ return 1
+ def i2len(self, pkt, val):
+ return sum( self.field.i2len(pkt,v) for v in val )
+
+ def i2m(self, pkt, val):
+ if val is None:
+ val = []
+ return val
+ def any2i(self, pkt, x):
+ if type(x) is not list:
+ return [x]
+ else:
+ return x
+ def addfield(self, pkt, s, val):
+ val = self.i2m(pkt, val)
+ for v in val:
+ s = self.field.addfield(pkt, s, v)
+ return s
+ def getfield(self, pkt, s):
+ c = l = None
+ if self.length_from is not None:
+ l = self.length_from(pkt)
+ elif self.count_from is not None:
+ c = self.count_from(pkt)
+
+ val = []
+ ret=b""
+ if l is not None:
+ s,ret = s[:l],s[l:]
+
+ while s:
+ if c is not None:
+ if c <= 0:
+ break
+ c -= 1
+ s,v = self.field.getfield(pkt, s)
+ val.append(v)
+ return s+ret, val
+
+class FieldLenField(Field):
+ def __init__(self, name, default, length_of=None, fmt = "H", count_of=None, adjust=lambda pkt,x:x, fld=None):
+ Field.__init__(self, name, default, fmt)
+ self.length_of=length_of
+ self.count_of=count_of
+ self.adjust=adjust
+ if fld is not None:
+ FIELD_LENGTH_MANAGEMENT_DEPRECATION(self.__class__.__name__)
+ self.length_of = fld
+ def i2m(self, pkt, x):
+ if x is None:
+ if self.length_of is not None:
+ fld,fval = pkt.getfield_and_val(self.length_of)
+ f = fld.i2len(pkt, fval)
+ else:
+ fld,fval = pkt.getfield_and_val(self.count_of)
+ f = fld.i2count(pkt, fval)
+ x = self.adjust(pkt,f)
+ return x
+
+class StrNullField(StrField):
+ def addfield(self, pkt, s, val):
+ return s+self.i2m(pkt, val)+b"\x00"
+ def getfield(self, pkt, s):
+ l = s.find(b"\x00")
+ if l < 0:
+ #XXX \x00 not found
+ return "",s
+ return s[l+1:],self.m2i(pkt, s[:l])
+ def randval(self):
+ return RandTermString(RandNum(0,1200),b"\x00")
+
+class StrStopField(StrField):
+ def __init__(self, name, default, stop, additionnal=0):
+ Field.__init__(self, name, default)
+ self.stop=stop
+ self.additionnal=additionnal
+ def getfield(self, pkt, s):
+ l = s.find(self.stop)
+ if l < 0:
+ return b"",s
+# raise Scapy_Exception,"StrStopField: stop value [%s] not found" %stop
+ l += len(self.stop)+self.additionnal
+ return s[l:],s[:l]
+ def randval(self):
+ return RandTermString(RandNum(0,1200),self.stop)
+
+class LenField(Field):
+ def i2m(self, pkt, x):
+ if x is None:
+ x = len(pkt.payload)
+ return x
+
+class BCDFloatField(Field):
+ def i2m(self, pkt, x):
+ return int(256*x)
+ def m2i(self, pkt, x):
+ return x/256.0
+
+class BitField(Field):
+ def __init__(self, name, default, size):
+ Field.__init__(self, name, default)
+ self.rev = size < 0
+ self.size = abs(size)
+ def reverse(self, val):
+ if self.size == 16:
+ val = socket.ntohs(val)
+ elif self.size == 32:
+ val = socket.ntohl(val)
+ return val
+
+ def addfield(self, pkt, s, val):
+ val = self.i2m(pkt, val)
+ if type(s) is tuple:
+ s,bitsdone,v = s
+ else:
+ bitsdone = 0
+ v = 0
+ if self.rev:
+ val = self.reverse(val)
+ v <<= self.size
+ v |= val & ((1<<self.size) - 1)
+ bitsdone += self.size
+ while bitsdone >= 8:
+ bitsdone -= 8
+ s = s+struct.pack("!B", v >> bitsdone)
+ v &= (1<<bitsdone)-1
+ if bitsdone:
+ return s,bitsdone,v
+ else:
+ return s
+ def getfield(self, pkt, s):
+ if type(s) is tuple:
+ s,bn = s
+ else:
+ bn = 0
+ # we don't want to process all the string
+ nb_bytes = (self.size+bn-1)//8 + 1
+ w = s[:nb_bytes]
+
+ # split the substring byte by byte
+ bs = struct.unpack('!%dB' % nb_bytes , w)
+
+ b = 0
+ for c in range(nb_bytes):
+ b |= int(bs[c]) << (nb_bytes-c-1)*8
+
+ # get rid of high order bits
+ b &= (1 << (nb_bytes*8-bn)) - 1
+
+ # remove low order bits
+ b = b >> (nb_bytes*8 - self.size - bn)
+
+ if self.rev:
+ b = self.reverse(b)
+
+ bn += self.size
+ s = s[bn//8:]
+ bn = bn%8
+ b = self.m2i(pkt, b)
+ if bn:
+ return (s,bn),b
+ else:
+ return s,b
+ def randval(self):
+ return RandNum(0,2**self.size-1)
+
+
+class BitFieldLenField(BitField):
+ def __init__(self, name, default, size, length_of=None, count_of=None, adjust=lambda pkt,x:x):
+ BitField.__init__(self, name, default, size)
+ self.length_of=length_of
+ self.count_of=count_of
+ self.adjust=adjust
+ def i2m(self, pkt, x):
+ #return FieldLenField.i2m.im_func(self, pkt, x)
+ return FieldLenField.i2m(self, pkt, x)
+
+
+class XBitField(BitField):
+ def i2repr(self, pkt, x):
+ return lhex(self.i2h(pkt,x))
+
+
+class EnumField(Field):
+ def __init__(self, name, default, enum, fmt = "H"):
+ i2s = self.i2s = {}
+ s2i = self.s2i = {}
+ if type(enum) is list:
+ keys = range(len(enum))
+ else:
+ keys = enum.keys()
+ if list(filter(lambda x: type(x) is str, keys)):
+ i2s,s2i = s2i,i2s
+ for k in keys:
+ i2s[k] = enum[k]
+ s2i[enum[k]] = k
+ Field.__init__(self, name, default, fmt)
+ def any2i_one(self, pkt, x):
+ if type(x) is str:
+ x = self.s2i[x]
+ return x
+ def i2repr_one(self, pkt, x):
+ if self not in conf.noenum and not isinstance(x,VolatileValue) and x in self.i2s:
+ return self.i2s[x]
+ return repr(x)
+
+ def any2i(self, pkt, x):
+ if type(x) is list:
+ return list(map(lambda z,pkt=pkt:self.any2i_one(pkt,z), x))
+ else:
+ return self.any2i_one(pkt,x)
+ def i2repr(self, pkt, x):
+ if type(x) is list:
+ return list(map(lambda z,pkt=pkt:self.i2repr_one(pkt,z), x))
+ else:
+ return self.i2repr_one(pkt,x)
+
+class CharEnumField(EnumField):
+ def __init__(self, name, default, enum, fmt = "1s"):
+ EnumField.__init__(self, name, default, enum, fmt)
+ k = self.i2s.keys()
+ if k and len(k[0]) != 1:
+ self.i2s,self.s2i = self.s2i,self.i2s
+ def any2i_one(self, pkt, x):
+ if len(x) != 1:
+ x = self.s2i[x]
+ return x
+
+class BitEnumField(BitField,EnumField):
+ def __init__(self, name, default, size, enum):
+ EnumField.__init__(self, name, default, enum)
+ self.rev = size < 0
+ self.size = abs(size)
+ def any2i(self, pkt, x):
+ return EnumField.any2i(self, pkt, x)
+ def i2repr(self, pkt, x):
+ return EnumField.i2repr(self, pkt, x)
+
+class ShortEnumField(EnumField):
+ def __init__(self, name, default, enum):
+ EnumField.__init__(self, name, default, enum, "H")
+
+class LEShortEnumField(EnumField):
+ def __init__(self, name, default, enum):
+ EnumField.__init__(self, name, default, enum, "<H")
+
+class ByteEnumField(EnumField):
+ def __init__(self, name, default, enum):
+ EnumField.__init__(self, name, default, enum, "B")
+
+class IntEnumField(EnumField):
+ def __init__(self, name, default, enum):
+ EnumField.__init__(self, name, default, enum, "I")
+
+class SignedIntEnumField(EnumField):
+ def __init__(self, name, default, enum):
+ EnumField.__init__(self, name, default, enum, "i")
+ def randval(self):
+ return RandSInt()
+
+class LEIntEnumField(EnumField):
+ def __init__(self, name, default, enum):
+ EnumField.__init__(self, name, default, enum, "<I")
+
+class XShortEnumField(ShortEnumField):
+ def i2repr_one(self, pkt, x):
+ if self not in conf.noenum and not isinstance(x,VolatileValue) and x in self.i2s:
+ return self.i2s[x]
+ return lhex(x)
+
+class MultiEnumField(EnumField):
+ def __init__(self, name, default, enum, depends_on, fmt = "H"):
+
+ self.depends_on = depends_on
+ self.i2s_multi = enum
+ self.s2i_multi = {}
+ self.s2i_all = {}
+ for m in enum:
+ self.s2i_multi[m] = s2i = {}
+ for k,v in enum[m].items():
+ s2i[v] = k
+ self.s2i_all[v] = k
+ Field.__init__(self, name, default, fmt)
+ def any2i_one(self, pkt, x):
+ if type (x) is str:
+ v = self.depends_on(pkt)
+ if v in self.s2i_multi:
+ s2i = self.s2i_multi[v]
+ if x in s2i:
+ return s2i[x]
+ return self.s2i_all[x]
+ return x
+ def i2repr_one(self, pkt, x):
+ v = self.depends_on(pkt)
+ if v in self.i2s_multi:
+ return self.i2s_multi[v].get(x,x)
+ return x
+
+class BitMultiEnumField(BitField,MultiEnumField):
+ def __init__(self, name, default, size, enum, depends_on):
+ MultiEnumField.__init__(self, name, default, enum)
+ self.rev = size < 0
+ self.size = abs(size)
+ def any2i(self, pkt, x):
+ return MultiEnumField.any2i(self, pkt, x)
+ def i2repr(self, pkt, x):
+ return MultiEnumField.i2repr(self, pkt, x)
+
+
+# Little endian long field
+class LELongField(Field):
+ def __init__(self, name, default):
+ Field.__init__(self, name, default, "<Q")
+
+# Little endian fixed length field
+class LEFieldLenField(FieldLenField):
+ def __init__(self, name, default, length_of=None, fmt = "<H", count_of=None, adjust=lambda pkt,x:x, fld=None):
+ FieldLenField.__init__(self, name, default, length_of=length_of, fmt=fmt, fld=fld, adjust=adjust)
+
+
+class FlagsField(BitField):
+ def __init__(self, name, default, size, names):
+ self.multi = type(names) is list
+ if self.multi:
+ #self.names = map(lambda x:[x], names)
+ self.names = [ [x] for x in names ]
+ else:
+ self.names = names
+ BitField.__init__(self, name, default, size)
+ def any2i(self, pkt, x):
+ if type(x) is str:
+ if self.multi:
+ #x = map(lambda y:[y], x.split("+"))
+ x = [ [y] for y in x.split("+") ]
+ y = 0
+ for i in x:
+ y |= 1 << self.names.index(i)
+ x = y
+ return x
+ def i2repr(self, pkt, x):
+ if type(x) is list or type(x) is tuple:
+ return repr(x)
+ if self.multi:
+ r = []
+ else:
+ r = ""
+ i=0
+ while x:
+ if x & 1:
+ r += self.names[i]
+ i += 1
+ x >>= 1
+ if self.multi:
+ r = "+".join(r)
+ return r
+
+
+
+
+class FixedPointField(BitField):
+ def __init__(self, name, default, size, frac_bits=16):
+ self.frac_bits = frac_bits
+ BitField.__init__(self, name, default, size)
+
+ def any2i(self, pkt, val):
+ if val is None:
+ return val
+ ival = int(val)
+ fract = int( (val-ival) * 2**self.frac_bits )
+ return (ival << self.frac_bits) | fract
+
+ def i2h(self, pkt, val):
+ int_part = val >> self.frac_bits
+ frac_part = val & (1 << self.frac_bits) - 1
+ frac_part /= 2.0**self.frac_bits
+ return int_part+frac_part
+ def i2repr(self, pkt, val):
+ return self.i2h(pkt, val)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/__init__.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/__init__.py
new file mode 100644
index 00000000..a3f2afb9
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/__init__.py
@@ -0,0 +1,8 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Layer package.
+"""
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/all.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/all.py
new file mode 100644
index 00000000..8104b6a2
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/all.py
@@ -0,0 +1,45 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+All layers. Configurable with conf.load_layers.
+"""
+
+import importlib
+from scapy.config import conf
+from scapy.error import log_loading
+import logging
+log = logging.getLogger("scapy.loading")
+
+#log_loading.info("Please, report issues to https://github.com/phaethon/scapy")
+
+def _import_star(m):
+ #mod = __import__("." + m, globals(), locals())
+ mod = importlib.import_module("scapy.layers." + m)
+ for k,v in mod.__dict__.items():
+ globals()[k] = v
+
+
+for _l in ['l2','inet','inet6']:
+ log_loading.debug("Loading layer %s" % _l)
+ #print "load ",_l
+ _import_star(_l)
+
+#def _import_star(m):
+ #mod = __import__("." + m, globals(), locals())
+# mod = importlib.import_module("scapy.layers." + m)
+# for k,v in mod.__dict__.items():
+# globals()[k] = v
+
+#for _l in conf.load_layers:
+# log_loading.debug("Loading layer %s" % _l)
+# try:
+# _import_star(_l)
+# except Exception as e:
+# log.warning("can't import layer %s: %s" % (_l,e))
+
+
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/bluetooth.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/bluetooth.py
new file mode 100644
index 00000000..5dd365a4
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/bluetooth.py
@@ -0,0 +1,213 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Bluetooth layers, sockets and send/receive functions.
+"""
+
+import socket,struct
+
+from scapy.config import conf
+from scapy.packet import *
+from scapy.fields import *
+from scapy.supersocket import SuperSocket
+from scapy.data import MTU
+
+
+class HCI_Hdr(Packet):
+ name = "HCI header"
+ fields_desc = [ ByteEnumField("type",2,{1:"command",2:"ACLdata",3:"SCOdata",4:"event",5:"vendor"}),]
+
+ def mysummary(self):
+ return self.sprintf("HCI %type%")
+
+class HCI_ACL_Hdr(Packet):
+ name = "HCI ACL header"
+ fields_desc = [ ByteField("handle",0), # Actually, handle is 12 bits and flags is 4.
+ ByteField("flags",0), # I wait to write a LEBitField
+ LEShortField("len",None), ]
+ def post_build(self, p, pay):
+ p += pay
+ if self.len is None:
+ l = len(p)-4
+ #p = p[:2]+chr(l&0xff)+chr((l>>8)&0xff)+p[4:]
+ p = p[:2]+bytes([(l&0xff),((l>>8)&0xff)])+p[4:]
+ return p
+
+
+class L2CAP_Hdr(Packet):
+ name = "L2CAP header"
+ fields_desc = [ LEShortField("len",None),
+ LEShortEnumField("cid",0,{1:"control"}),]
+
+ def post_build(self, p, pay):
+ p += pay
+ if self.len is None:
+ l = len(p)-4
+ #p = p[:2]+chr(l&0xff)+chr((l>>8)&0xff)+p[4:]
+ p = p[:2]+bytes([(l&0xff),((l>>8)&0xff)])+p[4:]
+ return p
+
+
+
+class L2CAP_CmdHdr(Packet):
+ name = "L2CAP command header"
+ fields_desc = [
+ ByteEnumField("code",8,{1:"rej",2:"conn_req",3:"conn_resp",
+ 4:"conf_req",5:"conf_resp",6:"disconn_req",
+ 7:"disconn_resp",8:"echo_req",9:"echo_resp",
+ 10:"info_req",11:"info_resp"}),
+ ByteField("id",0),
+ LEShortField("len",None) ]
+ def post_build(self, p, pay):
+ p += pay
+ if self.len is None:
+ l = len(p)-4
+ #p = p[:2]+chr(l&0xff)+chr((l>>8)&0xff)+p[4:]
+ p = p[:2]+bytes([(l&0xff),((l>>8)&0xff)])+p[4:]
+ return p
+ def answers(self, other):
+ if other.id == self.id:
+ if self.code == 1:
+ return 1
+ if other.code in [2,4,6,8,10] and self.code == other.code+1:
+ if other.code == 8:
+ return 1
+ return self.payload.answers(other.payload)
+ return 0
+
+class L2CAP_ConnReq(Packet):
+ name = "L2CAP Conn Req"
+ fields_desc = [ LEShortEnumField("psm",0,{1:"SDP",3:"RFCOMM",5:"telephony control"}),
+ LEShortField("scid",0),
+ ]
+
+class L2CAP_ConnResp(Packet):
+ name = "L2CAP Conn Resp"
+ fields_desc = [ LEShortField("dcid",0),
+ LEShortField("scid",0),
+ LEShortEnumField("result",0,["no_info","authen_pend","author_pend"]),
+ LEShortEnumField("status",0,["success","pend","bad_psm",
+ "cr_sec_block","cr_no_mem"]),
+ ]
+ def answers(self, other):
+ return self.scid == other.scid
+
+class L2CAP_CmdRej(Packet):
+ name = "L2CAP Command Rej"
+ fields_desc = [ LEShortField("reason",0),
+ ]
+
+
+class L2CAP_ConfReq(Packet):
+ name = "L2CAP Conf Req"
+ fields_desc = [ LEShortField("dcid",0),
+ LEShortField("flags",0),
+ ]
+
+class L2CAP_ConfResp(Packet):
+ name = "L2CAP Conf Resp"
+ fields_desc = [ LEShortField("scid",0),
+ LEShortField("flags",0),
+ LEShortEnumField("result",0,["success","unaccept","reject","unknown"]),
+ ]
+ def answers(self, other):
+ return self.scid == other.scid
+
+
+class L2CAP_DisconnReq(Packet):
+ name = "L2CAP Disconn Req"
+ fields_desc = [ LEShortField("dcid",0),
+ LEShortField("scid",0), ]
+
+class L2CAP_DisconnResp(Packet):
+ name = "L2CAP Disconn Resp"
+ fields_desc = [ LEShortField("dcid",0),
+ LEShortField("scid",0), ]
+ def answers(self, other):
+ return self.scid == other.scid
+
+
+
+class L2CAP_InfoReq(Packet):
+ name = "L2CAP Info Req"
+ fields_desc = [ LEShortEnumField("type",0,{1:"CL_MTU",2:"FEAT_MASK"}),
+ StrField("data","")
+ ]
+
+
+class L2CAP_InfoResp(Packet):
+ name = "L2CAP Info Resp"
+ fields_desc = [ LEShortField("type",0),
+ LEShortEnumField("result",0,["success","not_supp"]),
+ StrField("data",""), ]
+ def answers(self, other):
+ return self.type == other.type
+
+
+
+bind_layers( HCI_Hdr, HCI_ACL_Hdr, type=2)
+bind_layers( HCI_Hdr, conf.raw_layer, )
+bind_layers( HCI_ACL_Hdr, L2CAP_Hdr, )
+bind_layers( L2CAP_Hdr, L2CAP_CmdHdr, cid=1)
+bind_layers( L2CAP_CmdHdr, L2CAP_CmdRej, code=1)
+bind_layers( L2CAP_CmdHdr, L2CAP_ConnReq, code=2)
+bind_layers( L2CAP_CmdHdr, L2CAP_ConnResp, code=3)
+bind_layers( L2CAP_CmdHdr, L2CAP_ConfReq, code=4)
+bind_layers( L2CAP_CmdHdr, L2CAP_ConfResp, code=5)
+bind_layers( L2CAP_CmdHdr, L2CAP_DisconnReq, code=6)
+bind_layers( L2CAP_CmdHdr, L2CAP_DisconnResp, code=7)
+bind_layers( L2CAP_CmdHdr, L2CAP_InfoReq, code=10)
+bind_layers( L2CAP_CmdHdr, L2CAP_InfoResp, code=11)
+
+class BluetoothL2CAPSocket(SuperSocket):
+ desc = "read/write packets on a connected L2CAP socket"
+ def __init__(self, peer):
+ s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW,
+ socket.BTPROTO_L2CAP)
+ s.connect((peer,0))
+
+ self.ins = self.outs = s
+
+ def recv(self, x=MTU):
+ return L2CAP_CmdHdr(self.ins.recv(x))
+
+
+class BluetoothHCISocket(SuperSocket):
+ desc = "read/write on a BlueTooth HCI socket"
+ def __init__(self, iface=0x10000, type=None):
+ s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI)
+ s.setsockopt(socket.SOL_HCI, socket.HCI_DATA_DIR,1)
+ s.setsockopt(socket.SOL_HCI, socket.HCI_TIME_STAMP,1)
+ s.setsockopt(socket.SOL_HCI, socket.HCI_FILTER, struct.pack("IIIh2x", 0xffffffff,0xffffffff,0xffffffff,0)) #type mask, event mask, event mask, opcode
+ s.bind((iface,))
+ self.ins = self.outs = s
+# s.connect((peer,0))
+
+
+ def recv(self, x):
+ return HCI_Hdr(self.ins.recv(x))
+
+## Bluetooth
+
+
+@conf.commands.register
+def srbt(peer, pkts, inter=0.1, *args, **kargs):
+ """send and receive using a bluetooth socket"""
+ s = conf.BTsocket(peer=peer)
+ a,b = sndrcv(s,pkts,inter=inter,*args,**kargs)
+ s.close()
+ return a,b
+
+@conf.commands.register
+def srbt1(peer, pkts, *args, **kargs):
+ """send and receive 1 packet using a bluetooth socket"""
+ a,b = srbt(peer, pkts, *args, **kargs)
+ if len(a) > 0:
+ return a[0][1]
+
+
+
+conf.BTsocket = BluetoothL2CAPSocket
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/dhcp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/dhcp.py
new file mode 100644
index 00000000..e2b7c1f1
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/dhcp.py
@@ -0,0 +1,381 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+DHCP (Dynamic Host Configuration Protocol) d BOOTP
+"""
+
+import struct
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.ansmachine import *
+from scapy.layers.inet import UDP,IP
+from scapy.layers.l2 import Ether
+from scapy.base_classes import Net
+from scapy.volatile import RandField
+
+from scapy.arch import get_if_raw_hwaddr
+from scapy.sendrecv import srp1
+from scapy.utils import str2bytes
+
+dhcpmagic=b"c\x82Sc"
+
+
+class BOOTP(Packet):
+ name = "BOOTP"
+ fields_desc = [ ByteEnumField("op",1, {1:"BOOTREQUEST", 2:"BOOTREPLY"}),
+ ByteField("htype",1),
+ ByteField("hlen",6),
+ ByteField("hops",0),
+ IntField("xid",0),
+ ShortField("secs",0),
+ FlagsField("flags", 0, 16, "???????????????B"),
+ IPField("ciaddr","0.0.0.0"),
+ IPField("yiaddr","0.0.0.0"),
+ IPField("siaddr","0.0.0.0"),
+ IPField("giaddr","0.0.0.0"),
+ Field("chaddr",b"", "16s"),
+ Field("sname",b"","64s"),
+ Field("file",b"","128s"),
+ StrField("options",b"") ]
+ def guess_payload_class(self, payload):
+ if self.options[:len(dhcpmagic)] == dhcpmagic:
+ return DHCP
+ else:
+ return Packet.guess_payload_class(self, payload)
+ def extract_padding(self,s):
+ if self.options[:len(dhcpmagic)] == dhcpmagic:
+ # set BOOTP options to DHCP magic cookie and make rest a payload of DHCP options
+ payload = self.options[len(dhcpmagic):]
+ self.options = self.options[:len(dhcpmagic)]
+ return payload, None
+ else:
+ return b"", None
+ def hashret(self):
+ return struct.pack("L", self.xid)
+ def answers(self, other):
+ if not isinstance(other, BOOTP):
+ return 0
+ return self.xid == other.xid
+
+
+
+#DHCP_UNKNOWN, DHCP_IP, DHCP_IPLIST, DHCP_TYPE \
+#= range(4)
+#
+
+DHCPTypes = {
+ 1: "discover",
+ 2: "offer",
+ 3: "request",
+ 4: "decline",
+ 5: "ack",
+ 6: "nak",
+ 7: "release",
+ 8: "inform",
+ 9: "force_renew",
+ 10:"lease_query",
+ 11:"lease_unassigned",
+ 12:"lease_unknown",
+ 13:"lease_active",
+ }
+
+DHCPOptions = {
+ 0: "pad",
+ 1: IPField("subnet_mask", "0.0.0.0"),
+ 2: "time_zone",
+ 3: IPField("router","0.0.0.0"),
+ 4: IPField("time_server","0.0.0.0"),
+ 5: IPField("IEN_name_server","0.0.0.0"),
+ 6: IPField("name_server","0.0.0.0"),
+ 7: IPField("log_server","0.0.0.0"),
+ 8: IPField("cookie_server","0.0.0.0"),
+ 9: IPField("lpr_server","0.0.0.0"),
+ 12: "hostname",
+ 14: "dump_path",
+ 15: "domain",
+ 17: "root_disk_path",
+ 22: "max_dgram_reass_size",
+ 23: "default_ttl",
+ 24: "pmtu_timeout",
+ 28: IPField("broadcast_address","0.0.0.0"),
+ 35: "arp_cache_timeout",
+ 36: "ether_or_dot3",
+ 37: "tcp_ttl",
+ 38: "tcp_keepalive_interval",
+ 39: "tcp_keepalive_garbage",
+ 40: "NIS_domain",
+ 41: IPField("NIS_server","0.0.0.0"),
+ 42: IPField("NTP_server","0.0.0.0"),
+ 43: "vendor_specific",
+ 44: IPField("NetBIOS_server","0.0.0.0"),
+ 45: IPField("NetBIOS_dist_server","0.0.0.0"),
+ 50: IPField("requested_addr","0.0.0.0"),
+ 51: IntField("lease_time", 43200),
+ 54: IPField("server_id","0.0.0.0"),
+ 55: "param_req_list",
+ 57: ShortField("max_dhcp_size", 1500),
+ 58: IntField("renewal_time", 21600),
+ 59: IntField("rebinding_time", 37800),
+ 60: "vendor_class_id",
+ 61: "client_id",
+
+ 64: "NISplus_domain",
+ 65: IPField("NISplus_server","0.0.0.0"),
+ 69: IPField("SMTP_server","0.0.0.0"),
+ 70: IPField("POP3_server","0.0.0.0"),
+ 71: IPField("NNTP_server","0.0.0.0"),
+ 72: IPField("WWW_server","0.0.0.0"),
+ 73: IPField("Finger_server","0.0.0.0"),
+ 74: IPField("IRC_server","0.0.0.0"),
+ 75: IPField("StreetTalk_server","0.0.0.0"),
+ 76: "StreetTalk_Dir_Assistance",
+ 82: "relay_agent_Information",
+ 53: ByteEnumField("message-type", 1, DHCPTypes),
+ # 55: DHCPRequestListField("request-list"),
+ 255: "end"
+ }
+
+DHCPRevOptions = {}
+
+for k,v in DHCPOptions.items():
+ if type(v) is str:
+ n = v
+ v = None
+ else:
+ n = v.name
+ DHCPRevOptions[n] = (k,v)
+del(n)
+del(v)
+del(k)
+
+
+
+
+class RandDHCPOptions(RandField):
+ def __init__(self, size=None, rndstr=None):
+ if size is None:
+ size = RandNumExpo(0.05)
+ self.size = size
+ if rndstr is None:
+ rndstr = RandBin(RandNum(0,255))
+ self.rndstr=rndstr
+ self._opts = list(DHCPOptions.values())
+ self._opts.remove("pad")
+ self._opts.remove("end")
+ def _fix(self):
+ op = []
+ for k in range(self.size):
+ o = random.choice(self._opts)
+ if type(o) is str:
+ op.append((o,self.rndstr*1))
+ else:
+ op.append((o.name, o.randval()._fix()))
+ return op
+
+
+class DHCPOptionsField(StrField):
+ islist=1
+ def i2repr(self,pkt,x):
+ s = []
+ for v in x:
+ if type(v) is tuple and len(v) >= 2:
+ if v[0] in DHCPRevOptions and isinstance(DHCPRevOptions[v[0]][1],Field):
+ f = DHCPRevOptions[v[0]][1]
+ vv = ",".join(f.i2repr(pkt,val) for val in v[1:])
+ else:
+ vv = ",".join(repr(val) for val in v[1:])
+ r = "%s=%s" % (v[0],vv)
+ s.append(r)
+ else:
+ s.append(sane(v))
+ return "[%s]" % (" ".join(s))
+
+ def getfield(self, pkt, s):
+ return b"", self.m2i(pkt, s)
+
+ def m2i(self, pkt, x):
+ opt = []
+ while x:
+ #o = ord(x[0])
+ o = x[0]
+ if o == 255:
+ opt.append("end")
+ x = x[1:]
+ continue
+ if o == 0:
+ opt.append("pad")
+ x = x[1:]
+ continue
+ #if len(x) < 2 or len(x) < ord(x[1])+2:
+ if len(x) < 2 or len(x) < x[1]+2:
+ opt.append(x)
+ break
+ elif o in DHCPOptions:
+ f = DHCPOptions[o]
+
+ if isinstance(f, str):
+ #olen = ord(x[1])
+ olen = x[1]
+ opt.append( (f,x[2:olen+2]) )
+ x = x[olen+2:]
+ else:
+ olen = x[1]
+ lval = [f.name]
+ try:
+ left = x[2:olen+2]
+ while left:
+ left, val = f.getfield(pkt,left)
+ lval.append(val)
+ except:
+ opt.append(x)
+ break
+ else:
+ otuple = tuple(lval)
+ opt.append(otuple)
+ x = x[olen+2:]
+ else:
+ #olen = ord(x[1])
+ olen = x[1]
+ opt.append((o, x[2:olen+2]))
+ x = x[olen+2:]
+ return opt
+ def i2m(self, pkt, x):
+ if type(x) is str:
+ return x
+ s = b""
+ for o in x:
+ if type(o) is tuple and len(o) >= 2:
+ name = o[0]
+ lval = o[1:]
+
+ if isinstance(name, int):
+ onum, oval = name, b"".join(lval)
+ elif name in DHCPRevOptions:
+ onum, f = DHCPRevOptions[name]
+ if f is not None:
+ lval = [f.addfield(pkt,b"",f.any2i(pkt,val)) for val in lval]
+ oval = b"".join(lval)
+ else:
+ warning("Unknown field option %s" % name)
+ continue
+
+ s += bytes([onum])
+ s += bytes([len(oval)])
+ s += oval
+
+ elif (type(o) is str and o in DHCPRevOptions and
+ DHCPRevOptions[o][1] == None):
+ s += bytes([DHCPRevOptions[o][0]])
+ elif type(o) is int:
+ s += chr(o)+b"\0"
+ elif type(o) is str:
+ s += str2bytes(o)
+ elif type(o) is bytes:
+ s += o
+ else:
+ warning("Malformed option %s" % o)
+ return s
+
+
+class DHCP(Packet):
+ name = "DHCP options"
+ fields_desc = [ DHCPOptionsField("options",b"") ]
+
+
+bind_layers( UDP, BOOTP, dport=67, sport=68)
+bind_layers( UDP, BOOTP, dport=68, sport=67)
+bind_bottom_up( UDP, BOOTP, dport=67, sport=67)
+bind_layers( BOOTP, DHCP, options=b'c\x82Sc')
+
+def dhcp_request(iface=None,**kargs):
+ if conf.checkIPaddr != 0:
+ warning("conf.checkIPaddr is not 0, I may not be able to match the answer")
+ if iface is None:
+ iface = conf.iface
+ hw = get_if_raw_hwaddr(iface)
+ return srp1(Ether(dst="ff:ff:ff:ff:ff:ff")/IP(src="0.0.0.0",dst="255.255.255.255")/UDP(sport=68,dport=67)
+ /BOOTP(chaddr=hw)/DHCP(options=[("message-type","discover"),"end"]),iface=iface,**kargs)
+
+
+class BOOTP_am(AnsweringMachine):
+ function_name = "bootpd"
+ filter = "udp and port 68 and port 67"
+ send_function = staticmethod(sendp)
+ def parse_options(self, pool=Net("192.168.1.128/25"), network="192.168.1.0/24",gw="192.168.1.1",
+ domain="localnet", renewal_time=60, lease_time=1800):
+ if type(pool) is str:
+ poom = Net(pool)
+ self.domain = domain
+ netw,msk = (network.split("/")+["32"])[:2]
+ msk = itom(int(msk))
+ self.netmask = ltoa(msk)
+ self.network = ltoa(atol(netw)&msk)
+ self.broadcast = ltoa( atol(self.network) | (0xffffffff&~msk) )
+ self.gw = gw
+ if isinstance(pool,Gen):
+ pool = [k for k in pool if k not in [gw, self.network, self.broadcast]]
+ pool.reverse()
+ if len(pool) == 1:
+ pool, = pool
+ self.pool = pool
+ self.lease_time = lease_time
+ self.renewal_time = renewal_time
+ self.leases = {}
+
+ def is_request(self, req):
+ if not req.haslayer(BOOTP):
+ return 0
+ reqb = req.getlayer(BOOTP)
+ if reqb.op != 1:
+ return 0
+ return 1
+
+ def print_reply(self, req, reply):
+ print("Reply %s to %s" % (reply.getlayer(IP).dst,reply.dst))
+
+ def make_reply(self, req):
+ mac = req.src
+ if type(self.pool) is list:
+ if not mac in self.leases:
+ self.leases[mac] = self.pool.pop()
+ ip = self.leases[mac]
+ else:
+ ip = self.pool
+
+ repb = req.getlayer(BOOTP).copy()
+ repb.op="BOOTREPLY"
+ repb.yiaddr = ip
+ repb.siaddr = self.gw
+ repb.ciaddr = self.gw
+ repb.giaddr = self.gw
+ del(repb.payload)
+ rep=Ether(dst=mac)/IP(dst=ip)/UDP(sport=req.dport,dport=req.sport)/repb
+ return rep
+
+
+class DHCP_am(BOOTP_am):
+ function_name="dhcpd"
+ def make_reply(self, req):
+ resp = BOOTP_am.make_reply(self, req)
+ if DHCP in req:
+ dhcp_options = [(op[0],{1:2,3:5}.get(op[1],op[1]))
+ for op in req[DHCP].options
+ if type(op) is tuple and op[0] == "message-type"]
+ dhcp_options += [("server_id",self.gw),
+ ("domain", self.domain),
+ ("router", self.gw),
+ ("name_server", self.gw),
+ ("broadcast_address", self.broadcast),
+ ("subnet_mask", self.netmask),
+ ("renewal_time", self.renewal_time),
+ ("lease_time", self.lease_time),
+ "end"
+ ]
+ resp /= DHCP(options=dhcp_options)
+ return resp
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/dhcp6.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/dhcp6.py
new file mode 100644
index 00000000..a11a4149
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/dhcp6.py
@@ -0,0 +1,1718 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more information
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+## Copyright (C) 2005 Guillaume Valadon <guedou@hongo.wide.ad.jp>
+## Arnaud Ebalard <arnaud.ebalard@eads.net>
+
+"""
+DHCPv6: Dynamic Host Configuration Protocol for IPv6. [RFC 3315]
+"""
+
+import socket
+from scapy.packet import *
+from scapy.fields import *
+from scapy.utils6 import *
+from scapy.layers.inet6 import *
+from scapy.ansmachine import AnsweringMachine
+
+#############################################################################
+# Helpers ##
+#############################################################################
+
+def get_cls(name, fallback_cls):
+ return globals().get(name, fallback_cls)
+
+
+#############################################################################
+#############################################################################
+### DHCPv6 ###
+#############################################################################
+#############################################################################
+
+All_DHCP_Relay_Agents_and_Servers = "ff02::1:2"
+All_DHCP_Servers = "ff05::1:3" # Site-Local scope : deprecated by 3879
+
+dhcp6opts = { 1: "CLIENTID",
+ 2: "SERVERID",
+ 3: "IA_NA",
+ 4: "IA_TA",
+ 5: "IAADDR",
+ 6: "ORO",
+ 7: "PREFERENCE",
+ 8: "ELAPSED_TIME",
+ 9: "RELAY_MSG",
+ 11: "AUTH",
+ 12: "UNICAST",
+ 13: "STATUS_CODE",
+ 14: "RAPID_COMMIT",
+ 15: "USER_CLASS",
+ 16: "VENDOR_CLASS",
+ 17: "VENDOR_OPTS",
+ 18: "INTERFACE_ID",
+ 19: "RECONF_MSG",
+ 20: "RECONF_ACCEPT",
+ 21: "SIP Servers Domain Name List", #RFC3319
+ 22: "SIP Servers IPv6 Address List", #RFC3319
+ 23: "DNS Recursive Name Server Option", #RFC3646
+ 24: "Domain Search List option", #RFC3646
+ 25: "OPTION_IA_PD", #RFC3633
+ 26: "OPTION_IAPREFIX", #RFC3633
+ 27: "OPTION_NIS_SERVERS", #RFC3898
+ 28: "OPTION_NISP_SERVERS", #RFC3898
+ 29: "OPTION_NIS_DOMAIN_NAME", #RFC3898
+ 30: "OPTION_NISP_DOMAIN_NAME", #RFC3898
+ 31: "OPTION_SNTP_SERVERS", #RFC4075
+ 32: "OPTION_INFORMATION_REFRESH_TIME", #RFC4242
+ 33: "OPTION_BCMCS_SERVER_D", #RFC4280
+ 34: "OPTION_BCMCS_SERVER_A", #RFC4280
+ 36: "OPTION_GEOCONF_CIVIC", #RFC-ietf-geopriv-dhcp-civil-09.txt
+ 37: "OPTION_REMOTE_ID", #RFC4649
+ 38: "OPTION_SUBSCRIBER_ID", #RFC4580
+ 39: "OPTION_CLIENT_FQDN" } #RFC4704
+
+dhcp6opts_by_code = { 1: "DHCP6OptClientId",
+ 2: "DHCP6OptServerId",
+ 3: "DHCP6OptIA_NA",
+ 4: "DHCP6OptIA_TA",
+ 5: "DHCP6OptIAAddress",
+ 6: "DHCP6OptOptReq",
+ 7: "DHCP6OptPref",
+ 8: "DHCP6OptElapsedTime",
+ 9: "DHCP6OptRelayMsg",
+ 11: "DHCP6OptAuth",
+ 12: "DHCP6OptServerUnicast",
+ 13: "DHCP6OptStatusCode",
+ 14: "DHCP6OptRapidCommit",
+ 15: "DHCP6OptUserClass",
+ 16: "DHCP6OptVendorClass",
+ 17: "DHCP6OptVendorSpecificInfo",
+ 18: "DHCP6OptIfaceId",
+ 19: "DHCP6OptReconfMsg",
+ 20: "DHCP6OptReconfAccept",
+ 21: "DHCP6OptSIPDomains", #RFC3319
+ 22: "DHCP6OptSIPServers", #RFC3319
+ 23: "DHCP6OptDNSServers", #RFC3646
+ 24: "DHCP6OptDNSDomains", #RFC3646
+ 25: "DHCP6OptIA_PD", #RFC3633
+ 26: "DHCP6OptIAPrefix", #RFC3633
+ 27: "DHCP6OptNISServers", #RFC3898
+ 28: "DHCP6OptNISPServers", #RFC3898
+ 29: "DHCP6OptNISDomain", #RFC3898
+ 30: "DHCP6OptNISPDomain", #RFC3898
+ 31: "DHCP6OptSNTPServers", #RFC4075
+ 32: "DHCP6OptInfoRefreshTime", #RFC4242
+ 33: "DHCP6OptBCMCSDomains", #RFC4280
+ 34: "DHCP6OptBCMCSServers", #RFC4280
+ #36: "DHCP6OptGeoConf", #RFC-ietf-geopriv-dhcp-civil-09.txt
+ 37: "DHCP6OptRemoteID", #RFC4649
+ 38: "DHCP6OptSubscriberID", #RFC4580
+ 39: "DHCP6OptClientFQDN", #RFC4704
+ #40: "DHCP6OptPANAAgent", #RFC-ietf-dhc-paa-option-05.txt
+ #41: "DHCP6OptNewPOSIXTimeZone, #RFC4833
+ #42: "DHCP6OptNewTZDBTimeZone, #RFC4833
+ 43: "DHCP6OptRelayAgentERO" #RFC4994
+ #44: "DHCP6OptLQQuery", #RFC5007
+ #45: "DHCP6OptLQClientData", #RFC5007
+ #46: "DHCP6OptLQClientTime", #RFC5007
+ #47: "DHCP6OptLQRelayData", #RFC5007
+ #48: "DHCP6OptLQClientLink", #RFC5007
+}
+
+
+# sect 5.3 RFC 3315 : DHCP6 Messages types
+dhcp6types = { 1:"SOLICIT",
+ 2:"ADVERTISE",
+ 3:"REQUEST",
+ 4:"CONFIRM",
+ 5:"RENEW",
+ 6:"REBIND",
+ 7:"REPLY",
+ 8:"RELEASE",
+ 9:"DECLINE",
+ 10:"RECONFIGURE",
+ 11:"INFORMATION-REQUEST",
+ 12:"RELAY-FORW",
+ 13:"RELAY-REPL" }
+
+
+#####################################################################
+### DHCPv6 DUID related stuff ###
+#####################################################################
+
+duidtypes = { 1: "Link-layer address plus time",
+ 2: "Vendor-assigned unique ID based on Enterprise Number",
+ 3: "Link-layer Address" }
+
+# DUID hardware types - RFC 826 - Extracted from
+# http://www.iana.org/assignments/arp-parameters on 31/10/06
+# We should add the length of every kind of address.
+duidhwtypes = { 0: "NET/ROM pseudo", # Not referenced by IANA
+ 1: "Ethernet (10Mb)",
+ 2: "Experimental Ethernet (3Mb)",
+ 3: "Amateur Radio AX.25",
+ 4: "Proteon ProNET Token Ring",
+ 5: "Chaos",
+ 6: "IEEE 802 Networks",
+ 7: "ARCNET",
+ 8: "Hyperchannel",
+ 9: "Lanstar",
+ 10: "Autonet Short Address",
+ 11: "LocalTalk",
+ 12: "LocalNet (IBM PCNet or SYTEK LocalNET)",
+ 13: "Ultra link",
+ 14: "SMDS",
+ 15: "Frame Relay",
+ 16: "Asynchronous Transmission Mode (ATM)",
+ 17: "HDLC",
+ 18: "Fibre Channel",
+ 19: "Asynchronous Transmission Mode (ATM)",
+ 20: "Serial Line",
+ 21: "Asynchronous Transmission Mode (ATM)",
+ 22: "MIL-STD-188-220",
+ 23: "Metricom",
+ 24: "IEEE 1394.1995",
+ 25: "MAPOS",
+ 26: "Twinaxial",
+ 27: "EUI-64",
+ 28: "HIPARP",
+ 29: "IP and ARP over ISO 7816-3",
+ 30: "ARPSec",
+ 31: "IPsec tunnel",
+ 32: "InfiniBand (TM)",
+ 33: "TIA-102 Project 25 Common Air Interface (CAI)" }
+
+class UTCTimeField(IntField):
+ epoch = (2000, 1, 1, 0, 0, 0, 5, 1, 0) # required Epoch
+ def i2repr(self, pkt, x):
+ x = self.i2h(pkt, x)
+ from time import gmtime, strftime, mktime
+ delta = mktime(self.epoch) - mktime(gmtime(0))
+ x = x + delta
+ t = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime(x))
+ return "%s (%d)" % (t, x)
+
+class _LLAddrField(MACField):
+ pass
+
+# XXX We only support Ethernet addresses at the moment. _LLAddrField
+# will be modified when needed. Ask us. --arno
+class DUID_LLT(Packet): # sect 9.2 RFC 3315
+ name = "DUID - Link-layer address plus time"
+ fields_desc = [ ShortEnumField("type", 1, duidtypes),
+ XShortEnumField("hwtype", 1, duidhwtypes),
+ UTCTimeField("timeval", 0), # i.e. 01 Jan 2000
+ _LLAddrField("lladdr", ETHER_ANY) ]
+
+# In fact, the IANA enterprise-numbers file available at
+# http://www.iana.org/assignments/enterprise-numbers
+# is simply huge (more than 2 MB, and 600 KB in bz2). I'll
+# add only the most common vendors, and encountered values.
+# -- arno
+iana_enterprise_num = { 9: "ciscoSystems",
+ 35: "Nortel Networks",
+ 43: "3Com",
+ 311: "Microsoft",
+ 2636: "Juniper Networks, Inc.",
+ 4526: "Netgear",
+ 5771: "Cisco Systems, Inc.",
+ 5842: "Cisco Systems",
+ 16885: "Nortel Networks" }
+
+class DUID_EN(Packet): # sect 9.3 RFC 3315
+ name = "DUID - Assigned by Vendor Based on Enterprise Number"
+ fields_desc = [ ShortEnumField("type", 2, duidtypes),
+ IntEnumField("enterprisenum", 311, iana_enterprise_num),
+ StrField("id",b"") ]
+
+class DUID_LL(Packet): # sect 9.4 RFC 3315
+ name = "DUID - Based on Link-layer Address"
+ fields_desc = [ ShortEnumField("type", 3, duidtypes),
+ XShortEnumField("hwtype", 1, duidhwtypes),
+ _LLAddrField("lladdr", ETHER_ANY) ]
+
+duid_cls = { 1: "DUID_LLT",
+ 2: "DUID_EN",
+ 3: "DUID_LL"}
+
+#####################################################################
+### DHCPv6 Options classes ###
+#####################################################################
+
+class _DHCP6OptGuessPayload(Packet):
+ def guess_payload_class(self, payload):
+ cls = conf.raw_layer
+ if len(payload) > 2 :
+ opt = struct.unpack("!H", payload[:2])[0]
+ cls = get_cls(dhcp6opts_by_code.get(opt, "DHCP6OptUnknown"), DHCP6OptUnknown)
+ return cls
+
+class DHCP6OptUnknown(_DHCP6OptGuessPayload): # A generic DHCPv6 Option
+ name = "Unknown DHCPv6 OPtion"
+ fields_desc = [ ShortEnumField("optcode", 0, dhcp6opts),
+ FieldLenField("optlen", None, length_of="data", fmt="!H"),
+ StrLenField("data", b"",
+ length_from = lambda pkt: pkt.optlen)]
+
+class _DUIDField(PacketField):
+ holds_packets=1
+ def __init__(self, name, default, length_from=None):
+ StrField.__init__(self, name, default)
+ self.length_from = length_from
+
+ def i2m(self, pkt, i):
+ return bytes(i)
+
+ def m2i(self, pkt, x):
+ cls = conf.raw_layer
+ if len(x) > 4:
+ o = struct.unpack("!H", x[:2])[0]
+ cls = get_cls(duid_cls.get(o, conf.raw_layer), conf.raw_layer)
+ return cls(x)
+
+ def getfield(self, pkt, s):
+ l = self.length_from(pkt)
+ return s[l:], self.m2i(pkt,s[:l])
+
+
+class DHCP6OptClientId(_DHCP6OptGuessPayload): # RFC sect 22.2
+ name = "DHCP6 Client Identifier Option"
+ fields_desc = [ ShortEnumField("optcode", 1, dhcp6opts),
+ FieldLenField("optlen", None, length_of="duid", fmt="!H"),
+ _DUIDField("duid", "",
+ length_from = lambda pkt: pkt.optlen) ]
+
+
+class DHCP6OptServerId(DHCP6OptClientId): # RFC sect 22.3
+ name = "DHCP6 Server Identifier Option"
+ optcode = 2
+
+# Should be encapsulated in the option field of IA_NA or IA_TA options
+# Can only appear at that location.
+# TODO : last field IAaddr-options is not defined in the reference document
+class DHCP6OptIAAddress(_DHCP6OptGuessPayload): # RFC sect 22.6
+ name = "DHCP6 IA Address Option (IA_TA or IA_NA suboption)"
+ fields_desc = [ ShortEnumField("optcode", 5, dhcp6opts),
+ FieldLenField("optlen", None, length_of="iaaddropts",
+ fmt="!H", adjust = lambda pkt,x: x+24),
+ IP6Field("addr", "::"),
+ IntField("preflft", 0),
+ IntField("validlft", 0),
+ XIntField("iaid", None),
+ StrLenField("iaaddropts", b"",
+ length_from = lambda pkt: pkt.optlen - 24) ]
+ def guess_payload_class(self, payload):
+ return conf.padding_layer
+
+class _IANAOptField(PacketListField):
+ def i2len(self, pkt, z):
+ if z is None or z == []:
+ return 0
+ return sum(map(lambda x: len(bytes(x)) ,z))
+
+ def getfield(self, pkt, s):
+ l = self.length_from(pkt)
+ lst = []
+ remain, payl = s[:l], s[l:]
+ while len(remain)>0:
+ p = self.m2i(pkt,remain)
+ if conf.padding_layer in p:
+ pad = p[conf.padding_layer]
+ remain = pad.load
+ del(pad.underlayer.payload)
+ else:
+ remain = ""
+ lst.append(p)
+ return payl,lst
+
+class DHCP6OptIA_NA(_DHCP6OptGuessPayload): # RFC sect 22.4
+ name = "DHCP6 Identity Association for Non-temporary Addresses Option"
+ fields_desc = [ ShortEnumField("optcode", 3, dhcp6opts),
+ FieldLenField("optlen", None, length_of="ianaopts",
+ fmt="!H", adjust = lambda pkt,x: x+12),
+ XIntField("iaid", None),
+ IntField("T1", None),
+ IntField("T2", None),
+ _IANAOptField("ianaopts", [], DHCP6OptIAAddress,
+ length_from = lambda pkt: pkt.optlen-12) ]
+
+class _IATAOptField(_IANAOptField):
+ pass
+
+class DHCP6OptIA_TA(_DHCP6OptGuessPayload): # RFC sect 22.5
+ name = "DHCP6 Identity Association for Temporary Addresses Option"
+ fields_desc = [ ShortEnumField("optcode", 4, dhcp6opts),
+ FieldLenField("optlen", None, length_of="iataopts",
+ fmt="!H", adjust = lambda pkt,x: x+4),
+ XIntField("iaid", None),
+ _IATAOptField("iataopts", [], DHCP6OptIAAddress,
+ length_from = lambda pkt: pkt.optlen-4) ]
+
+
+#### DHCPv6 Option Request Option ###################################
+
+class _OptReqListField(StrLenField):
+ islist = 1
+ def i2h(self, pkt, x):
+ if x is None:
+ return []
+ return x
+
+ def i2len(self, pkt, x):
+ return 2*len(x)
+
+ def any2i(self, pkt, x):
+ return x
+
+ def i2repr(self, pkt, x):
+ s = []
+ for y in self.i2h(pkt, x):
+ if y in dhcp6opts:
+ s.append(dhcp6opts[y])
+ else:
+ s.append("%d" % y)
+ return "[%s]" % ", ".join(s)
+
+ def m2i(self, pkt, x):
+ r = []
+ while len(x) != 0:
+ if len(x)<2:
+ warning("Odd length for requested option field. Rejecting last byte")
+ return r
+ r.append(struct.unpack("!H", x[:2])[0])
+ x = x[2:]
+ return r
+
+ def i2m(self, pkt, x):
+ return b"".join(map(lambda y: struct.pack("!H", y), x))
+
+# A client may include an ORO in a solicit, Request, Renew, Rebind,
+# Confirm or Information-request
+class DHCP6OptOptReq(_DHCP6OptGuessPayload): # RFC sect 22.7
+ name = "DHCP6 Option Request Option"
+ fields_desc = [ ShortEnumField("optcode", 6, dhcp6opts),
+ FieldLenField("optlen", None, length_of="reqopts", fmt="!H"),
+ _OptReqListField("reqopts", [23, 24],
+ length_from = lambda pkt: pkt.optlen) ]
+
+
+#### DHCPv6 Preference Option #######################################
+
+# Sent by a server to influence the choice made by the client.
+# A priori, found in Advertise messages.
+class DHCP6OptPref(_DHCP6OptGuessPayload): # RFC sect 22.8
+ name = "DHCP6 Preference Option"
+ fields_desc = [ ShortEnumField("optcode", 7, dhcp6opts),
+ ShortField("optlen", 1 ),
+ ByteField("prefval",255) ]
+
+
+#### DHCPv6 Elapsed Time Option #####################################
+
+class _ElapsedTimeField(ShortField):
+ def i2repr(self, pkt, x):
+ if x == 0xffff:
+ return "infinity (0xffff)"
+ return "%.2f sec" % (self.i2h(pkt, x)/100.)
+
+class DHCP6OptElapsedTime(_DHCP6OptGuessPayload):# RFC sect 22.9
+ name = "DHCP6 Elapsed Time Option"
+ fields_desc = [ ShortEnumField("optcode", 8, dhcp6opts),
+ ShortField("optlen", 2),
+ _ElapsedTimeField("elapsedtime", 0) ]
+
+
+#### DHCPv6 Relay Message Option ####################################
+
+# Relayed message is seen as a payload.
+class DHCP6OptRelayMsg(_DHCP6OptGuessPayload):# RFC sect 22.10
+ name = "DHCP6 Relay Message Option"
+ fields_desc = [ ShortEnumField("optcode", 9, dhcp6opts),
+ ShortField("optlen", None ) ]
+ def post_build(self, p, pay):
+ if self.optlen is None:
+ l = len(pay)
+ p = p[:2]+struct.pack("!H", l)
+ return p + pay
+
+
+#### DHCPv6 Authentication Option ###################################
+
+# The following fields are set in an Authentication option for the
+# Reconfigure Key Authentication Protocol:
+#
+# protocol 3
+#
+# algorithm 1
+#
+# RDM 0
+#
+# The format of the Authentication information for the Reconfigure Key
+# Authentication Protocol is:
+#
+# 0 1 2 3
+# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+# | Type | Value (128 bits) |
+# +-+-+-+-+-+-+-+-+ |
+# . .
+# . .
+# . +-+-+-+-+-+-+-+-+
+# | |
+# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+#
+# Type Type of data in Value field carried in this option:
+#
+# 1 Reconfigure Key value (used in Reply message).
+#
+# 2 HMAC-MD5 digest of the message (used in Reconfigure
+# message).
+#
+# Value Data as defined by field.
+
+
+# TODO : Decoding only at the moment
+class DHCP6OptAuth(_DHCP6OptGuessPayload): # RFC sect 22.11
+ name = "DHCP6 Option - Authentication"
+ fields_desc = [ ShortEnumField("optcode", 11, dhcp6opts),
+ FieldLenField("optlen", None, length_of="authinfo",
+ adjust = lambda pkt,x: x+11),
+ ByteField("proto", 3), # TODO : XXX
+ ByteField("alg", 1), # TODO : XXX
+ ByteField("rdm", 0), # TODO : XXX
+ StrFixedLenField("replay", b"A"*8, 8), # TODO: XXX
+ StrLenField("authinfo", b"",
+ length_from = lambda pkt: pkt.optlen - 11) ]
+
+#### DHCPv6 Server Unicast Option ###################################
+
+class _SrvAddrField(IP6Field):
+ def i2h(self, pkt, x):
+ if x is None:
+ return "::"
+ return x
+
+ def i2m(self, pkt, x):
+ return inet_pton(socket.AF_INET6, self.i2h(pkt,x))
+
+class DHCP6OptServerUnicast(_DHCP6OptGuessPayload):# RFC sect 22.12
+ name = "DHCP6 Server Unicast Option"
+ fields_desc = [ ShortEnumField("optcode", 12, dhcp6opts),
+ ShortField("optlen", 16 ),
+ _SrvAddrField("srvaddr",None) ]
+
+
+#### DHCPv6 Status Code Option ######################################
+
+dhcp6statuscodes = { 0:"Success", # sect 24.4
+ 1:"UnspecFail",
+ 2:"NoAddrsAvail",
+ 3:"NoBinding",
+ 4:"NotOnLink",
+ 5:"UseMulticast",
+ 6:"NoPrefixAvail"} # From RFC3633
+
+class DHCP6OptStatusCode(_DHCP6OptGuessPayload):# RFC sect 22.13
+ name = "DHCP6 Status Code Option"
+ fields_desc = [ ShortEnumField("optcode", 13, dhcp6opts),
+ FieldLenField("optlen", None, length_of="statusmsg",
+ fmt="!H", adjust = lambda pkt,x:x+2),
+ ShortEnumField("statuscode",None,dhcp6statuscodes),
+ StrLenField("statusmsg", b"",
+ length_from = lambda pkt: pkt.optlen-2) ]
+
+
+#### DHCPv6 Rapid Commit Option #####################################
+
+class DHCP6OptRapidCommit(_DHCP6OptGuessPayload): # RFC sect 22.14
+ name = "DHCP6 Rapid Commit Option"
+ fields_desc = [ ShortEnumField("optcode", 14, dhcp6opts),
+ ShortField("optlen", 0)]
+
+
+#### DHCPv6 User Class Option #######################################
+
+class _UserClassDataField(PacketListField):
+ def i2len(self, pkt, z):
+ if z is None or z == []:
+ return 0
+ return sum(map(lambda x: len(bytes(x)) ,z))
+
+ def getfield(self, pkt, s):
+ l = self.length_from(pkt)
+ lst = []
+ remain, payl = s[:l], s[l:]
+ while len(remain)>0:
+ p = self.m2i(pkt,remain)
+ if conf.padding_layer in p:
+ pad = p[conf.padding_layer]
+ remain = pad.load
+ del(pad.underlayer.payload)
+ else:
+ remain = b""
+ lst.append(p)
+ return payl,lst
+
+
+class USER_CLASS_DATA(Packet):
+ name = "user class data"
+ fields_desc = [ FieldLenField("len", None, length_of="data"),
+ StrLenField("data", b"",
+ length_from = lambda pkt: pkt.len) ]
+ def guess_payload_class(self, payload):
+ return conf.padding_layer
+
+class DHCP6OptUserClass(_DHCP6OptGuessPayload):# RFC sect 22.15
+ name = "DHCP6 User Class Option"
+ fields_desc = [ ShortEnumField("optcode", 15, dhcp6opts),
+ FieldLenField("optlen", None, fmt="!H",
+ length_of="userclassdata"),
+ _UserClassDataField("userclassdata", [], USER_CLASS_DATA,
+ length_from = lambda pkt: pkt.optlen) ]
+
+
+#### DHCPv6 Vendor Class Option #####################################
+
+class _VendorClassDataField(_UserClassDataField):
+ pass
+
+class VENDOR_CLASS_DATA(USER_CLASS_DATA):
+ name = "vendor class data"
+
+class DHCP6OptVendorClass(_DHCP6OptGuessPayload):# RFC sect 22.16
+ name = "DHCP6 Vendor Class Option"
+ fields_desc = [ ShortEnumField("optcode", 16, dhcp6opts),
+ FieldLenField("optlen", None, length_of="vcdata", fmt="!H",
+ adjust = lambda pkt,x: x+4),
+ IntEnumField("enterprisenum",None , iana_enterprise_num ),
+ _VendorClassDataField("vcdata", [], VENDOR_CLASS_DATA,
+ length_from = lambda pkt: pkt.optlen-4) ]
+
+#### DHCPv6 Vendor-Specific Information Option ######################
+
+class VENDOR_SPECIFIC_OPTION(_DHCP6OptGuessPayload):
+ name = "vendor specific option data"
+ fields_desc = [ ShortField("optcode", None),
+ FieldLenField("optlen", None, length_of="optdata"),
+ StrLenField("optdata", b"",
+ length_from = lambda pkt: pkt.optlen) ]
+ def guess_payload_class(self, payload):
+ return conf.padding_layer
+
+# The third one that will be used for nothing interesting
+class DHCP6OptVendorSpecificInfo(_DHCP6OptGuessPayload):# RFC sect 22.17
+ name = "DHCP6 Vendor-specific Information Option"
+ fields_desc = [ ShortEnumField("optcode", 17, dhcp6opts),
+ FieldLenField("optlen", None, length_of="vso", fmt="!H",
+ adjust = lambda pkt,x: x+4),
+ IntEnumField("enterprisenum",None , iana_enterprise_num),
+ _VendorClassDataField("vso", [], VENDOR_SPECIFIC_OPTION,
+ length_from = lambda pkt: pkt.optlen-4) ]
+
+#### DHCPv6 Interface-ID Option #####################################
+
+# Revisit this option at the end. It does not look
+# critically important.
+class DHCP6OptIfaceId(_DHCP6OptGuessPayload):# RFC sect 22.18
+ name = "DHCP6 Interface-Id Option"
+ fields_desc = [ ShortEnumField("optcode", 18, dhcp6opts),
+ FieldLenField("optlen", None, fmt="!H",
+ length_of="ifaceid"),
+ StrLenField("ifaceid", b"",
+ length_from = lambda pkt: pkt.optlen) ]
+
+
+#### DHCPv6 Reconfigure Message Option ##############################
+
+# A server includes a Reconfigure Message option in a Reconfigure
+# message to indicate to the client whether the client responds with a
+# Renew message or an Information-request message.
+class DHCP6OptReconfMsg(_DHCP6OptGuessPayload): # RFC sect 22.19
+ name = "DHCP6 Reconfigure Message Option"
+ fields_desc = [ ShortEnumField("optcode", 19, dhcp6opts),
+ ShortField("optlen", 1 ),
+ ByteEnumField("msgtype", 11, { 5:"Renew Message",
+ 11:"Information Request"}) ]
+
+
+#### DHCPv6 Reconfigure Accept Option ###############################
+
+# A client uses the Reconfigure Accept option to announce to the
+# server whether the client is willing to accept Reconfigure
+# messages, and a server uses this option to tell the client whether
+# or not to accept Reconfigure messages. The default behavior in the
+# absence of this option, means unwillingness to accept reconfigure
+# messages, or instruction not to accept Reconfigure messages, for the
+# client and server messages, respectively.
+class DHCP6OptReconfAccept(_DHCP6OptGuessPayload): # RFC sect 22.20
+ name = "DHCP6 Reconfigure Accept Option"
+ fields_desc = [ ShortEnumField("optcode", 20, dhcp6opts),
+ ShortField("optlen", 0)]
+
+# As required in Sect 8. of RFC 3315, Domain Names must be encoded as
+# described in section 3.1 of RFC 1035
+# XXX Label should be at most 63 octets in length : we do not enforce it
+# Total length of domain should be 255 : we do not enforce it either
+class DomainNameListField(StrLenField):
+ islist = 1
+
+ def i2len(self, pkt, x):
+ return len(self.i2m(pkt, x))
+
+ def m2i(self, pkt, x):
+ res = []
+ while x:
+ cur = []
+ #while x and x[0] != b'\x00':
+ while x and x[0] != 0:
+ l = (x[0])
+ cur.append(x[1:l+1])
+ x = x[l+1:]
+ res.append(b".".join(cur))
+ if x and x[0] == 0:
+ x = x[1:]
+ return res
+
+ def i2m(self, pkt, x):
+ def conditionalTrailingDot(z):
+ if z and z[-1] == 0:
+ return z
+ return z+b'\x00'
+ res = b""
+ x = [ i.encode('ascii') for i in x if type(i) is str ]
+ tmp = map(lambda y: map((lambda z: chr(len(z)).encode('ascii')+z), y.split(b'.')), x)
+ return b"".join(map(lambda x: conditionalTrailingDot(b"".join(x)), tmp))
+
+class DHCP6OptSIPDomains(_DHCP6OptGuessPayload): #RFC3319
+ name = "DHCP6 Option - SIP Servers Domain Name List"
+ fields_desc = [ ShortEnumField("optcode", 21, dhcp6opts),
+ FieldLenField("optlen", None, length_of="sipdomains"),
+ DomainNameListField("sipdomains", [],
+ length_from = lambda pkt: pkt.optlen) ]
+
+class DHCP6OptSIPServers(_DHCP6OptGuessPayload): #RFC3319
+ name = "DHCP6 Option - SIP Servers IPv6 Address List"
+ fields_desc = [ ShortEnumField("optcode", 22, dhcp6opts),
+ FieldLenField("optlen", None, length_of="sipservers"),
+ IP6ListField("sipservers", [],
+ length_from = lambda pkt: pkt.optlen) ]
+
+class DHCP6OptDNSServers(_DHCP6OptGuessPayload): #RFC3646
+ name = "DHCP6 Option - DNS Recursive Name Server"
+ fields_desc = [ ShortEnumField("optcode", 23, dhcp6opts),
+ FieldLenField("optlen", None, length_of="dnsservers"),
+ IP6ListField("dnsservers", [],
+ length_from = lambda pkt: pkt.optlen) ]
+
+class DHCP6OptDNSDomains(_DHCP6OptGuessPayload): #RFC3646
+ name = "DHCP6 Option - Domain Search List option"
+ fields_desc = [ ShortEnumField("optcode", 24, dhcp6opts),
+ FieldLenField("optlen", None, length_of="dnsdomains"),
+ DomainNameListField("dnsdomains", [],
+ length_from = lambda pkt: pkt.optlen) ]
+
+# TODO: Implement iaprefopts correctly when provided with more
+# information about it.
+class DHCP6OptIAPrefix(_DHCP6OptGuessPayload): #RFC3633
+ name = "DHCP6 Option - IA_PD Prefix option"
+ fields_desc = [ ShortEnumField("optcode", 26, dhcp6opts),
+ FieldLenField("optlen", None, length_of="iaprefopts",
+ adjust = lambda pkt,x: x+26),
+ IntField("preflft", 0),
+ IntField("validlft", 0),
+ ByteField("plen", 48), # TODO: Challenge that default value
+ IP6Field("prefix", "2001:db8::"), # At least, global and won't hurt
+ StrLenField("iaprefopts", b"",
+ length_from = lambda pkt: pkt.optlen-26) ]
+
+class DHCP6OptIA_PD(_DHCP6OptGuessPayload): #RFC3633
+ name = "DHCP6 Option - Identity Association for Prefix Delegation"
+ fields_desc = [ ShortEnumField("optcode", 25, dhcp6opts),
+ FieldLenField("optlen", None, length_of="iapdopt",
+ adjust = lambda pkt,x: x+12),
+ IntField("iaid", 0),
+ IntField("T1", 0),
+ IntField("T2", 0),
+ PacketListField("iapdopt", [], DHCP6OptIAPrefix,
+ length_from = lambda pkt: pkt.optlen-12) ]
+
+class DHCP6OptNISServers(_DHCP6OptGuessPayload): #RFC3898
+ name = "DHCP6 Option - NIS Servers"
+ fields_desc = [ ShortEnumField("optcode", 27, dhcp6opts),
+ FieldLenField("optlen", None, length_of="nisservers"),
+ IP6ListField("nisservers", [],
+ length_from = lambda pkt: pkt.optlen) ]
+
+class DHCP6OptNISPServers(_DHCP6OptGuessPayload): #RFC3898
+ name = "DHCP6 Option - NIS+ Servers"
+ fields_desc = [ ShortEnumField("optcode", 28, dhcp6opts),
+ FieldLenField("optlen", None, length_of="nispservers"),
+ IP6ListField("nispservers", [],
+ length_from = lambda pkt: pkt.optlen) ]
+
+class DomainNameField(StrLenField):
+ def getfield(self, pkt, s):
+ l = self.length_from(pkt)
+ return s[l:], self.m2i(pkt,s[:l])
+
+ def i2len(self, pkt, x):
+ return len(self.i2m(pkt, x))
+
+ def m2i(self, pkt, x):
+ cur = []
+ while x:
+ l = (x[0])
+ cur.append(x[1:1+l])
+ x = x[l+1:]
+ ret_str = b".".join(cur)
+ return ret_str
+
+ def i2m(self, pkt, x):
+ if not x:
+ return b""
+ tmp = b"".join(map(lambda z: chr(len(z)).encode('ascii')+z, x.split(b'.')))
+ return tmp
+
+class DHCP6OptNISDomain(_DHCP6OptGuessPayload): #RFC3898
+ name = "DHCP6 Option - NIS Domain Name"
+ fields_desc = [ ShortEnumField("optcode", 29, dhcp6opts),
+ FieldLenField("optlen", None, length_of="nisdomain"),
+ DomainNameField("nisdomain", "",
+ length_from = lambda pkt: pkt.optlen) ]
+
+class DHCP6OptNISPDomain(_DHCP6OptGuessPayload): #RFC3898
+ name = "DHCP6 Option - NIS+ Domain Name"
+ fields_desc = [ ShortEnumField("optcode", 30, dhcp6opts),
+ FieldLenField("optlen", None, length_of="nispdomain"),
+ DomainNameField("nispdomain", "",
+ length_from= lambda pkt: pkt.optlen) ]
+
+class DHCP6OptSNTPServers(_DHCP6OptGuessPayload): #RFC4075
+ name = "DHCP6 option - SNTP Servers"
+ fields_desc = [ ShortEnumField("optcode", 31, dhcp6opts),
+ FieldLenField("optlen", None, length_of="sntpservers"),
+ IP6ListField("sntpservers", [],
+ length_from = lambda pkt: pkt.optlen) ]
+
+IRT_DEFAULT=86400
+IRT_MINIMUM=600
+class DHCP6OptInfoRefreshTime(_DHCP6OptGuessPayload): #RFC4242
+ name = "DHCP6 Option - Information Refresh Time"
+ fields_desc = [ ShortEnumField("optcode", 32, dhcp6opts),
+ ShortField("optlen", 4),
+ IntField("reftime", IRT_DEFAULT)] # One day
+
+class DHCP6OptBCMCSDomains(_DHCP6OptGuessPayload): #RFC4280
+ name = "DHCP6 Option - BCMCS Domain Name List"
+ fields_desc = [ ShortEnumField("optcode", 33, dhcp6opts),
+ FieldLenField("optlen", None, length_of="bcmcsdomains"),
+ DomainNameListField("bcmcsdomains", [],
+ length_from = lambda pkt: pkt.optlen) ]
+
+class DHCP6OptBCMCSServers(_DHCP6OptGuessPayload): #RFC4280
+ name = "DHCP6 Option - BCMCS Addresses List"
+ fields_desc = [ ShortEnumField("optcode", 34, dhcp6opts),
+ FieldLenField("optlen", None, length_of="bcmcsservers"),
+ IP6ListField("bcmcsservers", [],
+ length_from= lambda pkt: pkt.optlen) ]
+
+# TODO : Does Nothing at the moment
+class DHCP6OptGeoConf(_DHCP6OptGuessPayload): #RFC-ietf-geopriv-dhcp-civil-09.txt
+ name = ""
+ fields_desc = [ ShortEnumField("optcode", 36, dhcp6opts),
+ FieldLenField("optlen", None, length_of="optdata"),
+ StrLenField("optdata", "",
+ length_from = lambda pkt: pkt.optlen) ]
+
+# TODO: see if we encounter opaque values from vendor devices
+class DHCP6OptRemoteID(_DHCP6OptGuessPayload): #RFC4649
+ name = "DHCP6 Option - Relay Agent Remote-ID"
+ fields_desc = [ ShortEnumField("optcode", 37, dhcp6opts),
+ FieldLenField("optlen", None, length_of="remoteid",
+ adjust = lambda pkt,x: x+4),
+ IntEnumField("enterprisenum", None, iana_enterprise_num),
+ StrLenField("remoteid", b"",
+ length_from = lambda pkt: pkt.optlen-4) ]
+
+# TODO : 'subscriberid' default value should be at least 1 byte long
+class DHCP6OptSubscriberID(_DHCP6OptGuessPayload): #RFC4580
+ name = "DHCP6 Option - Subscriber ID"
+ fields_desc = [ ShortEnumField("optcode", 38, dhcp6opts),
+ FieldLenField("optlen", None, length_of="subscriberid"),
+ StrLenField("subscriberid", b"",
+ length_from = lambda pkt: pkt.optlen) ]
+
+# TODO : "The data in the Domain Name field MUST be encoded
+# as described in Section 8 of [5]"
+class DHCP6OptClientFQDN(_DHCP6OptGuessPayload): #RFC4704
+ name = "DHCP6 Option - Client FQDN"
+ fields_desc = [ ShortEnumField("optcode", 39, dhcp6opts),
+ FieldLenField("optlen", None, length_of="fqdn",
+ adjust = lambda pkt,x: x+1),
+ BitField("res", 0, 5),
+ FlagsField("flags", 0, 3, "SON" ),
+ DomainNameField("fqdn", "",
+ length_from = lambda pkt: pkt.optlen-1) ]
+
+class DHCP6OptRelayAgentERO(_DHCP6OptGuessPayload): # RFC4994
+ name = "DHCP6 Option - RelayRequest Option"
+ fields_desc = [ ShortEnumField("optcode", 43, dhcp6opts),
+ FieldLenField("optlen", None, length_of="reqopts", fmt="!H"),
+ _OptReqListField("reqopts", [23, 24],
+ length_from = lambda pkt: pkt.optlen) ]
+
+#####################################################################
+### DHCPv6 messages ###
+#####################################################################
+
+# Some state parameters of the protocols that should probably be
+# useful to have in the configuration (and keep up-to-date)
+DHCP6RelayAgentUnicastAddr=""
+DHCP6RelayHopCount=""
+DHCP6ServerUnicastAddr=""
+DHCP6ClientUnicastAddr=""
+DHCP6ClientIA_TA=""
+DHCP6ClientIA_NA=""
+DHCP6ClientIAID=""
+T1="" # See RFC 2462
+T2="" # See RFC 2462
+DHCP6ServerDUID=""
+DHCP6CurrentTransactionID="" # should be used to match a
+# reply, and updated in client mode with a random value for
+# which a response from a server is expected.
+DHCP6PrefVal="" # the preference value to use in
+# preference options
+
+# Emitted by :
+# - server : ADVERTISE, REPLY, RECONFIGURE, RELAY-REPL (to relay)
+# - client : SOLICIT, REQUEST, CONFIRM, RENEW, REBIND, RELEASE, DECLINE,
+# INFORMATION REQUEST
+# - relay : RELAY-FORW (toward server)
+
+class _DHCP6GuessPayload(Packet):
+ def guess_payload_class(self, payload):
+ if len(payload) > 1 :
+ print((payload[0]))
+ return get_cls(dhcp6opts.get(ord(payload[0]),"DHCP6OptUnknown"), conf.raw_layer)
+ return conf.raw_layer
+
+#####################################################################
+## DHCPv6 messages sent between Clients and Servers (types 1 to 11)
+# As specified in section 15.1 of RFC 3315, transaction id values
+# are selected randomly by the client for each transmission,
+# and must be matched in the replies made by
+# the servers
+class DHCP6(_DHCP6OptGuessPayload):
+ name = "DHCPv6 Generic Message)"
+ fields_desc = [ ByteEnumField("msgtype",None,dhcp6types),
+ X3BytesField("trid",0x000000) ]
+ overload_fields = { UDP: {"sport": 546, "dport": 547} }
+
+ def hashret(self):
+ return struct.pack("!I", self.trid)[1:4]
+
+#####################################################################
+# Solicit Message : sect 17.1.1 RFC3315
+# - sent by client
+# - must include a client identifier option
+# - the client may include IA options for any IAs to which it wants the
+# server to assign address
+# - The client use IA_NA options to request the assignment of
+# non-temporary addresses and uses IA_TA options to request the
+# assignment of temporary addresses
+# - The client should include an Option Request option to indicate the
+# options the client is interested in receiving (eventually
+# including hints)
+# - The client includes a Reconfigure Accept option if is willing to
+# accept Reconfigure messages from the server.
+# The send-and-reply case is rather particular because, depending on
+# the presence of a rapid commit option in the solicit, the wait
+# stops at the first reply message received, or else after a
+# timeout. Likewise, if an Advertise message arrives with a
+# preference value of 255, it stops waiting and sends a
+# Request.
+# - The client announces its intention to use DHCP authentication by
+# including an Authentication option in its solicit message. The
+# server selects a key for the client based on the client's DUID. The
+# client and server use that key to authenticate all DHCP messages
+# exchanged during the session
+
+class DHCP6_Solicit(DHCP6):
+    # Solicit (msgtype 1), client -> server; RFC 3315, Sect 17.1.1.
+    name = "DHCPv6 Solicit Message"
+    msgtype = 1
+    overload_fields = { UDP: {"sport": 546, "dport": 547} }
+
+#####################################################################
+# Advertise Message
+# - sent by server
+# - Includes a server identifier option
+# - Includes a client identifier option
+# - the client identifier option must match the client's DUID
+# - transaction ID must match
+
+class DHCP6_Advertise(DHCP6):
+    # Advertise (msgtype 2), server -> client; answers a Solicit.
+    name = "DHCPv6 Advertise Message"
+    msgtype = 2
+    overload_fields = { UDP: {"sport": 547, "dport": 546} }
+
+    def answers(self, other):
+        # Matches a Solicit with the same transaction id.
+        return (isinstance(other,DHCP6_Solicit) and
+                other.msgtype == 1 and
+                self.trid == other.trid)
+
+#####################################################################
+# Request Message
+# - sent by clients
+# - includes a server identifier option
+# - the content of Server Identifier option must match server's DUID
+# - includes a client identifier option
+# - must include an ORO Option (even with hints) p40
+# - can includes a reconfigure Accept option indicating whether or
+# not the client is willing to accept Reconfigure messages from
+# the server (p40)
+# - When the server receives a Request message via unicast from a
+# client to which the server has not sent a unicast option, the server
+# discards the Request message and responds with a Reply message
+# containing a Status Code option with the value UseMulticast, a Server
+# Identifier Option containing the server's DUID, the client
+# Identifier option from the client message and no other option.
+
+class DHCP6_Request(DHCP6):
+    # Request (msgtype 3), client -> server.
+    name = "DHCPv6 Request Message"
+    msgtype = 3
+
+#####################################################################
+# Confirm Message
+# - sent by clients
+# - must include a client identifier option
+# - When the server receives a Confirm Message, the server determines
+# whether the addresses in the Confirm message are appropriate for the
+# link to which the client is attached. cf p50
+
+class DHCP6_Confirm(DHCP6):
+    # Confirm (msgtype 4), client -> server.
+    name = "DHCPv6 Confirm Message"
+    msgtype = 4
+
+#####################################################################
+# Renew Message
+# - sent by clients
+# - must include a server identifier option
+# - content of server identifier option must match the server's identifier
+# - must include a client identifier option
+# - the clients includes any IA assigned to the interface that may
+# have moved to a new link, along with the addresses associated with
+# those IAs in its confirm messages
+# - When the server receives a Renew message that contains an IA
+# option from a client, it locates the client's binding and verifies
+# that the information in the IA from the client matches the
+# information for that client. If the server cannot find a client
+# entry for the IA the server returns the IA containing no addresses
+# with a status code option set to NoBinding in the Reply message. See
+# p51 for the rest.
+
+class DHCP6_Renew(DHCP6):
+    # Renew (msgtype 5), client -> server.
+    name = "DHCPv6 Renew Message"
+    msgtype = 5
+
+#####################################################################
+# Rebind Message
+# - sent by clients
+# - must include a client identifier option
+# cf p52
+
+class DHCP6_Rebind(DHCP6):
+    # Rebind (msgtype 6), client -> server.
+    name = "DHCPv6 Rebind Message"
+    msgtype = 6
+
+#####################################################################
+# Reply Message
+# - sent by servers
+# - the message must include a server identifier option
+# - transaction-id field must match the value of original message
+# The server includes a Rapid Commit option in the Reply message to
+# indicate that the reply is in response to a solicit message
+# - if the client receives a reply message with a Status code option
+# with the value UseMulticast, the client records the receipt of the
+# message and sends subsequent messages to the server through the
+# interface on which the message was received using multicast. The
+# client resends the original message using multicast
+# - When the client receives a NotOnLink status from the server in
+# response to a Confirm message, the client performs DHCP server
+# solicitation as described in section 17 and client-initiated
+# configuration as described in section 18 (RFC 3315)
+# - when the client receives a NotOnLink status from the server in
+# response to a Request, the client can either re-issue the Request
+# without specifying any addresses or restart the DHCP server
+# discovery process.
+# - the server must include a server identifier option containing the
+# server's DUID in the Reply message
+
+class DHCP6_Reply(DHCP6):
+    # Reply (msgtype 7), server -> client.
+    name = "DHCPv6 Reply Message"
+    msgtype = 7
+
+    overload_fields = { UDP: {"sport": 547, "dport": 546} }
+
+    def answers(self, other):
+        # A Reply answers any of the client message types below,
+        # matched on the transaction id.
+        types = (DHCP6_InfoRequest, DHCP6_Confirm, DHCP6_Rebind, DHCP6_Decline, DHCP6_Request, DHCP6_Release, DHCP6_Renew)
+
+        return (isinstance(other, types) and
+                self.trid == other.trid)
+
+#####################################################################
+# Release Message
+# - sent by clients
+# - must include a server identifier option
+# cf p53
+
+class DHCP6_Release(DHCP6):
+    # Release (msgtype 8), client -> server.
+    name = "DHCPv6 Release Message"
+    msgtype = 8
+
+#####################################################################
+# Decline Message
+# - sent by clients
+# - must include a client identifier option
+# - Server identifier option must match server identifier
+# - The addresses to be declined must be included in the IAs. Any
+# addresses for the IAs the client wishes to continue to use should
+# not be in added to the IAs.
+# - cf p54
+
+class DHCP6_Decline(DHCP6):
+    # Decline (msgtype 9), client -> server.
+    name = "DHCPv6 Decline Message"
+    msgtype = 9
+
+#####################################################################
+# Reconfigure Message
+# - sent by servers
+# - must be unicast to the client
+# - must include a server identifier option
+# - must include a client identifier option that contains the client DUID
+# - must contain a Reconfigure Message Option and the message type
+# must be a valid value
+# - the server sets the transaction-id to 0
+# - The server must use DHCP Authentication in the Reconfigure
+# message. In other words, this is not the kind of message we are
+# going to see most often.
+
+class DHCP6_Reconf(DHCP6):
+    # Reconfigure (msgtype 10), server -> client (unicast).
+    name = "DHCPv6 Reconfigure Message"
+    msgtype = 10
+    overload_fields = { UDP: { "sport": 547, "dport": 546 } }
+
+
+#####################################################################
+# Information-Request Message
+# - sent by clients when needs configuration information but no
+# addresses.
+# - client should include a client identifier option to identify
+# itself. If it doesn't the server is not able to return client
+# specific options or the server can choose to not respond to the
+# message at all. The client must include a client identifier option
+# if the message will be authenticated.
+# - client must include an ORO of option she's interested in receiving
+# (can include hints)
+
+class DHCP6_InfoRequest(DHCP6):
+    # Information-Request (msgtype 11), client -> server; configuration
+    # data only, no address assignment.
+    name = "DHCPv6 Information Request Message"
+    msgtype = 11
+
+#####################################################################
+# sent between Relay Agents and Servers
+#
+# Normalement, doit inclure une option "Relay Message Option"
+# peut en inclure d'autres.
+# voir section 7.1 de la 3315
+
+# Relay-Forward Message
+# - sent by relay agents to servers
+# If the relay agent relays messages to the All_DHCP_Servers multicast
+# address or other multicast addresses, it sets the Hop Limit field to
+# 32.
+
+class DHCP6_RelayForward(_DHCP6GuessPayload,Packet):
+    # Relay-Forward (msgtype 12), relay agent -> server; RFC 3315, Sect 7.
+    name = "DHCPv6 Relay Forward Message (Relay Agent/Server Message)"
+    fields_desc = [ ByteEnumField("msgtype", 12, dhcp6types),
+                    ByteField("hopcount", None),
+                    IP6Field("linkaddr", "::"),
+                    IP6Field("peeraddr", "::") ]
+    def hashret(self): # we filter on peer address field
+        return inet_pton(socket.AF_INET6, self.peeraddr)
+
+#####################################################################
+# sent between Relay Agents and Servers
+# Normalement, doit inclure une option "Relay Message Option"
+# peut en inclure d'autres.
+# Les valeurs des champs hop-count, link-addr et peer-addr
+# sont copiees du messsage Forward associe. POur le suivi de session.
+# Pour le moment, comme decrit dans le commentaire, le hashret
+# se limite au contenu du champ peer address.
+# Voir section 7.2 de la 3315.
+
+# Relay-Reply Message
+# - sent by servers to relay agents
+# - if the solicit message was received in a Relay-Forward message,
+# the server constructs a relay-reply message with the Advertise
+# message in the payload of a relay-message. cf page 37/101. Envoie de
+# ce message en unicast au relay-agent. utilisation de l'adresse ip
+# presente en ip source du paquet recu
+
+class DHCP6_RelayReply(DHCP6_RelayForward):
+    # Relay-Reply (msgtype 13), server -> relay agent; RFC 3315, Sect 7.2.
+    name = "DHCPv6 Relay Reply Message (Relay Agent/Server Message)"
+    msgtype = 13
+    # NOTE(review): hashret() duplicates the inherited implementation;
+    # kept verbatim for clarity.
+    def hashret(self): # We filter on peer address field.
+        return inet_pton(socket.AF_INET6, self.peeraddr)
+    def answers(self, other):
+        # Matches the Relay-Forward whose hopcount/linkaddr/peeraddr
+        # this reply echoes back.
+        return (isinstance(other, DHCP6_RelayForward) and
+                self.hopcount == other.hopcount and
+                self.linkaddr == other.linkaddr and
+                self.peeraddr == other.peeraddr )
+
+
+dhcp6_cls_by_type = { 1: "DHCP6_Solicit",
+ 2: "DHCP6_Advertise",
+ 3: "DHCP6_Request",
+ 4: "DHCP6_Confirm",
+ 5: "DHCP6_Renew",
+ 6: "DHCP6_Rebind",
+ 7: "DHCP6_Reply",
+ 8: "DHCP6_Release",
+ 9: "DHCP6_Decline",
+ 10: "DHCP6_Reconf",
+ 11: "DHCP6_InfoRequest",
+ 12: "DHCP6_RelayForward",
+ 13: "DHCP6_RelayReply" }
+
+def _dhcp6_dispatcher(x, *args, **kargs):
+    # Bottom-up UDP dispatcher: choose the DHCP6 message class from the
+    # first byte (msgtype) of the payload; fall back to Raw for short or
+    # unknown data.
+    cls = conf.raw_layer
+    if len(x) >= 2:
+        cls = get_cls(dhcp6_cls_by_type.get((x[0]), "Raw"), conf.raw_layer)
+    return cls(x, *args, **kargs)
+
+bind_bottom_up(UDP, _dhcp6_dispatcher, { "dport": 547 } )
+bind_bottom_up(UDP, _dhcp6_dispatcher, { "dport": 546 } )
+
+
+
+class DHCPv6_am(AnsweringMachine):
+ function_name = "dhcp6d"
+ filter = "udp and port 546 and port 547"
+ send_function = staticmethod(send)
+    def usage(self):
+        # Print the user-facing help text for the dhcp6d answering machine.
+        msg = """
+dhcp6d( dns="2001:500::1035", domain="localdomain, local", duid=None)
+        iface=conf.iface6, advpref=255, sntpservers=None,
+        sipdomains=None, sipservers=None,
+        nisdomain=None, nisservers=None,
+        nispdomain=None, nispservers=None,
+        bcmcsdomain=None, bcmcsservers=None)
+
+   debug : When set, additional debugging information is printed.
+
+   duid   : some DUID class (DUID_LLT, DUID_LL or DUID_EN). If none
+          is provided a DUID_LLT is constructed based on the MAC
+          address of the sending interface and launch time of dhcp6d
+          answering machine.
+
+   iface : the interface to listen/reply on if you do not want to use
+          conf.iface6.
+
+   advpref : Value in [0,255] given to Advertise preference field.
+          By default, 255 is used. Be aware that this specific
+          value makes clients stops waiting for further Advertise
+          messages from other servers.
+
+   dns : list of recursive DNS servers addresses (as a string or list).
+          By default, it is set empty and the associated DHCP6OptDNSServers
+          option is inactive. See RFC 3646 for details.
+   domain : a list of DNS search domain (as a string or list). By default,
+          it is empty and the associated DHCP6OptDomains option is inactive.
+          See RFC 3646 for details.
+
+   sntpservers : a list of SNTP servers IPv6 addresses. By default,
+          it is empty and the associated DHCP6OptSNTPServers option
+          is inactive.
+
+   sipdomains : a list of SIP domains. By default, it is empty and the
+          associated DHCP6OptSIPDomains option is inactive. See RFC 3319
+          for details.
+   sipservers : a list of SIP servers IPv6 addresses. By default, it is
+          empty and the associated DHCP6OptSIPDomains option is inactive.
+          See RFC 3319 for details.
+
+   nisdomain : a list of NIS domains. By default, it is empty and the
+          associated DHCP6OptNISDomains option is inactive. See RFC 3898
+          for details. See RFC 3646 for details.
+   nisservers : a list of NIS servers IPv6 addresses. By default, it is
+          empty and the associated DHCP6OptNISServers option is inactive.
+          See RFC 3646 for details.
+
+   nispdomain : a list of NIS+ domains. By default, it is empty and the
+          associated DHCP6OptNISPDomains option is inactive. See RFC 3898
+          for details.
+   nispservers : a list of NIS+ servers IPv6 addresses. By default, it is
+          empty and the associated DHCP6OptNISServers option is inactive.
+          See RFC 3898 for details.
+
+   bcmcsdomain : a list of BCMCS domains. By default, it is empty and the
+          associated DHCP6OptBCMCSDomains option is inactive. See RFC 4280
+          for details.
+   bcmcsservers : a list of BCMCS servers IPv6 addresses. By default, it is
+          empty and the associated DHCP6OptBCMCSServers option is inactive.
+          See RFC 4280 for details.
+
+   If you have a need for others, just ask ... or provide a patch."""
+        print(msg)
+
+ def parse_options(self, dns="2001:500::1035", domain="localdomain, local",
+ startip="2001:db8::1", endip="2001:db8::20", duid=None,
+ sntpservers=None, sipdomains=None, sipservers=None,
+ nisdomain=None, nisservers=None, nispdomain=None,
+ nispservers=None, bcmcsservers=None, bcmcsdomains=None,
+ iface=None, debug=0, advpref=255):
+ def norm_list(val, param_name):
+ if val is None:
+ return None
+ if type(val) is list:
+ return val
+ elif type(val) is str:
+ l = val.split(',')
+ return map(lambda x: x.strip(), l)
+ else:
+ print("Bad '%s' parameter provided." % param_name)
+ self.usage()
+ return -1
+
+ if iface is None:
+ iface = conf.iface6
+
+ self.debug = debug
+
+ # Dictionary of provided DHCPv6 options, keyed by option type
+ self.dhcpv6_options={}
+
+ for o in [(dns, "dns", 23, lambda x: DHCP6OptDNSServers(dnsservers=x)),
+ (domain, "domain", 24, lambda x: DHCP6OptDNSDomains(dnsdomains=x)),
+ (sntpservers, "sntpservers", 31, lambda x: DHCP6OptSNTPServers(sntpservers=x)),
+ (sipservers, "sipservers", 22, lambda x: DHCP6OptSIPServers(sipservers=x)),
+ (sipdomains, "sipdomains", 21, lambda x: DHCP6OptSIPDomains(sipdomains=x)),
+ (nisservers, "nisservers", 27, lambda x: DHCP6OptNISServers(nisservers=x)),
+ (nisdomain, "nisdomain", 29, lambda x: DHCP6OptNISDomain(nisdomain=(x+[""])[0])),
+ (nispservers, "nispservers", 28, lambda x: DHCP6OptNISPServers(nispservers=x)),
+ (nispdomain, "nispdomain", 30, lambda x: DHCP6OptNISPDomain(nispdomain=(x+[""])[0])),
+ (bcmcsservers, "bcmcsservers", 33, lambda x: DHCP6OptBCMCSServers(bcmcsservers=x)),
+ (bcmcsdomains, "bcmcsdomains", 34, lambda x: DHCP6OptBCMCSDomains(bcmcsdomains=x))]:
+
+ opt = norm_list(o[0], o[1])
+ if opt == -1: # Usage() was triggered
+ return False
+ elif opt is None: # We won't return that option
+ pass
+ else:
+ self.dhcpv6_options[o[2]] = o[3](opt)
+
+ if self.debug:
+ print("\n[+] List of active DHCPv6 options:")
+ opts = self.dhcpv6_options.keys()
+ opts.sort()
+ for i in opts:
+ print(" %d: %s" % (i, repr(self.dhcpv6_options[i])))
+
+ # Preference value used in Advertise.
+ self.advpref = advpref
+
+ # IP Pool
+ self.startip = startip
+ self.endip = endip
+ # XXX TODO Check IPs are in same subnet
+
+ ####
+ # The interface we are listening/replying on
+ self.iface = iface
+
+ ####
+ # Generate a server DUID
+ if duid is not None:
+ self.duid = duid
+ else:
+ # Timeval
+ from time import gmtime, strftime, mktime
+ epoch = (2000, 1, 1, 0, 0, 0, 5, 1, 0)
+ delta = mktime(epoch) - mktime(gmtime(0))
+ timeval = time.time() - delta
+
+ # Mac Address
+ rawmac = get_if_raw_hwaddr(iface)
+ mac = ":".join(map(lambda x: "%.02x" % x, rawmac))
+
+ self.duid = DUID_LLT(timeval = timeval, lladdr = mac)
+
+ if self.debug:
+ print("\n[+] Our server DUID:" )
+ self.duid.show(label_lvl=" "*4)
+
+ ####
+ # Find the source address we will use
+ #l = filter(lambda x: x[2] == iface and in6_islladdr(x[0]), in6_getifaddr())
+ l = [ x for x in in6_getifaddr() if x[2] == iface and in6_islladdr(x[0]) ]
+ if not l:
+ warning("Unable to get a Link-Local address")
+ return
+
+ self.src_addr = l[0][0]
+
+ ####
+ # Our leases
+ self.leases = {}
+
+
+ if self.debug:
+ print("\n[+] Starting DHCPv6 service on %s:" % self.iface )
+
+ def is_request(self, p):
+ if not IPv6 in p:
+ return False
+
+ src = p[IPv6].src
+ dst = p[IPv6].dst
+
+ p = p[IPv6].payload
+ if not isinstance(p, UDP) or p.sport != 546 or p.dport != 547 :
+ return False
+
+ p = p.payload
+ if not isinstance(p, DHCP6):
+ return False
+
+ # Message we considered client messages :
+ # Solicit (1), Request (3), Confirm (4), Renew (5), Rebind (6)
+ # Decline (9), Release (8), Information-request (11),
+ if not (p.msgtype in [1, 3, 4, 5, 6, 8, 9, 11]):
+ return False
+
+ # Message validation following section 15 of RFC 3315
+
+ if ((p.msgtype == 1) or # Solicit
+ (p.msgtype == 6) or # Rebind
+ (p.msgtype == 4)): # Confirm
+ if ((not DHCP6OptClientId in p) or
+ DHCP6OptServerId in p):
+ return False
+
+ if (p.msgtype == 6 or # Rebind
+ p.msgtype == 4): # Confirm
+ # XXX We do not reply to Confirm or Rebind as we
+ # XXX do not support address assignment
+ return False
+
+ elif (p.msgtype == 3 or # Request
+ p.msgtype == 5 or # Renew
+ p.msgtype == 8): # Release
+
+ # Both options must be present
+ if ((not DHCP6OptServerId in p) or
+ (not DHCP6OptClientId in p)):
+ return False
+ # provided server DUID must match ours
+ duid = p[DHCP6OptServerId].duid
+ if (type(duid) != type(self.duid)):
+ return False
+ if bytes(duid) != bytes(self.duid):
+ return False
+
+ if (p.msgtype == 5 or # Renew
+ p.msgtype == 8): # Release
+ # XXX We do not reply to Renew or Release as we
+ # XXX do not support address assignment
+ return False
+
+ elif p.msgtype == 9: # Decline
+ # XXX We should check if we are tracking that client
+ if not self.debug:
+ return False
+
+ bo = Color.bold
+ g = Color.green + bo
+ b = Color.blue + bo
+ n = Color.normal
+ r = Color.red
+
+ vendor = in6_addrtovendor(src)
+ if (vendor and vendor != "UNKNOWN"):
+ vendor = " [" + b + vendor + n + "]"
+ else:
+ vendor = ""
+ src = bo + src + n
+
+ it = p
+ addrs = []
+ while it:
+ l = []
+ if isinstance(it, DHCP6OptIA_NA):
+ l = it.ianaopts
+ elif isinstance(it, DHCP6OptIA_TA):
+ l = it.iataopts
+
+ #opsaddr = filter(lambda x: isinstance(x, DHCP6OptIAAddress),l)
+ opsaddr = [ x for x in l if isinstance(x, DHCP6OptIAAddress) ]
+ a=map(lambda x: x.addr, opsaddr)
+ addrs += a
+ it = it.payload
+
+ addrs = map(lambda x: bo + x + n, addrs)
+ if debug:
+ msg = r + "[DEBUG]" + n + " Received " + g + "Decline" + n
+ msg += " from " + bo + src + vendor + " for "
+ msg += ", ".join(addrs)+ n
+ print(msg)
+
+ # See sect 18.1.7
+
+ # Sent by a client to warn us she has determined
+ # one or more addresses assigned to her is already
+ # used on the link.
+ # We should simply log that fact. No messaged should
+ # be sent in return.
+
+ # - Message must include a Server identifier option
+ # - the content of the Server identifier option must
+ # match the server's identifier
+ # - the message must include a Client Identifier option
+ return False
+
+ elif p.msgtype == 11: # Information-Request
+ if DHCP6OptServerId in p:
+ duid = p[DHCP6OptServerId].duid
+ if (type(duid) != type(self.duid)):
+ return False
+ if bytes(duid) != bytes(self.duid):
+ return False
+ if ((DHCP6OptIA_NA in p) or
+ (DHCP6OptIA_TA in p) or
+ (DHCP6OptIA_PD in p)):
+ return False
+ else:
+ return False
+
+ return True
+
+    def print_reply(self, req, reply):
+        # Log a one-line colored summary of the (request, reply) pair.
+        def norm(s):
+            # Strip the "DHCPv6 " prefix and " Message" suffix from a
+            # layer name for compact display.
+            if s.startswith("DHCPv6 "):
+                s = s[7:]
+            if s.endswith(" Message"):
+                s = s[:-8]
+            return s
+
+        if reply is None:
+            return
+
+        bo = Color.bold
+        g = Color.green + bo
+        b = Color.blue + bo
+        n = Color.normal
+        reqtype = g + norm(req.getlayer(UDP).payload.name) + n
+        reqsrc = req.getlayer(IPv6).src
+        vendor = in6_addrtovendor(reqsrc)
+        if (vendor and vendor != "UNKNOWN"):
+            vendor = " [" + b + vendor + n + "]"
+        else:
+            vendor = ""
+        reqsrc = bo + reqsrc + n
+        reptype = g + norm(reply.getlayer(UDP).payload.name) + n
+
+        print("Sent %s answering to %s from %s%s" % (reptype, reqtype, reqsrc, vendor))
+
+ def make_reply(self, req):
+ req_mac_src = req.src
+ req_mac_dst = req.dst
+
+ p = req[IPv6]
+ req_src = p.src
+ req_dst = p.dst
+
+ p = p.payload.payload
+
+ msgtype = p.msgtype
+ trid = p.trid
+
+ if msgtype == 1: # SOLICIT (See Sect 17.1 and 17.2 of RFC 3315)
+
+ # XXX We don't support address or prefix assignment
+ # XXX We also do not support relay function --arno
+
+ client_duid = p[DHCP6OptClientId].duid
+ resp = IPv6(src=self.src_addr, dst=req_src)
+ resp /= UDP(sport=547, dport=546)
+
+ if p.haslayer(DHCP6OptRapidCommit):
+ # construct a Reply packet
+ resp /= DHCP6_Reply(trid=trid)
+ resp /= DHCP6OptRapidCommit() # See 17.1.2
+ resp /= DHCP6OptServerId(duid = self.duid)
+ resp /= DHCP6OptClientId(duid = client_duid)
+
+ else: # No Rapid Commit in the packet. Reply with an Advertise
+
+ if (p.haslayer(DHCP6OptIA_NA) or
+ p.haslayer(DHCP6OptIA_TA)):
+ # XXX We don't assign addresses at the moment
+ msg = "Scapy6 dhcp6d does not support address assignment"
+ resp /= DHCP6_Advertise(trid = trid)
+ resp /= DHCP6OptStatusCode(statuscode=2, statusmsg=msg)
+ resp /= DHCP6OptServerId(duid = self.duid)
+ resp /= DHCP6OptClientId(duid = client_duid)
+
+ elif p.haslayer(DHCP6OptIA_PD):
+ # XXX We don't assign prefixes at the moment
+ msg = "Scapy6 dhcp6d does not support prefix assignment"
+ resp /= DHCP6_Advertise(trid = trid)
+ resp /= DHCP6OptStatusCode(statuscode=6, statusmsg=msg)
+ resp /= DHCP6OptServerId(duid = self.duid)
+ resp /= DHCP6OptClientId(duid = client_duid)
+
+ else: # Usual case, no request for prefixes or addresse
+ resp /= DHCP6_Advertise(trid = trid)
+ resp /= DHCP6OptPref(prefval = self.advpref)
+ resp /= DHCP6OptServerId(duid = self.duid)
+ resp /= DHCP6OptClientId(duid = client_duid)
+ resp /= DHCP6OptReconfAccept()
+
+ # See which options should be included
+ reqopts = []
+ if p.haslayer(DHCP6OptOptReq): # add only asked ones
+ reqopts = p[DHCP6OptOptReq].reqopts
+ for o in self.dhcpv6_options.keys():
+ if o in reqopts:
+ resp /= self.dhcpv6_options[o]
+ else: # advertise everything we have available
+ for o in self.dhcpv6_options.keys():
+ resp /= self.dhcpv6_options[o]
+
+ return resp
+
+ elif msgtype == 3: #REQUEST (INFO-REQUEST is further below)
+ client_duid = p[DHCP6OptClientId].duid
+ resp = IPv6(src=self.src_addr, dst=req_src)
+ resp /= UDP(sport=547, dport=546)
+ resp /= DHCP6_Solicit(trid=trid)
+ resp /= DHCP6OptServerId(duid = self.duid)
+ resp /= DHCP6OptClientId(duid = client_duid)
+
+ # See which options should be included
+ reqopts = []
+ if p.haslayer(DHCP6OptOptReq): # add only asked ones
+ reqopts = p[DHCP6OptOptReq].reqopts
+ for o in self.dhcpv6_options.keys():
+ if o in reqopts:
+ resp /= self.dhcpv6_options[o]
+ else:
+ # advertise everything we have available.
+ # Should not happen has clients MUST include
+ # and ORO in requests (sec 18.1.1) -- arno
+ for o in self.dhcpv6_options.keys():
+ resp /= self.dhcpv6_options[o]
+
+ return resp
+
+ elif msgtype == 4: # CONFIRM
+ # see Sect 18.1.2
+
+ # Client want to check if addresses it was assigned
+ # are still appropriate
+
+ # Server must discard any Confirm messages that
+ # do not include a Client Identifier option OR
+ # THAT DO INCLUDE a Server Identifier Option
+
+ # XXX we must discard the SOLICIT if it is received with
+ # a unicast destination address
+
+ pass
+
+ elif msgtype == 5: # RENEW
+ # see Sect 18.1.3
+
+ # Clients want to extend lifetime of assigned addresses
+ # and update configuration parameters. This message is sent
+ # specifically to the server that provided her the info
+
+ # - Received message must include a Server Identifier
+ # option.
+ # - the content of server identifier option must match
+ # the server's identifier.
+ # - the message must include a Client identifier option
+
+ pass
+
+ elif msgtype == 6: # REBIND
+ # see Sect 18.1.4
+
+ # Same purpose as the Renew message but sent to any
+ # available server after he received no response
+ # to its previous Renew message.
+
+
+ # - Message must include a Client Identifier Option
+ # - Message can't include a Server identifier option
+
+ # XXX we must discard the SOLICIT if it is received with
+ # a unicast destination address
+
+ pass
+
+ elif msgtype == 8: # RELEASE
+ # See section 18.1.6
+
+ # Message is sent to the server to indicate that
+ # she will no longer use the addresses that was assigned
+ # We should parse the message and verify our dictionary
+ # to log that fact.
+
+
+ # - The message must include a server identifier option
+ # - The content of the Server Identifier option must
+ # match the server's identifier
+ # - the message must include a Client Identifier option
+
+ pass
+
+ elif msgtype == 9: # DECLINE
+ # See section 18.1.7
+ pass
+
+ elif msgtype == 11: # INFO-REQUEST
+ client_duid = None
+ if not p.haslayer(DHCP6OptClientId):
+ if self.debug:
+ warning("Received Info Request message without Client Id option")
+ else:
+ client_duid = p[DHCP6OptClientId].duid
+
+ resp = IPv6(src=self.src_addr, dst=req_src)
+ resp /= UDP(sport=547, dport=546)
+ resp /= DHCP6_Reply(trid=trid)
+ resp /= DHCP6OptServerId(duid = self.duid)
+
+ if client_duid:
+ resp /= DHCP6OptClientId(duid = client_duid)
+
+ # Stack requested options if available
+ reqopts = []
+ if p.haslayer(DHCP6OptOptReq):
+ reqopts = p[DHCP6OptOptReq].reqopts
+ for o in self.dhcpv6_options.keys():
+ resp /= self.dhcpv6_options[o]
+
+ return resp
+
+ else:
+ # what else ?
+ pass
+
+ # - We won't support reemission
+ # - We won't support relay role, nor relay forwarded messages
+ # at the beginning
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/dns.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/dns.py
new file mode 100644
index 00000000..8a01e273
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/dns.py
@@ -0,0 +1,712 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+DNS: Domain Name System.
+"""
+
+import socket,struct
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.ansmachine import *
+from scapy.layers.inet import IP, UDP
+from scapy.utils import str2bytes
+
+class DNSStrField(StrField):
+    # Field holding a DNS domain name, encoded on the wire as a sequence
+    # of length-prefixed labels terminated by a zero byte (RFC 1035, 3.1).
+
+    def h2i(self, pkt, x):
+        # Human -> internal: accept str or bytes; empty name is the root.
+        if type(x) == str:
+            x = x.encode('ascii')
+        if x == b"":
+            return b"."
+        return x
+
+    def i2m(self, pkt, x):
+        # Internal -> machine: length-prefix each label, NUL-terminate.
+        if type(x) == str:
+            x = x.encode('ascii')
+        if x == b".":
+            return b"\x00"
+
+        x = [k[:63] for k in x.split(b".")] # Truncate chunks that cannot be encoded (more than 63 bytes..)
+        x = map(lambda y: bytes([len(y)]) + y, x)
+        x = b"".join(x)
+        if x[-1] != 0:
+            x += b"\x00"
+        return x
+
+    def getfield(self, pkt, s):
+        # Machine -> internal: rebuild the dotted name. Compression
+        # pointers (0xc0 flag) are rejected at this position.
+        n = b""
+
+        #if ord(s[0]) == 0:
+        if (s[0]) == 0:
+            return s[1:], b"."
+
+        while 1:
+            #l = ord(s[0])
+            l = (s[0])
+            s = s[1:]
+            if not l:
+                break
+            if l & 0xc0:
+                raise Scapy_Exception("DNS message can't be compressed at this point!")
+            else:
+                n += s[:l]+b"."
+                s = s[l:]
+        return s, n
+
+
+class DNSRRCountField(ShortField):
+    # 16-bit record-count field (qdcount/ancount/nscount/arcount) that,
+    # when left as None, is computed by walking the matching RR chain.
+    holds_packets=1
+    def __init__(self, name, default, rr):
+        ShortField.__init__(self, name, default)
+        # rr: name of the packet attribute holding the RR chain to count
+        self.rr = rr
+    def _countRR(self, pkt):
+        # Count DNSRR/DNSQR/DNSSEC records chained as payloads.
+        x = getattr(pkt,self.rr)
+        i = 0
+        while isinstance(x, DNSRR) or isinstance(x, DNSQR) or isdnssecRR(x):
+            x = x.payload
+            i += 1
+        return i
+
+    def i2m(self, pkt, x):
+        if x is None:
+            x = self._countRR(pkt)
+        return x
+    def i2h(self, pkt, x):
+        if x is None:
+            x = self._countRR(pkt)
+        return x
+
+
+def DNSgetstr(s,p):
+    # Decode a (possibly compressed) DNS name from buffer s at offset p.
+    # Returns (name, next_offset). Follows RFC 1035 compression pointers
+    # with loop detection, and warns on truncated input.
+    name = b""
+    q = 0          # offset to resume at after the first compression jump
+    jpath = [p]    # offsets already visited (decompression-loop guard)
+    while 1:
+        if p >= len(s):
+            warning("DNS RR prematured end (ofs=%i, len=%i)"%(p,len(s)))
+            break
+        #l = ord(s[p])
+        l = s[p]
+        p += 1
+        if l & 0xc0:
+            # Compression pointer: 14-bit offset, relative to the start
+            # of the DNS message (hence the -12 header adjustment).
+            if not q:
+                q = p+1
+            if p >= len(s):
+                warning("DNS incomplete jump token at (ofs=%i)" % p)
+                break
+            p = ((l & 0x3f) << 8) + s[p] - 12
+            if p in jpath:
+                warning("DNS decompression loop detected")
+                break
+            jpath.append(p)
+            continue
+        elif l > 0:
+            name += s[p:p+l]+b"."
+            p += l
+            continue
+        break
+    if q:
+        p = q
+    return name,p
+
+
+class DNSRRField(StrField):
+    # Field decoding a chain of resource records, sized by the count
+    # field named by countfld. When passon is set, the (buffer, offset)
+    # pair is handed on to the next RR field instead of being consumed.
+    holds_packets=1
+    def __init__(self, name, countfld, passon=1):
+        StrField.__init__(self, name, None)
+        self.countfld = countfld
+        self.passon = passon
+    def i2m(self, pkt, x):
+        if x is None:
+            return b""
+        return bytes(x)
+    def decodeRR(self, name, s, p):
+        # Decode one RR at offset p: 10 fixed bytes (type, class, ttl,
+        # rdlen) followed by rdlen bytes of rdata.
+        ret = s[p:p+10]
+        type,cls,ttl,rdlen = struct.unpack("!HHIH", ret)
+        p += 10
+        rr = DNSRR(b"\x00"+ret+s[p:p+rdlen])
+        if type in [2, 3, 4, 5]:
+            # NS/MD/MF/CNAME rdata is itself a (compressible) DNS name.
+            rr.rdata = DNSgetstr(s,p)[0]
+            del(rr.rdlen)
+        elif type in dnsRRdispatcher.keys():
+            rr = dnsRRdispatcher[type](b"\x00"+ret+s[p:p+rdlen])
+        else:
+            del(rr.rdlen)
+
+        p += rdlen
+
+        rr.rrname = name
+        return rr,p
+    def getfield(self, pkt, s):
+        # s may be a (buffer, offset) tuple handed on by a previous
+        # passon RR field.
+        if type(s) is tuple :
+            s,p = s
+        else:
+            p = 0
+        ret = None
+        c = getattr(pkt, self.countfld)
+        if c > len(s):
+            warning("wrong value: DNS.%s=%i" % (self.countfld,c))
+            return s,b""
+        while c:
+            c -= 1
+            name,p = DNSgetstr(s,p)
+            rr,p = self.decodeRR(name, s, p)
+            if ret is None:
+                ret = rr
+            else:
+                ret.add_payload(rr)
+        if self.passon:
+            return (s,p),ret
+        else:
+            return s[p:],ret
+
+
+class DNSQRField(DNSRRField):
+    # Question-section variant of DNSRRField: entries are qname plus 4
+    # fixed bytes (qtype, qclass), with no ttl/rdlen/rdata.
+    holds_packets=1
+    def decodeRR(self, name, s, p):
+        ret = s[p:p+4]
+        p += 4
+        rr = DNSQR(b"\x00"+ret)
+        rr.qname = name
+        return rr,p
+
+
+
+class RDataField(StrLenField):
+    # rdata whose interpretation depends on the record type: IPv4/IPv6
+    # addresses for A/AAAA, DNS names for NS/MD/MF/CNAME/PTR, a list of
+    # length-prefixed character-strings for TXT, raw bytes otherwise.
+    def m2i(self, pkt, s):
+        family = None
+        if pkt.type == 1: # A
+            family = socket.AF_INET
+        elif pkt.type == 12: # PTR
+            s = DNSgetstr(s, 0)[0]
+        elif pkt.type == 16: # TXT
+            ret_s = b""
+            tmp_s = s
+            # RDATA contains a list of strings, each are prepended with
+            # a byte containing the size of the following string.
+            while tmp_s:
+                tmp_len = struct.unpack("!B", bytes([tmp_s[0]]))[0] + 1
+                if tmp_len > len(tmp_s):
+                    warning("DNS RR TXT prematured end of character-string (size=%i, remaining bytes=%i)" % (tmp_len, len(tmp_s)))
+                ret_s += tmp_s[1:tmp_len]
+                tmp_s = tmp_s[tmp_len:]
+            s = ret_s
+        elif pkt.type == 28: # AAAA
+            family = socket.AF_INET6
+        if family is not None:
+            s = inet_ntop(family, s)
+        return s
+    def i2m(self, pkt, s):
+        if pkt.type == 1: # A
+            if s:
+                if type(s) is bytes:
+                    s = s.decode('ascii')
+                s = inet_aton(s)
+        elif pkt.type in [2,3,4,5]: # NS, MD, MF, CNAME
+            # Encode as length-prefixed labels, NUL-terminated.
+            s = b"".join(map(lambda x: bytes([len(x)]) + x, s.split(b".")))
+            #if ord(s[-1]):
+            if s[-1]:
+                s += b"\x00"
+        elif pkt.type == 16: # TXT
+            if s:
+                ret_s = b""
+                # The initial string must be splitted into a list of strings
+                # prepended with theirs sizes.
+                while len(s) >= 255:
+                    ret_s += b"\xff" + s[:255]
+                    s = s[255:]
+                # The remaining string is less than 255 bytes long
+                if len(s):
+                    ret_s += struct.pack("!B", len(s)) + s
+                s = ret_s
+        elif pkt.type == 28: # AAAA
+            if s:
+                s = inet_pton(socket.AF_INET6, s)
+        return s
+
+class RDLenField(Field):
+    # 16-bit rdata length; when left as None it is computed from the
+    # machine encoding of the packet's rdata field.
+    def __init__(self, name):
+        Field.__init__(self, name, None, "H")
+    def i2m(self, pkt, x):
+        if x is None:
+            rdataf = pkt.get_field("rdata")
+            x = len(rdataf.i2m(pkt, pkt.rdata))
+        return x
+    def i2h(self, pkt, x):
+        if x is None:
+            rdataf = pkt.get_field("rdata")
+            x = len(rdataf.i2m(pkt, pkt.rdata))
+        return x
+
+
+class DNS(Packet):
+ name = "DNS"
+ fields_desc = [ ShortField("id", 0),
+ BitField("qr", 0, 1),
+ BitEnumField("opcode", 0, 4, {0:"QUERY",1:"IQUERY",2:"STATUS"}),
+ BitField("aa", 0, 1),
+ BitField("tc", 0, 1),
+ BitField("rd", 0, 1),
+ BitField("ra", 0, 1),
+ BitField("z", 0, 1),
+ # AD and CD bits are defined in RFC 2535
+ BitField("ad", 0, 1), # Authentic Data
+ BitField("cd", 0, 1), # Checking Disabled
+ BitEnumField("rcode", 0, 4, {0:"ok", 1:"format-error", 2:"server-failure", 3:"name-error", 4:"not-implemented", 5:"refused"}),
+ DNSRRCountField("qdcount", None, "qd"),
+ DNSRRCountField("ancount", None, "an"),
+ DNSRRCountField("nscount", None, "ns"),
+ DNSRRCountField("arcount", None, "ar"),
+ DNSQRField("qd", "qdcount"),
+ DNSRRField("an", "ancount"),
+ DNSRRField("ns", "nscount"),
+ DNSRRField("ar", "arcount",0) ]
+ def answers(self, other):
+ return (isinstance(other, DNS)
+ and self.id == other.id
+ and self.qr == 1
+ and other.qr == 0)
+
+ def mysummary(self):
+ type = ["Qry","Ans"][self.qr]
+ name = ""
+ if self.qr:
+ type = "Ans"
+ if self.ancount > 0 and isinstance(self.an, DNSRR):
+ name = ' "%s"' % self.an.getstrval("rdata")
+ else:
+ type = "Qry"
+ if self.qdcount > 0 and isinstance(self.qd, DNSQR):
+ name = ' "%s"' % self.qd.getstrval("qname")
+ return 'DNS %s%s ' % (type, name)
+
+dnstypes = { 0:"ANY", 255:"ALL",
+ 1:"A", 2:"NS", 3:"MD", 4:"MF", 5:"CNAME", 6:"SOA", 7: "MB", 8:"MG",
+ 9:"MR",10:"NULL",11:"WKS",12:"PTR",13:"HINFO",14:"MINFO",15:"MX",16:"TXT",
+ 17:"RP",18:"AFSDB",28:"AAAA", 33:"SRV",38:"A6",39:"DNAME",
+ 41:"OPT", 43:"DS", 46:"RRSIG", 47:"NSEC", 48:"DNSKEY",
+ 50: "NSEC3", 51: "NSEC3PARAM", 32769:"DLV" }
+
+dnsqtypes = {251:"IXFR",252:"AXFR",253:"MAILB",254:"MAILA",255:"ALL"}
+dnsqtypes.update(dnstypes)
+dnsclasses = {1: 'IN', 2: 'CS', 3: 'CH', 4: 'HS', 255: 'ANY'}
+
+
+class DNSQR(Packet):
+ name = "DNS Question Record"
+ show_indent=0
+ fields_desc = [ DNSStrField("qname",b""),
+ ShortEnumField("qtype", 1, dnsqtypes),
+ ShortEnumField("qclass", 1, dnsclasses) ]
+
+
+
+# RFC 2671 - Extension Mechanisms for DNS (EDNS0)
+
+class EDNS0TLV(Packet):
+ name = "DNS EDNS0 TLV"
+ fields_desc = [ ShortEnumField("optcode", 0, { 0: "Reserved", 1: "LLQ", 2: "UL", 3: "NSID", 4: "Reserved", 5: "PING" }),
+ FieldLenField("optlen", None, "optdata", fmt="H"),
+ StrLenField("optdata", b"", length_from=lambda pkt: pkt.optlen) ]
+
+ def extract_padding(self, p):
+ return b"", p
+
+class DNSRROPT(Packet):
+ name = "DNS OPT Resource Record"
+ fields_desc = [ DNSStrField("rrname",b""),
+ ShortEnumField("type", 41, dnstypes),
+ ShortField("rclass", 4096),
+ ByteField("extrcode", 0),
+ ByteField("version", 0),
+ # version 0 means EDNS0
+ BitEnumField("z", 32768, 16, { 32768: "D0" }),
+ # D0 means DNSSEC OK from RFC 3225
+ FieldLenField("rdlen", None, length_of="rdata", fmt="H"),
+ PacketListField("rdata", [], EDNS0TLV, length_from=lambda pkt: pkt.rdlen) ]
+
+# RFC 4034 - Resource Records for the DNS Security Extensions
+
+# 09/2013 from http://www.iana.org/assignments/dns-sec-alg-numbers/dns-sec-alg-numbers.xhtml
+dnssecalgotypes = { 0:"Reserved", 1:"RSA/MD5", 2:"Diffie-Hellman", 3:"DSA/SHA-1",
+ 4:"Reserved", 5:"RSA/SHA-1", 6:"DSA-NSEC3-SHA1",
+ 7:"RSASHA1-NSEC3-SHA1", 8:"RSA/SHA-256", 9:"Reserved",
+ 10:"RSA/SHA-512", 11:"Reserved", 12:"GOST R 34.10-2001",
+ 13:"ECDSA Curve P-256 with SHA-256", 14: "ECDSA Curve P-384 with SHA-384",
+ 252:"Reserved for Indirect Keys", 253:"Private algorithms - domain name",
+ 254:"Private algorithms - OID", 255:"Reserved" }
+
+# 09/2013 from http://www.iana.org/assignments/ds-rr-types/ds-rr-types.xhtml
+dnssecdigesttypes = { 0:"Reserved", 1:"SHA-1", 2:"SHA-256", 3:"GOST R 34.11-94", 4:"SHA-384" }
+
+
+class TimeField(IntField):
+
+ def any2i(self, pkt, x):
+ if type(x) == str:
+ import time, calendar
+ t = time.strptime(x, "%Y%m%d%H%M%S")
+ return int(calendar.timegm(t))
+ return x
+
+ def i2repr(self, pkt, x):
+ import time
+ x = self.i2h(pkt, x)
+ t = time.strftime("%Y%m%d%H%M%S", time.gmtime(x))
+ return "%s (%d)" % (t ,x)
+
+
+def bitmap2RRlist(bitmap):
+ """
+ Decode the 'Type Bit Maps' field of the NSEC Resource Record into an
+ integer list.
+ """
+ # RFC 4034, 4.1.2. The Type Bit Maps Field
+
+ RRlist = []
+
+ while bitmap:
+
+ if len(bitmap) < 2:
+ warning("bitmap too short (%i)" % len(bitmap))
+ return
+
+ #window_block = ord(bitmap[0]) # window number
+ window_block = (bitmap[0]) # window number
+ offset = 256*window_block # offset of the Resource Record
+ #bitmap_len = ord(bitmap[1]) # length of the bitmap in bytes
+ bitmap_len = (bitmap[1]) # length of the bitmap in bytes
+
+ if bitmap_len <= 0 or bitmap_len > 32:
+ warning("bitmap length is no valid (%i)" % bitmap_len)
+ return
+
+ tmp_bitmap = bitmap[2:2+bitmap_len]
+
+ # Let's compare each bit of tmp_bitmap and compute the real RR value
+ for b in range(len(tmp_bitmap)):
+ v = 128
+ for i in range(8):
+ #if ord(tmp_bitmap[b]) & v:
+ if (tmp_bitmap[b]) & v:
+ # each of the RR is encoded as a bit
+ RRlist += [ offset + b*8 + i ]
+ v = v >> 1
+
+# Next block if any
+ bitmap = bitmap[2+bitmap_len:]
+
+ return RRlist
+
+
+def RRlist2bitmap(lst):
+ """
+ Encode a list of integers representing Resource Records to a bitmap field
+ used in the NSEC Resource Record.
+ """
+ # RFC 4034, 4.1.2. The Type Bit Maps Field
+
+ import math
+
+ bitmap = b""
+ lst = list(set(lst))
+ lst.sort()
+
+ #lst = filter(lambda x: x <= 65535, lst)
+ #lst = map(lambda x: abs(x), lst)
+ lst = [ abs(x) for x in lst if x<= 65535 ]
+
+ # number of window blocks
+ max_window_blocks = int(math.ceil(lst[-1] / 256.))
+ min_window_blocks = int(math.floor(lst[0] / 256.))
+ if min_window_blocks == max_window_blocks:
+ max_window_blocks += 1
+
+ for wb in range(min_window_blocks, max_window_blocks+1):
+ # First, filter out RR not encoded in the current window block
+ # i.e. keep everything where 256*wb <= x < 256*(wb+1)
+ #rrlist = filter(lambda x: 256*wb <= x and x < 256*(wb+1), lst)
+ rrlist = [ x for x in lst if 256*wb <= x and x < 256*(wb+1) ]
+ rrlist.sort()
+ if rrlist == []:
+ continue
+
+ # Compute the number of bytes used to store the bitmap
+ if rrlist[-1] == 0: # only one element in the list
+ bs = 1
+ else:
+ max = rrlist[-1] - 256*wb
+ #bs = int(math.ceil(max / 8)) + 1 # use at least 1 byte
+ bs = int(max // 8) + 1 # use at least 1 byte
+ if bs > 32: # Don't encode more than 256 bits / values
+ bs = 32
+
+ bitmap += struct.pack("B", wb)
+ bitmap += struct.pack("B", bs)
+
+ # Generate the bitmap
+ for tmp in range(bs):
+ v = 0
+ # Remove out of range Resource Records
+ #tmp_rrlist = filter(lambda x: 256*wb+8*tmp <= x and x < 256*wb+8*tmp+8, rrlist)
+ tmp_rrlist = [ x for x in rrlist if 256*wb+8*tmp <= x and x < 256*wb+8*tmp+8 ]
+ if not tmp_rrlist == []:
+ # 1. rescale to fit into 8 bits
+ tmp_rrlist = map(lambda x: (x-256*wb)-(tmp*8), tmp_rrlist)
+ # 2. x gives the bit position ; compute the corresponding value
+ tmp_rrlist = map(lambda x: 2**(7-x) , tmp_rrlist)
+ # 3. sum everything
+ #v = reduce(lambda x,y: x+y, tmp_rrlist)
+ v = sum(tmp_rrlist)
+ bitmap += struct.pack("B", v)
+
+ return bitmap
+
+
+class RRlistField(StrField):
+ def h2i(self, pkt, x):
+ if type(x) == list:
+ return RRlist2bitmap(x)
+ return x
+
+ def i2repr(self, pkt, x):
+ x = self.i2h(pkt, x)
+ rrlist = bitmap2RRlist(x)
+ return [ dnstypes.get(rr, rr) for rr in rrlist ]
+
+
+class _DNSRRdummy(Packet):
+ name = "Dummy class that implements post_build() for Ressource Records"
+ def post_build(self, pkt, pay):
+ if not self.rdlen == None:
+ return pkt
+
+ lrrname = len(self.fields_desc[0].i2m(b"", self.getfieldval("rrname")))
+ l = len(pkt) - lrrname - 10
+ pkt = pkt[:lrrname+8] + struct.pack("!H", l) + pkt[lrrname+8+2:]
+
+ return pkt
+
+class DNSRRSOA(_DNSRRdummy):
+ name = "DNS SOA Resource Record"
+ fields_desc = [ DNSStrField("rrname",b""),
+ ShortEnumField("type", 6, dnstypes),
+ ShortEnumField("rclass", 1, dnsclasses),
+ IntField("ttl", 0),
+ ShortField("rdlen", None),
+ DNSStrField("mname", b""),
+ DNSStrField("rname", b""),
+ IntField("serial", 0),
+ IntField("refresh", 0),
+ IntField("retry", 0),
+ IntField("expire", 0),
+ IntField("minimum", 0)
+ ]
+
+class DNSRRRSIG(_DNSRRdummy):
+ name = "DNS RRSIG Resource Record"
+ fields_desc = [ DNSStrField("rrname",b""),
+ ShortEnumField("type", 46, dnstypes),
+ ShortEnumField("rclass", 1, dnsclasses),
+ IntField("ttl", 0),
+ ShortField("rdlen", None),
+ ShortEnumField("typecovered", 1, dnstypes),
+ ByteEnumField("algorithm", 5, dnssecalgotypes),
+ ByteField("labels", 0),
+ IntField("originalttl", 0),
+ TimeField("expiration", 0),
+ TimeField("inception", 0),
+ ShortField("keytag", 0),
+ DNSStrField("signersname", b""),
+ StrField("signature", b"")
+ ]
+
+
+class DNSRRNSEC(_DNSRRdummy):
+ name = "DNS NSEC Resource Record"
+ fields_desc = [ DNSStrField("rrname",b""),
+ ShortEnumField("type", 47, dnstypes),
+ ShortEnumField("rclass", 1, dnsclasses),
+ IntField("ttl", 0),
+ ShortField("rdlen", None),
+ DNSStrField("nextname", b""),
+ RRlistField("typebitmaps", b"")
+ ]
+
+
+class DNSRRDNSKEY(_DNSRRdummy):
+ name = "DNS DNSKEY Resource Record"
+ fields_desc = [ DNSStrField("rrname",b""),
+ ShortEnumField("type", 48, dnstypes),
+ ShortEnumField("rclass", 1, dnsclasses),
+ IntField("ttl", 0),
+ ShortField("rdlen", None),
+ FlagsField("flags", 256, 16, "S???????Z???????"),
+ # S: Secure Entry Point
+ # Z: Zone Key
+ ByteField("protocol", 3),
+ ByteEnumField("algorithm", 5, dnssecalgotypes),
+ StrField("publickey", b"")
+ ]
+
+
+class DNSRRDS(_DNSRRdummy):
+ name = "DNS DS Resource Record"
+ fields_desc = [ DNSStrField("rrname",b""),
+ ShortEnumField("type", 43, dnstypes),
+ ShortEnumField("rclass", 1, dnsclasses),
+ IntField("ttl", 0),
+ ShortField("rdlen", None),
+ ShortField("keytag", 0),
+ ByteEnumField("algorithm", 5, dnssecalgotypes),
+ ByteEnumField("digesttype", 5, dnssecdigesttypes),
+ StrField("digest", b"")
+ ]
+
+
+# RFC 5074 - DNSSEC Lookaside Validation (DLV)
+class DNSRRDLV(DNSRRDS):
+ name = "DNS DLV Resource Record"
+ def __init__(self, *args, **kargs):
+ DNSRRDS.__init__(self, *args, **kargs)
+ if not kargs.get('type', 0):
+ self.type = 32769
+
+# RFC 5155 - DNS Security (DNSSEC) Hashed Authenticated Denial of Existence
+class DNSRRNSEC3(_DNSRRdummy):
+ name = "DNS NSEC3 Resource Record"
+ fields_desc = [ DNSStrField("rrname",b""),
+ ShortEnumField("type", 50, dnstypes),
+ ShortEnumField("rclass", 1, dnsclasses),
+ IntField("ttl", 0),
+ ShortField("rdlen", None),
+ ByteField("hashalg", 0),
+ BitEnumField("flags", 0, 8, {1:"Opt-Out"}),
+ ShortField("iterations", 0),
+ FieldLenField("saltlength", 0, fmt="!B", length_of="salt"),
+ StrLenField("salt", b"", length_from=lambda x: x.saltlength),
+ FieldLenField("hashlength", 0, fmt="!B", length_of="nexthashedownername"),
+ StrLenField("nexthashedownername", b"", length_from=lambda x: x.hashlength),
+ RRlistField("typebitmaps", b"")
+ ]
+
+
+class DNSRRNSEC3PARAM(_DNSRRdummy):
+ name = "DNS NSEC3PARAM Resource Record"
+ fields_desc = [ DNSStrField("rrname",b""),
+ ShortEnumField("type", 51, dnstypes),
+ ShortEnumField("rclass", 1, dnsclasses),
+ IntField("ttl", 0),
+ ShortField("rdlen", None),
+ ByteField("hashalg", 0),
+ ByteField("flags", 0),
+ ShortField("iterations", 0),
+ FieldLenField("saltlength", 0, fmt="!B", length_of="salt"),
+ StrLenField("salt", b"", length_from=lambda pkt: pkt.saltlength)
+ ]
+
+
+dnssecclasses = [ DNSRROPT, DNSRRRSIG, DNSRRDLV, DNSRRDNSKEY, DNSRRNSEC, DNSRRDS, DNSRRNSEC3, DNSRRNSEC3PARAM ]
+
+def isdnssecRR(obj):
+ list = [ isinstance (obj, cls) for cls in dnssecclasses ]
+ ret = False
+ for i in list:
+ ret = ret or i
+ return ret
+
+dnsRRdispatcher = { #6: DNSRRSOA,
+ 41: DNSRROPT, # RFC 2671
+ 43: DNSRRDS, # RFC 4034
+ 46: DNSRRRSIG, # RFC 4034
+ 47: DNSRRNSEC, # RFC 4034
+ 48: DNSRRDNSKEY, # RFC 4034
+ 50: DNSRRNSEC3, # RFC 5155
+ 51: DNSRRNSEC3PARAM, # RFC 5155
+ 32769: DNSRRDLV # RFC 4431
+ }
+
+class DNSRR(Packet):
+ name = "DNS Resource Record"
+ show_indent=0
+ fields_desc = [ DNSStrField("rrname",""),
+ ShortEnumField("type", 1, dnstypes),
+ ShortEnumField("rclass", 1, dnsclasses),
+ IntField("ttl", 0),
+ RDLenField("rdlen"),
+ RDataField("rdata", "", length_from=lambda pkt:pkt.rdlen) ]
+
+bind_layers( UDP, DNS, dport=53)
+bind_layers( UDP, DNS, sport=53)
+
+
+@conf.commands.register
+def dyndns_add(nameserver, name, rdata, type="A", ttl=10):
+ """Send a DNS add message to a nameserver for "name" to have a new "rdata"
+dyndns_add(nameserver, name, rdata, type="A", ttl=10) -> result code (0=ok)
+
+example: dyndns_add("ns1.toto.com", "dyn.toto.com", "127.0.0.1")
+RFC2136
+"""
+ zone = name[name.find(".")+1:]
+ r=sr1(IP(dst=nameserver)/UDP()/DNS(opcode=5,
+ qd=[DNSQR(qname=zone, qtype="SOA")],
+ ns=[DNSRR(rrname=name, type="A",
+ ttl=ttl, rdata=rdata)]),
+ verbose=0, timeout=5)
+ if r and r.haslayer(DNS):
+ return r.getlayer(DNS).rcode
+ else:
+ return -1
+
+
+
+
+@conf.commands.register
+def dyndns_del(nameserver, name, type="ALL", ttl=10):
+ """Send a DNS delete message to a nameserver for "name"
+dyndns_del(nameserver, name, type="ANY", ttl=10) -> result code (0=ok)
+
+example: dyndns_del("ns1.toto.com", "dyn.toto.com")
+RFC2136
+"""
+ zone = name[name.find(".")+1:]
+ r=sr1(IP(dst=nameserver)/UDP()/DNS(opcode=5,
+ qd=[DNSQR(qname=zone, qtype="SOA")],
+ ns=[DNSRR(rrname=name, type=type,
+ rclass="ANY", ttl=0, rdata=b"")]),
+ verbose=0, timeout=5)
+ if r and r.haslayer(DNS):
+ return r.getlayer(DNS).rcode
+ else:
+ return -1
+
+
+class DNS_am(AnsweringMachine):
+ function_name="dns_spoof"
+ filter = "udp port 53"
+
+ def parse_options(self, joker="192.168.1.1", match=None):
+ if match is None:
+ self.match = {}
+ else:
+ self.match = match
+ self.joker=joker
+
+ def is_request(self, req):
+ return req.haslayer(DNS) and req.getlayer(DNS).qr == 0
+
+ def make_reply(self, req):
+ ip = req.getlayer(IP)
+ dns = req.getlayer(DNS)
+ resp = IP(dst=ip.src, src=ip.dst)/UDP(dport=ip.sport,sport=ip.dport)
+ rdata = self.match.get(dns.qd.qname, self.joker)
+ resp /= DNS(id=dns.id, qr=1, qd=dns.qd,
+ an=DNSRR(rrname=dns.qd.qname, ttl=10, rdata=rdata))
+ return resp
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/dot11.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/dot11.py
new file mode 100644
index 00000000..417a470e
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/dot11.py
@@ -0,0 +1,560 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more information
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Wireless LAN according to IEEE 802.11.
+"""
+
+import re,struct
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.plist import PacketList
+from scapy.layers.l2 import *
+
+
+try:
+ from Crypto.Cipher import ARC4
+except ImportError:
+ log_loading.info("Can't import python Crypto lib. Won't be able to decrypt WEP.")
+
+
+### Fields
+
+class Dot11AddrMACField(MACField):
+ def is_applicable(self, pkt):
+ return 1
+ def addfield(self, pkt, s, val):
+ if self.is_applicable(pkt):
+ return MACField.addfield(self, pkt, s, val)
+ else:
+ return s
+ def getfield(self, pkt, s):
+ if self.is_applicable(pkt):
+ return MACField.getfield(self, pkt, s)
+ else:
+ return s,None
+
+class Dot11Addr2MACField(Dot11AddrMACField):
+ def is_applicable(self, pkt):
+ if pkt.type == 1:
+ return pkt.subtype in [ 0xb, 0xa, 0xe, 0xf] # RTS, PS-Poll, CF-End, CF-End+CF-Ack
+ return 1
+
+class Dot11Addr3MACField(Dot11AddrMACField):
+ def is_applicable(self, pkt):
+ if pkt.type in [0,2]:
+ return 1
+ return 0
+
+class Dot11Addr4MACField(Dot11AddrMACField):
+ def is_applicable(self, pkt):
+ if pkt.type == 2:
+ if pkt.FCfield & 0x3 == 0x3: # To-DS and From-DS are set
+ return 1
+ return 0
+
+
+### Layers
+
+
+class PrismHeader(Packet):
+ """ iwpriv wlan0 monitor 3 """
+ name = "Prism header"
+ fields_desc = [ LEIntField("msgcode",68),
+ LEIntField("len",144),
+ StrFixedLenField("dev","",16),
+ LEIntField("hosttime_did",0),
+ LEShortField("hosttime_status",0),
+ LEShortField("hosttime_len",0),
+ LEIntField("hosttime",0),
+ LEIntField("mactime_did",0),
+ LEShortField("mactime_status",0),
+ LEShortField("mactime_len",0),
+ LEIntField("mactime",0),
+ LEIntField("channel_did",0),
+ LEShortField("channel_status",0),
+ LEShortField("channel_len",0),
+ LEIntField("channel",0),
+ LEIntField("rssi_did",0),
+ LEShortField("rssi_status",0),
+ LEShortField("rssi_len",0),
+ LEIntField("rssi",0),
+ LEIntField("sq_did",0),
+ LEShortField("sq_status",0),
+ LEShortField("sq_len",0),
+ LEIntField("sq",0),
+ LEIntField("signal_did",0),
+ LEShortField("signal_status",0),
+ LEShortField("signal_len",0),
+ LESignedIntField("signal",0),
+ LEIntField("noise_did",0),
+ LEShortField("noise_status",0),
+ LEShortField("noise_len",0),
+ LEIntField("noise",0),
+ LEIntField("rate_did",0),
+ LEShortField("rate_status",0),
+ LEShortField("rate_len",0),
+ LEIntField("rate",0),
+ LEIntField("istx_did",0),
+ LEShortField("istx_status",0),
+ LEShortField("istx_len",0),
+ LEIntField("istx",0),
+ LEIntField("frmlen_did",0),
+ LEShortField("frmlen_status",0),
+ LEShortField("frmlen_len",0),
+ LEIntField("frmlen",0),
+ ]
+ def answers(self, other):
+ if isinstance(other, PrismHeader):
+ return self.payload.answers(other.payload)
+ else:
+ return self.payload.answers(other)
+
+class RadioTap(Packet):
+ name = "RadioTap dummy"
+ fields_desc = [ ByteField('version', 0),
+ ByteField('pad', 0),
+ FieldLenField('len', None, 'notdecoded', '<H', adjust=lambda pkt,x:x+8),
+ FlagsField('present', None, -32, ['TSFT','Flags','Rate','Channel','FHSS','dBm_AntSignal',
+ 'dBm_AntNoise','Lock_Quality','TX_Attenuation','dB_TX_Attenuation',
+ 'dBm_TX_Power', 'Antenna', 'dB_AntSignal', 'dB_AntNoise',
+ 'b14', 'b15','b16','b17','b18','b19','b20','b21','b22','b23',
+ 'b24','b25','b26','b27','b28','b29','b30','Ext']),
+ StrLenField('notdecoded', "", length_from= lambda pkt:pkt.len-8) ]
+
+class PPI(Packet):
+ name = "Per-Packet Information header (partial)"
+ fields_desc = [ ByteField("version", 0),
+ ByteField("flags", 0),
+ FieldLenField("len", None, fmt="<H", length_of="fields", adjust=lambda pkt,x:x+8),
+ LEIntField("dlt", 0),
+ StrLenField("notdecoded", "", length_from = lambda pkt:pkt.len-8)
+ ]
+
+
+
+class Dot11SCField(LEShortField):
+ def is_applicable(self, pkt):
+ return pkt.type != 1 # control frame
+ def addfield(self, pkt, s, val):
+ if self.is_applicable(pkt):
+ return LEShortField.addfield(self, pkt, s, val)
+ else:
+ return s
+ def getfield(self, pkt, s):
+ if self.is_applicable(pkt):
+ return LEShortField.getfield(self, pkt, s)
+ else:
+ return s,None
+
+class Dot11(Packet):
+ name = "802.11"
+ fields_desc = [
+ BitField("subtype", 0, 4),
+ BitEnumField("type", 0, 2, ["Management", "Control", "Data", "Reserved"]),
+ BitField("proto", 0, 2),
+ FlagsField("FCfield", 0, 8, ["to-DS", "from-DS", "MF", "retry", "pw-mgt", "MD", "wep", "order"]),
+ ShortField("ID",0),
+ MACField("addr1", ETHER_ANY),
+ Dot11Addr2MACField("addr2", ETHER_ANY),
+ Dot11Addr3MACField("addr3", ETHER_ANY),
+ Dot11SCField("SC", 0),
+ Dot11Addr4MACField("addr4", ETHER_ANY)
+ ]
+ def mysummary(self):
+ return self.sprintf("802.11 %Dot11.type% %Dot11.subtype% %Dot11.addr2% > %Dot11.addr1%")
+ def guess_payload_class(self, payload):
+ if self.type == 0x02 and (self.subtype >= 0x08 and self.subtype <=0xF and self.subtype != 0xD):
+ return Dot11QoS
+ elif self.FCfield & 0x40:
+ return Dot11WEP
+ else:
+ return Packet.guess_payload_class(self, payload)
+ def answers(self, other):
+ if isinstance(other,Dot11):
+ if self.type == 0: # management
+ if self.addr1.lower() != other.addr2.lower(): # check resp DA w/ req SA
+ return 0
+ if (other.subtype,self.subtype) in [(0,1),(2,3),(4,5)]:
+ return 1
+ if self.subtype == other.subtype == 11: # auth
+ return self.payload.answers(other.payload)
+ elif self.type == 1: # control
+ return 0
+ elif self.type == 2: # data
+ return self.payload.answers(other.payload)
+ elif self.type == 3: # reserved
+ return 0
+ return 0
+ def unwep(self, key=None, warn=1):
+ if self.FCfield & 0x40 == 0:
+ if warn:
+ warning("No WEP to remove")
+ return
+ if isinstance(self.payload.payload, NoPayload):
+ if key or conf.wepkey:
+ self.payload.decrypt(key)
+ if isinstance(self.payload.payload, NoPayload):
+ if warn:
+ warning("Dot11 can't be decrypted. Check conf.wepkey.")
+ return
+ self.FCfield &= ~0x40
+ self.payload=self.payload.payload
+
+
+class Dot11QoS(Packet):
+ name = "802.11 QoS"
+ fields_desc = [ BitField("TID",None,4),
+ BitField("EOSP",None,1),
+ BitField("Ack Policy",None,2),
+ BitField("Reserved",None,1),
+ ByteField("TXOP",None) ]
+ def guess_payload_class(self, payload):
+ if isinstance(self.underlayer, Dot11):
+ if self.underlayer.FCfield & 0x40:
+ return Dot11WEP
+ return Packet.guess_payload_class(self, payload)
+
+
+capability_list = [ "res8", "res9", "short-slot", "res11",
+ "res12", "DSSS-OFDM", "res14", "res15",
+ "ESS", "IBSS", "CFP", "CFP-req",
+ "privacy", "short-preamble", "PBCC", "agility"]
+
+reason_code = {0:"reserved",1:"unspec", 2:"auth-expired",
+ 3:"deauth-ST-leaving",
+ 4:"inactivity", 5:"AP-full", 6:"class2-from-nonauth",
+ 7:"class3-from-nonass", 8:"disas-ST-leaving",
+ 9:"ST-not-auth"}
+
+status_code = {0:"success", 1:"failure", 10:"cannot-support-all-cap",
+ 11:"inexist-asso", 12:"asso-denied", 13:"algo-unsupported",
+ 14:"bad-seq-num", 15:"challenge-failure",
+ 16:"timeout", 17:"AP-full",18:"rate-unsupported" }
+
+class Dot11Beacon(Packet):
+ name = "802.11 Beacon"
+ fields_desc = [ LELongField("timestamp", 0),
+ LEShortField("beacon_interval", 0x0064),
+ FlagsField("cap", 0, 16, capability_list) ]
+
+
+class Dot11Elt(Packet):
+ name = "802.11 Information Element"
+ fields_desc = [ ByteEnumField("ID", 0, {0:"SSID", 1:"Rates", 2: "FHset", 3:"DSset", 4:"CFset", 5:"TIM", 6:"IBSSset", 16:"challenge",
+ 42:"ERPinfo", 46:"QoS Capability", 47:"ERPinfo", 48:"RSNinfo", 50:"ESRates",221:"vendor",68:"reserved"}),
+ FieldLenField("len", None, "info", "B"),
+ StrLenField("info", "", length_from=lambda x:x.len) ]
+ def mysummary(self):
+ if self.ID == 0:
+ return "SSID=%s"%repr(self.info),[Dot11]
+ else:
+ return ""
+
+class Dot11ATIM(Packet):
+ name = "802.11 ATIM"
+
+class Dot11Disas(Packet):
+ name = "802.11 Disassociation"
+ fields_desc = [ LEShortEnumField("reason", 1, reason_code) ]
+
+class Dot11AssoReq(Packet):
+ name = "802.11 Association Request"
+ fields_desc = [ FlagsField("cap", 0, 16, capability_list),
+ LEShortField("listen_interval", 0x00c8) ]
+
+
+class Dot11AssoResp(Packet):
+ name = "802.11 Association Response"
+ fields_desc = [ FlagsField("cap", 0, 16, capability_list),
+ LEShortField("status", 0),
+ LEShortField("AID", 0) ]
+
+class Dot11ReassoReq(Packet):
+ name = "802.11 Reassociation Request"
+ fields_desc = [ FlagsField("cap", 0, 16, capability_list),
+ LEShortField("listen_interval", 0x00c8),
+ MACField("current_AP", ETHER_ANY) ]
+
+
+class Dot11ReassoResp(Dot11AssoResp):
+ name = "802.11 Reassociation Response"
+
+class Dot11ProbeReq(Packet):
+ name = "802.11 Probe Request"
+
+class Dot11ProbeResp(Packet):
+ name = "802.11 Probe Response"
+ fields_desc = [ LELongField("timestamp", 0),
+ LEShortField("beacon_interval", 0x0064),
+ FlagsField("cap", 0, 16, capability_list) ]
+
+class Dot11Auth(Packet):
+ name = "802.11 Authentication"
+ fields_desc = [ LEShortEnumField("algo", 0, ["open", "sharedkey"]),
+ LEShortField("seqnum", 0),
+ LEShortEnumField("status", 0, status_code) ]
+ def answers(self, other):
+ if self.seqnum == other.seqnum+1:
+ return 1
+ return 0
+
+class Dot11Deauth(Packet):
+ name = "802.11 Deauthentication"
+ fields_desc = [ LEShortEnumField("reason", 1, reason_code) ]
+
+
+
+class Dot11WEP(Packet):
+ name = "802.11 WEP packet"
+ fields_desc = [ StrFixedLenField("iv", b"\0\0\0", 3),
+ ByteField("keyid", 0),
+ StrField("wepdata",None,remain=4),
+ IntField("icv",None) ]
+
+ def post_dissect(self, s):
+# self.icv, = struct.unpack("!I",self.wepdata[-4:])
+# self.wepdata = self.wepdata[:-4]
+ self.decrypt()
+
+ def build_payload(self):
+ if self.wepdata is None:
+ return Packet.build_payload(self)
+ return b""
+
+ def post_build(self, p, pay):
+ if self.wepdata is None:
+ key = conf.wepkey
+ if key:
+ if self.icv is None:
+ pay += struct.pack("<I",crc32(pay))
+ icv = b""
+ else:
+ icv = p[4:8]
+ c = ARC4.new(self.iv+key)
+ p = p[:4]+c.encrypt(pay)+icv
+ else:
+ warning("No WEP key set (conf.wepkey).. strange results expected..")
+ return p
+
+
+ def decrypt(self,key=None):
+ if key is None:
+ key = conf.wepkey
+ if key:
+ c = ARC4.new(self.iv+key)
+ self.add_payload(LLC(c.decrypt(self.wepdata)))
+
+
+bind_layers( PrismHeader, Dot11, )
+bind_layers( RadioTap, Dot11, )
+bind_layers( PPI, Dot11, dlt=105)
+bind_layers( Dot11, LLC, type=2)
+bind_layers( Dot11QoS, LLC, )
+bind_layers( Dot11, Dot11AssoReq, subtype=0, type=0)
+bind_layers( Dot11, Dot11AssoResp, subtype=1, type=0)
+bind_layers( Dot11, Dot11ReassoReq, subtype=2, type=0)
+bind_layers( Dot11, Dot11ReassoResp, subtype=3, type=0)
+bind_layers( Dot11, Dot11ProbeReq, subtype=4, type=0)
+bind_layers( Dot11, Dot11ProbeResp, subtype=5, type=0)
+bind_layers( Dot11, Dot11Beacon, subtype=8, type=0)
+bind_layers( Dot11, Dot11ATIM, subtype=9, type=0)
+bind_layers( Dot11, Dot11Disas, subtype=10, type=0)
+bind_layers( Dot11, Dot11Auth, subtype=11, type=0)
+bind_layers( Dot11, Dot11Deauth, subtype=12, type=0)
+bind_layers( Dot11Beacon, Dot11Elt, )
+bind_layers( Dot11AssoReq, Dot11Elt, )
+bind_layers( Dot11AssoResp, Dot11Elt, )
+bind_layers( Dot11ReassoReq, Dot11Elt, )
+bind_layers( Dot11ReassoResp, Dot11Elt, )
+bind_layers( Dot11ProbeReq, Dot11Elt, )
+bind_layers( Dot11ProbeResp, Dot11Elt, )
+bind_layers( Dot11Auth, Dot11Elt, )
+bind_layers( Dot11Elt, Dot11Elt, )
+
+
+conf.l2types.register(105, Dot11)
+conf.l2types.register_num2layer(801, Dot11)
+conf.l2types.register(119, PrismHeader)
+conf.l2types.register_num2layer(802, PrismHeader)
+conf.l2types.register(127, RadioTap)
+conf.l2types.register(0xc0, PPI)
+conf.l2types.register_num2layer(803, RadioTap)
+
+
+class WiFi_am(AnsweringMachine):
+ """Before using this, initialize "iffrom" and "ifto" interfaces:
+iwconfig iffrom mode monitor
+iwpriv orig_ifto hostapd 1
+ifconfig ifto up
+note: if ifto=wlan0ap then orig_ifto=wlan0
+note: ifto and iffrom must be set on the same channel
+ex:
+ifconfig eth1 up
+iwconfig eth1 mode monitor
+iwconfig eth1 channel 11
+iwpriv wlan0 hostapd 1
+ifconfig wlan0ap up
+iwconfig wlan0 channel 11
+iwconfig wlan0 essid dontexist
+iwconfig wlan0 mode managed
+"""
+ function_name = "airpwn"
+ filter = None
+
+ def parse_options(self, iffrom, ifto, replace, pattern="", ignorepattern=""):
+ self.iffrom = iffrom
+ self.ifto = ifto
+ ptrn = re.compile(pattern)
+ iptrn = re.compile(ignorepattern)
+
+ def is_request(self, pkt):
+ if not isinstance(pkt,Dot11):
+ return 0
+ if not pkt.FCfield & 1:
+ return 0
+ if not pkt.haslayer(TCP):
+ return 0
+ ip = pkt.getlayer(IP)
+ tcp = pkt.getlayer(TCP)
+ pay = str(tcp.payload)
+ if not self.ptrn.match(pay):
+ return 0
+ if self.iptrn.match(pay):
+ return 0
+
+ def make_reply(self, p):
+ ip = p.getlayer(IP)
+ tcp = p.getlayer(TCP)
+ pay = str(tcp.payload)
+ del(p.payload.payload.payload)
+ p.FCfield="from-DS"
+ p.addr1,p.addr2 = p.addr2,p.addr1
+ p /= IP(src=ip.dst,dst=ip.src)
+ p /= TCP(sport=tcp.dport, dport=tcp.sport,
+ seq=tcp.ack, ack=tcp.seq+len(pay),
+ flags="PA")
+ q = p.copy()
+ p /= self.replace
+ q.ID += 1
+ q.getlayer(TCP).flags="RA"
+ q.getlayer(TCP).seq+=len(replace)
+ return [p,q]
+
+ def print_reply(self):
+ print(p.sprintf("Sent %IP.src%:%IP.sport% > %IP.dst%:%TCP.dport%"))
+
+ def send_reply(self, reply):
+ sendp(reply, iface=self.ifto, **self.optsend)
+
+ def sniff(self):
+ sniff(iface=self.iffrom, **self.optsniff)
+
+
+
+plst=[]
+def get_toDS():
+ global plst
+ while 1:
+ p,=sniff(iface="eth1",count=1)
+ if not isinstance(p,Dot11):
+ continue
+ if p.FCfield & 1:
+ plst.append(p)
+ print(".")
+
+
+# if not ifto.endswith("ap"):
+# print("iwpriv %s hostapd 1" % ifto)
+# os.system("iwpriv %s hostapd 1" % ifto)
+# ifto += "ap"
+#
+# os.system("iwconfig %s mode monitor" % iffrom)
+#
+
+def airpwn(iffrom, ifto, replace, pattern="", ignorepattern=""):
+ """Before using this, initialize "iffrom" and "ifto" interfaces:
+iwconfig iffrom mode monitor
+iwpriv orig_ifto hostapd 1
+ifconfig ifto up
+note: if ifto=wlan0ap then orig_ifto=wlan0
+note: ifto and iffrom must be set on the same channel
+ex:
+ifconfig eth1 up
+iwconfig eth1 mode monitor
+iwconfig eth1 channel 11
+iwpriv wlan0 hostapd 1
+ifconfig wlan0ap up
+iwconfig wlan0 channel 11
+iwconfig wlan0 essid dontexist
+iwconfig wlan0 mode managed
+"""
+
+ ptrn = re.compile(pattern)
+ iptrn = re.compile(ignorepattern)
+ def do_airpwn(p, ifto=ifto, replace=replace, ptrn=ptrn, iptrn=iptrn):
+ if not isinstance(p,Dot11):
+ return
+ if not p.FCfield & 1:
+ return
+ if not p.haslayer(TCP):
+ return
+ ip = p.getlayer(IP)
+ tcp = p.getlayer(TCP)
+ pay = str(tcp.payload)
+# print "got tcp"
+ if not ptrn.match(pay):
+ return
+# print "match 1"
+ if iptrn.match(pay):
+ return
+# print "match 2"
+ del(p.payload.payload.payload)
+ p.FCfield="from-DS"
+ p.addr1,p.addr2 = p.addr2,p.addr1
+ q = p.copy()
+ p /= IP(src=ip.dst,dst=ip.src)
+ p /= TCP(sport=tcp.dport, dport=tcp.sport,
+ seq=tcp.ack, ack=tcp.seq+len(pay),
+ flags="PA")
+ q = p.copy()
+ p /= replace
+ q.ID += 1
+ q.getlayer(TCP).flags="RA"
+ q.getlayer(TCP).seq+=len(replace)
+
+ sendp([p,q], iface=ifto, verbose=0)
+# print "send",repr(p)
+# print "send",repr(q)
+ print(p.sprintf("Sent %IP.src%:%IP.sport% > %IP.dst%:%TCP.dport%"))
+
+ sniff(iface=iffrom,prn=do_airpwn)
+
+
+
+conf.stats_dot11_protocols += [Dot11WEP, Dot11Beacon, ]
+
+
+
+
+
+class Dot11PacketList(PacketList):
+ def __init__(self, res=None, name="Dot11List", stats=None):
+ if stats is None:
+ stats = conf.stats_dot11_protocols
+
+ PacketList.__init__(self, res, name, stats)
+ def toEthernet(self):
+ #data = map(lambda x:x.getlayer(Dot11), filter(lambda x : x.haslayer(Dot11) and x.type == 2, self.res))
+ data = [ x.getlayer(Dot11) for x in self.res if x.haslayer(Dot11) and x.type == 2 ]
+ r2 = []
+ for p in data:
+ q = p.copy()
+ q.unwep()
+ r2.append(Ether()/q.payload.payload.payload) #Dot11/LLC/SNAP/IP
+ return PacketList(r2,name="Ether from %s"%self.listname)
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/gprs.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/gprs.py
new file mode 100644
index 00000000..31a931fe
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/gprs.py
@@ -0,0 +1,21 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more information
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+GPRS (General Packet Radio Service) for mobile data communication.
+"""
+
+from scapy.fields import *
+from scapy.packet import *
+from scapy.layers.inet import IP
+
+class GPRS(Packet):
+ name = "GPRSdummy"
+ fields_desc = [
+ StrStopField("dummy","","\x65\x00\x00",1)
+ ]
+
+
+bind_layers( GPRS, IP, )
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/hsrp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/hsrp.py
new file mode 100644
index 00000000..7193b97e
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/hsrp.py
@@ -0,0 +1,79 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more information
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+#############################################################################
+## ##
+## hsrp.py --- HSRP protocol support for Scapy ##
+## ##
+## Copyright (C) 2010 Mathieu RENARD mathieu.renard(at)gmail.com ##
+## ##
+## This program is free software; you can redistribute it and/or modify it ##
+## under the terms of the GNU General Public License version 2 as ##
+## published by the Free Software Foundation; version 2. ##
+## ##
+## This program is distributed in the hope that it will be useful, but ##
+## WITHOUT ANY WARRANTY; without even the implied warranty of ##
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ##
+## General Public License for more details. ##
+## ##
+#############################################################################
+## HSRP Version 1
+## Ref. RFC 2281
+## HSRP Version 2
+## Ref. http://www.smartnetworks.jp/2006/02/hsrp_8_hsrp_version_2.html
+##
+## $Log: hsrp.py,v $
+## Revision 0.2 2011/05/01 15:23:34 mrenard
+## Cleanup code
+
+"""
+HSRP (Hot Standby Router Protocol): proprietary redundancy protocol for Cisco routers.
+"""
+
+from scapy.fields import *
+from scapy.packet import *
+from scapy.layers.inet import UDP
+
+
+class HSRP(Packet):
+ name = "HSRP"
+ fields_desc = [
+ ByteField("version", 0),
+ ByteEnumField("opcode", 0, {0: "Hello", 1: "Coup", 2: "Resign", 3: "Advertise"}),
+ ByteEnumField("state", 16, {0: "Initial", 1: "Learn", 2: "Listen", 4: "Speak", 8: "Standby", 16: "Active"}),
+ ByteField("hellotime", 3),
+ ByteField("holdtime", 10),
+ ByteField("priority", 120),
+ ByteField("group", 1),
+ ByteField("reserved", 0),
+ StrFixedLenField("auth", "cisco" + "\00" * 3, 8),
+ IPField("virtualIP", "192.168.1.1")]
+
+ def guess_payload_class(self, payload):
+ if self.underlayer.len > 28:
+ return HSRPmd5
+ else:
+ return Packet.guess_payload_class(self, payload)
+
+
+class HSRPmd5(Packet):
+ name = "HSRP MD5 Authentication"
+ fields_desc = [
+ ByteEnumField("type", 4, {4: "MD5 authentication"}),
+ ByteField("len", None),
+ ByteEnumField("algo", 0, {1: "MD5"}),
+ ByteField("padding", 0x00),
+ XShortField("flags", 0x00),
+ IPField("sourceip", None),
+ XIntField("keyid", 0x00),
+ StrFixedLenField("authdigest", "\00" * 16, 16)]
+
+ def post_build(self, p, pay):
+ if self.len is None and pay:
+ l = len(pay)
+ p = p[:1] + hex(l)[30:] + p[30:]
+ return p
+
+bind_layers(UDP, HSRP, dport=1985, sport=1985)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/inet.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/inet.py
new file mode 100644
index 00000000..04b99e89
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/inet.py
@@ -0,0 +1,1569 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more information
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+IPv4 (Internet Protocol v4).
+"""
+
+import os,time,struct,re,socket,types
+from select import select
+from collections import defaultdict
+from scapy.utils import checksum
+from scapy.layers.l2 import *
+from scapy.config import conf
+from scapy.fields import *
+from scapy.packet import *
+from scapy.volatile import *
+from scapy.sendrecv import sr,sr1,srp1
+from scapy.plist import PacketList,SndRcvList
+from scapy.automaton import Automaton,ATMT
+
+import scapy.as_resolvers
+
+
+####################
+## IP Tools class ##
+####################
+
+class IPTools:
+ """Add more powers to a class that have a "src" attribute."""
+ def whois(self):
+ os.system("whois %s" % self.src)
+ def ottl(self):
+ t = [32,64,128,255]+[self.ttl]
+ t.sort()
+ return t[t.index(self.ttl)+1]
+ def hops(self):
+ return self.ottl()-self.ttl-1
+
+
+_ip_options_names = { 0: "end_of_list",
+ 1: "nop",
+ 2: "security",
+ 3: "loose_source_route",
+ 4: "timestamp",
+ 5: "extended_security",
+ 6: "commercial_security",
+ 7: "record_route",
+ 8: "stream_id",
+ 9: "strict_source_route",
+ 10: "experimental_measurement",
+ 11: "mtu_probe",
+ 12: "mtu_reply",
+ 13: "flow_control",
+ 14: "access_control",
+ 15: "encode",
+ 16: "imi_traffic_descriptor",
+ 17: "extended_IP",
+ 18: "traceroute",
+ 19: "address_extension",
+ 20: "router_alert",
+ 21: "selective_directed_broadcast_mode",
+ 23: "dynamic_packet_state",
+ 24: "upstream_multicast_packet",
+ 25: "quick_start",
+ 30: "rfc4727_experiment",
+ }
+
+
+class _IPOption_HDR(Packet):
+ fields_desc = [ BitField("copy_flag",0, 1),
+ BitEnumField("optclass",0,2,{0:"control",2:"debug"}),
+ BitEnumField("option",0,5, _ip_options_names) ]
+
+class IPOption(Packet):
+ name = "IP Option"
+ fields_desc = [ _IPOption_HDR,
+ FieldLenField("length", None, fmt="B", # Only option 0 and 1 have no length and value
+ length_of="value", adjust=lambda pkt,l:l+2),
+ StrLenField("value", "",length_from=lambda pkt:pkt.length-2) ]
+
+ def extract_padding(self, p):
+ return b"",p
+
+ registered_ip_options = {}
+ @classmethod
+ def register_variant(cls):
+ cls.registered_ip_options[cls.option.default] = cls
+ @classmethod
+ def dispatch_hook(cls, pkt=None, *args, **kargs):
+ if pkt:
+ opt = pkt[0]&0x1f
+ if opt in cls.registered_ip_options:
+ return cls.registered_ip_options[opt]
+ return cls
+
+class IPOption_EOL(IPOption):
+ name = "IP Option End of Options List"
+ option = 0
+ fields_desc = [ _IPOption_HDR ]
+
+
+class IPOption_NOP(IPOption):
+ name = "IP Option No Operation"
+ option=1
+ fields_desc = [ _IPOption_HDR ]
+
+class IPOption_Security(IPOption):
+ name = "IP Option Security"
+ copy_flag = 1
+ option = 2
+ fields_desc = [ _IPOption_HDR,
+ ByteField("length", 11),
+ ShortField("security",0),
+ ShortField("compartment",0),
+ ShortField("handling_restrictions",0),
+ StrFixedLenField("transmission_control_code","xxx",3),
+ ]
+
+class IPOption_LSRR(IPOption):
+ name = "IP Option Loose Source and Record Route"
+ copy_flag = 1
+ option = 3
+ fields_desc = [ _IPOption_HDR,
+ FieldLenField("length", None, fmt="B",
+ length_of="routers", adjust=lambda pkt,l:l+3),
+ ByteField("pointer",4), # 4 is first IP
+ FieldListField("routers",[],IPField("","0.0.0.0"),
+ length_from=lambda pkt:pkt.length-3)
+ ]
+ def get_current_router(self):
+ return self.routers[self.pointer//4-1]
+
+class IPOption_RR(IPOption_LSRR):
+ name = "IP Option Record Route"
+ option = 7
+
+class IPOption_SSRR(IPOption_LSRR):
+ name = "IP Option Strict Source and Record Route"
+ option = 9
+
+class IPOption_Stream_Id(IPOption):
+ name = "IP Option Stream ID"
+ option = 8
+ fields_desc = [ _IPOption_HDR,
+ ByteField("length", 4),
+ ShortField("security",0), ]
+
+class IPOption_MTU_Probe(IPOption):
+ name = "IP Option MTU Probe"
+ option = 11
+ fields_desc = [ _IPOption_HDR,
+ ByteField("length", 4),
+ ShortField("mtu",0), ]
+
+class IPOption_MTU_Reply(IPOption_MTU_Probe):
+ name = "IP Option MTU Reply"
+ option = 12
+
+class IPOption_Traceroute(IPOption):
+ name = "IP Option Traceroute"
+ copy_flag = 1
+ option = 18
+ fields_desc = [ _IPOption_HDR,
+ ByteField("length", 12),
+ ShortField("id",0),
+ ShortField("outbound_hops",0),
+ ShortField("return_hops",0),
+ IPField("originator_ip","0.0.0.0") ]
+
+class IPOption_Address_Extension(IPOption):
+ name = "IP Option Address Extension"
+ copy_flag = 1
+ option = 19
+ fields_desc = [ _IPOption_HDR,
+ ByteField("length", 10),
+ IPField("src_ext","0.0.0.0"),
+ IPField("dst_ext","0.0.0.0") ]
+
+class IPOption_Router_Alert(IPOption):
+ name = "IP Option Router Alert"
+ copy_flag = 1
+ option = 20
+ fields_desc = [ _IPOption_HDR,
+ ByteField("length", 4),
+ ShortEnumField("alert",0, {0:"router_shall_examine_packet"}), ]
+
+
+class IPOption_SDBM(IPOption):
+ name = "IP Option Selective Directed Broadcast Mode"
+ copy_flag = 1
+ option = 21
+ fields_desc = [ _IPOption_HDR,
+ FieldLenField("length", None, fmt="B",
+ length_of="addresses", adjust=lambda pkt,l:l+2),
+ FieldListField("addresses",[],IPField("","0.0.0.0"),
+ length_from=lambda pkt:pkt.length-2)
+ ]
+
+
+
+TCPOptions = (
+ { 0 : ("EOL",None),
+ 1 : ("NOP",None),
+ 2 : ("MSS","!H"),
+ 3 : ("WScale","!B"),
+ 4 : ("SAckOK",None),
+ 5 : ("SAck","!"),
+ 8 : ("Timestamp","!II"),
+ 14 : ("AltChkSum","!BH"),
+ 15 : ("AltChkSumOpt",None),
+ 25 : ("Mood","!p")
+ },
+ { "EOL":0,
+ "NOP":1,
+ "MSS":2,
+ "WScale":3,
+ "SAckOK":4,
+ "SAck":5,
+ "Timestamp":8,
+ "AltChkSum":14,
+ "AltChkSumOpt":15,
+ "Mood":25
+ } )
+
+class TCPOptionsField(StrField):
+ islist=1
+ def getfield(self, pkt, s):
+ opsz = (pkt.dataofs-5)*4
+ if opsz < 0:
+ warning("bad dataofs (%i). Assuming dataofs=5"%pkt.dataofs)
+ opsz = 0
+ return s[opsz:],self.m2i(pkt,s[:opsz])
+ def m2i(self, pkt, x):
+ opt = []
+ while x:
+ onum = x[0]
+ if onum == 0:
+ opt.append(("EOL",None))
+ x=x[1:]
+ break
+ if onum == 1:
+ opt.append(("NOP",None))
+ x=x[1:]
+ continue
+ olen = x[1]
+ if olen < 2:
+ warning("Malformed TCP option (announced length is %i)" % olen)
+ olen = 2
+ oval = x[2:olen]
+ if onum in TCPOptions[0]:
+ oname, ofmt = TCPOptions[0][onum]
+ if onum == 5: #SAck
+ ofmt += "%iI" % (len(oval)//4)
+ if ofmt and struct.calcsize(ofmt) == len(oval):
+ oval = struct.unpack(ofmt, oval)
+ if len(oval) == 1:
+ oval = oval[0]
+ opt.append((oname, oval))
+ else:
+ opt.append((onum, oval))
+ x = x[olen:]
+ return opt
+
+ def i2m(self, pkt, x):
+ opt = b""
+ for oname,oval in x:
+ if type(oname) is str:
+ if oname == "NOP":
+ opt += b"\x01"
+ continue
+ elif oname == "EOL":
+ opt += b"\x00"
+ continue
+ elif oname in TCPOptions[1]:
+ onum = TCPOptions[1][oname]
+ ofmt = TCPOptions[0][onum][1]
+ if onum == 5: #SAck
+ ofmt += b"%iI" % len(oval)
+ if ofmt is not None and (type(oval) is not str or "s" in ofmt):
+ if type(oval) is not tuple:
+ oval = (oval,)
+ oval = struct.pack(ofmt, *oval)
+ else:
+ warning("option [%s] unknown. Skipped."%oname)
+ continue
+ else:
+ onum = oname
+ if type(oval) is not str:
+ warning("option [%i] is not string."%onum)
+ continue
+ opt += bytes([(onum), (2+len(oval))]) + oval
+ return opt+b"\x00"*(3-((len(opt)+3)%4))
+ def randval(self):
+ return [] # XXX
+
+
+class ICMPTimeStampField(IntField):
+ re_hmsm = re.compile("([0-2]?[0-9])[Hh:](([0-5]?[0-9])([Mm:]([0-5]?[0-9])([sS:.]([0-9]{0,3}))?)?)?$")
+ def i2repr(self, pkt, val):
+ if val is None:
+ return "--"
+ else:
+ sec, milli = divmod(val, 1000)
+ min, sec = divmod(sec, 60)
+ hour, min = divmod(min, 60)
+ return "%d:%d:%d.%d" %(hour, min, sec, int(milli))
+ def any2i(self, pkt, val):
+ if type(val) is str:
+ hmsms = self.re_hmsm.match(val)
+ if hmsms:
+ h,_,m,_,s,_,ms = hmsms = hmsms.groups()
+ ms = int(((ms or "")+"000")[:3])
+ val = ((int(h)*60+int(m or 0))*60+int(s or 0))*1000+ms
+ else:
+ val = 0
+ elif val is None:
+ val = int((time.time()%(24*60*60))*1000)
+ return val
+
+
+class IP(Packet, IPTools):
+ name = "IP"
+ fields_desc = [ BitField("version" , 4 , 4),
+ BitField("ihl", None, 4),
+ XByteField("tos", 0),
+ ShortField("len", None),
+ ShortField("id", 1),
+ FlagsField("flags", 0, 3, ["MF","DF","evil"]),
+ BitField("frag", 0, 13),
+ ByteField("ttl", 64),
+ ByteEnumField("proto", 0, IP_PROTOS),
+ XShortField("chksum", None),
+ #IPField("src", "127.0.0.1"),
+ #Emph(SourceIPField("src","dst")),
+ #Emph(IPField("dst", "127.0.0.1")),
+
+ Emph(IPField("src", "16.0.0.1")),
+ Emph(IPField("dst", "48.0.0.1")),
+ PacketListField("options", [], IPOption, length_from=lambda p:p.ihl*4-20) ]
+ def post_build(self, p, pay):
+ ihl = self.ihl
+ p += b"\0"*((-len(p))%4) # pad IP options if needed
+ if ihl is None:
+ ihl = len(p)//4
+ p = bytes([((self.version&0xf)<<4) | ihl&0x0f])+p[1:]
+ if self.len is None:
+ l = len(p)+len(pay)
+ p = p[:2]+struct.pack("!H", l)+p[4:]
+ if self.chksum is None:
+ ck = checksum(p)
+ p = p[:10]+bytes([ck>>8])+bytes([ck&0xff])+p[12:]
+ return p+pay
+
+ def extract_padding(self, s):
+ l = self.len - (self.ihl << 2)
+ return s[:l],s[l:]
+
+ def send(self, s, slp=0):
+ for p in self:
+ try:
+ s.sendto(bytes(p), (p.dst,0))
+ except socket.error as msg:
+ log_runtime.error(msg)
+ if slp:
+ time.sleep(slp)
+ def route(self):
+ dst = self.dst
+ if isinstance(dst,Gen):
+ dst = next(iter(dst))
+ return conf.route.route(dst)
+ def hashret(self):
+ if ( (self.proto == socket.IPPROTO_ICMP)
+ and (isinstance(self.payload, ICMP))
+ and (self.payload.type in [3,4,5,11,12]) ):
+ return self.payload.payload.hashret()
+ else:
+ if conf.checkIPsrc and conf.checkIPaddr:
+ return strxor(inet_aton(self.src),inet_aton(self.dst))+struct.pack("B",self.proto)+self.payload.hashret()
+ else:
+ return struct.pack("B", self.proto)+self.payload.hashret()
+ def answers(self, other):
+ if not isinstance(other,IP):
+ return 0
+ if conf.checkIPaddr and (self.dst != other.src):
+ return 0
+ if ( (self.proto == socket.IPPROTO_ICMP) and
+ (isinstance(self.payload, ICMP)) and
+ (self.payload.type in [3,4,5,11,12]) ):
+ # ICMP error message
+ return self.payload.payload.answers(other)
+
+ else:
+ if ( (conf.checkIPaddr and (self.src != other.dst)) or
+ (self.proto != other.proto) ):
+ return 0
+ return self.payload.answers(other.payload)
+ def mysummary(self):
+ s = self.sprintf("%IP.src% > %IP.dst% %IP.proto%")
+ if self.frag:
+ s += " frag:%i" % self.frag
+ return s
+
+ def fragment(self, fragsize=1480):
+ """Fragment IP datagrams"""
+ fragsize = (fragsize+7)//8*8
+ lst = []
+ fnb = 0
+ fl = self
+ while fl.underlayer is not None:
+ fnb += 1
+ fl = fl.underlayer
+
+ for p in fl:
+ s = bytes(p[fnb].payload)
+ nb = (len(s)+fragsize-1)//fragsize
+ for i in range(nb):
+ q = p.copy()
+ del(q[fnb].payload)
+ del(q[fnb].chksum)
+ del(q[fnb].len)
+ if i == nb-1:
+ q[IP].flags &= ~1
+ else:
+ q[IP].flags |= 1
+ q[IP].frag = i*fragsize//8
+ r = conf.raw_layer(load=s[i*fragsize:(i+1)*fragsize])
+ r.overload_fields = p[IP].payload.overload_fields.copy()
+ q.add_payload(r)
+ lst.append(q)
+ return lst
+
+
+class TCP(Packet):
+ name = "TCP"
+ fields_desc = [ ShortEnumField("sport", 20, TCP_SERVICES),
+ ShortEnumField("dport", 80, TCP_SERVICES),
+ IntField("seq", 0),
+ IntField("ack", 0),
+ BitField("dataofs", None, 4),
+ BitField("reserved", 0, 4),
+ FlagsField("flags", 0x2, 8, "FSRPAUEC"),
+ ShortField("window", 8192),
+ XShortField("chksum", None),
+ ShortField("urgptr", 0),
+ TCPOptionsField("options", {}) ]
+ def post_build(self, p, pay):
+ p += pay
+ dataofs = self.dataofs
+ if dataofs is None:
+ dataofs = 5+((len(self.get_field("options").i2m(self,self.options))+3)//4)
+ p = p[:12]+bytes([(dataofs << 4) | (p[12])&0x0f])+p[13:]
+ if self.chksum is None:
+ if isinstance(self.underlayer, IP):
+ if self.underlayer.len is not None:
+ ln = self.underlayer.len-20
+ else:
+ ln = len(p)
+ psdhdr = struct.pack("!4s4sHH",
+ inet_aton(self.underlayer.src),
+ inet_aton(self.underlayer.dst),
+ self.underlayer.proto,
+ ln)
+ ck=checksum(psdhdr+p)
+ p = p[:16]+struct.pack("!H", ck)+p[18:]
+ elif conf.ipv6_enabled and isinstance(self.underlayer, scapy.layers.inet6.IPv6) or isinstance(self.underlayer, scapy.layers.inet6._IPv6ExtHdr):
+ ck = scapy.layers.inet6.in6_chksum(socket.IPPROTO_TCP, self.underlayer, p)
+ p = p[:16]+struct.pack("!H", ck)+p[18:]
+ else:
+ warning("No IP underlayer to compute checksum. Leaving null.")
+ return p
+ def hashret(self):
+ if conf.checkIPsrc:
+ return struct.pack("H",self.sport ^ self.dport)+self.payload.hashret()
+ else:
+ return self.payload.hashret()
+ def answers(self, other):
+ if not isinstance(other, TCP):
+ return 0
+ if conf.checkIPsrc:
+ if not ((self.sport == other.dport) and
+ (self.dport == other.sport)):
+ return 0
+ if (abs(other.seq-self.ack) > 2+len(other.payload)):
+ return 0
+ return 1
+ def mysummary(self):
+ if isinstance(self.underlayer, IP):
+ return self.underlayer.sprintf("TCP %IP.src%:%TCP.sport% > %IP.dst%:%TCP.dport% %TCP.flags%")
+ elif conf.ipv6_enabled and isinstance(self.underlayer, scapy.layers.inet6.IPv6):
+ return self.underlayer.sprintf("TCP %IPv6.src%:%TCP.sport% > %IPv6.dst%:%TCP.dport% %TCP.flags%")
+ else:
+ return self.sprintf("TCP %TCP.sport% > %TCP.dport% %TCP.flags%")
+
+class UDP(Packet):
+ name = "UDP"
+ fields_desc = [ ShortEnumField("sport", 53, UDP_SERVICES),
+ ShortEnumField("dport", 53, UDP_SERVICES),
+ ShortField("len", None),
+ XShortField("chksum", None), ]
+ def post_build(self, p, pay):
+ p += pay
+ l = self.len
+ if l is None:
+ l = len(p)
+ p = p[:4]+struct.pack("!H",l)+p[6:]
+ if self.chksum is None:
+ if isinstance(self.underlayer, IP):
+ if self.underlayer.len is not None:
+ ln = self.underlayer.len-20
+ else:
+ ln = len(p)
+ psdhdr = struct.pack("!4s4sHH",
+ inet_aton(self.underlayer.src),
+ inet_aton(self.underlayer.dst),
+ self.underlayer.proto,
+ ln)
+ ck=checksum(psdhdr+p)
+ p = p[:6]+struct.pack("!H", ck)+p[8:]
+ elif isinstance(self.underlayer, scapy.layers.inet6.IPv6) or isinstance(self.underlayer, scapy.layers.inet6._IPv6ExtHdr):
+ ck = scapy.layers.inet6.in6_chksum(socket.IPPROTO_UDP, self.underlayer, p)
+ p = p[:6]+struct.pack("!H", ck)+p[8:]
+ else:
+ warning("No IP underlayer to compute checksum. Leaving null.")
+ return p
+ def extract_padding(self, s):
+ l = self.len - 8
+ return s[:l],s[l:]
+ def hashret(self):
+ return self.payload.hashret()
+ def answers(self, other):
+ if not isinstance(other, UDP):
+ return 0
+ if conf.checkIPsrc:
+ if self.dport != other.sport:
+ return 0
+ return self.payload.answers(other.payload)
+ def mysummary(self):
+ if isinstance(self.underlayer, IP):
+ return self.underlayer.sprintf("UDP %IP.src%:%UDP.sport% > %IP.dst%:%UDP.dport%")
+ elif isinstance(self.underlayer, scapy.layers.inet6.IPv6):
+ return self.underlayer.sprintf("UDP %IPv6.src%:%UDP.sport% > %IPv6.dst%:%UDP.dport%")
+ else:
+ return self.sprintf("UDP %UDP.sport% > %UDP.dport%")
+
+icmptypes = { 0 : "echo-reply",
+ 3 : "dest-unreach",
+ 4 : "source-quench",
+ 5 : "redirect",
+ 8 : "echo-request",
+ 9 : "router-advertisement",
+ 10 : "router-solicitation",
+ 11 : "time-exceeded",
+ 12 : "parameter-problem",
+ 13 : "timestamp-request",
+ 14 : "timestamp-reply",
+ 15 : "information-request",
+ 16 : "information-response",
+ 17 : "address-mask-request",
+ 18 : "address-mask-reply" }
+
+icmpcodes = { 3 : { 0 : "network-unreachable",
+ 1 : "host-unreachable",
+ 2 : "protocol-unreachable",
+ 3 : "port-unreachable",
+ 4 : "fragmentation-needed",
+ 5 : "source-route-failed",
+ 6 : "network-unknown",
+ 7 : "host-unknown",
+ 9 : "network-prohibited",
+ 10 : "host-prohibited",
+ 11 : "TOS-network-unreachable",
+ 12 : "TOS-host-unreachable",
+ 13 : "communication-prohibited",
+ 14 : "host-precedence-violation",
+ 15 : "precedence-cutoff", },
+ 5 : { 0 : "network-redirect",
+ 1 : "host-redirect",
+ 2 : "TOS-network-redirect",
+ 3 : "TOS-host-redirect", },
+ 11 : { 0 : "ttl-zero-during-transit",
+ 1 : "ttl-zero-during-reassembly", },
+ 12 : { 0 : "ip-header-bad",
+ 1 : "required-option-missing", }, }
+
+
+
+
+class ICMP(Packet):
+ name = "ICMP"
+ fields_desc = [ ByteEnumField("type",8, icmptypes),
+ MultiEnumField("code",0, icmpcodes, depends_on=lambda pkt:pkt.type,fmt="B"),
+ XShortField("chksum", None),
+ ConditionalField(XShortField("id",0), lambda pkt:pkt.type in [0,8,13,14,15,16,17,18]),
+ ConditionalField(XShortField("seq",0), lambda pkt:pkt.type in [0,8,13,14,15,16,17,18]),
+ ConditionalField(ICMPTimeStampField("ts_ori", None), lambda pkt:pkt.type in [13,14]),
+ ConditionalField(ICMPTimeStampField("ts_rx", None), lambda pkt:pkt.type in [13,14]),
+ ConditionalField(ICMPTimeStampField("ts_tx", None), lambda pkt:pkt.type in [13,14]),
+ ConditionalField(IPField("gw","0.0.0.0"), lambda pkt:pkt.type==5),
+ ConditionalField(ByteField("ptr",0), lambda pkt:pkt.type==12),
+ ConditionalField(X3BytesField("reserved",0), lambda pkt:pkt.type==12),
+ ConditionalField(IPField("addr_mask","0.0.0.0"), lambda pkt:pkt.type in [17,18]),
+ ConditionalField(IntField("unused",0), lambda pkt:pkt.type not in [0,5,8,12,13,14,15,16,17,18]),
+
+ ]
+ def post_build(self, p, pay):
+ p += pay
+ if self.chksum is None:
+ ck = checksum(p)
+ p = p[:2]+bytes([ck>>8, ck&0xff])+p[4:]
+ return p
+
+ def hashret(self):
+ if self.type in [0,8,13,14,15,16,17,18]:
+ return struct.pack("HH",self.id,self.seq)+self.payload.hashret()
+ return self.payload.hashret()
+ def answers(self, other):
+ if not isinstance(other,ICMP):
+ return 0
+ if ( (other.type,self.type) in [(8,0),(13,14),(15,16),(17,18)] and
+ self.id == other.id and
+ self.seq == other.seq ):
+ return 1
+ return 0
+
+ def guess_payload_class(self, payload):
+ if self.type in [3,4,5,11,12]:
+ return IPerror
+ else:
+ return None
+ def mysummary(self):
+ if isinstance(self.underlayer, IP):
+ return self.underlayer.sprintf("ICMP %IP.src% > %IP.dst% %ICMP.type% %ICMP.code%")
+ else:
+ return self.sprintf("ICMP %ICMP.type% %ICMP.code%")
+
+
+
+
+
+class IPerror(IP):
+ name = "IP in ICMP"
+ def answers(self, other):
+ if not isinstance(other, IP):
+ return 0
+ if not ( ((conf.checkIPsrc == 0) or (self.dst == other.dst)) and
+ (self.src == other.src) and
+ ( ((conf.checkIPID == 0)
+ or (self.id == other.id)
+ or (conf.checkIPID == 1 and self.id == socket.htons(other.id)))) and
+ (self.proto == other.proto) ):
+ return 0
+ return self.payload.answers(other.payload)
+ def mysummary(self):
+ return Packet.mysummary(self)
+
+
+class TCPerror(TCP):
+ fields_desc = [ ShortEnumField("sport", 20, TCP_SERVICES),
+ ShortEnumField("dport", 80, TCP_SERVICES),
+ IntField("seq", 0) ]
+ name = "TCP in ICMP"
+ def post_build(self, p, pay):
+ p += pay
+ return p
+ def answers(self, other):
+ if not isinstance(other, TCP):
+ return 0
+ if conf.checkIPsrc:
+ if not ((self.sport == other.sport) and
+ (self.dport == other.dport)):
+ return 0
+ if conf.check_TCPerror_seqack:
+ if self.seq is not None:
+ if self.seq != other.seq:
+ return 0
+ if self.ack is not None:
+ if self.ack != other.ack:
+ return 0
+ return 1
+ def mysummary(self):
+ return Packet.mysummary(self)
+
+
+class UDPerror(UDP):
+ name = "UDP in ICMP"
+ def answers(self, other):
+ if not isinstance(other, UDP):
+ return 0
+ if conf.checkIPsrc:
+ if not ((self.sport == other.sport) and
+ (self.dport == other.dport)):
+ return 0
+ return 1
+ def mysummary(self):
+ return Packet.mysummary(self)
+
+
+
+class ICMPerror(ICMP):
+ name = "ICMP in ICMP"
+ def answers(self, other):
+ if not isinstance(other,ICMP):
+ return 0
+ if not ((self.type == other.type) and
+ (self.code == other.code)):
+ return 0
+ if self.code in [0,8,13,14,17,18]:
+ if (self.id == other.id and
+ self.seq == other.seq):
+ return 1
+ else:
+ return 0
+ else:
+ return 1
+ def mysummary(self):
+ return Packet.mysummary(self)
+
+bind_layers( Ether, IP, type=2048)
+bind_layers( CookedLinux, IP, proto=2048)
+bind_layers( GRE, IP, proto=2048)
+bind_layers( SNAP, IP, code=2048)
+bind_layers( IPerror, IPerror, frag=0, proto=4)
+bind_layers( IPerror, ICMPerror, frag=0, proto=1)
+bind_layers( IPerror, TCPerror, frag=0, proto=6)
+bind_layers( IPerror, UDPerror, frag=0, proto=17)
+bind_layers( IP, IP, frag=0, proto=4)
+bind_layers( IP, ICMP, frag=0, proto=1)
+bind_layers( IP, TCP, frag=0, proto=6)
+bind_layers( IP, UDP, frag=0, proto=17)
+bind_layers( IP, GRE, frag=0, proto=47)
+
+conf.l2types.register(101, IP)
+conf.l2types.register_num2layer(12, IP)
+
+conf.l3types.register(ETH_P_IP, IP)
+conf.l3types.register_num2layer(ETH_P_ALL, IP)
+
+
+conf.neighbor.register_l3(Ether, IP, lambda l2,l3: getmacbyip(l3.dst))
+conf.neighbor.register_l3(Dot3, IP, lambda l2,l3: getmacbyip(l3.dst))
+
+
+###################
+## Fragmentation ##
+###################
+
+@conf.commands.register
+def fragment(pkt, fragsize=1480):
+ """Fragment a big IP datagram"""
+ fragsize = (fragsize+7)//8*8
+ lst = []
+ for p in pkt:
+ s = bytes(p[IP].payload)
+ nb = (len(s)+fragsize-1)//fragsize
+ for i in range(nb):
+ q = p.copy()
+ del(q[IP].payload)
+ del(q[IP].chksum)
+ del(q[IP].len)
+ if i == nb-1:
+ q[IP].flags &= ~1
+ else:
+ q[IP].flags |= 1
+ q[IP].frag = i*fragsize//8
+ r = conf.raw_layer(load=s[i*fragsize:(i+1)*fragsize])
+ r.overload_fields = p[IP].payload.overload_fields.copy()
+ q.add_payload(r)
+ lst.append(q)
+ return lst
+
+def overlap_frag(p, overlap, fragsize=8, overlap_fragsize=None):
+ if overlap_fragsize is None:
+ overlap_fragsize = fragsize
+ q = p.copy()
+ del(q[IP].payload)
+ q[IP].add_payload(overlap)
+
+ qfrag = fragment(q, overlap_fragsize)
+ qfrag[-1][IP].flags |= 1
+ return qfrag+fragment(p, fragsize)
+
+@conf.commands.register
+def defrag(plist):
+ """defrag(plist) -> ([not fragmented], [defragmented],
+ [ [bad fragments], [bad fragments], ... ])"""
+ frags = defaultdict(PacketList)
+ nofrag = PacketList()
+ for p in plist:
+ ip = p[IP]
+ if IP not in p:
+ nofrag.append(p)
+ continue
+ if ip.frag == 0 and ip.flags & 1 == 0:
+ nofrag.append(p)
+ continue
+ uniq = (ip.id,ip.src,ip.dst,ip.proto)
+ frags[uniq].append(p)
+ defrag = []
+ missfrag = []
+ for lst in frags.values():
+ lst.sort(key=lambda x: x.frag)
+ p = lst[0]
+ lastp = lst[-1]
+ if p.frag > 0 or lastp.flags & 1 != 0: # first or last fragment missing
+ missfrag.append(lst)
+ continue
+ p = p.copy()
+ if conf.padding_layer in p:
+ del(p[conf.padding_layer].underlayer.payload)
+ ip = p[IP]
+ if ip.len is None or ip.ihl is None:
+ clen = len(ip.payload)
+ else:
+ clen = ip.len - (ip.ihl<<2)
+ txt = conf.raw_layer()
+ for q in lst[1:]:
+ if clen != q.frag<<3: # Wrong fragmentation offset
+ if clen > q.frag<<3:
+ warning("Fragment overlap (%i > %i) %r || %r || %r" % (clen, q.frag<<3, p,txt,q))
+ missfrag.append(lst)
+ break
+ if q[IP].len is None or q[IP].ihl is None:
+ clen += len(q[IP].payload)
+ else:
+ clen += q[IP].len - (q[IP].ihl<<2)
+ if conf.padding_layer in q:
+ del(q[conf.padding_layer].underlayer.payload)
+ txt.add_payload(q[IP].payload.copy())
+ else:
+ ip.flags &= ~1 # !MF
+ del(ip.chksum)
+ del(ip.len)
+ p = p/txt
+ defrag.append(p)
+ defrag2=PacketList()
+ for p in defrag:
+ defrag2.append(p.__class__(bytes(p)))
+ return nofrag,defrag2,missfrag
+
+@conf.commands.register
+def defragment(plist):
+ """defragment(plist) -> plist defragmented as much as possible """
+ frags = defaultdict(lambda:[])
+ final = []
+
+ pos = 0
+ for p in plist:
+ p._defrag_pos = pos
+ pos += 1
+ if IP in p:
+ ip = p[IP]
+ if ip.frag != 0 or ip.flags & 1:
+ ip = p[IP]
+ uniq = (ip.id,ip.src,ip.dst,ip.proto)
+ frags[uniq].append(p)
+ continue
+ final.append(p)
+
+ defrag = []
+ missfrag = []
+ for lst in frags.values():
+ lst.sort(key=lambda x: x.frag)
+ p = lst[0]
+ lastp = lst[-1]
+ if p.frag > 0 or lastp.flags & 1 != 0: # first or last fragment missing
+ missfrag += lst
+ continue
+ p = p.copy()
+ if conf.padding_layer in p:
+ del(p[conf.padding_layer].underlayer.payload)
+ ip = p[IP]
+ if ip.len is None or ip.ihl is None:
+ clen = len(ip.payload)
+ else:
+ clen = ip.len - (ip.ihl<<2)
+ txt = conf.raw_layer()
+ for q in lst[1:]:
+ if clen != q.frag<<3: # Wrong fragmentation offset
+ if clen > q.frag<<3:
+ warning("Fragment overlap (%i > %i) %r || %r || %r" % (clen, q.frag<<3, p,txt,q))
+ missfrag += lst
+ break
+ if q[IP].len is None or q[IP].ihl is None:
+ clen += len(q[IP].payload)
+ else:
+ clen += q[IP].len - (q[IP].ihl<<2)
+ if conf.padding_layer in q:
+ del(q[conf.padding_layer].underlayer.payload)
+ txt.add_payload(q[IP].payload.copy())
+ else:
+ ip.flags &= ~1 # !MF
+ del(ip.chksum)
+ del(ip.len)
+ p = p/txt
+ p._defrag_pos = max(x._defrag_pos for x in lst)
+ defrag.append(p)
+ defrag2=[]
+ for p in defrag:
+ q = p.__class__(bytes(p))
+ q._defrag_pos = p._defrag_pos
+ defrag2.append(q)
+ final += defrag2
+ final += missfrag
+ final.sort(key=lambda x: x._defrag_pos)
+ for p in final:
+ del(p._defrag_pos)
+
+ if hasattr(plist, "listname"):
+ name = "Defragmented %s" % plist.listname
+ else:
+ name = "Defragmented"
+
+ return PacketList(final, name=name)
+
+
+
+### Add timeskew_graph() method to PacketList
+def _packetlist_timeskew_graph(self, ip, **kargs):
+ """Tries to graph the timeskew between the timestamps and real time for a given ip"""
+ res = map(lambda x: self._elt2pkt(x), self.res)
+ b = filter(lambda x:x.haslayer(IP) and x.getlayer(IP).src == ip and x.haslayer(TCP), res)
+ c = []
+ for p in b:
+ opts = p.getlayer(TCP).options
+ for o in opts:
+ if o[0] == "Timestamp":
+ c.append((p.time,o[1][0]))
+ if not c:
+ warning("No timestamps found in packet list")
+ return
+ #d = map(lambda (x,y): (x%2000,((x-c[0][0])-((y-c[0][1])/1000.0))),c)
+ d = map(lambda a: (a[0]%2000,((a[0]-c[0][0])-((a[1]-c[0][1])/1000.0))),c)
+ return plt.plot(d, **kargs)
+
+#PacketList.timeskew_graph = types.MethodType(_packetlist_timeskew_graph, None)
+
+
+### Create a new packet list
+class TracerouteResult(SndRcvList):
+ def __init__(self, res=None, name="Traceroute", stats=None):
+ PacketList.__init__(self, res, name, stats, vector_index = 1)
+ self.graphdef = None
+ self.graphASres = 0
+ self.padding = 0
+ self.hloc = None
+ self.nloc = None
+
+ def show(self):
+ #return self.make_table(lambda (s,r): (s.sprintf("%IP.dst%:{TCP:tcp%ir,TCP.dport%}{UDP:udp%ir,UDP.dport%}{ICMP:ICMP}"),
+ return self.make_table(lambda s,r: (s.sprintf("%IP.dst%:{TCP:tcp%ir,TCP.dport%}{UDP:udp%ir,UDP.dport%}{ICMP:ICMP}"),
+ s.ttl,
+ r.sprintf("%-15s,IP.src% {TCP:%TCP.flags%}{ICMP:%ir,ICMP.type%}")))
+
+
+ def get_trace(self):
+ raw_trace = {}
+ for s,r in self.res:
+ if IP not in s:
+ continue
+ d = s[IP].dst
+ if d not in raw_trace:
+ raw_trace[d] = {}
+ raw_trace[d][s[IP].ttl] = r[IP].src, ICMP not in r
+
+ trace = {}
+ for k in raw_trace.keys():
+ m = [ x for x in raw_trace[k].keys() if raw_trace[k][x][1] ]
+ if not m:
+ trace[k] = raw_trace[k]
+ else:
+ m = min(m)
+ trace[k] = {i: raw_trace[k][i] for i in raw_trace[k].keys() if not raw_trace[k][i][1] or i<=m}
+
+ return trace
+
+ def trace3D(self):
+ """Give a 3D representation of the traceroute.
+ right button: rotate the scene
+ middle button: zoom
+ left button: move the scene
+ left button on a ball: toggle IP displaying
+ ctrl-left button on a ball: scan ports 21,22,23,25,80 and 443 and display the result"""
+ trace = self.get_trace()
+ import visual
+
+ class IPsphere(visual.sphere):
+ def __init__(self, ip, **kargs):
+ visual.sphere.__init__(self, **kargs)
+ self.ip=ip
+ self.label=None
+ self.setlabel(self.ip)
+ def setlabel(self, txt,visible=None):
+ if self.label is not None:
+ if visible is None:
+ visible = self.label.visible
+ self.label.visible = 0
+ elif visible is None:
+ visible=0
+ self.label=visual.label(text=txt, pos=self.pos, space=self.radius, xoffset=10, yoffset=20, visible=visible)
+ def action(self):
+ self.label.visible ^= 1
+
+ visual.scene = visual.display()
+ visual.scene.exit = True
+ start = visual.box()
+ rings={}
+ tr3d = {}
+ for i in trace:
+ tr = trace[i]
+ tr3d[i] = []
+ ttl = tr.keys()
+ for t in range(1,max(ttl)+1):
+ if t not in rings:
+ rings[t] = []
+ if t in tr:
+ if tr[t] not in rings[t]:
+ rings[t].append(tr[t])
+ tr3d[i].append(rings[t].index(tr[t]))
+ else:
+ rings[t].append(("unk",-1))
+ tr3d[i].append(len(rings[t])-1)
+ for t in rings:
+ r = rings[t]
+ l = len(r)
+ for i in range(l):
+ if r[i][1] == -1:
+ col = (0.75,0.75,0.75)
+ elif r[i][1]:
+ col = visual.color.green
+ else:
+ col = visual.color.blue
+
+ s = IPsphere(pos=((l-1)*visual.cos(2*i*visual.pi/l),(l-1)*visual.sin(2*i*visual.pi/l),2*t),
+ ip = r[i][0],
+ color = col)
+ for trlst in tr3d.values():
+ if t <= len(trlst):
+ if trlst[t-1] == i:
+ trlst[t-1] = s
+ forecol = colgen(0.625, 0.4375, 0.25, 0.125)
+ for trlst in tr3d.values():
+ col = next(forecol)
+ start = (0,0,0)
+ for ip in trlst:
+ visual.cylinder(pos=start,axis=ip.pos-start,color=col,radius=0.2)
+ start = ip.pos
+
+ movcenter=None
+ while 1:
+ visual.rate(50)
+ if visual.scene.kb.keys:
+ k = visual.scene.kb.getkey()
+ if k == "esc" or k == "q":
+ break
+ if visual.scene.mouse.events:
+ ev = visual.scene.mouse.getevent()
+ if ev.press == "left":
+ o = ev.pick
+ if o:
+ if ev.ctrl:
+ if o.ip == "unk":
+ continue
+ savcolor = o.color
+ o.color = (1,0,0)
+ a,b=sr(IP(dst=o.ip)/TCP(dport=[21,22,23,25,80,443]),timeout=2)
+ o.color = savcolor
+ if len(a) == 0:
+ txt = "%s:\nno results" % o.ip
+ else:
+ txt = "%s:\n" % o.ip
+ for s,r in a:
+ txt += r.sprintf("{TCP:%IP.src%:%TCP.sport% %TCP.flags%}{TCPerror:%IPerror.dst%:%TCPerror.dport% %IP.src% %ir,ICMP.type%}\n")
+ o.setlabel(txt, visible=1)
+ else:
+ if hasattr(o, "action"):
+ o.action()
+ elif ev.drag == "left":
+ movcenter = ev.pos
+ elif ev.drop == "left":
+ movcenter = None
+ if movcenter:
+ visual.scene.center -= visual.scene.mouse.pos-movcenter
+ movcenter = visual.scene.mouse.pos
+
+## world_trace needs to be reimplemented as gnuplot dependency is removed
+# def world_trace(self):
+# from modules.geo import locate_ip
+# ips = {}
+# rt = {}
+# ports_done = {}
+# for s,r in self.res:
+# ips[r.src] = None
+# if s.haslayer(TCP) or s.haslayer(UDP):
+# trace_id = (s.src,s.dst,s.proto,s.dport)
+# elif s.haslayer(ICMP):
+# trace_id = (s.src,s.dst,s.proto,s.type)
+# else:
+# trace_id = (s.src,s.dst,s.proto,0)
+# trace = rt.get(trace_id,{})
+# if not r.haslayer(ICMP) or r.type != 11:
+# if trace_id in ports_done:
+# continue
+# ports_done[trace_id] = None
+# trace[s.ttl] = r.src
+# rt[trace_id] = trace
+#
+# trt = {}
+# for trace_id in rt:
+# trace = rt[trace_id]
+# loctrace = []
+# for i in range(max(trace.keys())):
+# ip = trace.get(i,None)
+# if ip is None:
+# continue
+# loc = locate_ip(ip)
+# if loc is None:
+# continue
+## loctrace.append((ip,loc)) # no labels yet
+# loctrace.append(loc)
+# if loctrace:
+# trt[trace_id] = loctrace
+#
+# tr = map(lambda x: Gnuplot.Data(x,with_="lines"), trt.values())
+# g = Gnuplot.Gnuplot()
+# world = Gnuplot.File(conf.gnuplot_world,with_="lines")
+# g.plot(world,*tr)
+# return g
+
    def make_graph(self,ASres=None,padding=0):
        # Build a graphviz "dot" source string describing this traceroute
        # result and store it in self.graphdef.
        #   ASres   : AS resolver used to cluster hops by AS (None => no AS info)
        #   padding : if true, mark hosts whose replies echo non-zero padding
        if ASres is None:
            ASres = conf.AS_resolver
        self.graphASres = ASres
        self.graphpadding = padding
        ips = {}          # every responding IP seen (used as a set)
        rt = {}           # trace_id -> {ttl: node label}
        ports = {}        # endpoint IP -> list of port/record labels
        ports_done = {}   # trace_ids that already reached an endpoint
        for s,r in self.res:
            # normalize to the network layer (IPv4 or, if enabled, IPv6)
            r = r.getlayer(IP) or (conf.ipv6_enabled and r[scapy.layers.inet6.IPv6]) or r
            s = s.getlayer(IP) or (conf.ipv6_enabled and s[scapy.layers.inet6.IPv6]) or s
            ips[r.src] = None
            # a "trace" is identified by (src, dst, proto, port-or-type)
            if TCP in s:
                trace_id = (s.src,s.dst,6,s.dport)
            elif UDP in s:
                trace_id = (s.src,s.dst,17,s.dport)
            elif ICMP in s:
                trace_id = (s.src,s.dst,1,s.type)
            else:
                trace_id = (s.src,s.dst,s.proto,0)
            trace = rt.get(trace_id,{})
            ttl = conf.ipv6_enabled and scapy.layers.inet6.IPv6 in s and s.hlim or s.ttl
            # non-time-exceeded reply => we reached the endpoint itself
            if not (ICMP in r and r[ICMP].type == 11) and not (conf.ipv6_enabled and scapy.layers.inet6.IPv6 in r and scapy.layers.inet6.ICMPv6TimeExceeded in r):
                if trace_id in ports_done:
                    continue
                ports_done[trace_id] = None
                p = ports.get(r.src,[])
                if TCP in r:
                    p.append(r.sprintf("<T%ir,TCP.sport%> %TCP.sport% %TCP.flags%"))
                    trace[ttl] = r.sprintf('"%r,src%":T%ir,TCP.sport%')
                elif UDP in r:
                    p.append(r.sprintf("<U%ir,UDP.sport%> %UDP.sport%"))
                    trace[ttl] = r.sprintf('"%r,src%":U%ir,UDP.sport%')
                elif ICMP in r:
                    p.append(r.sprintf("<I%ir,ICMP.type%> ICMP %ICMP.type%"))
                    trace[ttl] = r.sprintf('"%r,src%":I%ir,ICMP.type%')
                else:
                    p.append(r.sprintf("{IP:<P%ir,proto%> IP %proto%}{IPv6:<P%ir,nh%> IPv6 %nh%}"))
                    trace[ttl] = r.sprintf('"%r,src%":{IP:P%ir,proto%}{IPv6:P%ir,nh%}')
                ports[r.src] = p
            else:
                # intermediate hop
                trace[ttl] = r.sprintf('"%r,src%"')
            rt[trace_id] = trace

        # Fill holes with unk%i nodes
        unknown_label = incremental_label("unk%i")
        blackholes = []
        bhip = {}
        for rtk in rt:
            trace = rt[rtk]
            k = trace.keys()
            for n in range(min(k), max(k)):
                if not n in trace:
                    trace[n] = next(unknown_label)
            if not rtk in ports_done:
                # trace never reached the endpoint: append a "blackhole" node
                if rtk[2] == 1: #ICMP
                    bh = "%s %i/icmp" % (rtk[1],rtk[3])
                elif rtk[2] == 6: #TCP
                    bh = "%s %i/tcp" % (rtk[1],rtk[3])
                elif rtk[2] == 17: #UDP
                    bh = '%s %i/udp' % (rtk[1],rtk[3])
                else:
                    bh = '%s %i/proto' % (rtk[1],rtk[2])
                ips[bh] = None
                bhip[rtk[1]] = bh
                bh = '"%s"' % bh
                trace[max(k)+1] = bh
                blackholes.append(bh)

        # Find AS numbers
        ASN_query_list = dict.fromkeys(map(lambda x:x.rsplit(" ",1)[0],ips)).keys()
        if ASres is None:
            ASNlist = []
        else:
            ASNlist = ASres.resolve(*ASN_query_list)

        ASNs = {}   # asn -> list of node names in that AS
        ASDs = {}   # asn -> description
        for ip,asn,desc, in ASNlist:
            if asn is None:
                continue
            iplist = ASNs.get(asn,[])
            if ip in bhip:
                # keep the blackhole pseudo-node inside the same AS cluster
                if ip in ports:
                    iplist.append(ip)
                iplist.append(bhip[ip])
            else:
                iplist.append(ip)
            ASNs[asn] = iplist
            ASDs[asn] = desc


        backcolorlist=colgen("60","86","ba","ff")
        forecolorlist=colgen("a0","70","40","20")

        s = "digraph trace {\n"

        s += "\n\tnode [shape=ellipse,color=black,style=solid];\n\n"

        s += "\n#ASN clustering\n"
        for asn in ASNs:
            s += '\tsubgraph cluster_%s {\n' % asn
            col = next(backcolorlist)
            s += '\t\tcolor="#%s%s%s";' % col
            s += '\t\tnode [fillcolor="#%s%s%s",style=filled];' % col
            s += '\t\tfontsize = 10;'
            s += '\t\tlabel = "%s\\n[%s]"\n' % (asn,ASDs[asn])
            for ip in ASNs[asn]:

                s += '\t\t"%s";\n'%ip
            s += "\t}\n"




        s += "#endpoints\n"
        for p in ports:
            s += '\t"%s" [shape=record,color=black,fillcolor=green,style=filled,label="%s|%s"];\n' % (p,p,"|".join(ports[p]))

        s += "\n#Blackholes\n"
        for bh in blackholes:
            s += '\t%s [shape=octagon,color=black,fillcolor=red,style=filled];\n' % bh

        if padding:
            s += "\n#Padding\n"
            pad={}
            for snd,rcv in self.res:
                if rcv.src not in ports and rcv.haslayer(conf.padding_layer):
                    p = rcv.getlayer(conf.padding_layer).load
                    if p != "\x00"*len(p):
                        pad[rcv.src]=None
            for rcv in pad:
                s += '\t"%s" [shape=triangle,color=black,fillcolor=red,style=filled];\n' % rcv



        s += "\n\tnode [shape=ellipse,color=black,style=solid];\n\n"

        # one colored edge chain per trace
        for rtk in rt:
            s += "#---[%s\n" % repr(rtk)
            s += '\t\tedge [color="#%s%s%s"];\n' % next(forecolorlist)
            trace = rt[rtk]
            k = trace.keys()
            for n in range(min(k), max(k)):
                s += '\t%s ->\n' % trace[n]
            s += '\t%s;\n' % trace[max(k)]

        s += "}\n";
        self.graphdef = s
+
+ def graph(self, ASres=None, padding=0, **kargs):
+ """x.graph(ASres=conf.AS_resolver, other args):
+ ASres=None : no AS resolver => no clustering
+ ASres=AS_resolver() : default whois AS resolver (riswhois.ripe.net)
+ ASres=AS_resolver_cymru(): use whois.cymru.com whois database
+ ASres=AS_resolver(server="whois.ra.net")
+ format: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option
+ figsize: w,h tuple in inches. See matplotlib documentation
+ target: filename. If None uses matplotlib to display
+ prog: which graphviz program to use"""
+ if ASres is None:
+ ASres = conf.AS_resolver
+ if (self.graphdef is None or
+ self.graphASres != ASres or
+ self.graphpadding != padding):
+ self.make_graph(ASres,padding)
+
+ return do_graph(self.graphdef, **kargs)
+
+
+
@conf.commands.register
def traceroute(target, dport=80, minttl=1, maxttl=30, sport=RandShort(), l4 = None, filter=None, timeout=2, verbose=None, **kargs):
    """Instant TCP traceroute
traceroute(target, [maxttl=30,] [dport=80,] [sport=80,] [verbose=conf.verb]) -> None
"""
    if verbose is None:
        verbose = conf.verb
    if l4 is None:
        if filter is None:
            # we only consider ICMP error packets and TCP packets with at
            # least the ACK flag set *and* either the SYN or the RST flag
            # set
            filter="(icmp and (icmp[0]=3 or icmp[0]=4 or icmp[0]=5 or icmp[0]=11 or icmp[0]=12)) or (tcp and (tcp[13] & 0x16 > 0x10))"
        probe_payload = TCP(seq=RandInt(), sport=sport, dport=dport)
    else:
        # this should always work
        filter = "ip"
        probe_payload = l4
    a, b = sr(IP(dst=target, id=RandShort(), ttl=(minttl, maxttl)) / probe_payload,
              timeout=timeout, filter=filter, verbose=verbose, **kargs)

    a = TracerouteResult(a.res)

    if verbose:
        a.show()
    return a, b
+
+
+
+#############################
+## Simple TCP client stack ##
+#############################
+
class TCP_client(Automaton):
    # Minimal TCP client stack implemented as a scapy Automaton:
    # START -> SYN_SENT -> ESTABLISHED -> LAST_ACK -> CLOSED.
    # Exposed as a supersocket via the "tcplink" ioevent below.

    def parse_args(self, ip, port, *args, **kargs):
        # ip/port: peer to connect to; remaining kargs go to Automaton.
        self.dst = next(iter(Net(ip)))
        self.dport = port
        self.sport = random.randrange(0,2**16)
        self.l4 = IP(dst=ip)/TCP(sport=self.sport, dport=self.dport, flags=0,
                                 seq=random.randrange(0,2**32))
        self.src = self.l4.src
        self.swin=self.l4[TCP].window
        self.dwin=1
        # BUG FIX: the receive buffer accumulates bytes extracted from packet
        # payloads (see receive_data); under Python 3 it must be bytes, not
        # str, or the first `self.rcvbuf += data` raises TypeError.
        self.rcvbuf = b""
        bpf = "host %s and host %s and port %i and port %i" % (self.src,
                                                               self.dst,
                                                               self.sport,
                                                               self.dport)

#        bpf=None
        Automaton.parse_args(self, filter=bpf, **kargs)


    def master_filter(self, pkt):
        # Accept only packets of this connection with sane seq/ack numbers.
        return (IP in pkt and
                pkt[IP].src == self.dst and
                pkt[IP].dst == self.src and
                TCP in pkt and
                pkt[TCP].sport == self.dport and
                pkt[TCP].dport == self.sport and
                self.l4[TCP].seq >= pkt[TCP].ack and # XXX: seq/ack 2^32 wrap up
                ((self.l4[TCP].ack == 0) or (self.l4[TCP].ack <= pkt[TCP].seq <= self.l4[TCP].ack+self.swin)) )


    @ATMT.state(initial=1)
    def START(self):
        pass

    @ATMT.state()
    def SYN_SENT(self):
        pass

    @ATMT.state()
    def ESTABLISHED(self):
        pass

    @ATMT.state()
    def LAST_ACK(self):
        pass

    @ATMT.state(final=1)
    def CLOSED(self):
        pass


    @ATMT.condition(START)
    def connect(self):
        raise self.SYN_SENT()
    @ATMT.action(connect)
    def send_syn(self):
        self.l4[TCP].flags = "S"
        self.send(self.l4)
        self.l4[TCP].seq += 1


    @ATMT.receive_condition(SYN_SENT)
    def synack_received(self, pkt):
        # SYN+ACK (and nothing else among the low 6 flag bits)
        if pkt[TCP].flags & 0x3f == 0x12:
            raise self.ESTABLISHED().action_parameters(pkt)
    @ATMT.action(synack_received)
    def send_ack_of_synack(self, pkt):
        self.l4[TCP].ack = pkt[TCP].seq+1
        self.l4[TCP].flags = "A"
        self.send(self.l4)

    @ATMT.receive_condition(ESTABLISHED)
    def incoming_data_received(self, pkt):
        if not isinstance(pkt[TCP].payload, NoPayload) and not isinstance(pkt[TCP].payload, conf.padding_layer):
            raise self.ESTABLISHED().action_parameters(pkt)
    @ATMT.action(incoming_data_received)
    def receive_data(self,pkt):
        # Ack in-order data, buffer it, and flush to the ioevent pipe on PUSH.
        data = (bytes(pkt[TCP].payload))
        if data and self.l4[TCP].ack == pkt[TCP].seq:
            self.l4[TCP].ack += len(data)
            self.l4[TCP].flags = "A"
            self.send(self.l4)
            self.rcvbuf += data
            if pkt[TCP].flags & 8 != 0: #PUSH
                self.oi.tcp.send(self.rcvbuf)
                self.rcvbuf = b""

    @ATMT.ioevent(ESTABLISHED,name="tcp", as_supersocket="tcplink")
    def outgoing_data_received(self, fd):
        raise self.ESTABLISHED().action_parameters(fd.recv())
    @ATMT.action(outgoing_data_received)
    def send_data(self, d):
        self.l4[TCP].flags = "PA"
        self.send(self.l4/d)
        self.l4[TCP].seq += len(d)


    @ATMT.receive_condition(ESTABLISHED)
    def reset_received(self, pkt):
        if pkt[TCP].flags & 4 != 0:   # RST
            raise self.CLOSED()

    @ATMT.receive_condition(ESTABLISHED)
    def fin_received(self, pkt):
        if pkt[TCP].flags & 0x1 == 1:   # FIN
            raise self.LAST_ACK().action_parameters(pkt)
    @ATMT.action(fin_received)
    def send_finack(self, pkt):
        self.l4[TCP].flags = "FA"
        self.l4[TCP].ack = pkt[TCP].seq+1
        self.send(self.l4)
        self.l4[TCP].seq += 1

    @ATMT.receive_condition(LAST_ACK)
    def ack_of_fin_received(self, pkt):
        if pkt[TCP].flags & 0x3f == 0x10:   # bare ACK
            raise self.CLOSED()
+
+
+
+
+#####################
+## Reporting stuff ##
+#####################
+
def report_ports(target, ports):
    """portscan a target and output a LaTeX table
report_ports(target, ports) -> string"""
    ans, unans = sr(IP(dst=target)/TCP(dport=ports), timeout=5)
    pieces = ["\\begin{tabular}{|r|l|l|}\n\\hline\n"]
    # open ports (SYN+ACK answers)
    for snd, rcv in ans:
        if not rcv.haslayer(ICMP) and rcv.payload.flags == 0x12:
            pieces.append(rcv.sprintf("%TCP.sport% & open & SA \\\\\n"))
    pieces.append("\\hline\n")
    # closed ports (ICMP errors or non-SYN+ACK TCP answers)
    for snd, rcv in ans:
        if rcv.haslayer(ICMP):
            pieces.append(rcv.sprintf("%TCPerror.dport% & closed & ICMP type %ICMP.type%/%ICMP.code% from %IP.src% \\\\\n"))
        elif rcv.payload.flags != 0x12:
            pieces.append(rcv.sprintf("%TCP.sport% & closed & TCP %TCP.flags% \\\\\n"))
    pieces.append("\\hline\n")
    # unanswered probes
    for probe in unans:
        pieces.append(probe.sprintf("%TCP.dport% & ? & unanswered \\\\\n"))
    pieces.append("\\hline\n\\end{tabular}\n")
    return "".join(pieces)
+
+
+
def IPID_count(lst, funcID=lambda x:x[1].id, funcpres=lambda x:x[1].summary()):
    """Print a rough classification of the IP ids seen in `lst`.

    lst      : list of (sent, received) packet pairs (or anything funcID accepts)
    funcID   : extracts the numeric id from an element (default: received IP id)
    funcpres : extracts a printable summary from an element

    Consecutive sorted ids more than 50 apart are taken as separate
    id-generation classes. Output is printed; returns None.
    """
    # BUG FIX: map() returns an iterator in Python 3 and has no .sort();
    # the original `idlst = map(...); idlst.sort()` raised AttributeError.
    idlst = sorted(map(funcID, lst))
    if not idlst:
        print("Probably 0 classes:", [])
        return
    # a gap > 50 between consecutive sorted ids starts a new class
    classes = [idlst[0]] + [b for a, b in zip(idlst[:-1], idlst[1:]) if abs(a - b) > 50]
    entries = sorted((funcID(x), funcpres(x)) for x in lst)
    print("Probably %i classes:" % len(classes), classes)
    for id, pr in entries:
        print("%5i" % id, pr)
+
+
def fragleak(target,sport=123, dport=123, timeout=0.2, onlyasc=0):
    # Probe `target` for kernel memory leaks in IP fragment padding:
    # send a more-fragments UDP datagram with 40 bytes of IP options and
    # hexdump any previously-unseen padding bytes echoed back.
    # NOTE(review): the `dport` parameter is unused -- the probe below uses
    # dport=sport; confirm whether that is intentional.
    load = "XXXXYYYYYYYYYY"
#    getmacbyip(target)
#    pkt = IP(dst=target, id=RandShort(), options="\x22"*40)/UDP()/load
    pkt = IP(dst=target, id=RandShort(), options="\x00"*40, flags=1)/UDP(sport=sport, dport=sport)/load
    s=conf.L3socket()
    intr=0
    found={}   # padding payloads already displayed (used as a set)
    try:
        while 1:
            try:
                if not intr:
                    s.send(pkt)
                sin,sout,serr = select([s],[],[],timeout)
                if not sin:
                    continue
                ans=s.recv(1600)
                if not isinstance(ans, IP): #TODO: IPv6
                    continue
                if not isinstance(ans.payload, ICMP):
                    continue
                if not isinstance(ans.payload.payload, IPerror):
                    continue
                if ans.payload.payload.dst != target:
                    continue
                if ans.src  != target:
                    print("leak from", ans.src,end=" ")


#                print repr(ans)
                if not ans.haslayer(conf.padding_layer):
                    continue


#                print repr(ans.payload.payload.payload.payload)

#                if not isinstance(ans.payload.payload.payload.payload, conf.raw_layer):
#                    continue
#                leak  = ans.payload.payload.payload.payload.load[len(load):]
                leak = ans.getlayer(conf.padding_layer).load
                if leak not in found:
                    found[leak]=None
                    linehexdump(leak, onlyasc=onlyasc)
            except KeyboardInterrupt:
                # first Ctrl-C stops sending but keeps listening; second raises
                if intr:
                    raise
                intr=1
    except KeyboardInterrupt:
        pass
+
def fragleak2(target, timeout=0.4, onlyasc=0):
    # Variant of fragleak(): probe with an unknown IP protocol (200) plus
    # 40 bytes of IP options, and hexdump any new padding leaked in replies.
    found={}   # padding payloads already displayed (used as a set)
    try:
        while 1:
            p = sr1(IP(dst=target, options="\x00"*40, proto=200)/"XXXXYYYYYYYYYYYY",timeout=timeout,verbose=0)
            if not p:
                continue
            if conf.padding_layer in p:
                leak = p[conf.padding_layer].load
                if leak not in found:
                    found[leak]=None
                    linehexdump(leak,onlyasc=onlyasc)
    except:
        # deliberately best-effort: any error (including Ctrl-C) just stops
        pass
+
+
+conf.stats_classic_protocols += [TCP,UDP,ICMP]
+conf.stats_dot11_protocols += [TCP,UDP,ICMP]
+
+if conf.ipv6_enabled:
+ import scapy.layers.inet6
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/inet6.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/inet6.py
new file mode 100644
index 00000000..c2e4a037
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/inet6.py
@@ -0,0 +1,3047 @@
+#! /usr/bin/env python
+#############################################################################
+## ##
+## inet6.py --- IPv6 support for Scapy ##
+## see http://natisbad.org/IPv6/ ##
+## for more informations ##
+## ##
+## Copyright (C) 2005 Guillaume Valadon <guedou@hongo.wide.ad.jp> ##
+## Arnaud Ebalard <arnaud.ebalard@eads.net> ##
+## ##
+## This program is free software; you can redistribute it and/or modify it ##
+## under the terms of the GNU General Public License version 2 as ##
+## published by the Free Software Foundation. ##
+## ##
+## This program is distributed in the hope that it will be useful, but ##
+## WITHOUT ANY WARRANTY; without even the implied warranty of ##
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ##
+## General Public License for more details. ##
+## ##
+#############################################################################
+
+"""
+IPv6 (Internet Protocol v6).
+"""
+
+
import socket
if not socket.has_ipv6:
    raise socket.error("can't use AF_INET6, IPv6 is disabled")
if not hasattr(socket, "IPPROTO_IPV6"):
    # Workaround for http://bugs.python.org/issue6926
    socket.IPPROTO_IPV6 = 41

if not hasattr(socket, "IPPROTO_IPIP"):
    socket.IPPROTO_IPIP = 4

# BUG FIX: the original checked "'IPPROTO_IPIP ' in globals()" with a
# trailing space inside the string, so the guard was always true.
if not ('IPPROTO_IPIP' in globals()):
    IPPROTO_IPIP=4
+
+
+
+from scapy.config import conf
+from scapy.layers.l2 import *
+from scapy.layers.inet import *
+from scapy.fields import *
+from scapy.packet import *
+from scapy.volatile import *
+from scapy.sendrecv import sr,sr1,srp1
+from scapy.as_resolvers import AS_resolver_riswhois
+from scapy.supersocket import SuperSocket,L3RawSocket
+from scapy.arch import *
+from scapy.utils6 import *
+
+
+#############################################################################
+# Helpers ##
+#############################################################################
+
def get_cls(name, fallback_cls):
    """Look up class `name` in this module's globals; return it if defined,
    otherwise return `fallback_cls`."""
    try:
        return globals()[name]
    except KeyError:
        return fallback_cls
+
+
+##########################
+## Neighbor cache stuff ##
+##########################
+
+conf.netcache.new_cache("in6_neighbor", 120)
+
def neighsol(addr, src, iface, timeout=1, chainCC=0):
    """
    Sends an ICMPv6 Neighbor Solicitation message to get the MAC address
    of the neighbor with specified IPv6 address addr. 'src' address is
    used as source of the message. Message is sent on iface. By default,
    timeout waiting for an answer is 1 second.

    If no answer is gathered, None is returned. Else, the answer is
    returned (ethernet frame).
    """

    # solicited-node multicast address and matching ethernet multicast MAC
    nsma = in6_getnsma(inet_pton(socket.AF_INET6, addr))
    d = inet_ntop(socket.AF_INET6, nsma)
    dm = in6_getnsmac(nsma)
    p = Ether(dst=dm)/IPv6(dst=d, src=src, hlim=255)
    p /= ICMPv6ND_NS(tgt=addr)
    p /= ICMPv6NDOptSrcLLAddr(lladdr=get_if_hwaddr(iface))
    # BUG FIX: the `timeout` parameter was ignored; srp1 was called with a
    # hard-coded timeout=1.
    res = srp1(p,type=ETH_P_IPV6, iface=iface, timeout=timeout, verbose=0,
               chainCC=chainCC)

    return res
+
def getmacbyip6(ip6, chainCC=0):
    """
    Returns the mac address to be used for provided 'ip6' peer.
    neighborCache.get() method is used on instantiated neighbor cache.
    Resolution mechanism is described in associated doc string.

    (chainCC parameter value ends up being passed to sending function
    used to perform the resolution, if needed)
    """

    if in6_ismaddr(ip6): # Multicast
        mac = in6_getnsmac(inet_pton(socket.AF_INET6, ip6))
        return mac

    iff,a,nh = conf.route6.route(ip6, dev=conf.iface6)

    if iff == LOOPBACK_NAME:
        return "ff:ff:ff:ff:ff:ff"

    if nh != '::':
        ip6 = nh # Found next hop

    # cached answer from a previous neighbor solicitation?
    mac = conf.netcache.in6_neighbor.get(ip6)
    if mac:
        return mac

    # no cache hit: perform an actual Neighbor Solicitation on the wire
    res = neighsol(ip6, a, iff, chainCC=chainCC)

    if res is not None:
        if ICMPv6NDOptDstLLAddr in res:
            mac = res[ICMPv6NDOptDstLLAddr].lladdr
        else:
            # no link-layer option: fall back to the frame's source MAC
            mac = res.src
        conf.netcache.in6_neighbor[ip6] = mac
        return mac

    return None
+
+
+#############################################################################
+#############################################################################
+### IPv6 addresses manipulation routines ###
+#############################################################################
+#############################################################################
+
class Net6(Gen): # syntax ex. fec0::/126
    """Generate a list of IPv6s from a network address or a name"""
    name = "ipv6"
    ipaddress = re.compile(r"^([a-fA-F0-9:]+)(/[1]?[0-3]?[0-9])?$")

    def __init__(self, net):
        self.repr = net

        tmp = net.split('/')+["128"]
        if not self.ipaddress.match(net):
            # not a literal address: resolve the name to an IPv6 address
            tmp[0]=socket.getaddrinfo(tmp[0], None, socket.AF_INET6)[0][-1][0]

        netmask = int(tmp[1])
        self.net = inet_pton(socket.AF_INET6, tmp[0])
        self.mask = in6_cidr2mask(netmask)
        self.plen = netmask

    def __iter__(self):
        # cumulative bit counts at each byte boundary of the 128-bit address
        byte_bounds = [x for x in range(8, 129) if x % 8 == 0]

        a = in6_and(self.net, self.mask)
        octets = list(struct.unpack('16B', a))

        def parse_digit(a, netmask):
            # (low, high+1) range of values this byte can take under the mask
            netmask = min(8,max(netmask,0))
            a = (int(a) & (0xff<<netmask),(int(a) | (0xff>>(8-netmask)))+1)
            return a

        # BUG FIX: map() returns an iterator in Python 3 and is not
        # subscriptable; rec() below indexes self.parsed[n], so this must be
        # materialized into a list.
        self.parsed = [parse_digit(x, y - self.plen)
                       for x, y in zip(octets, byte_bounds)]

        def rec(n, l):
            # recursively expand byte n into all its hex values, inserting
            # ':' every second byte
            if n and n % 2 == 0:
                sep = ':'
            else:
                sep = ''
            if n == 16:
                return l
            else:
                ll = []
                for i in range(*self.parsed[n]):
                    for y in l:
                        ll += [y+sep+'%.2x'%i]
                return rec(n+1, ll)

        return iter(rec(0, ['']))

    def __repr__(self):
        return "<Net6 %s>" % self.repr
+
+
+
+
+
+
+#############################################################################
+#############################################################################
+### IPv6 Class ###
+#############################################################################
+#############################################################################
+
class IP6Field(Field):
    # Packet field holding one IPv6 address, stored as a 16-byte string on
    # the wire.  Human values may be a literal address, a Net6 spec, or a
    # list (which h2i maps to Net6 instances).
    def __init__(self, name, default):
        Field.__init__(self, name, default, "16s")
    def h2i(self, pkt, x):
        # human -> internal: canonicalize literal text, else parse as Net6
        if type(x) is str:
            try:
                x = in6_ptop(x)
            except socket.error:
                x = Net6(x)
        elif type(x) is list:
            x = map(Net6, x)
        return x
    def i2m(self, pkt, x):
        # internal -> machine: packed 16-byte network form
        return inet_pton(socket.AF_INET6, x)
    def m2i(self, pkt, x):
        return inet_ntop(socket.AF_INET6, x)
    def any2i(self, pkt, x):
        return self.h2i(pkt,x)
    def i2repr(self, pkt, x):
        if x is None:
            return self.i2h(pkt,x)
        elif not isinstance(x, Net6) and not type(x) is list:
            if in6_isaddrTeredo(x):   # print Teredo info
                server, flag, maddr, mport = teredoAddrExtractInfo(x)
                return "%s [Teredo srv: %s cli: %s:%s]" % (self.i2h(pkt, x), server, maddr,mport)
            elif in6_isaddr6to4(x):   # print encapsulated address
                vaddr = in6_6to4ExtractAddr(x)
                return "%s [6to4 GW: %s]" % (self.i2h(pkt, x), vaddr)
        return self.i2h(pkt, x)       # No specific information to return
    def randval(self):
        return RandIP6()
+
class SourceIP6Field(IP6Field):
    # IP6Field whose value, when left as None, is filled in from the routing
    # table using the packet's destination field (`dstname`).
    def __init__(self, name, dstname):
        IP6Field.__init__(self, name, None)
        self.dstname = dstname
    def i2m(self, pkt, x):
        if x is None:
            # derive the source address from the route to the destination
            dst=getattr(pkt,self.dstname)
            iff,x,nh = conf.route6.route(dst)
        return IP6Field.i2m(self, pkt, x)
    def i2h(self, pkt, x):
        if x is None:
            dst=getattr(pkt,self.dstname)
            if isinstance(dst,Gen):
                # BUG FIX: map() returns an iterator in Python 3 and has no
                # .sort(); the original `r = map(...); r.sort()` raised
                # AttributeError.  Materialize and sort instead.
                r = sorted(map(conf.route6.route, dst))
                if r[0] == r[-1]:
                    x=r[0][1]
                else:
                    warning("More than one possible route for %s"%repr(dst))
                    return None
            else:
                iff,x,nh = conf.route6.route(dst)
        return IP6Field.i2h(self, pkt, x)
+
# IPv6 Next Header numbers -> human-readable protocol names (display only).
ipv6nh = { 0:"Hop-by-Hop Option Header",
           4:"IP",
           6:"TCP",
          17:"UDP",
          41:"IPv6",
          43:"Routing Header",
          44:"Fragment Header",
          47:"GRE",
          50:"ESP Header",
          51:"AH Header",
          58:"ICMPv6",
          59:"No Next Header",
          60:"Destination Option Header",
         135:"Mobility Header"}

# IPv6 Next Header numbers -> names of the classes used to dissect each
# header (resolved lazily via get_cls(), so missing classes fall back to Raw).
ipv6nhcls = {  0: "IPv6ExtHdrHopByHop",
               4: "IP",
               6: "TCP",
               17: "UDP",
               43: "IPv6ExtHdrRouting",
               44: "IPv6ExtHdrFragment",
              #50: "IPv6ExtHrESP",
              #51: "IPv6ExtHdrAH",
               58: "ICMPv6Unknown",
               59: "Raw",
               60: "IPv6ExtHdrDestOpt" }
+
class IP6ListField(StrField):
    # Field holding a list of IPv6 addresses (e.g. a routing header's address
    # list).  Length on the wire is 16 bytes per address; bounded either by a
    # byte length (length_from) or an address count (count_from).
    islist = 1
    def __init__(self, name, default, count_from=None, length_from=None):
        if default is None:
            default = []
        StrField.__init__(self, name, default)
        self.count_from = count_from
        self.length_from = length_from

    def i2len(self, pkt, i):
        return 16*len(i)

    def i2count(self, pkt, i):
        if type(i) is list:
            return len(i)
        return 0

    def getfield(self, pkt, s):
        # parse up to `l` bytes or `c` addresses from s; return (rest, list)
        c = l = None
        if self.length_from is not None:
            l = self.length_from(pkt)
        elif self.count_from is not None:
            c = self.count_from(pkt)

        lst = []
        ret = b""
        remain = s
        if l is not None:
            remain,ret = s[:l],s[l:]
        while remain:
            if c is not None:
                if c <= 0:
                    break
                c -= 1
            addr = inet_ntop(socket.AF_INET6, remain[:16])
            lst.append(addr)
            remain = remain[16:]
        return remain+ret,lst

    def i2m(self, pkt, x):
        s = b''
        for y in x:
            try:
                y = inet_pton(socket.AF_INET6, y)
            except:
                # not a literal address: resolve the name first
                y = socket.getaddrinfo(y, None, socket.AF_INET6)[0][-1][0]
                y = inet_pton(socket.AF_INET6, y)
            s += y
        return s

    def i2repr(self,pkt,x):
        s = []
        if x == None:
            return "[]"
        for y in x:
            s.append('%s' % y)
        return "[ %s ]" % (", ".join(s))
+
class _IPv6GuessPayload:
    name = "Dummy class that implements guess_payload_class() for IPv6"
    def default_payload_class(self,p):
        # Choose the dissector class for the payload based on our Next Header
        # value (self.nh) and the first bytes of the payload `p`.
        if self.nh == 58: # ICMPv6
            t = p[0]
            # BUG FIX: original condition was
            #   `len(p) > 2 and t == 139 or t == 140`
            # which, by precedence, let t == 140 bypass the length check and
            # could index a too-short payload in _niquery_guesser.
            if len(p) > 2 and (t == 139 or t == 140): # Node Info Query
                return _niquery_guesser(p)
            if len(p) >= icmp6typesminhdrlen.get(t, sys.maxsize): # Other ICMPv6 messages
                return get_cls(icmp6typescls.get(t,"Raw"), "Raw")
            return Raw
        elif self.nh == 135 and len(p) > 3: # Mobile IPv6
            return _mip6_mhtype2cls.get(p[2], MIP6MH_Generic)
        else:
            return get_cls(ipv6nhcls.get(self.nh,"Raw"), "Raw")
+
class IPv6(_IPv6GuessPayload, Packet, IPTools):
    # IPv6 base header (RFC 2460).
    name = "IPv6"
    fields_desc = [ BitField("version" , 6 , 4),
                    BitField("tc", 0, 8), #TODO: IPv6, ByteField ?
                    BitField("fl", 0, 20),
                    ShortField("plen", None),
                    ByteEnumField("nh", 59, ipv6nh),
                    ByteField("hlim", 64),
                    IP6Field("src", "::2"),
                    #SourceIP6Field("src", "dst"), # dst is for src @ selection
                    IP6Field("dst", "::1") ]

    def route(self):
        # Route lookup for this packet's destination (first element if dst
        # is a generator such as Net6).
        dst = self.dst
        if isinstance(dst,Gen):
            dst = next(iter(dst))
        return conf.route6.route(dst)

    def mysummary(self):
        return "%s > %s (%i)" % (self.src,self.dst, self.nh)

    def post_build(self, p, pay):
        # Fill in plen (payload length = total minus the 40-byte base header)
        # if the user left it as None.
        p += pay
        if self.plen is None:
            l = len(p) - 40
            p = p[:4]+struct.pack("!H", l)+p[6:]
        return p

    def extract_padding(self, s):
        l = self.plen
        return s[:l], s[l:]

    def hashret(self):
        # Hash used to match answers with stimuli; accounts for extension
        # headers so a reply hashes the same as the request it answers.
        if self.nh == 58 and isinstance(self.payload, _ICMPv6):
            if self.payload.type < 128:
                # ICMPv6 error: hash the quoted (erroneous) packet instead
                return self.payload.payload.hashret()
            elif (self.payload.type in [133,134,135,136,144,145]):
                # ND/MIPv6 messages: hash on next-header + payload
                return struct.pack("B", self.nh)+self.payload.hashret()

        nh = self.nh
        sd = self.dst
        ss = self.src
        if self.nh == 43 and isinstance(self.payload, IPv6ExtHdrRouting):
            # With routing header, the destination is the last
            # address of the IPv6 list if segleft > 0
            nh = self.payload.nh
            try:
                sd = self.addresses[-1]
            except IndexError:
                sd = '::1'
            # TODO: big bug with ICMPv6 error messages as the destination of IPerror6
            #       could be anything from the original list ...
            if 1:
                # XOR-fold all routing-header addresses into sd so the hash
                # is independent of how far the packet progressed
                sd = inet_pton(socket.AF_INET6, sd)
                for a in self.addresses:
                    a = inet_pton(socket.AF_INET6, a)
                    sd = strxor(sd, a)
                sd = inet_ntop(socket.AF_INET6, sd)

        if self.nh == 44 and isinstance(self.payload, IPv6ExtHdrFragment):
            nh = self.payload.nh

        if self.nh == 0 and isinstance(self.payload, IPv6ExtHdrHopByHop):
            nh = self.payload.nh

        if self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt):
            # Home Address Option: the real source is the home address
            foundhao = None
            for o in self.payload.options:
                if isinstance(o, HAO):
                    foundhao = o
            if foundhao:
                nh = self.payload.nh # XXX what if another extension follows ?
                ss = foundhao.hoa

        if conf.checkIPsrc and conf.checkIPaddr:
            sd = inet_pton(socket.AF_INET6, sd)
            ss = inet_pton(socket.AF_INET6, self.src)
            return struct.pack("B",nh)+self.payload.hashret()
        else:
            return struct.pack("B", nh)+self.payload.hashret()

    def answers(self, other):
        # True if self (the reply) answers other (the request).
        if not isinstance(other, IPv6): # self is reply, other is request
            return False
        if conf.checkIPaddr:
            ss = inet_pton(socket.AF_INET6, self.src)
            sd = inet_pton(socket.AF_INET6, self.dst)
            os = inet_pton(socket.AF_INET6, other.src)
            od = inet_pton(socket.AF_INET6, other.dst)
            # request was sent to a multicast address (other.dst)
            # Check reply destination addr matches request source addr (i.e
            # sd == os) except when reply is multicasted too
            # XXX test mcast scope matching ?
            if in6_ismaddr(other.dst):
                if in6_ismaddr(self.dst):
                    if ((od == sd) or
                        (in6_isaddrllallnodes(self.dst) and in6_isaddrllallservers(other.dst))):
                         return self.payload.answers(other.payload)
                    return False
                if (os == sd):
                    return self.payload.answers(other.payload)
                return False
            elif (sd != os): # or ss != od): <- removed for ICMP errors
                return False
        if self.nh == 58 and isinstance(self.payload, _ICMPv6) and self.payload.type < 128:
            # ICMPv6 Error message -> generated by IPv6 packet
            # Note : at the moment, we jump the ICMPv6 specific class
            # to call answers() method of erroneous packet (over
            # initial packet). There can be cases where an ICMPv6 error
            # class could implement a specific answers method that perform
            # a specific task. Currently, don't see any use ...
            return self.payload.payload.answers(other)
        elif other.nh == 0 and isinstance(other.payload, IPv6ExtHdrHopByHop):
            return self.payload.answers(other.payload.payload)
        elif other.nh == 44 and isinstance(other.payload, IPv6ExtHdrFragment):
            return self.payload.answers(other.payload.payload)
        elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrRouting):
            return self.payload.answers(other.payload.payload) # Buggy if self.payload is a IPv6ExtHdrRouting
        elif other.nh == 60 and isinstance(other.payload, IPv6ExtHdrDestOpt):
            return self.payload.payload.answers(other.payload.payload)
        elif self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt): # BU in reply to BRR, for instance
            return self.payload.payload.answers(other.payload)
        else:
            if (self.nh != other.nh):
                return False
            return self.payload.answers(other.payload)
+
+
+conf.neighbor.register_l3(Ether, IPv6, lambda l2,l3: getmacbyip6(l3.dst))
+
+
class IPerror6(IPv6):
    # IPv6 header as quoted inside an ICMPv6 error message.
    name = "IPv6 in ICMPv6"
    def answers(self, other):
        # True if the quoted packet (self) corresponds to the request we
        # sent (other).
        if not isinstance(other, IPv6):
            return False
        sd = inet_pton(socket.AF_INET6, self.dst)
        ss = inet_pton(socket.AF_INET6, self.src)
        od = inet_pton(socket.AF_INET6, other.dst)
        os = inet_pton(socket.AF_INET6, other.src)

        # Make sure that the ICMPv6 error is related to the packet scapy sent
        if isinstance(self.underlayer, _ICMPv6) and self.underlayer.type < 128:

            # find upper layer for self (possible citation)
            selfup = self.payload
            while selfup is not None and isinstance(selfup, _IPv6ExtHdr):
                selfup = selfup.payload

            # find upper layer for other (initial packet). Also look for RH
            otherup = other.payload
            request_has_rh = False
            while otherup is not None and isinstance(otherup, _IPv6ExtHdr):
                if isinstance(otherup, IPv6ExtHdrRouting):
                    request_has_rh = True
                otherup = otherup.payload

            if ((ss == os and sd == od) or      # <- Basic case
                (ss == os and request_has_rh)): # <- Request has a RH :
                                                #    don't check dst address

                # Let's deal with possible MSS Clamping
                if (isinstance(selfup, TCP) and
                    isinstance(otherup, TCP) and
                    selfup.options != otherup.options): # seems clamped

                    # Save fields modified by MSS clamping
                    old_otherup_opts    = otherup.options
                    old_otherup_cksum   = otherup.chksum
                    old_otherup_dataofs = otherup.dataofs
                    old_selfup_opts     = selfup.options
                    old_selfup_cksum    = selfup.chksum
                    old_selfup_dataofs  = selfup.dataofs

                    # Nullify them
                    otherup.options = []
                    otherup.chksum  = 0
                    otherup.dataofs = 0
                    selfup.options  = []
                    selfup.chksum   = 0
                    selfup.dataofs  = 0

                    # Test it and save result
                    s1 = bytes(selfup)
                    s2 = bytes(otherup)
                    l = min(len(s1), len(s2))
                    res = s1[:l] == s2[:l]

                    # recall saved values
                    otherup.options = old_otherup_opts
                    otherup.chksum  = old_otherup_cksum
                    otherup.dataofs = old_otherup_dataofs
                    selfup.options  = old_selfup_opts
                    selfup.chksum   = old_selfup_cksum
                    selfup.dataofs  = old_selfup_dataofs

                    return res

                # compare the common prefix of the quoted and sent upper layers
                s1 = bytes(selfup)
                s2 = bytes(otherup)
                l = min(len(s1), len(s2))
                return s1[:l] == s2[:l]

        return False

    def mysummary(self):
        return Packet.mysummary(self)
+
+
+#############################################################################
+#############################################################################
+### Upper Layer Checksum computation ###
+#############################################################################
+#############################################################################
+
class PseudoIPv6(Packet): # IPv6 Pseudo-header for checksum computation
    # RFC 2460 upper-layer checksum pseudo-header: src, dst, upper-layer
    # length, 24 zero bits, next-header value.  Only serialized, never sent.
    name = "Pseudo IPv6 Header"
    fields_desc = [ IP6Field("src", "::"),
                    IP6Field("dst", "::"),
                    ShortField("uplen", None),
                    BitField("zero", 0, 24),
                    ByteField("nh", 0) ]
+
+def in6_chksum(nh, u, p):
+ """
+ Performs IPv6 Upper Layer checksum computation. Provided parameters are:
+
+ - 'nh' : value of upper layer protocol
+ - 'u' : upper layer instance (TCP, UDP, ICMPv6*, ). Instance must be
+ provided with all under layers (IPv6 and all extension headers,
+ for example)
+ - 'p' : the payload of the upper layer provided as a string
+
+ Functions operate by filling a pseudo header class instance (PseudoIPv6)
+ with
+ - Next Header value
+ - the address of _final_ destination (if some Routing Header with a
+ non-zero segleft field is present in underlayer classes, the last
+ address is used.)
+ - the address of _real_ source (basically the source address of an
+ IPv6 class instance available in the underlayer or the source address
+ in HAO option if some Destination Option header found in underlayer
+ includes this option).
+ - the length is the length of provided payload string ('p')
+ """
+
+ ph6 = PseudoIPv6()
+ ph6.nh = nh
+ rthdr = 0
+ hahdr = 0
+ final_dest_addr_found = 0
+ while u != None and not isinstance(u, IPv6):
+ if (isinstance(u, IPv6ExtHdrRouting) and
+ u.segleft != 0 and len(u.addresses) != 0 and
+ final_dest_addr_found == 0):
+ rthdr = u.addresses[-1]
+ final_dest_addr_found = 1
+ elif (isinstance(u, IPv6ExtHdrDestOpt) and (len(u.options) == 1) and
+ isinstance(u.options[0], HAO)):
+ hahdr = u.options[0].hoa
+ u = u.underlayer
+ if u is None:
+ warning("No IPv6 underlayer to compute checksum. Leaving null.")
+ return 0
+ if hahdr:
+ ph6.src = hahdr
+ else:
+ ph6.src = u.src
+ if rthdr:
+ ph6.dst = rthdr
+ else:
+ ph6.dst = u.dst
+ ph6.uplen = len(p)
+ ph6s = bytes(ph6)
+ return checksum(ph6s+p)
+
+
+#############################################################################
+#############################################################################
+### Extension Headers ###
+#############################################################################
+#############################################################################
+
+
+# Inherited by all extension header classes
+class _IPv6ExtHdr(_IPv6GuessPayload, Packet):
+ name = 'Abstract IPV6 Option Header'
+ aliastypes = [IPv6, IPerror6] # TODO ...
+
+
+#################### IPv6 options for Extension Headers #####################
+
+_hbhopts = { 0x00: "Pad1",
+ 0x01: "PadN",
+ 0x04: "Tunnel Encapsulation Limit",
+ 0x05: "Router Alert",
+ 0x06: "Quick-Start",
+ 0xc2: "Jumbo Payload",
+ 0xc9: "Home Address Option" }
+
+class _OTypeField(ByteEnumField):
+ """
+ Modified ByteEnumField that displays information regarding the IPv6 option
+ based on its option type value (What should be done by nodes that process
+ the option if they do not understand it ...)
+
+ It is used by Jumbo, Pad1, PadN, RouterAlert, HAO options
+ """
+ pol = {0x00: "00: skip",
+ 0x40: "01: discard",
+ 0x80: "10: discard+ICMP",
+ 0xC0: "11: discard+ICMP not mcast"}
+
+ enroutechange = {0x00: "0: Don't change en-route",
+ 0x20: "1: May change en-route" }
+
+ def i2repr(self, pkt, x):
+ s = self.i2s.get(x, repr(x))
+ polstr = self.pol[(x & 0xC0)]
+ enroutechangestr = self.enroutechange[(x & 0x20)]
+ return "%s [%s, %s]" % (s, polstr, enroutechangestr)
+
+class HBHOptUnknown(Packet): # IPv6 Hop-By-Hop Option
+ name = "Scapy6 Unknown Option"
+ fields_desc = [_OTypeField("otype", 0x01, _hbhopts),
+ FieldLenField("optlen", None, length_of="optdata", fmt="B"),
+ StrLenField("optdata", "",
+ length_from = lambda pkt: pkt.optlen) ]
+ def alignment_delta(self, curpos): # By default, no alignment requirement
+ """
+ As specified in section 4.2 of RFC 2460, every option has
+ an alignment requirement usually expressed as xn+y, meaning
+ the Option Type must appear at an integer multiple of x octets
+ from the start of the header, plus y octets.
+
+ That function is provided the current position from the
+ start of the header and returns required padding length.
+ """
+ return 0
+
+class Pad1(Packet): # IPv6 Hop-By-Hop Option
+ name = "Pad1"
+ fields_desc = [ _OTypeField("otype", 0x00, _hbhopts) ]
+ def alignment_delta(self, curpos): # No alignment requirement
+ return 0
+
+class PadN(Packet): # IPv6 Hop-By-Hop Option
+ name = "PadN"
+ fields_desc = [_OTypeField("otype", 0x01, _hbhopts),
+ FieldLenField("optlen", None, length_of="optdata", fmt="B"),
+ StrLenField("optdata", "",
+ length_from = lambda pkt: pkt.optlen)]
+ def alignment_delta(self, curpos): # No alignment requirement
+ return 0
+
+class RouterAlert(Packet): # RFC 2711 - IPv6 Hop-By-Hop Option
+ name = "Router Alert"
+ fields_desc = [_OTypeField("otype", 0x05, _hbhopts),
+ ByteField("optlen", 2),
+ ShortEnumField("value", None,
+ { 0: "Datagram contains a MLD message",
+ 1: "Datagram contains RSVP message",
+ 2: "Datagram contains an Active Network message" }) ]
+ # TODO : Check IANA has not defined new values for value field of RouterAlertOption
+ # TODO : now that we have that option, we should do something in MLD class that need it
+ def alignment_delta(self, curpos): # alignment requirement : 2n+0
+ x = 2 ; y = 0
+ delta = x*((curpos - y + x - 1)//x) + y - curpos
+ return delta
+
+class Jumbo(Packet): # IPv6 Hop-By-Hop Option
+ name = "Jumbo Payload"
+ fields_desc = [_OTypeField("otype", 0xC2, _hbhopts),
+ ByteField("optlen", 4),
+ IntField("jumboplen", None) ]
+ def alignment_delta(self, curpos): # alignment requirement : 4n+2
+ x = 4 ; y = 2
+ delta = x*((curpos - y + x - 1)//x) + y - curpos
+ return delta
+
+class HAO(Packet): # IPv6 Destination Options Header Option
+ name = "Home Address Option"
+ fields_desc = [_OTypeField("otype", 0xC9, _hbhopts),
+ ByteField("optlen", 16),
+ IP6Field("hoa", "::") ]
+ def alignment_delta(self, curpos): # alignment requirement : 8n+6
+ x = 8 ; y = 6
+ delta = x*((curpos - y + x - 1)//x) + y - curpos
+ return delta
+
+_hbhoptcls = { 0x00: Pad1,
+ 0x01: PadN,
+ 0x05: RouterAlert,
+ 0xC2: Jumbo,
+ 0xC9: HAO }
+
+
+######################## Hop-by-Hop Extension Header ########################
+
+class _HopByHopOptionsField(PacketListField):
+ islist = 1
+ holds_packet = 1
+ def __init__(self, name, default, cls, curpos, count_from=None, length_from=None):
+ self.curpos = curpos
+ PacketListField.__init__(self, name, default, cls, count_from=count_from, length_from=length_from)
+
+ def i2len(self, pkt, i):
+ l = len(self.i2m(pkt, i))
+ return l
+
+ def i2count(self, pkt, i):
+ if type(i) is list:
+ return len(i)
+ return 0
+
+ def getfield(self, pkt, s):
+ c = l = None
+ if self.length_from is not None:
+ l = self.length_from(pkt)
+ elif self.count_from is not None:
+ c = self.count_from(pkt)
+
+ opt = []
+ ret = b""
+ x = s
+ if l is not None:
+ x,ret = s[:l],s[l:]
+ while x:
+ if c is not None:
+ if c <= 0:
+ break
+ c -= 1
+ #o = ord(x[0]) # Option type
+ o = x[0] # Option type
+ cls = self.cls
+ if o in _hbhoptcls:
+ cls = _hbhoptcls[o]
+ try:
+ op = cls(x)
+ except:
+ op = self.cls(x)
+ opt.append(op)
+ if isinstance(op.payload, conf.raw_layer):
+ x = op.payload.load
+ del(op.payload)
+ else:
+ x = b""
+ return x+ret,opt
+
+ def i2m(self, pkt, x):
+ autopad = None
+ try:
+ autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field
+ except:
+ autopad = 1
+
+ if not autopad:
+ return b"".join(map(bytes, x))
+
+ curpos = self.curpos
+ s = b""
+ for p in x:
+ d = p.alignment_delta(curpos)
+ curpos += d
+ if d == 1:
+ s += bytes(Pad1())
+ elif d != 0:
+ s += bytes(PadN(optdata=b'\x00'*(d-2)))
+ pstr = bytes(p)
+ curpos += len(pstr)
+ s += pstr
+
+ # Let's make the class including our option field
+ # a multiple of 8 octets long
+ d = curpos % 8
+ if d == 0:
+ return s
+ d = 8 - d
+ if d == 1:
+ s += bytes(Pad1())
+ elif d != 0:
+ s += bytes(PadN(optdata=b'\x00'*(d-2)))
+
+ return s
+
+ def addfield(self, pkt, s, val):
+ return s+self.i2m(pkt, val)
+
+class _PhantomAutoPadField(ByteField):
+ def addfield(self, pkt, s, val):
+ return s
+
+ def getfield(self, pkt, s):
+ return s, 1
+
+ def i2repr(self, pkt, x):
+ if x:
+ return "On"
+ return "Off"
+
+
+class IPv6ExtHdrHopByHop(_IPv6ExtHdr):
+ name = "IPv6 Extension Header - Hop-by-Hop Options Header"
+ fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
+ FieldLenField("len", None, length_of="options", fmt="B",
+ adjust = lambda pkt,x: (x+2+7)//8 - 1),
+ _PhantomAutoPadField("autopad", 1), # autopad activated by default
+ _HopByHopOptionsField("options", [], HBHOptUnknown, 2,
+ length_from = lambda pkt: (8*(pkt.len+1))-2) ]
+ overload_fields = {IPv6: { "nh": 0 }}
+
+
+######################## Destination Option Header ##########################
+
+class IPv6ExtHdrDestOpt(_IPv6ExtHdr):
+ name = "IPv6 Extension Header - Destination Options Header"
+ fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
+ FieldLenField("len", None, length_of="options", fmt="B",
+ adjust = lambda pkt,x: (x+2+7)//8 - 1),
+ _PhantomAutoPadField("autopad", 1), # autopad activated by default
+ _HopByHopOptionsField("options", [], HBHOptUnknown, 2,
+ length_from = lambda pkt: (8*(pkt.len+1))-2) ]
+ overload_fields = {IPv6: { "nh": 60 }}
+
+
+############################# Routing Header ################################
+
+class IPv6ExtHdrRouting(_IPv6ExtHdr):
+ name = "IPv6 Option Header Routing"
+ fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
+ FieldLenField("len", None, count_of="addresses", fmt="B",
+ adjust = lambda pkt,x:2*x), # in 8 bytes blocks
+ ByteField("type", 0),
+ ByteField("segleft", None),
+ BitField("reserved", 0, 32), # There is meaning in this field ...
+ IP6ListField("addresses", [],
+ length_from = lambda pkt: 8*pkt.len)]
+ overload_fields = {IPv6: { "nh": 43 }}
+
+ def post_build(self, pkt, pay):
+ if self.segleft is None:
+ pkt = pkt[:3]+struct.pack("B", len(self.addresses))+pkt[4:]
+ return _IPv6ExtHdr.post_build(self, pkt, pay)
+
+########################### Fragmentation Header ############################
+
+class IPv6ExtHdrFragment(_IPv6ExtHdr):
+ name = "IPv6 Extension Header - Fragmentation header"
+ fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
+ BitField("res1", 0, 8),
+ BitField("offset", 0, 13),
+ BitField("res2", 0, 2),
+ BitField("m", 0, 1),
+ IntField("id", None) ]
+ overload_fields = {IPv6: { "nh": 44 }}
+
+
+def defragment6(pktlist):
+ """
+ Performs defragmentation of a list of IPv6 packets. Packets are reordered.
+ Crap is dropped. What lacks is completed by 'X' characters.
+ """
+
+ l = [ x for x in pktlist if IPv6ExtHdrFragment in x ] # remove non fragments
+ if not l:
+ return []
+
+ id = l[0][IPv6ExtHdrFragment].id
+
+ llen = len(l)
+ l = [ x for x in l if x[IPv6ExtHdrFragment].id == id ]
+ if len(l) != llen:
+ warning("defragment6: some fragmented packets have been removed from list")
+ llen = len(l)
+
+ # reorder fragments
+ i = 0
+ res = []
+ while l:
+ min_pos = 0
+ min_offset = l[0][IPv6ExtHdrFragment].offset
+ for p in l:
+ cur_offset = p[IPv6ExtHdrFragment].offset
+ if cur_offset < min_offset:
+ min_pos = 0
+ min_offset = cur_offset
+ res.append(l[min_pos])
+ del(l[min_pos])
+
+ # regenerate the fragmentable part
+ fragmentable = b""
+ for p in res:
+ q=p[IPv6ExtHdrFragment]
+ offset = 8*q.offset
+ if offset != len(fragmentable):
+ warning("Expected an offset of %d. Found %d. Padding with XXXX" % (len(fragmentable), offset))
+ fragmentable += b"X"*(offset - len(fragmentable))
+ fragmentable += bytes(q.payload)
+
+ # Regenerate the unfragmentable part.
+ q = res[0]
+ nh = q[IPv6ExtHdrFragment].nh
+ q[IPv6ExtHdrFragment].underlayer.nh = nh
+ q[IPv6ExtHdrFragment].underlayer.payload = None
+ q /= conf.raw_layer(load=fragmentable)
+
+ return IPv6(bytes(q))
+
+
+def fragment6(pkt, fragSize):
+ """
+ Performs fragmentation of an IPv6 packet. Provided packet ('pkt') must already
+ contain an IPv6ExtHdrFragment() class. 'fragSize' argument is the expected
+ maximum size of fragments (MTU). The list of packets is returned.
+
+ If packet does not contain an IPv6ExtHdrFragment class, it is returned in
+ result list.
+ """
+
+ pkt = pkt.copy()
+
+ if not IPv6ExtHdrFragment in pkt:
+ # TODO : automatically add a fragment before upper Layer
+ # at the moment, we do nothing and return initial packet
+ # as single element of a list
+ return [pkt]
+
+ # If the payload is bigger than 65535, a Jumbo payload must be used, as
+ # an IPv6 packet can't be bigger than 65535 bytes.
+ if len(bytes(pkt[IPv6ExtHdrFragment])) > 65535:
+ warning("An IPv6 packet can'be bigger than 65535, please use a Jumbo payload.")
+ return []
+
+ s = bytes(pkt) # for instantiation to get upper layer checksum right
+
+ if len(s) <= fragSize:
+ return [pkt]
+
+ # Fragmentable part : fake IPv6 for Fragmentable part length computation
+ fragPart = pkt[IPv6ExtHdrFragment].payload
+ tmp = bytes(IPv6(src="::1", dst="::1")/fragPart)
+ fragPartLen = len(tmp) - 40 # basic IPv6 header length
+ fragPartStr = s[-fragPartLen:]
+
+ # Grab Next Header for use in Fragment Header
+ nh = IPv6(tmp[:40]).nh
+
+ # Keep fragment header
+ fragHeader = pkt[IPv6ExtHdrFragment]
+ fragHeader.payload = None # detach payload
+
+ # Unfragmentable Part
+ unfragPartLen = len(s) - fragPartLen - 8
+ unfragPart = pkt
+ pkt[IPv6ExtHdrFragment].underlayer.payload = None # detach payload
+
+ # Cut the fragmentable part to fit fragSize. Inner fragments have
+ # a length that is an integer multiple of 8 octets. last Frag MTU
+ # can be anything below MTU
+ lastFragSize = fragSize - unfragPartLen - 8
+ innerFragSize = lastFragSize - (lastFragSize % 8)
+
+ if lastFragSize <= 0 or innerFragSize == 0:
+ warning("Provided fragment size value is too low. " +
+ "Should be more than %d" % (unfragPartLen + 8))
+ return [unfragPart/fragHeader/fragPart]
+
+ remain = fragPartStr
+ res = []
+    fragOffset     = 0         # offset, incremented during creation
+ fragId = random.randint(0,0xffffffff) # random id ...
+ if fragHeader.id is not None: # ... except id provided by user
+ fragId = fragHeader.id
+ fragHeader.m = 1
+ fragHeader.id = fragId
+ fragHeader.nh = nh
+
+ # Main loop : cut, fit to FRAGSIZEs, fragOffset, Id ...
+ while True:
+ if (len(remain) > lastFragSize):
+ tmp = remain[:innerFragSize]
+ remain = remain[innerFragSize:]
+ fragHeader.offset = fragOffset # update offset
+ fragOffset += (innerFragSize // 8) # compute new one
+ if IPv6 in unfragPart:
+ unfragPart[IPv6].plen = None
+ tempo = unfragPart/fragHeader/conf.raw_layer(load=tmp)
+ res.append(tempo)
+ else:
+ fragHeader.offset = fragOffset # update offSet
+ fragHeader.m = 0
+ if IPv6 in unfragPart:
+ unfragPart[IPv6].plen = None
+ tempo = unfragPart/fragHeader/conf.raw_layer(load=remain)
+ res.append(tempo)
+ break
+ return res
+
+
+############################### AH Header ###################################
+
+# class _AHFieldLenField(FieldLenField):
+# def getfield(self, pkt, s):
+# l = getattr(pkt, self.fld)
+# l = (l*8)-self.shift
+# i = self.m2i(pkt, s[:l])
+# return s[l:],i
+
+# class _AHICVStrLenField(StrLenField):
+# def i2len(self, pkt, x):
+
+
+
+# class IPv6ExtHdrAH(_IPv6ExtHdr):
+# name = "IPv6 Extension Header - AH"
+# fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
+# _AHFieldLenField("len", None, "icv"),
+# ShortField("res", 0),
+# IntField("spi", 0),
+# IntField("sn", 0),
+# _AHICVStrLenField("icv", None, "len", shift=2) ]
+# overload_fields = {IPv6: { "nh": 51 }}
+
+# def post_build(self, pkt, pay):
+# if self.len is None:
+# pkt = pkt[0]+struct.pack("!B", 2*len(self.addresses))+pkt[2:]
+# if self.segleft is None:
+# pkt = pkt[:3]+struct.pack("!B", len(self.addresses))+pkt[4:]
+# return _IPv6ExtHdr.post_build(self, pkt, pay)
+
+
+############################### ESP Header ##################################
+
+# class IPv6ExtHdrESP(_IPv6extHdr):
+# name = "IPv6 Extension Header - ESP"
+# fields_desc = [ IntField("spi", 0),
+# IntField("sn", 0),
+# # there is things to extract from IKE work
+# ]
+# overloads_fields = {IPv6: { "nh": 50 }}
+
+
+
+#############################################################################
+#############################################################################
+### ICMPv6* Classes ###
+#############################################################################
+#############################################################################
+
+icmp6typescls = { 1: "ICMPv6DestUnreach",
+ 2: "ICMPv6PacketTooBig",
+ 3: "ICMPv6TimeExceeded",
+ 4: "ICMPv6ParamProblem",
+ 128: "ICMPv6EchoRequest",
+ 129: "ICMPv6EchoReply",
+ 130: "ICMPv6MLQuery",
+ 131: "ICMPv6MLReport",
+ 132: "ICMPv6MLDone",
+ 133: "ICMPv6ND_RS",
+ 134: "ICMPv6ND_RA",
+ 135: "ICMPv6ND_NS",
+ 136: "ICMPv6ND_NA",
+ 137: "ICMPv6ND_Redirect",
+ #138: Do Me - RFC 2894 - Seems painful
+ 139: "ICMPv6NIQuery",
+ 140: "ICMPv6NIReply",
+ 141: "ICMPv6ND_INDSol",
+ 142: "ICMPv6ND_INDAdv",
+ #143: Do Me - RFC 3810
+ 144: "ICMPv6HAADRequest",
+ 145: "ICMPv6HAADReply",
+ 146: "ICMPv6MPSol",
+ 147: "ICMPv6MPAdv",
+ #148: Do Me - SEND related - RFC 3971
+ #149: Do Me - SEND related - RFC 3971
+ 151: "ICMPv6MRD_Advertisement",
+ 152: "ICMPv6MRD_Solicitation",
+ 153: "ICMPv6MRD_Termination",
+ }
+
+icmp6typesminhdrlen = { 1: 8,
+ 2: 8,
+ 3: 8,
+ 4: 8,
+ 128: 8,
+ 129: 8,
+ 130: 24,
+ 131: 24,
+ 132: 24,
+ 133: 8,
+ 134: 16,
+ 135: 24,
+ 136: 24,
+ 137: 40,
+ #139:
+ #140
+ 141: 8,
+ 142: 8,
+ 144: 8,
+ 145: 8,
+ 146: 8,
+ 147: 8,
+ 151: 8,
+ 152: 4,
+ 153: 4
+ }
+
+icmp6types = { 1 : "Destination unreachable",
+ 2 : "Packet too big",
+ 3 : "Time exceeded",
+ 4 : "Parameter problem",
+ 100 : "Private Experimentation",
+ 101 : "Private Experimentation",
+ 128 : "Echo Request",
+ 129 : "Echo Reply",
+ 130 : "MLD Query",
+ 131 : "MLD Report",
+ 132 : "MLD Done",
+ 133 : "Router Solicitation",
+ 134 : "Router Advertisement",
+ 135 : "Neighbor Solicitation",
+ 136 : "Neighbor Advertisement",
+ 137 : "Redirect Message",
+ 138 : "Router Renumbering",
+ 139 : "ICMP Node Information Query",
+ 140 : "ICMP Node Information Response",
+ 141 : "Inverse Neighbor Discovery Solicitation Message",
+ 142 : "Inverse Neighbor Discovery Advertisement Message",
+ 143 : "Version 2 Multicast Listener Report",
+ 144 : "Home Agent Address Discovery Request Message",
+ 145 : "Home Agent Address Discovery Reply Message",
+ 146 : "Mobile Prefix Solicitation",
+ 147 : "Mobile Prefix Advertisement",
+ 148 : "Certification Path Solicitation",
+ 149 : "Certification Path Advertisement",
+ 151 : "Multicast Router Advertisement",
+ 152 : "Multicast Router Solicitation",
+ 153 : "Multicast Router Termination",
+ 200 : "Private Experimentation",
+ 201 : "Private Experimentation" }
+
+
+class _ICMPv6(Packet):
+ name = "ICMPv6 dummy class"
+ overload_fields = {IPv6: {"nh": 58}}
+ def post_build(self, p, pay):
+ p += pay
+ if self.cksum == None:
+ chksum = in6_chksum(58, self.underlayer, p)
+ p = p[:2]+struct.pack("!H", chksum)+p[4:]
+ return p
+
+ def hashret(self):
+ return self.payload.hashret()
+
+ def answers(self, other):
+ # isinstance(self.underlayer, _IPv6ExtHdr) may introduce a bug ...
+ if (isinstance(self.underlayer, IPerror6) or
+ isinstance(self.underlayer, _IPv6ExtHdr) and
+ isinstance(other, _ICMPv6)):
+ if not ((self.type == other.type) and
+ (self.code == other.code)):
+ return 0
+ return 1
+ return 0
+
+
+class _ICMPv6Error(_ICMPv6):
+ name = "ICMPv6 errors dummy class"
+ def guess_payload_class(self,p):
+ return IPerror6
+
+class ICMPv6Unknown(_ICMPv6):
+ name = "Scapy6 ICMPv6 fallback class"
+ fields_desc = [ ByteEnumField("type",1, icmp6types),
+ ByteField("code",0),
+ XShortField("cksum", None),
+ StrField("msgbody", "")]
+
+
+################################## RFC 2460 #################################
+
+class ICMPv6DestUnreach(_ICMPv6Error):
+ name = "ICMPv6 Destination Unreachable"
+ fields_desc = [ ByteEnumField("type",1, icmp6types),
+ ByteEnumField("code",0, { 0: "No route to destination",
+ 1: "Communication with destination administratively prohibited",
+ 2: "Beyond scope of source address",
+ 3: "Address unreachable",
+ 4: "Port unreachable" }),
+ XShortField("cksum", None),
+ XIntField("unused",0x00000000)]
+
+class ICMPv6PacketTooBig(_ICMPv6Error):
+ name = "ICMPv6 Packet Too Big"
+ fields_desc = [ ByteEnumField("type",2, icmp6types),
+ ByteField("code",0),
+ XShortField("cksum", None),
+ IntField("mtu",1280)]
+
+class ICMPv6TimeExceeded(_ICMPv6Error):
+ name = "ICMPv6 Time Exceeded"
+ fields_desc = [ ByteEnumField("type",3, icmp6types),
+ ByteEnumField("code",0, { 0: "hop limit exceeded in transit",
+ 1: "fragment reassembly time exceeded"}),
+ XShortField("cksum", None),
+ XIntField("unused",0x00000000)]
+
+# The default pointer value is set to the next header field of
+# the encapsulated IPv6 packet
+class ICMPv6ParamProblem(_ICMPv6Error):
+ name = "ICMPv6 Parameter Problem"
+ fields_desc = [ ByteEnumField("type",4, icmp6types),
+ ByteEnumField("code",0, {0: "erroneous header field encountered",
+ 1: "unrecognized Next Header type encountered",
+ 2: "unrecognized IPv6 option encountered"}),
+ XShortField("cksum", None),
+ IntField("ptr",6)]
+
+class ICMPv6EchoRequest(_ICMPv6):
+ name = "ICMPv6 Echo Request"
+ fields_desc = [ ByteEnumField("type", 128, icmp6types),
+ ByteField("code", 0),
+ XShortField("cksum", None),
+ XShortField("id",0),
+ XShortField("seq",0),
+ StrField("data", "")]
+ def mysummary(self):
+ return self.sprintf("%name% (id: %id% seq: %seq%)")
+ def hashret(self):
+ return struct.pack("HH",self.id,self.seq)+self.payload.hashret()
+
+
+class ICMPv6EchoReply(ICMPv6EchoRequest):
+ name = "ICMPv6 Echo Reply"
+ type = 129
+ def answers(self, other):
+ # We could match data content between request and reply.
+ return (isinstance(other, ICMPv6EchoRequest) and
+ self.id == other.id and self.seq == other.seq and
+ self.data == other.data)
+
+
+############ ICMPv6 Multicast Listener Discovery (RFC3810) ##################
+
+# All MLD messages are sent with a link-local source address
+# -> ensure this in post_build if none is specified
+# The Hop-Limit value must be 1
+# "and an IPv6 Router Alert option in a Hop-by-Hop Options
+# header. (The router alert option is necessary to cause routers to
+# examine MLD messages sent to multicast addresses in which the router
+# itself has no interest"
+class _ICMPv6ML(_ICMPv6):
+ fields_desc = [ ByteEnumField("type", 130, icmp6types),
+ ByteField("code", 0),
+ XShortField("cksum", None),
+ ShortField("mrd", 0),
+ ShortField("reserved", 0),
+ IP6Field("mladdr","::")]
+
+# general queries are sent to the link-scope all-nodes multicast
+# address ff02::1, with a multicast address field of 0 and a MRD of
+# [Query Response Interval]
+# Default value for mladdr is set to 0 for a General Query, and
+# overloaded by the user for a Multicast Address specific query
+# TODO : See what we can do to automatically include a Router Alert
+# Option in a Destination Option Header.
+class ICMPv6MLQuery(_ICMPv6ML): # RFC 2710
+ name = "MLD - Multicast Listener Query"
+ type = 130
+ mrd = 10000
+ mladdr = "::" # 10s for mrd
+ overload_fields = {IPv6: { "dst": "ff02::1", "hlim": 1, "nh": 58 }}
+ def hashret(self):
+ if self.mladdr != "::":
+ return struct.pack("HH",self.mladdr)+self.payload.hashret()
+ else:
+ return self.payload.hashret()
+
+
+# TODO : See what we can do to automatically include a Router Alert
+# Option in a Destination Option Header.
+class ICMPv6MLReport(_ICMPv6ML): # RFC 2710
+ name = "MLD - Multicast Listener Report"
+ type = 131
+ overload_fields = {IPv6: {"hlim": 1, "nh": 58}}
+    # TODO: implement hashret and answers
+
+# When a node ceases to listen to a multicast address on an interface,
+# it SHOULD send a single Done message to the link-scope all-routers
+# multicast address (FF02::2), carrying in its multicast address field
+# the address to which it is ceasing to listen
+# TODO : See what we can do to automatically include a Router Alert
+# Option in a Destination Option Header.
+class ICMPv6MLDone(_ICMPv6ML): # RFC 2710
+ name = "MLD - Multicast Listener Done"
+ type = 132
+ overload_fields = {IPv6: { "dst": "ff02::2", "hlim": 1, "nh": 58}}
+
+
+########## ICMPv6 MRD - Multicast Router Discovery (RFC 4286) ###############
+
+# TODO:
+# - 04/09/06 troglocan : find a way to automatically add a router alert
+# option for all MRD packets. This could be done in a specific
+# way when IPv6 is the under layer with some specific keyword
+# like 'exthdr'. This would allow to keep compatibility with
+# providing IPv6 fields to be overloaded in fields_desc.
+#
+# At the moment, if user inserts an IPv6 Router alert option
+# none of the IPv6 default values of IPv6 layer will be set.
+
+class ICMPv6MRD_Advertisement(_ICMPv6):
+ name = "ICMPv6 Multicast Router Discovery Advertisement"
+ fields_desc = [ByteEnumField("type", 151, icmp6types),
+ ByteField("advinter", 20),
+ XShortField("cksum", None),
+ ShortField("queryint", 0),
+ ShortField("robustness", 0)]
+ overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::2"}}
+ # IPv6 Router Alert requires manual inclusion
+ def extract_padding(self, s):
+ return s[:8], s[8:]
+
+class ICMPv6MRD_Solicitation(_ICMPv6):
+ name = "ICMPv6 Multicast Router Discovery Solicitation"
+ fields_desc = [ByteEnumField("type", 152, icmp6types),
+ ByteField("res", 0),
+ XShortField("cksum", None) ]
+ overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::2"}}
+ # IPv6 Router Alert requires manual inclusion
+ def extract_padding(self, s):
+ return s[:4], s[4:]
+
+class ICMPv6MRD_Termination(_ICMPv6):
+ name = "ICMPv6 Multicast Router Discovery Termination"
+ fields_desc = [ByteEnumField("type", 153, icmp6types),
+ ByteField("res", 0),
+ XShortField("cksum", None) ]
+ overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::6A"}}
+ # IPv6 Router Alert requires manual inclusion
+ def extract_padding(self, s):
+ return s[:4], s[4:]
+
+
+################### ICMPv6 Neighbor Discovery (RFC 2461) ####################
+
+icmp6ndopts = { 1: "Source Link-Layer Address",
+ 2: "Target Link-Layer Address",
+ 3: "Prefix Information",
+ 4: "Redirected Header",
+ 5: "MTU",
+ 6: "NBMA Shortcut Limit Option", # RFC2491
+ 7: "Advertisement Interval Option",
+ 8: "Home Agent Information Option",
+ 9: "Source Address List",
+ 10: "Target Address List",
+ 11: "CGA Option", # RFC 3971
+ 12: "RSA Signature Option", # RFC 3971
+ 13: "Timestamp Option", # RFC 3971
+ 14: "Nonce option", # RFC 3971
+ 15: "Trust Anchor Option", # RFC 3971
+ 16: "Certificate Option", # RFC 3971
+ 17: "IP Address Option", # RFC 4068
+ 18: "New Router Prefix Information Option", # RFC 4068
+ 19: "Link-layer Address Option", # RFC 4068
+ 20: "Neighbor Advertisement Acknowledgement Option",
+ 21: "CARD Request Option", # RFC 4065/4066/4067
+ 22: "CARD Reply Option", # RFC 4065/4066/4067
+ 23: "MAP Option", # RFC 4140
+ 24: "Route Information Option", # RFC 4191
+ 25: "Recusive DNS Server Option",
+ 26: "IPv6 Router Advertisement Flags Option"
+ }
+
+icmp6ndoptscls = { 1: "ICMPv6NDOptSrcLLAddr",
+ 2: "ICMPv6NDOptDstLLAddr",
+ 3: "ICMPv6NDOptPrefixInfo",
+ 4: "ICMPv6NDOptRedirectedHdr",
+ 5: "ICMPv6NDOptMTU",
+ 6: "ICMPv6NDOptShortcutLimit",
+ 7: "ICMPv6NDOptAdvInterval",
+ 8: "ICMPv6NDOptHAInfo",
+ 9: "ICMPv6NDOptSrcAddrList",
+ 10: "ICMPv6NDOptTgtAddrList",
+ #11: Do Me,
+ #12: Do Me,
+ #13: Do Me,
+ #14: Do Me,
+ #15: Do Me,
+ #16: Do Me,
+ 17: "ICMPv6NDOptIPAddr",
+ 18: "ICMPv6NDOptNewRtrPrefix",
+ 19: "ICMPv6NDOptLLA",
+ #18: Do Me,
+ #19: Do Me,
+ #20: Do Me,
+ #21: Do Me,
+ #22: Do Me,
+ 23: "ICMPv6NDOptMAP",
+ 24: "ICMPv6NDOptRouteInfo",
+ 25: "ICMPv6NDOptRDNSS",
+ 26: "ICMPv6NDOptEFA"
+ }
+
+class _ICMPv6NDGuessPayload:
+ name = "Dummy ND class that implements guess_payload_class()"
+ def guess_payload_class(self,p):
+ if len(p) > 1:
+ #return get_cls(icmp6ndoptscls.get(ord(p[0]),"Raw"), "Raw") # s/Raw/ICMPv6NDOptUnknown/g ?
+ return get_cls(icmp6ndoptscls.get(p[0],"Raw"), "Raw") # s/Raw/ICMPv6NDOptUnknown/g ?
+
+
+# Beginning of ICMPv6 Neighbor Discovery Options.
+
+class ICMPv6NDOptUnknown(_ICMPv6NDGuessPayload, Packet):
+ name = "ICMPv6 Neighbor Discovery Option - Scapy Unimplemented"
+ fields_desc = [ ByteField("type",None),
+ FieldLenField("len",None,length_of="data",fmt="B",
+ adjust = lambda pkt,x: x+2),
+ StrLenField("data","",
+ length_from = lambda pkt: pkt.len-2) ]
+
+# NOTE: len includes type and len field. Expressed in unit of 8 bytes
+# TODO: revisit the use of ETHER_ANY
+class ICMPv6NDOptSrcLLAddr(_ICMPv6NDGuessPayload, Packet):
+ name = "ICMPv6 Neighbor Discovery Option - Source Link-Layer Address"
+ fields_desc = [ ByteField("type", 1),
+ ByteField("len", 1),
+ MACField("lladdr", ETHER_ANY) ]
+ def mysummary(self):
+ return self.sprintf("%name% %lladdr%")
+
+class ICMPv6NDOptDstLLAddr(ICMPv6NDOptSrcLLAddr):
+ name = "ICMPv6 Neighbor Discovery Option - Destination Link-Layer Address"
+ type = 2
+
+class ICMPv6NDOptPrefixInfo(_ICMPv6NDGuessPayload, Packet):
+ name = "ICMPv6 Neighbor Discovery Option - Prefix Information"
+ fields_desc = [ ByteField("type",3),
+ ByteField("len",4),
+ ByteField("prefixlen",None),
+ BitField("L",1,1),
+ BitField("A",1,1),
+ BitField("R",0,1),
+ BitField("res1",0,5),
+ XIntField("validlifetime",0xffffffff),
+ XIntField("preferredlifetime",0xffffffff),
+ XIntField("res2",0x00000000),
+ IP6Field("prefix","::") ]
+ def mysummary(self):
+ return self.sprintf("%name% %prefix%")
+
+# TODO: We should also limit the size of included packet to something
+# like (initiallen - 40 - 2)
+class TruncPktLenField(PacketLenField):
+
+ def __init__(self, name, default, cls, cur_shift, length_from=None, shift=0):
+ PacketLenField.__init__(self, name, default, cls, length_from=length_from)
+ self.cur_shift = cur_shift
+
+ def getfield(self, pkt, s):
+ l = self.length_from(pkt)
+ i = self.m2i(pkt, s[:l])
+ return s[l:],i
+
+ def m2i(self, pkt, m):
+ s = None
+ try: # It can happen we have sth shorter than 40 bytes
+ s = self.cls(m)
+ except:
+ return conf.raw_layer(m)
+ return s
+
+ def i2m(self, pkt, x):
+ s = bytes(x)
+ l = len(s)
+ r = (l + self.cur_shift) % 8
+ l = l - r
+ return s[:l]
+
+ def i2len(self, pkt, i):
+ return len(self.i2m(pkt, i))
+
+
+# Add a post_build to recompute the length (in multiples of 8 octets)
+class ICMPv6NDOptRedirectedHdr(_ICMPv6NDGuessPayload, Packet):
+ name = "ICMPv6 Neighbor Discovery Option - Redirected Header"
+ fields_desc = [ ByteField("type",4),
+ FieldLenField("len", None, length_of="pkt", fmt="B",
+ adjust = lambda pkt,x:(x+8)//8),
+ StrFixedLenField("res", b"\x00"*6, 6),
+ TruncPktLenField("pkt", b"", IPv6, 8,
+ length_from = lambda pkt: 8*pkt.len-8) ]
+
+# See which value should be used for default MTU instead of 1280
+class ICMPv6NDOptMTU(_ICMPv6NDGuessPayload, Packet):
+ name = "ICMPv6 Neighbor Discovery Option - MTU"
+ fields_desc = [ ByteField("type",5),
+ ByteField("len",1),
+ XShortField("res",0),
+ IntField("mtu",1280)]
+
+class ICMPv6NDOptShortcutLimit(_ICMPv6NDGuessPayload, Packet): # RFC 2491
+ name = "ICMPv6 Neighbor Discovery Option - NBMA Shortcut Limit"
+ fields_desc = [ ByteField("type", 6),
+ ByteField("len", 1),
+ ByteField("shortcutlim", 40), # XXX
+ ByteField("res1", 0),
+ IntField("res2", 0) ]
+
+class ICMPv6NDOptAdvInterval(_ICMPv6NDGuessPayload, Packet):
+ name = "ICMPv6 Neighbor Discovery - Interval Advertisement"
+ fields_desc = [ ByteField("type",7),
+ ByteField("len",1),
+ ShortField("res", 0),
+ IntField("advint", 0) ]
+ def mysummary(self):
+ return self.sprintf("%name% %advint% milliseconds")
+
+class ICMPv6NDOptHAInfo(_ICMPv6NDGuessPayload, Packet):
+ name = "ICMPv6 Neighbor Discovery - Home Agent Information"
+ fields_desc = [ ByteField("type",8),
+ ByteField("len",1),
+ ShortField("res", 0),
+ ShortField("pref", 0),
+ ShortField("lifetime", 1)]
+ def mysummary(self):
+ return self.sprintf("%name% %pref% %lifetime% seconds")
+
+# type 9 : See ICMPv6NDOptSrcAddrList class below in IND (RFC 3122) support
+
+# type 10 : See ICMPv6NDOptTgtAddrList class below in IND (RFC 3122) support
+
+# FMIPv6 option type 17: an IPv6 address whose role is given by 'optcode'
+# (old/new care-of address, or the NAR's address).
+class ICMPv6NDOptIPAddr(_ICMPv6NDGuessPayload, Packet): # RFC 4068
+ name = "ICMPv6 Neighbor Discovery - IP Address Option (FH for MIPv6)"
+ fields_desc = [ ByteField("type",17),
+ ByteField("len", 3),
+ ByteEnumField("optcode", 1, {1: "Old Care-Of Address",
+ 2: "New Care-Of Address",
+ 3: "NAR's IP address" }),
+ ByteField("plen", 64),
+ IntField("res", 0),
+ IP6Field("addr", "::") ]
+
+# FMIPv6 option type 18: prefix information for the new router.
+class ICMPv6NDOptNewRtrPrefix(_ICMPv6NDGuessPayload, Packet): # RFC 4068
+ name = "ICMPv6 Neighbor Discovery - New Router Prefix Information Option (FH for MIPv6)"
+ fields_desc = [ ByteField("type",18),
+ ByteField("len", 3),
+ ByteField("optcode", 0),
+ ByteField("plen", 64),
+ IntField("res", 0),
+ IP6Field("prefix", "::") ]
+
+_rfc4068_lla_optcode = {0: "Wildcard requesting resolution for all nearby AP",
+ 1: "LLA for the new AP",
+ 2: "LLA of the MN",
+ 3: "LLA of the NAR",
+ 4: "LLA of the src of TrSolPr or PrRtAdv msg",
+ 5: "AP identified by LLA belongs to current iface of router",
+ 6: "No preifx info available for AP identified by the LLA",
+ 7: "No fast handovers support for AP identified by the LLA" }
+
+# FMIPv6 option type 19: a link-layer address with an optcode from the
+# _rfc4068_lla_optcode table above.
+class ICMPv6NDOptLLA(_ICMPv6NDGuessPayload, Packet): # RFC 4068
+ name = "ICMPv6 Neighbor Discovery - Link-Layer Address (LLA) Option (FH for MIPv6)"
+ fields_desc = [ ByteField("type", 19),
+ ByteField("len", 1),
+ ByteEnumField("optcode", 0, _rfc4068_lla_optcode),
+ MACField("lla", ETHER_ANY) ] # We only support ethernet
+
+# HMIPv6 option type 23: Mobility Anchor Point advertisement.
+class ICMPv6NDOptMAP(_ICMPv6NDGuessPayload, Packet): # RFC 4140
+ name = "ICMPv6 Neighbor Discovery - MAP Option"
+ fields_desc = [ ByteField("type", 23),
+ ByteField("len", 3),
+ BitField("dist", 1, 4),
+ BitField("pref", 15, 4), # highest availability
+ BitField("R", 1, 1),
+ BitField("res", 0, 7),
+ IntField("validlifetime", 0xffffffff),
+ IP6Field("addr", "::") ]
+
+
+# Variable-length IPv6 prefix field used by the Route Information option
+# (RFC 4191). The option's 'len' field counts 8-octet units including the
+# option header, so the prefix occupies 8*(len-1) bytes on the wire;
+# internally the value is always a full 16-byte address (zero-padded).
+class IP6PrefixField(IP6Field):
+ def __init__(self, name, default):
+ IP6Field.__init__(self, name, default)
+ self.length_from = lambda pkt: 8*(pkt.len - 1)
+
+ def addfield(self, pkt, s, val):
+ return s + self.i2m(pkt, val)
+
+ def getfield(self, pkt, s):
+ # Consume 8*(len-1) bytes, zero-pad to 16 before decoding.
+ l = self.length_from(pkt)
+ p = s[:l]
+ if l < 16:
+ p += b'\x00'*(16-l)
+ return s[l:], self.m2i(pkt,p)
+
+ def i2len(self, pkt, x):
+ return len(self.i2m(pkt, x))
+
+ def i2m(self, pkt, x):
+ l = pkt.len
+
+ if x is None:
+ x = "::"
+ if l is None:
+ l = 1
+ x = inet_pton(socket.AF_INET6, x)
+
+ if l is None:
+ return x
+ if l in [0, 1]:
+ # len 0/1: no prefix bytes on the wire at all.
+ return b""
+ if l in [2, 3]:
+ # len 2 -> first 8 bytes, len 3 -> full 16 bytes.
+ return x[:8*(l-1)]
+
+ # len > 3: 16 address bytes plus explicit zero padding.
+ return x + b'\x00'*8*(l-3)
+
+# ND option type 24 (RFC 4191): more-specific route with preference.
+# 'len' is auto-computed from the prefix length in 8-octet units.
+class ICMPv6NDOptRouteInfo(_ICMPv6NDGuessPayload, Packet): # RFC 4191
+ name = "ICMPv6 Neighbor Discovery Option - Route Information Option"
+ fields_desc = [ ByteField("type",24),
+ FieldLenField("len", None, length_of="prefix", fmt="B",
+ adjust = lambda pkt,x: x//8 + 1),
+ ByteField("plen", None),
+ BitField("res1",0,3),
+ BitField("prf",0,2),
+ BitField("res2",0,3),
+ IntField("rtlifetime", 0xffffffff),
+ IP6PrefixField("prefix", None) ]
+
+# ND option type 25 (RFC 5006/6106): list of recursive DNS servers.
+# 'len' is 2 units per address plus 1 for the header.
+class ICMPv6NDOptRDNSS(_ICMPv6NDGuessPayload, Packet): # RFC 5006
+ name = "ICMPv6 Neighbor Discovery Option - Recursive DNS Server Option"
+ fields_desc = [ ByteField("type", 25),
+ FieldLenField("len", None, count_of="dns", fmt="B",
+ adjust = lambda pkt,x: 2*x+1),
+ ShortField("res", None),
+ IntField("lifetime", 0xffffffff),
+ IP6ListField("dns", [],
+ length_from = lambda pkt: 8*(pkt.len-1)) ]
+
+# ND option type 26: expands the RA flags field by 48 bits.
+class ICMPv6NDOptEFA(_ICMPv6NDGuessPayload, Packet): # RFC 5175 (prev. 5075)
+ name = "ICMPv6 Neighbor Discovery Option - Expanded Flags Option"
+ fields_desc = [ ByteField("type", 26),
+ ByteField("len", 1),
+ BitField("res", 0, 48) ]
+
+# End of ICMPv6 Neighbor Discovery Options.
+
+# ICMPv6 type 133: Router Solicitation (RFC 4861). Defaults target the
+# all-routers multicast group with hop limit 255, as ND requires.
+class ICMPv6ND_RS(_ICMPv6NDGuessPayload, _ICMPv6):
+ name = "ICMPv6 Neighbor Discovery - Router Solicitation"
+ fields_desc = [ ByteEnumField("type", 133, icmp6types),
+ ByteField("code",0),
+ XShortField("cksum", None),
+ IntField("res",0) ]
+ overload_fields = {IPv6: { "nh": 58, "dst": "ff02::2", "hlim": 255 }}
+
+# ICMPv6 type 134: Router Advertisement (RFC 4861); 'prf' is the RFC 4191
+# default router preference, 'P' the RFC 6275 home agent flag position.
+class ICMPv6ND_RA(_ICMPv6NDGuessPayload, _ICMPv6):
+ name = "ICMPv6 Neighbor Discovery - Router Advertisement"
+ fields_desc = [ ByteEnumField("type", 134, icmp6types),
+ ByteField("code",0),
+ XShortField("cksum", None),
+ ByteField("chlim",0),
+ BitField("M",0,1),
+ BitField("O",0,1),
+ BitField("H",0,1),
+ BitEnumField("prf",1,2, { 0: "Medium (default)",
+ 1: "High",
+ 2: "Reserved",
+ 3: "Low" } ), # RFC 4191
+ BitField("P",0,1),
+ BitField("res",0,2),
+ ShortField("routerlifetime",1800),
+ IntField("reachabletime",0),
+ IntField("retranstimer",0) ]
+ overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
+
+ def answers(self, other):
+ # An RA answers any RS (there is no transaction identifier).
+ return isinstance(other, ICMPv6ND_RS)
+
+# ICMPv6 type 135: Neighbor Solicitation (RFC 4861).
+class ICMPv6ND_NS(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
+ name = "ICMPv6 Neighbor Discovery - Neighbor Solicitation"
+ fields_desc = [ ByteEnumField("type",135, icmp6types),
+ ByteField("code",0),
+ XShortField("cksum", None),
+ IntField("res", 0),
+ IP6Field("tgt","::") ]
+ overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
+
+ def mysummary(self):
+ return self.sprintf("%name% (tgt: %tgt%)")
+
+ def hashret(self):
+ # NOTE(review): getbyteval() is a helper defined elsewhere in this
+ # file; presumably it returns the raw bytes of 'tgt' -- confirm.
+ return self.getbyteval("tgt")+self.payload.hashret()
+
+# ICMPv6 type 136: Neighbor Advertisement (RFC 4861). Matched to a
+# solicitation by equal 'tgt' addresses (see answers()).
+class ICMPv6ND_NA(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
+ name = "ICMPv6 Neighbor Discovery - Neighbor Advertisement"
+ fields_desc = [ ByteEnumField("type",136, icmp6types),
+ ByteField("code",0),
+ XShortField("cksum", None),
+ BitField("R",1,1),
+ BitField("S",0,1),
+ BitField("O",1,1),
+ XBitField("res",0,29),
+ IP6Field("tgt","::") ]
+ overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
+
+ def mysummary(self):
+ return self.sprintf("%name% (tgt: %tgt%)")
+
+ def hashret(self):
+ return self.getbyteval("tgt")+self.payload.hashret()
+
+ def answers(self, other):
+ return isinstance(other, ICMPv6ND_NS) and self.tgt == other.tgt
+
+# associated possible options : target link-layer option, Redirected header
+class ICMPv6ND_Redirect(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
+ name = "ICMPv6 Neighbor Discovery - Redirect"
+ fields_desc = [ ByteEnumField("type",137, icmp6types),
+ ByteField("code",0),
+ XShortField("cksum", None),
+ XIntField("res",0),
+ IP6Field("tgt","::"),
+ IP6Field("dst","::") ]
+ overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
+
+
+
+################ ICMPv6 Inverse Neighbor Discovery (RFC 3122) ###############
+
+# IND option type 9 (RFC 3122): list of source addresses; 'len' is
+# auto-computed as 2 units per address plus 1 for the header.
+class ICMPv6NDOptSrcAddrList(_ICMPv6NDGuessPayload, Packet):
+ name = "ICMPv6 Inverse Neighbor Discovery Option - Source Address List"
+ fields_desc = [ ByteField("type",9),
+ FieldLenField("len", None, count_of="addrlist", fmt="B",
+ adjust = lambda pkt,x: 2*x+1),
+ StrFixedLenField("res", "\x00"*6, 6),
+ IP6ListField("addrlist", [],
+ length_from = lambda pkt: 8*(pkt.len-1)) ]
+
+# IND option type 10: same layout, different type value.
+class ICMPv6NDOptTgtAddrList(ICMPv6NDOptSrcAddrList):
+ name = "ICMPv6 Inverse Neighbor Discovery Option - Target Address List"
+ type = 10
+
+
+# RFC3122
+# Required options: source lladdr and target lladdr
+# Other valid options: source address list, MTU
+# - As stated in the document, it would be nice to take the L2 address
+# requested in the mandatory target lladdr option and use it as the
+# Ethernet destination address when none is explicitly given
+# - that does not seem very practical if the user has to specify every
+# option himself.
+# Ether() must use the target lladdr as destination
+# ICMPv6 type 141: Inverse ND Solicitation (RFC 3122).
+class ICMPv6ND_INDSol(_ICMPv6NDGuessPayload, _ICMPv6):
+ name = "ICMPv6 Inverse Neighbor Discovery Solicitation"
+ fields_desc = [ ByteEnumField("type",141, icmp6types),
+ ByteField("code",0),
+ XShortField("cksum",None),
+ XIntField("reserved",0) ]
+ overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
+
+# Required options: target lladdr, target address list
+# Other valid options: MTU
+class ICMPv6ND_INDAdv(_ICMPv6NDGuessPayload, _ICMPv6):
+ name = "ICMPv6 Inverse Neighbor Discovery Advertisement"
+ fields_desc = [ ByteEnumField("type",142, icmp6types),
+ ByteField("code",0),
+ XShortField("cksum",None),
+ XIntField("reserved",0) ]
+ overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
+
+
+###############################################################################
+# ICMPv6 Node Information Queries (RFC 4620)
+###############################################################################
+
+# [ ] Add automatic destination address computation using computeNIGroupAddr
+# in IPv6 class (Scapy6 modification when integrated) if :
+# - it is not provided
+# - upper layer is ICMPv6NIQueryName() with a valid value
+# [ ] Try to be liberal in what we accept as internal values for _explicit_
+# DNS elements provided by users. Any string should be considered
+# valid and kept like it has been provided. At the moment, i2repr() will
+# crash on many inputs
+# [ ] Do the documentation
+# [ ] Add regression tests
+# [ ] Perform test against real machines (NOOP reply is proof of implementation).
+# [ ] Check if there are differences between different stacks. Among *BSD,
+# with others.
+# [ ] Deal with flags in a consistent way.
+# [ ] Implement compression in names2dnsrepr() and decompression in
+# dnsrepr2names(). Should be deactivatable.
+
+icmp6_niqtypes = { 0: "NOOP",
+ 2: "Node Name",
+ 3: "IPv6 Address",
+ 4: "IPv4 Address" }
+
+
+# Mixins: NI queries and replies are matched on their 8-byte nonce.
+class _ICMPv6NIHashret:
+ def hashret(self):
+ return self.nonce
+
+class _ICMPv6NIAnswers:
+ def answers(self, other):
+ return self.nonce == other.nonce
+
+# Buggy; always returns the same value during a session
+class NonceField(StrFixedLenField):
+ def __init__(self, name, default=None):
+ StrFixedLenField.__init__(self, name, default, 8)
+ if default is None:
+ # randval() is drawn once here, at field construction, which
+ # is why the same nonce repeats for the whole session.
+ self.default = self.randval()
+
+# Compute the NI group Address. Can take a FQDN as input parameter
+def computeNIGroupAddr(name):
+ import md5
+ name = name.lower().split(".")[0]
+ record = chr(len(name))+name
+ h = md5.new(record)
+ h = h.digest()
+ addr = "ff02::2:%2x%2x:%2x%2x" % struct.unpack("BBBB", h[:4])
+ return addr
+
+
+# Here is the deal. This protocol is awkward to implement. We
+# provide 4 classes for the different kinds of Requests (one for every
+# valid qtype: NOOP, Node Name, IPv6@, IPv4@). They all share the same
+# data field class that is made to be smart by guessing the specific
+# type of value provided :
+#
+# - IPv6 if acceptable for inet_pton(AF_INET6, ): code is set to 0,
+# if not overridden by user
+# - IPv4 if acceptable for inet_pton(AF_INET, ): code is set to 2,
+# if not overridden
+# - Name in the other cases: code is set to 1, if not overridden by user
+#
+# Internal storage is not only the value but a pair providing
+# the type and the value (0 is IPv6@, 1 is Name or string, 2 is IPv4@)
+#
+# Note : I merged getfield() and m2i(). m2i() should not be called
+# directly anyway. Same remark for addfield() and i2m()
+#
+# -- arno
+
+# "The type of information present in the Data field of a query is
+# declared by the ICMP Code, whereas the type of information in a
+# Reply is determined by the Qtype"
+
+def names2dnsrepr(x):
+ """
+ Take as input a list of DNS names or a single DNS name
+ and encode it in DNS format (with possible compression)
+ If a string that is already a DNS name in DNS format
+ is passed, it is returned unmodified. Result is a string.
+ !!! At the moment, compression is not implemented !!!
+ """
+
+ # Normalize a single name (str or bytes) to a one-element list of
+ # bytes; anything ending in a NUL is assumed already encoded.
+ if type(x) is str:
+ if x and x[-1] == '\x00': # stupid heuristic
+ return x.encode('ascii')
+ x = [x.encode('ascii')]
+ elif type(x) is bytes:
+ if x and x[-1] == 0:
+ return x
+ x = [x]
+
+ res = []
+ for n in x:
+ if type(n) is str:
+ n = n.encode('ascii')
+ termin = b"\x00"
+ if n.count(b'.') == 0: # single-component gets one more
+ termin += bytes([0])
+ # Encode each label as <length byte><label>, then terminate.
+ # NOTE(review): chr(len).encode('ascii') fails for labels > 127
+ # bytes -- acceptable since DNS labels max out at 63.
+ n = b"".join(map(lambda y: chr(len(y)).encode('ascii')+y, n.split(b"."))) + termin
+ res.append(n)
+ return b"".join(res)
+
+
+def dnsrepr2names(x):
+ """
+ Take as input a DNS encoded string (possibly compressed)
+ and returns a list of DNS names contained in it.
+ If provided string is already in printable format
+ (does not end with a null character, a one element list
+ is returned). Result is a list.
+ """
+ res = []
+ cur = b""
+ if type(x) is str:
+ x = x.encode('ascii')
+ while x:
+ #l = ord(x[0])
+ l = x[0]
+ x = x[1:]
+ if l == 0:
+ # End of a name: strip the trailing dot and record it.
+ if cur and cur[-1] == ord('.'):
+ cur = cur[:-1]
+ res.append(cur)
+ cur = b""
+ #if x and ord(x[0]) == 0: # single component
+ if x and x[0] == 0: # single component
+ x = x[1:]
+ continue
+ if l & 0xc0: # XXX TODO : work on that -- arno
+ raise Exception("DNS message can't be compressed at this point!")
+ else:
+ # Ordinary label: append it plus a dot separator.
+ cur += x[:l]+b"."
+ x = x[l:]
+ return res
+
+
+class NIQueryDataField(StrField):
+ def __init__(self, name, default):
+ StrField.__init__(self, name, default)
+
+ def i2h(self, pkt, x):
+ if x is None:
+ return x
+ t,val = x
+ if t == 1:
+ val = dnsrepr2names(val)[0]
+ return val
+
+ def h2i(self, pkt, x):
+ if x is tuple and type(x[0]) is int:
+ return x
+
+ val = None
+ try: # Try IPv6
+ inet_pton(socket.AF_INET6, x)
+ val = (0, x)
+ except:
+ try: # Try IPv4
+ inet_pton(socket.AF_INET, x)
+ val = (2, x)
+ except: # Try DNS
+ if x is None:
+ x = b""
+ x = names2dnsrepr(x)
+ val = (1, x)
+ return val
+
+ def i2repr(self, pkt, x):
+ t,val = x
+ if t == 1: # DNS Name
+ # we don't use dnsrepr2names() to deal with
+ # possible weird data extracted info
+ res = []
+ weird = None
+ while val:
+ #l = ord(val[0])
+ l = val[0]
+ val = val[1:]
+ if l == 0:
+ if (len(res) > 1 and val): # fqdn with data behind
+ weird = val
+ elif len(val) > 1: # single label with data behind
+ weird = val[1:]
+ break
+ res.append(val[:l]+".")
+ val = val[l:]
+ tmp = "".join(res)
+ if tmp and tmp[-1] == '.':
+ tmp = tmp[:-1]
+ return tmp
+ return repr(val)
+
+ def getfield(self, pkt, s):
+ qtype = getattr(pkt, "qtype")
+ if qtype == 0: # NOOP
+ return s, (0, b"")
+ else:
+ code = getattr(pkt, "code")
+ if code == 0: # IPv6 Addr
+ return s[16:], (0, inet_ntop(socket.AF_INET6, s[:16]))
+ elif code == 2: # IPv4 Addr
+ return s[4:], (2, inet_ntop(socket.AF_INET, s[:4]))
+ else: # Name or Unknown
+ return b"", (1, s)
+
+ def addfield(self, pkt, s, val):
+ if ((type(val) is tuple and val[1] is None) or
+ val is None):
+ val = (1, b"")
+ t = val[0]
+ if t == 1:
+ if type(val[1]) is str:
+ tmp = val[1].encode('ascii')
+ else:
+ tmp = val[1]
+ return s + tmp
+ elif t == 0:
+ return s + inet_pton(socket.AF_INET6, val[1])
+ else:
+ return s + inet_pton(socket.AF_INET, val[1])
+
+# ICMP code field that, when left as None, is derived from the kind of
+# value stored in the packet's 'data' field (0=IPv6, 1=Name, 2=IPv4).
+class NIQueryCodeField(ByteEnumField):
+ def i2m(self, pkt, x):
+ if x is None:
+ d = pkt.getfieldval("data")
+ if d is None:
+ return 1
+ elif d[0] == 0: # IPv6 address
+ return 0
+ elif d[0] == 1: # Name
+ return 1
+ elif d[0] == 2: # IPv4 address
+ return 2
+ else:
+ return 1
+ return x
+
+
+_niquery_code = {0: "IPv6 Query", 1: "Name Query", 2: "IPv4 Query"}
+
+#_niquery_flags = { 2: "All unicast addresses", 4: "IPv4 addresses",
+# 8: "Link-local addresses", 16: "Site-local addresses",
+# 32: "Global addresses" }
+
+# "This NI type has no defined flags and never has a Data Field". Used
+# to know if the destination is up and implements NI protocol.
+class ICMPv6NIQueryNOOP(_ICMPv6NIHashret, _ICMPv6):
+ name = "ICMPv6 Node Information Query - NOOP Query"
+ fields_desc = [ ByteEnumField("type", 139, icmp6types),
+ NIQueryCodeField("code", None, _niquery_code),
+ XShortField("cksum", None),
+ ShortEnumField("qtype", 0, icmp6_niqtypes),
+ BitField("unused", 0, 10),
+ FlagsField("flags", 0, 6, "TACLSG"),
+ NonceField("nonce", None),
+ NIQueryDataField("data", None) ]
+
+# qtype 2: ask for the node's name.
+class ICMPv6NIQueryName(ICMPv6NIQueryNOOP):
+ name = "ICMPv6 Node Information Query - IPv6 Name Query"
+ qtype = 2
+
+# We ask for the IPv6 address of the peer
+class ICMPv6NIQueryIPv6(ICMPv6NIQueryNOOP):
+ name = "ICMPv6 Node Information Query - IPv6 Address Query"
+ qtype = 3
+ flags = 0x3E
+
+# qtype 4: ask for the node's IPv4 address(es).
+class ICMPv6NIQueryIPv4(ICMPv6NIQueryNOOP):
+ name = "ICMPv6 Node Information Query - IPv4 Address Query"
+ qtype = 4
+
+_nireply_code = { 0: "Successful Reply",
+ 1: "Response Refusal",
+ 3: "Unknown query type" }
+
+_nireply_flags = { 1: "Reply set incomplete",
+ 2: "All unicast addresses",
+ 4: "IPv4 addresses",
+ 8: "Link-local addresses",
+ 16: "Site-local addresses",
+ 32: "Global addresses" }
+
+# Internal repr is one of those :
+# (0, "some string") : unknown qtype values are mapped to that one
+# (3, [ (ttl, ip6), ... ])
+# (4, [ (ttl, ip4), ... ])
+# (2, [ttl, dns_names]) : dns_names is one string that contains
+# all the DNS names. Internally it is kept ready to be sent
+# (undissected). i2repr() decodes it for the user. This is to
+# make build-after-dissection bijective.
+#
+# I also merged getfield() and m2i(), and addfield() and i2m().
+class NIReplyDataField(StrField):
+
+ def i2h(self, pkt, x):
+ # Human form: for Name replies, decode the DNS-encoded blob into
+ # [ttl, name1, name2, ...].
+ if x is None:
+ return x
+ t,val = x
+ if t == 2:
+ ttl, dnsnames = val
+ val = [ttl] + dnsrepr2names(dnsnames)
+ return val
+
+ def h2i(self, pkt, x):
+ qtype = 0 # We will decode it as string if not
+ # overridden through 'qtype' in pkt
+
+ # No user hint, let's use 'qtype' value for that purpose
+ if type(x) is not tuple:
+ if pkt is not None:
+ qtype = getattr(pkt, "qtype")
+ else:
+ qtype = x[0]
+ x = x[1]
+
+ # From that point on, x is the value (second element of the tuple)
+
+ if qtype == 2: # DNS name
+ if type(x) is str: # listify the string
+ x = x.encode('ascii')
+ x = [x]
+ elif type(x) is bytes:
+ x = [x]
+ if type(x) is list and x and type(x[0]) is not int: # ttl was omitted : use 0
+ x = [0] + x
+ ttl = x[0]
+ names = x[1:]
+ return (2, [ttl, names2dnsrepr(names)])
+
+ elif qtype in [3, 4]: # IPv4 or IPv6 addr
+ if type(x) is str or type(x) is bytes:
+ x = [x] # User directly provided an IP, instead of list
+
+ # List elements are not tuples, user probably
+ # omitted ttl value : we will use 0 instead
+ def addttl(x):
+ if type(x) is str or type(x) is bytes:
+ return (0, x)
+ return x
+
+ return (qtype, list(map(addttl, x)))
+
+ return (qtype, x)
+
+
+ def addfield(self, pkt, s, val):
+ # Build: serialize according to the internal (type, value) pair.
+ t,tmp = val
+ if tmp is None:
+ tmp = b""
+ if t == 2:
+ ttl,dnsstr = tmp
+ return s+ struct.pack("!I", ttl) + dnsstr
+ elif t == 3:
+ #return s + "".join(map(lambda (x,y): struct.pack("!I", x)+inet_pton(socket.AF_INET6, y), tmp))
+ return s + b"".join(map(lambda a: struct.pack("!I", a[0])+inet_pton(socket.AF_INET6, a[1]), tmp))
+ elif t == 4:
+ #return s + "".join(map(lambda (x,y): struct.pack("!I", x)+inet_pton(socket.AF_INET, y), tmp))
+ return s + b"".join(map(lambda a: struct.pack("!I", a[0])+inet_pton(socket.AF_INET, a[1]), tmp))
+ else:
+ return s + tmp
+
+ def getfield(self, pkt, s):
+ # Dissection: only successful replies (code 0) carry typed data.
+ code = getattr(pkt, "code")
+ if code != 0:
+ return s, (0, b"")
+
+ qtype = getattr(pkt, "qtype")
+ if qtype == 0: # NOOP
+ return s, (0, b"")
+
+ elif qtype == 2:
+ if len(s) < 4:
+ return s, (0, b"")
+ ttl = struct.unpack("!I", s[:4])[0]
+ return b"", (2, [ttl, s[4:]])
+
+ elif qtype == 3: # IPv6 addresses with TTLs
+ # XXX TODO : get the real length
+ res = []
+ while len(s) >= 20: # 4 + 16
+ ttl = struct.unpack("!I", s[:4])[0]
+ ip = inet_ntop(socket.AF_INET6, s[4:20])
+ res.append((ttl, ip))
+ s = s[20:]
+ return s, (3, res)
+
+ elif qtype == 4: # IPv4 addresses with TTLs
+ # XXX TODO : get the real length
+ res = []
+ while len(s) >= 8: # 4 + 4
+ ttl = struct.unpack("!I", s[:4])[0]
+ ip = inet_ntop(socket.AF_INET, s[4:8])
+ res.append((ttl, ip))
+ s = s[8:]
+ return s, (4, res)
+ else:
+ # XXX TODO : implement me and deal with real length
+ return b"", (0, s)
+
+ def i2repr(self, pkt, x):
+ if x is None:
+ return "[]"
+
+ if type(x) is tuple and len(x) == 2:
+ t, val = x
+ if t == 2: # DNS names
+ ttl,l = val
+ l = dnsrepr2names(l)
+ # NOTE(review): dnsrepr2names() returns bytes elements;
+ # ", ".join(l) will raise on Python 3 -- confirm and decode.
+ return "ttl:%d %s" % (ttl, ", ".join(l))
+ elif t == 3 or t == 4:
+ #return "[ %s ]" % (", ".join(map(lambda (x,y): "(%d, %s)" % (x, y), val)))
+ return "[ %s ]" % (", ".join(map(lambda a: "(%d, %s)" % a, val)))
+ return repr(val)
+ return repr(x) # XXX should not happen
+
+# By default, sent responses have code set to 0 (successful)
+# By default, sent responses have code set to 0 (successful)
+class ICMPv6NIReplyNOOP(_ICMPv6NIAnswers, _ICMPv6NIHashret, _ICMPv6):
+ name = "ICMPv6 Node Information Reply - NOOP Reply"
+ fields_desc = [ ByteEnumField("type", 140, icmp6types),
+ ByteEnumField("code", 0, _nireply_code),
+ XShortField("cksum", None),
+ ShortEnumField("qtype", 0, icmp6_niqtypes),
+ BitField("unused", 0, 10),
+ FlagsField("flags", 0, 6, "TACLSG"),
+ NonceField("nonce", None),
+ NIReplyDataField("data", None)]
+
+# The subclasses below only override qtype/code; layout is inherited.
+class ICMPv6NIReplyName(ICMPv6NIReplyNOOP):
+ name = "ICMPv6 Node Information Reply - Node Names"
+ qtype = 2
+
+class ICMPv6NIReplyIPv6(ICMPv6NIReplyNOOP):
+ name = "ICMPv6 Node Information Reply - IPv6 addresses"
+ qtype = 3
+
+class ICMPv6NIReplyIPv4(ICMPv6NIReplyNOOP):
+ name = "ICMPv6 Node Information Reply - IPv4 addresses"
+ qtype = 4
+
+class ICMPv6NIReplyRefuse(ICMPv6NIReplyNOOP):
+ name = "ICMPv6 Node Information Reply - Responder refuses to supply answer"
+ code = 1
+
+# RFC 4620 uses ICMP code 2 for "qtype unknown to the responder".
+class ICMPv6NIReplyUnknown(ICMPv6NIReplyNOOP):
+ name = "ICMPv6 Node Information Reply - Qtype unknown to the responder"
+ code = 2
+
+
+# Pick the right NI query/reply class from the raw ICMPv6 bytes:
+# byte 0 is the ICMP type, byte 1 the code, bytes 4-5 the qtype.
+def _niquery_guesser(p):
+ cls = conf.raw_layer
+ #type = ord(p[0])
+ type = p[0]
+ if type == 139: # Node Info Query specific stuff
+ if len(p) > 6:
+ qtype, = struct.unpack("!H", p[4:6])
+ cls = { 0: ICMPv6NIQueryNOOP,
+ 2: ICMPv6NIQueryName,
+ 3: ICMPv6NIQueryIPv6,
+ 4: ICMPv6NIQueryIPv4 }.get(qtype, conf.raw_layer)
+ elif type == 140: # Node Info Reply specific stuff
+ #code = ord(p[1])
+ code = p[1]
+ if code == 0:
+ # Successful reply: the qtype selects the data layout.
+ if len(p) > 6:
+ qtype, = struct.unpack("!H", p[4:6])
+ cls = { 2: ICMPv6NIReplyName,
+ 3: ICMPv6NIReplyIPv6,
+ 4: ICMPv6NIReplyIPv4 }.get(qtype, ICMPv6NIReplyNOOP)
+ elif code == 1:
+ cls = ICMPv6NIReplyRefuse
+ elif code == 2:
+ cls = ICMPv6NIReplyUnknown
+ return cls
+
+
+#############################################################################
+#############################################################################
+### Mobile IPv6 (RFC 3775) and Nemo (RFC 3963) ###
+#############################################################################
+#############################################################################
+
+# Mobile IPv6 ICMPv6 related classes
+
+# ICMPv6 type 144 (RFC 3775): Home Agent Address Discovery Request.
+# Requests and replies are matched on their 16-bit 'id'.
+class ICMPv6HAADRequest(_ICMPv6):
+ name = 'ICMPv6 Home Agent Address Discovery Request'
+ fields_desc = [ ByteEnumField("type", 144, icmp6types),
+ ByteField("code", 0),
+ XShortField("cksum", None),
+ XShortField("id", None),
+ BitEnumField("R", 1, 1, {1: 'MR'}),
+ XBitField("res", 0, 15) ]
+ def hashret(self):
+ return struct.pack("!H",self.id)+self.payload.hashret()
+
+# ICMPv6 type 145: the corresponding reply, carrying home agent addresses.
+class ICMPv6HAADReply(_ICMPv6):
+ name = 'ICMPv6 Home Agent Address Discovery Reply'
+ fields_desc = [ ByteEnumField("type", 145, icmp6types),
+ ByteField("code", 0),
+ XShortField("cksum", None),
+ XShortField("id", None),
+ BitEnumField("R", 1, 1, {1: 'MR'}),
+ XBitField("res", 0, 15),
+ IP6ListField('addresses', None) ]
+ def hashret(self):
+ return struct.pack("!H",self.id)+self.payload.hashret()
+
+ def answers(self, other):
+ if not isinstance(other, ICMPv6HAADRequest):
+ return 0
+ return self.id == other.id
+
+# ICMPv6 type 146 (RFC 3775): Mobile Prefix Solicitation.
+class ICMPv6MPSol(_ICMPv6):
+ name = 'ICMPv6 Mobile Prefix Solicitation'
+ fields_desc = [ ByteEnumField("type", 146, icmp6types),
+ ByteField("code", 0),
+ XShortField("cksum", None),
+ XShortField("id", None),
+ XShortField("res", 0) ]
+ # NOTE(review): the leading underscore disables Scapy's hashret
+ # matching for this class -- confirm this is intentional.
+ def _hashret(self):
+ return struct.pack("!H",self.id)
+
+# ICMPv6 type 147: Mobile Prefix Advertisement; answers any MPSol.
+class ICMPv6MPAdv(_ICMPv6NDGuessPayload, _ICMPv6):
+ name = 'ICMPv6 Mobile Prefix Advertisement'
+ fields_desc = [ ByteEnumField("type", 147, icmp6types),
+ ByteField("code", 0),
+ XShortField("cksum", None),
+ XShortField("id", None),
+ BitEnumField("flags", 2, 2, {2: 'M', 1:'O'}),
+ XBitField("res", 0, 14) ]
+ def hashret(self):
+ return struct.pack("!H",self.id)
+
+ def answers(self, other):
+ return isinstance(other, ICMPv6MPSol)
+
+# Mobile IPv6 Options classes
+
+
+_mobopttypes = { 2: "Binding Refresh Advice",
+ 3: "Alternate Care-of Address",
+ 4: "Nonce Indices",
+ 5: "Binding Authorization Data",
+ 6: "Mobile Network Prefix (RFC3963)",
+ 7: "Link-Layer Address (RFC4068)",
+ 8: "Mobile Node Identifier (RFC4283)",
+ 9: "Mobility Message Authentication (RFC4285)",
+ 10: "Replay Protection (RFC4285)",
+ 11: "CGA Parameters Request (RFC4866)",
+ 12: "CGA Parameters (RFC4866)",
+ 13: "Signature (RFC4866)",
+ 14: "Home Keygen Token (RFC4866)",
+ 15: "Care-of Test Init (RFC4866)",
+ 16: "Care-of Test (RFC4866)" }
+
+
+class _MIP6OptAlign:
+ """ Mobile IPv6 options have alignment requirements of the form x*n+y.
+ This class is inherited by all MIPv6 options to help in computing the
+ required Padding for that option, i.e. the need for a Pad1 or PadN
+ option before it. They only need to provide x and y as class
+ parameters. (x=0 and y=0 are used when no alignment is required)"""
+ def alignment_delta(self, curpos):
+ x = self.x ; y = self.y
+ if x == 0 and y ==0:
+ return 0
+ # Smallest non-negative delta such that curpos+delta == x*n+y.
+ delta = x*((curpos - y + x - 1)//x) + y - curpos
+ return delta
+
+
+# MIPv6 option type 2: binding refresh interval, in units of 4 seconds.
+class MIP6OptBRAdvice(_MIP6OptAlign, Packet):
+ name = 'Mobile IPv6 Option - Binding Refresh Advice'
+ fields_desc = [ ByteEnumField('otype', 2, _mobopttypes),
+ ByteField('olen', 2),
+ ShortField('rinter', 0) ]
+ x = 2 ; y = 0# alignment requirement: 2n
+
+class MIP6OptAltCoA(_MIP6OptAlign, Packet):
+ name = 'MIPv6 Option - Alternate Care-of Address'
+ fields_desc = [ ByteEnumField('otype', 3, _mobopttypes),
+ ByteField('olen', 16),
+ IP6Field("acoa", "::") ]
+ x = 8 ; y = 6 # alignment requirement: 8n+6
+
+class MIP6OptNonceIndices(_MIP6OptAlign, Packet):
+ name = 'MIPv6 Option - Nonce Indices'
+ fields_desc = [ ByteEnumField('otype', 4, _mobopttypes),
+ # NOTE(review): RFC 3775 sect. 6.2.6 specifies option length 4
+ # for Nonce Indices; olen=16 looks suspicious -- confirm.
+ ByteField('olen', 16),
+ ShortField('hni', 0),
+ ShortField('coni', 0) ]
+ x = 2 ; y = 0 # alignment requirement: 2n
+
+class MIP6OptBindingAuthData(_MIP6OptAlign, Packet):
+ name = 'MIPv6 Option - Binding Authorization Data'
+ fields_desc = [ ByteEnumField('otype', 5, _mobopttypes),
+ ByteField('olen', 16),
+ BitField('authenticator', 0, 96) ]
+ x = 8 ; y = 2 # alignment requirement: 8n+2
+
+class MIP6OptMobNetPrefix(_MIP6OptAlign, Packet): # NEMO - RFC 3963
+ name = 'NEMO Option - Mobile Network Prefix'
+ fields_desc = [ ByteEnumField("otype", 6, _mobopttypes),
+ ByteField("olen", 18),
+ ByteField("reserved", 0),
+ ByteField("plen", 64),
+ IP6Field("prefix", "::") ]
+ x = 8 ; y = 4 # alignment requirement: 8n+4
+
+class MIP6OptLLAddr(_MIP6OptAlign, Packet): # Sect 6.4.4 of RFC 4068
+ name = "MIPv6 Option - Link-Layer Address (MH-LLA)"
+ fields_desc = [ ByteEnumField("otype", 7, _mobopttypes),
+ ByteField("olen", 7),
+ ByteEnumField("ocode", 2, _rfc4068_lla_optcode),
+ ByteField("pad", 0),
+ MACField("lla", ETHER_ANY) ] # Only support ethernet
+ x = 0 ; y = 0 # alignment requirement: none
+
+# Variable-length identifier (e.g. an NAI); olen auto-computed.
+class MIP6OptMNID(_MIP6OptAlign, Packet): # RFC 4283
+ name = "MIPv6 Option - Mobile Node Identifier"
+ fields_desc = [ ByteEnumField("otype", 8, _mobopttypes),
+ FieldLenField("olen", None, length_of="id", fmt="B",
+ adjust = lambda pkt,x: x+1),
+ ByteEnumField("subtype", 1, {1: "NAI"}),
+ StrLenField("id", "",
+ length_from = lambda pkt: pkt.olen-1) ]
+ x = 0 ; y = 0 # alignment requirement: none
+
+# We only support decoding and basic build. Automatic HMAC computation is
+# too much work for our current needs. It is left to the user (I mean ...
+# you). --arno
+class MIP6OptMsgAuth(_MIP6OptAlign, Packet): # RFC 4285 (Sect. 5)
+ name = "MIPv6 Option - Mobility Message Authentication"
+ fields_desc = [ ByteEnumField("otype", 9, _mobopttypes),
+ FieldLenField("olen", None, length_of="authdata", fmt="B",
+ adjust = lambda pkt,x: x+5),
+ ByteEnumField("subtype", 1, {1: "MN-HA authentication mobility option",
+ 2: "MN-AAA authentication mobility option"}),
+ IntField("mspi", None),
+ StrLenField("authdata", "A"*12,
+ length_from = lambda pkt: pkt.olen-5) ]
+ x = 4 ; y = 1 # alignment requirement: 4n+1
+# Extracted from RFC 1305 (NTP) :
+# NTP timestamps are represented as a 64-bit unsigned fixed-point number,
+# in seconds relative to 0h on 1 January 1900. The integer part is in the
+# first 32 bits and the fraction part in the last 32 bits.
+class NTPTimestampField(LongField):
+ epoch = (1900, 1, 1, 0, 0, 0, 5, 1, 0)
+ def i2repr(self, pkt, x):
+ if x < ((50*31536000)<<32):
+ return "Some date a few decades ago (%d)" % x
+
+ # delta from epoch (= (1900, 1, 1, 0, 0, 0, 5, 1, 0)) to
+ # January 1st 1970 :
+ delta = -2209075761
+ i = int(x >> 32)
+ j = float(x & 0xffffffff) * 2.0**-32
+ res = i + j + delta
+ from time import strftime
+ t = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(res))
+
+ return "%s (%d)" % (t, x)
+
+# MIPv6 option type 10: anti-replay NTP timestamp (RFC 4285 sect. 6).
+class MIP6OptReplayProtection(_MIP6OptAlign, Packet): # RFC 4285 (Sect. 6)
+ name = "MIPv6 option - Replay Protection"
+ fields_desc = [ ByteEnumField("otype", 10, _mobopttypes),
+ ByteField("olen", 8),
+ NTPTimestampField("timestamp", 0) ]
+ x = 8 ; y = 2 # alignment requirement: 8n+2
+
+# MIPv6 option type 11: empty request for the peer's CGA parameters.
+class MIP6OptCGAParamsReq(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.6)
+ name = "MIPv6 option - CGA Parameters Request"
+ fields_desc = [ ByteEnumField("otype", 11, _mobopttypes),
+ ByteField("olen", 0) ]
+ x = 0 ; y = 0 # alignment requirement: none
+
+# XXX TODO: deal with CGA param fragmentation and build of defragmented
+# XXX version. Passing of a big CGAParam structure should be
+# XXX simplified. Make it hold packets, by the way --arno
+class MIP6OptCGAParams(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.1)
+ name = "MIPv6 option - CGA Parameters"
+ fields_desc = [ ByteEnumField("otype", 12, _mobopttypes),
+ FieldLenField("olen", None, length_of="cgaparams", fmt="B"),
+ StrLenField("cgaparams", "",
+ length_from = lambda pkt: pkt.olen) ]
+ x = 0 ; y = 0 # alignment requirement: none
+
+class MIP6OptSignature(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.2)
+ name = "MIPv6 option - Signature"
+ fields_desc = [ ByteEnumField("otype", 13, _mobopttypes),
+ FieldLenField("olen", None, length_of="sig", fmt="B"),
+ StrLenField("sig", "",
+ length_from = lambda pkt: pkt.olen) ]
+ x = 0 ; y = 0 # alignment requirement: none
+
+class MIP6OptHomeKeygenToken(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.3)
+ name = "MIPv6 option - Home Keygen Token"
+ fields_desc = [ ByteEnumField("otype", 14, _mobopttypes),
+ FieldLenField("olen", None, length_of="hkt", fmt="B"),
+ StrLenField("hkt", "",
+ length_from = lambda pkt: pkt.olen) ]
+ x = 0 ; y = 0 # alignment requirement: none
+
+class MIP6OptCareOfTestInit(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.4)
+ name = "MIPv6 option - Care-of Test Init"
+ fields_desc = [ ByteEnumField("otype", 15, _mobopttypes),
+ ByteField("olen", 0) ]
+ x = 0 ; y = 0 # alignment requirement: none
+
+class MIP6OptCareOfTest(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.5)
+ name = "MIPv6 option - Care-of Test"
+ fields_desc = [ ByteEnumField("otype", 16, _mobopttypes),
+ FieldLenField("olen", None, length_of="cokt", fmt="B"),
+ StrLenField("cokt", '\x00'*8,
+ length_from = lambda pkt: pkt.olen) ]
+ x = 0 ; y = 0 # alignment requirement: none
+
+# Fallback for option types not listed in moboptcls.
+class MIP6OptUnknown(_MIP6OptAlign, Packet):
+ name = 'Scapy6 - Unknown Mobility Option'
+ fields_desc = [ ByteEnumField("otype", 6, _mobopttypes),
+ FieldLenField("olen", None, length_of="odata", fmt="B"),
+ StrLenField("odata", "",
+ length_from = lambda pkt: pkt.olen) ]
+ x = 0 ; y = 0 # alignment requirement: none
+
+moboptcls = { 0: Pad1,
+ 1: PadN,
+ 2: MIP6OptBRAdvice,
+ 3: MIP6OptAltCoA,
+ 4: MIP6OptNonceIndices,
+ 5: MIP6OptBindingAuthData,
+ 6: MIP6OptMobNetPrefix,
+ 7: MIP6OptLLAddr,
+ 8: MIP6OptMNID,
+ 9: MIP6OptMsgAuth,
+ 10: MIP6OptReplayProtection,
+ 11: MIP6OptCGAParamsReq,
+ 12: MIP6OptCGAParams,
+ 13: MIP6OptSignature,
+ 14: MIP6OptHomeKeygenToken,
+ 15: MIP6OptCareOfTestInit,
+ 16: MIP6OptCareOfTest }
+
+
+# Main Mobile IPv6 Classes
+
+mhtypes = { 0: 'BRR',
+ 1: 'HoTI',
+ 2: 'CoTI',
+ 3: 'HoT',
+ 4: 'CoT',
+ 5: 'BU',
+ 6: 'BA',
+ 7: 'BE',
+ 8: 'Fast BU',
+ 9: 'Fast BA',
+ 10: 'Fast NA' }
+
+# From http://www.iana.org/assignments/mobility-parameters
+bastatus = { 0: 'Binding Update accepted',
+ 1: 'Accepted but prefix discovery necessary',
+ 128: 'Reason unspecified',
+ 129: 'Administratively prohibited',
+ 130: 'Insufficient resources',
+ 131: 'Home registration not supported',
+ 132: 'Not home subnet',
+ 133: 'Not home agent for this mobile node',
+ 134: 'Duplicate Address Detection failed',
+ 135: 'Sequence number out of window',
+ 136: 'Expired home nonce index',
+ 137: 'Expired care-of nonce index',
+ 138: 'Expired nonces',
+ 139: 'Registration type change disallowed',
+ 140: 'Mobile Router Operation not permitted',
+ 141: 'Invalid Prefix',
+ 142: 'Not Authorized for Prefix',
+ 143: 'Forwarding Setup failed (prefixes missing)',
+ 144: 'MIPV6-ID-MISMATCH',
+ 145: 'MIPV6-MESG-ID-REQD',
+ 146: 'MIPV6-AUTH-FAIL',
+ 147: 'Permanent home keygen token unavailable',
+ 148: 'CGA and signature verification failed',
+ 149: 'Permanent home keygen token exists',
+ 150: 'Non-null home nonce index expected' }
+
+
class _MobilityHeader(Packet):
    """Base class for Mobility Header messages (RFC 6275).

    Not meant to be instantiated directly: concrete messages (BRR, HoTI,
    BU, BA, ...) inherit from it to share length and checksum computation.
    """
    name = 'Dummy IPv6 Mobility Header'
    overload_fields = { IPv6: { "nh": 135 }}  # 135 = Mobility Header

    def post_build(self, p, pay):
        # Fill in 'len' and 'cksum' once the complete message is assembled.
        p += pay
        l = self.len
        if self.len is None:
            # 'len' is in 8-octet units, excluding the first 8 octets.
            l = (len(p)-8)//8
        p = bytes([p[0]]) + struct.pack("B", l) + p[2:]
        if self.cksum is None:
            # Upper-layer checksum over the IPv6 pseudo-header, proto 135.
            cksum = in6_chksum(135, self.underlayer, p)
        else:
            cksum = self.cksum
        p = p[:4]+struct.pack("!H", cksum)+p[6:]
        return p
+
+
class MIP6MH_Generic(_MobilityHeader): # Mainly for decoding of unknown msg
    """Catch-all Mobility Header used when the message type is unknown."""
    name = "IPv6 Mobility Header - Generic Message"
    fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
                    ByteField("len", None),
                    ByteEnumField("mhtype", None, mhtypes),
                    ByteField("res", None),
                    XShortField("cksum", None),
                    # total message is 8*(len+1) octets; 6 fixed octets follow
                    # the first two header bytes, hence 8*len-6 of raw msg data
                    StrLenField("msg", b"\x00"*2,
                                length_from = lambda pkt: 8*pkt.len-6) ]
+
+
+
+# TODO: make a generic _OptionsField
class _MobilityOptionsField(PacketListField):
    """Field holding the list of mobility options of a Mobility Header.

    Dissects raw bytes into the matching MIP6Opt* classes (via moboptcls)
    and, on build, inserts Pad1/PadN options so every option satisfies its
    alignment requirement. Padding is driven by the 'autopad' phantom field.
    """
    islist = 1
    holds_packet = 1

    def __init__(self, name, default, cls, curpos, count_from=None, length_from=None):
        # curpos: byte offset of this field inside the enclosing header,
        # required to compute alignment padding relative to the 8-octet grid.
        self.curpos = curpos
        PacketListField.__init__(self, name, default, cls, count_from=count_from, length_from=length_from)

    def getfield(self, pkt, s):
        # Consume exactly length_from(pkt) bytes and dissect them as options.
        l = self.length_from(pkt)
        return s[l:],self.m2i(pkt, s[:l])

    def i2len(self, pkt, i):
        # Length on the wire, including any padding added by i2m().
        return len(self.i2m(pkt, i))

    def m2i(self, pkt, x):
        # Wire bytes -> list of option packets.
        opt = []
        while x:
            #o = ord(x[0]) # Option type
            o = x[0] # Option type
            cls = self.cls
            if o in moboptcls:
                cls = moboptcls[o]
            try:
                op = cls(x)
            except:
                # Fall back to the generic option class on malformed input.
                op = self.cls(x)
            opt.append(op)
            if isinstance(op.payload, conf.raw_layer):
                # Remaining bytes were swallowed as Raw payload; keep parsing.
                x = op.payload.load
                del(op.payload)
            else:
                x = b""
        return opt

    def i2m(self, pkt, x):
        # List of option packets -> wire bytes, inserting alignment padding.
        autopad = None
        try:
            autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field
        except:
            autopad = 1

        if not autopad:
            # NOTE(review): map(str, ...) looks Python-2 flavored; on Python 3
            # this would join reprs, not raw bytes -- confirm before relying
            # on autopad=0.
            return b"".join(map(str, x))

        curpos = self.curpos
        s = b""
        for p in x:
            # Pad so the option starts at its required alignment (xn+y).
            d = p.alignment_delta(curpos)
            curpos += d
            if d == 1:
                s += bytes(Pad1())
            elif d != 0:
                s += bytes(PadN(optdata=b'\x00'*(d-2)))
            pstr = bytes(p)
            curpos += len(pstr)
            s += pstr

        # Let's make the class including our option field
        # a multiple of 8 octets long
        d = curpos % 8
        if d == 0:
            return s
        d = 8 - d
        if d == 1:
            s +=bytes(Pad1())
        elif d != 0:
            s += bytes(PadN(optdata=b'\x00'*(d-2)))

        return s

    def addfield(self, pkt, s, val):
        return s+self.i2m(pkt, val)
+
class MIP6MH_BRR(_MobilityHeader):
    """Binding Refresh Request (RFC 6275, mhtype 0)."""
    name = "IPv6 Mobility Header - Binding Refresh Request"
    fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
                    ByteField("len", None),
                    ByteEnumField("mhtype", 0, mhtypes),
                    ByteField("res", None),
                    XShortField("cksum", None),
                    ShortField("res2", None),
                    _PhantomAutoPadField("autopad", 1), # autopad activated by default
                    _MobilityOptionsField("options", [], MIP6OptUnknown, 8,
                                          length_from = lambda pkt: 8*pkt.len) ]
    overload_fields = { IPv6: { "nh": 135 } }
    def hashret(self):
        # Hack: BRR, BU and BA have the same hashret that returns the same
        # value "\x00\x08\x09" (concatenation of mhtypes). This is
        # because we need match BA with BU and BU with BRR. --arno
        return b"\x00\x08\x09"
+
class MIP6MH_HoTI(_MobilityHeader):
    """Home Test Init (RFC 6275, mhtype 1): starts the home half of the
    return-routability procedure."""
    name = "IPv6 Mobility Header - Home Test Init"
    fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
                    ByteField("len", None),
                    ByteEnumField("mhtype", 1, mhtypes),
                    ByteField("res", None),
                    XShortField("cksum", None),
                    StrFixedLenField("reserved", "\x00"*2, 2),
                    StrFixedLenField("cookie", "\x00"*8, 8),
                    _PhantomAutoPadField("autopad", 1), # autopad activated by default
                    _MobilityOptionsField("options", [], MIP6OptUnknown, 16,
                                          length_from = lambda pkt: 8*(pkt.len-1)) ]
    overload_fields = { IPv6: { "nh": 135 } }
    def hashret(self):
        # Request/answer matching is done on the 8-byte init cookie.
        return self.cookie
+
class MIP6MH_CoTI(MIP6MH_HoTI):
    """Care-of Test Init (mhtype 2): identical layout to HoTI."""
    name = "IPv6 Mobility Header - Care-of Test Init"
    mhtype = 2
    def hashret(self):
        return self.cookie
+
class MIP6MH_HoT(_MobilityHeader):
    """Home Test (RFC 6275, mhtype 3): returns the home keygen token
    during the return-routability procedure."""
    name = "IPv6 Mobility Header - Home Test"
    fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
                    ByteField("len", None),
                    ByteEnumField("mhtype", 3, mhtypes),
                    ByteField("res", None),
                    XShortField("cksum", None),
                    ShortField("index", None),
                    StrFixedLenField("cookie", "\x00"*8, 8),
                    StrFixedLenField("token", "\x00"*8, 8),
                    _PhantomAutoPadField("autopad", 1), # autopad activated by default
                    _MobilityOptionsField("options", [], MIP6OptUnknown, 24,
                                          length_from = lambda pkt: 8*(pkt.len-2)) ]
    overload_fields = { IPv6: { "nh": 135 } }

    def hashret(self):
        # Request/answer matching is done on the 8-byte init cookie.
        return self.cookie

    def answers(self, other):
        # BUGFIX: the 'other' parameter was missing from the signature, so
        # any call raised NameError. A HoT answers the HoTI that carries
        # the same cookie.
        if (isinstance(other, MIP6MH_HoTI) and
            self.cookie == other.cookie):
            return 1
        return 0
+
class MIP6MH_CoT(MIP6MH_HoT):
    """Care-of Test (mhtype 4): identical layout to HoT."""
    name = "IPv6 Mobility Header - Care-of Test"
    mhtype = 4

    def hashret(self):
        return self.cookie

    def answers(self, other):
        # BUGFIX: the 'other' parameter was missing from the signature
        # (NameError on every call). A CoT answers the CoTI carrying the
        # same cookie.
        if (isinstance(other, MIP6MH_CoTI) and
            self.cookie == other.cookie):
            return 1
        return 0
+
class LifetimeField(ShortField):
    """ShortField whose display is in seconds; the stored value counts
    4-second units (RFC 6275 binding lifetime granularity)."""
    def i2repr(self, pkt, x):
        return "%d sec" % (x * 4)
+
class MIP6MH_BU(_MobilityHeader):
    """Binding Update (RFC 6275, mhtype 5): registers a mobile node's
    care-of address with its home agent or a correspondent node."""
    name = "IPv6 Mobility Header - Binding Update"
    fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
                    ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
                    ByteEnumField("mhtype", 5, mhtypes),
                    ByteField("res", None),
                    XShortField("cksum", None),
                    XShortField("seq", None), # TODO: ShortNonceField
                    FlagsField("flags", "KHA", 7, "PRMKLHA"),
                    XBitField("reserved", 0, 9),
                    LifetimeField("mhtime", 3), # unit == 4 seconds
                    _PhantomAutoPadField("autopad", 1), # autopad activated by default
                    _MobilityOptionsField("options", [], MIP6OptUnknown, 12,
                                          length_from = lambda pkt: 8*pkt.len - 4) ]
    overload_fields = { IPv6: { "nh": 135 } }

    def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret()
        # BUGFIX: hashret() must return bytes on Python 3 (MIP6MH_BRR
        # already returns b"..."); a str here broke BRR/BU/BA matching.
        return b"\x00\x08\x09"

    def answers(self, other):
        # A Binding Update answers a Binding Refresh Request.
        if isinstance(other, MIP6MH_BRR):
            return 1
        return 0
+
class MIP6MH_BA(_MobilityHeader):
    """Binding Acknowledgement (RFC 6275, mhtype 6): answers a Binding
    Update when its 'A' (Ack requested) flag is set."""
    name = "IPv6 Mobility Header - Binding ACK"
    fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
                    ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
                    ByteEnumField("mhtype", 6, mhtypes),
                    ByteField("res", None),
                    XShortField("cksum", None),
                    ByteEnumField("status", 0, bastatus),
                    FlagsField("flags", "K", 3, "PRK"),
                    XBitField("res2", None, 5),
                    XShortField("seq", None), # TODO: ShortNonceField
                    XShortField("mhtime", 0), # unit == 4 seconds
                    _PhantomAutoPadField("autopad", 1), # autopad activated by default
                    _MobilityOptionsField("options", [], MIP6OptUnknown, 12,
                                          length_from = lambda pkt: 8*pkt.len-4) ]
    overload_fields = { IPv6: { "nh": 135 }}

    def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret()
        # BUGFIX: hashret() must return bytes on Python 3 (MIP6MH_BRR
        # already returns b"..."); a str here broke BRR/BU/BA matching.
        return b"\x00\x08\x09"

    def answers(self, other):
        if (isinstance(other, MIP6MH_BU) and
            other.mhtype == 5 and
            self.mhtype == 6 and
            other.flags & 0x1 and # Ack request flags is set
            self.seq == other.seq):
            return 1
        return 0
+
+_bestatus = { 1: 'Unknown binding for Home Address destination option',
+ 2: 'Unrecognized MH Type value' }
+
+# TODO: match Binding Error to its stimulus
class MIP6MH_BE(_MobilityHeader):
    """Binding Error (RFC 6275, mhtype 7), sent when mobility signaling fails."""
    name = "IPv6 Mobility Header - Binding Error"
    fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
                    ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
                    ByteEnumField("mhtype", 7, mhtypes),
                    ByteField("res", 0),
                    XShortField("cksum", None),
                    ByteEnumField("status", 0, _bestatus),
                    ByteField("reserved", 0),
                    IP6Field("ha", "::"),  # home address involved in the error
                    _MobilityOptionsField("options", [], MIP6OptUnknown, 24,
                                          length_from = lambda pkt: 8*(pkt.len-2)) ]
    overload_fields = { IPv6: { "nh": 135 }}
+
+_mip6_mhtype2cls = { 0: MIP6MH_BRR,
+ 1: MIP6MH_HoTI,
+ 2: MIP6MH_CoTI,
+ 3: MIP6MH_HoT,
+ 4: MIP6MH_CoT,
+ 5: MIP6MH_BU,
+ 6: MIP6MH_BA,
+ 7: MIP6MH_BE }
+
+
+#############################################################################
+#############################################################################
+### Traceroute6 ###
+#############################################################################
+#############################################################################
+
class AS_resolver6(AS_resolver_riswhois):
    """RIS-whois AS resolver that understands 6to4 and Teredo addresses."""
    def _resolve_one(self, ip):
        """
        overloaded version to provide a Whois resolution on the
        embedded IPv4 address if the address is 6to4 or Teredo.
        Otherwise, the native IPv6 address is passed.
        """

        if in6_isaddr6to4(ip): # for 6to4, use embedded @
            tmp = inet_pton(socket.AF_INET6, ip)
            addr = inet_ntop(socket.AF_INET, tmp[2:6])
        elif in6_isaddrTeredo(ip): # for Teredo, use mapped address
            addr = teredoAddrExtractInfo(ip)[2]
        else:
            addr = ip

        _, asn, desc = AS_resolver_riswhois._resolve_one(self, addr)

        # Report the original IPv6 address alongside the resolved ASN/desc.
        return ip,asn,desc
+
class TracerouteResult6(TracerouteResult):
    """Result set of an IPv6 traceroute (see traceroute6())."""

    def show(self):
        # One row per probe: destination/port, hop limit, responder info.
        return self.make_table(lambda s,r: (s.sprintf("%-42s,IPv6.dst%:{TCP:tcp%TCP.dport%}{UDP:udp%UDP.dport%}{ICMPv6EchoRequest:IER}"), # TODO: ICMPv6 !
                                            s.hlim,
                                            r.sprintf("%-42s,IPv6.src% {TCP:%TCP.flags%}"+
                                                      "{ICMPv6DestUnreach:%ir,type%}{ICMPv6PacketTooBig:%ir,type%}"+
                                                      "{ICMPv6TimeExceeded:%ir,type%}{ICMPv6ParamProblem:%ir,type%}"+
                                                      "{ICMPv6EchoReply:%ir,type%}")))

    def get_trace(self):
        """Return {dst: {hlim: (responder, reached)}} with hops beyond the
        first successful one pruned."""
        trace = {}

        for s,r in self.res:
            if IPv6 not in s:
                continue
            d = s[IPv6].dst
            if d not in trace:
                trace[d] = {}

            # reached == True when the reply is not an ICMPv6 error,
            # i.e. the probe made it to the destination.
            t = not (ICMPv6TimeExceeded in r or
                     ICMPv6DestUnreach in r or
                     ICMPv6PacketTooBig in r or
                     ICMPv6ParamProblem in r)

            trace[d][s[IPv6].hlim] = r[IPv6].src, t

        for k in trace.values():
            m = [ x for x in k.keys() if k[x][1] ]
            if not m:
                continue
            m = min(m)
            # BUGFIX: deleting entries while iterating over k.keys() raises
            # "dictionary changed size during iteration" on Python 3;
            # iterate over a snapshot of the keys instead.
            for l in list(k.keys()):
                if l > m:
                    del(k[l])

        return trace

    def graph(self, ASres=AS_resolver6(), **kargs):
        TracerouteResult.graph(self, ASres=ASres, **kargs)
+
def traceroute6(target, dport=80, minttl=1, maxttl=30, sport=RandShort(),
                l4 = None, timeout=2, verbose=None, **kargs):
    """
    Instant TCP traceroute using IPv6 :
    traceroute6(target, [maxttl=30], [dport=80], [sport=80]) -> None

    target : destination host or address
    l4     : optional layer-4 packet used instead of the default TCP probe
    Returns a (TracerouteResult6, unanswered_packets) tuple.
    """
    if verbose is None:
        verbose = conf.verb

    if l4 is None:
        # Default probe: one TCP segment per hop-limit value in the range.
        a,b = sr(IPv6(dst=target, hlim=(minttl,maxttl))/TCP(seq=RandInt(),sport=sport, dport=dport),
                 timeout=timeout, filter="icmp6 or tcp", verbose=verbose, **kargs)
    else:
        a,b = sr(IPv6(dst=target, hlim=(minttl,maxttl))/l4,
                 timeout=timeout, verbose=verbose, **kargs)

    a = TracerouteResult6(a.res)

    if verbose:
        a.display()

    return a,b
+
+#############################################################################
+#############################################################################
+### Sockets ###
+#############################################################################
+#############################################################################
+
class L3RawSocket6(L3RawSocket):
    """Layer-3 raw socket specialized for IPv6 (raw out, AF_PACKET in)."""
    def __init__(self, type = ETH_P_IPV6, filter=None, iface=None, promisc=None, nofilter=0):
        L3RawSocket.__init__(self, type, filter, iface, promisc)
        # NOTE: if fragmentation is needed, it will be done by the kernel (RFC 2292)
        self.outs = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.IPPROTO_RAW)
        self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type))
+
def IPv6inIP(dst='203.178.135.36', src=None):
    """Configure and return the 6in4 tunneling socket class _IPv6inIP.

    dst/src are the IPv4 tunnel endpoints. If _IPv6inIP is already the
    configured L3 socket, conf.L3socket is reset instead of re-wrapped.
    """
    _IPv6inIP.dst = dst
    _IPv6inIP.src = src
    if not conf.L3socket == _IPv6inIP:
        _IPv6inIP.cls = conf.L3socket
    else:
        del(conf.L3socket)
    return _IPv6inIP
+
class _IPv6inIP(SuperSocket):
    """SuperSocket tunneling IPv6 inside IPv4 (6in4, IP protocol 41).

    Class attributes dst/src hold the IPv4 tunnel endpoints; they are set
    through IPv6inIP() or set().
    """
    dst = '127.0.0.1'
    src = None
    cls = None  # underlying L3 socket class, captured by IPv6inIP()

    def __init__(self, family=socket.AF_INET6, type=socket.SOCK_STREAM, proto=0, **args):
        SuperSocket.__init__(self, family, type, proto)
        self.worker = self.cls(**args)

    def set(self, dst, src=None):
        # Update the IPv4 tunnel endpoints for all instances.
        _IPv6inIP.src = src
        _IPv6inIP.dst = dst

    def nonblock_recv(self):
        p = self.worker.nonblock_recv()
        return self._recv(p)

    def recv(self, x):
        p = self.worker.recv(x)
        return self._recv(p, x)

    def _recv(self, p, x=MTU):
        # Unwrap the IPv6 payload from packets coming from the tunnel peer;
        # anything else is passed through unchanged.
        if p is None:
            return p
        elif isinstance(p, IP):
            # TODO: verify checksum
            if p.src == self.dst and p.proto == socket.IPPROTO_IPV6:
                if isinstance(p.payload, IPv6):
                    return p.payload
        return p

    def send(self, x):
        # Encapsulate outgoing IPv6 packets in an IPv4 header (proto 41).
        return self.worker.send(IP(dst=self.dst, src=self.src, proto=socket.IPPROTO_IPV6)/x)
+
+
+#############################################################################
+#############################################################################
+### Layers binding ###
+#############################################################################
+#############################################################################
+
+conf.l3types.register(ETH_P_IPV6, IPv6)
+conf.l2types.register(31, IPv6)
+
+bind_layers(Ether, IPv6, type = 0x86dd )
+bind_layers(CookedLinux, IPv6, proto = 0x86dd )
+bind_layers(IPerror6, TCPerror, nh = socket.IPPROTO_TCP )
+bind_layers(IPerror6, UDPerror, nh = socket.IPPROTO_UDP )
+bind_layers(IPv6, TCP, nh = socket.IPPROTO_TCP )
+bind_layers(IPv6, UDP, nh = socket.IPPROTO_UDP )
+bind_layers(IP, IPv6, proto = socket.IPPROTO_IPV6 )
+bind_layers(IPv6, IPv6, nh = socket.IPPROTO_IPV6 )
+
+bind_layers(IPv6, IP, nh = IPPROTO_IPIP )
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/ipsec.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/ipsec.py
new file mode 100644
index 00000000..a14925fb
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/ipsec.py
@@ -0,0 +1,995 @@
+#############################################################################
+## ipsec.py --- IPSec support for Scapy ##
+## ##
+## Copyright (C) 2014 6WIND ##
+## ##
+## This program is free software; you can redistribute it and/or modify it ##
+## under the terms of the GNU General Public License version 2 as ##
+## published by the Free Software Foundation. ##
+## ##
+## This program is distributed in the hope that it will be useful, but ##
+## WITHOUT ANY WARRANTY; without even the implied warranty of ##
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ##
+## General Public License for more details. ##
+#############################################################################
+"""
+IPSec layer
+===========
+
+Example of use:
+
+>>> sa = SecurityAssociation(ESP, spi=0xdeadbeef, crypt_algo='AES-CBC',
+... crypt_key='sixteenbytes key')
+>>> p = IP(src='1.1.1.1', dst='2.2.2.2')
+>>> p /= TCP(sport=45012, dport=80)
+>>> p /= Raw(b'testdata')
+>>> p = IP(bytes(p))
+>>> p
+<IP version=4L ihl=5L tos=0x0 len=48 id=1 flags= frag=0L ttl=64 proto=tcp chksum=0x74c2 src=1.1.1.1 dst=2.2.2.2 options=[] |<TCP sport=45012 dport=http seq=0 ack=0 dataofs=5L reserved=0L flags=S window=8192 chksum=0x1914 urgptr=0 options=[] |<Raw load='testdata' |>>>
+>>>
+>>> e = sa.encrypt(p)
+>>> e
+<IP version=4L ihl=5L tos=0x0 len=76 id=1 flags= frag=0L ttl=64 proto=esp chksum=0x747a src=1.1.1.1 dst=2.2.2.2 |<ESP spi=0xdeadbeef seq=1 data='\xf8\xdb\x1e\x83[T\xab\\\xd2\x1b\xed\xd1\xe5\xc8Y\xc2\xa5d\x92\xc1\x05\x17\xa6\x92\x831\xe6\xc1]\x9a\xd6K}W\x8bFfd\xa5B*+\xde\xc8\x89\xbf{\xa9' |>>
+>>>
+>>> d = sa.decrypt(e)
+>>> d
+<IP version=4L ihl=5L tos=0x0 len=48 id=1 flags= frag=0L ttl=64 proto=tcp chksum=0x74c2 src=1.1.1.1 dst=2.2.2.2 |<TCP sport=45012 dport=http seq=0 ack=0 dataofs=5L reserved=0L flags=S window=8192 chksum=0x1914 urgptr=0 options=[] |<Raw load='testdata' |>>>
+>>>
+>>> d == p
+True
+"""
+
+import socket
+
+if not hasattr(socket, 'IPPROTO_AH'):
+ socket.IPPROTO_AH = 51
+if not hasattr(socket, 'IPPROTO_ESP'):
+ socket.IPPROTO_ESP = 50
+
+
+import fractions
+
+from scapy.data import IP_PROTOS
+
+from scapy.fields import ByteEnumField, ByteField, StrField, XIntField, IntField, \
+ ShortField, PacketField
+
+from scapy.packet import Packet, bind_layers, Raw
+
+from scapy.layers.inet import IP, UDP
+from scapy.layers.inet6 import IPv6, IPv6ExtHdrHopByHop, IPv6ExtHdrDestOpt, \
+ IPv6ExtHdrRouting
+
+
+#------------------------------------------------------------------------------
class AH(Packet):
    """
    Authentication Header

    See https://tools.ietf.org/rfc/rfc4302.txt
    """

    name = 'AH'

    fields_desc = [
        ByteEnumField('nh', None, IP_PROTOS),
        ByteField('payloadlen', None),  # AH length in 32-bit words, minus 2
        ShortField('reserved', None),
        XIntField('spi', 0x0),          # Security Parameters Index
        IntField('seq', 0),             # anti-replay sequence number
        StrField('icv', None),          # Integrity Check Value
        StrField('padding', None),
    ]

    # Inserting AH after any of these layers rewrites their next-header to 51.
    overload_fields = {
        IP: {'proto': socket.IPPROTO_AH},
        IPv6: {'nh': socket.IPPROTO_AH},
        IPv6ExtHdrHopByHop: {'nh': socket.IPPROTO_AH},
        IPv6ExtHdrDestOpt: {'nh': socket.IPPROTO_AH},
        IPv6ExtHdrRouting: {'nh': socket.IPPROTO_AH},
    }
+
+bind_layers(IP, AH, proto=socket.IPPROTO_AH)
+bind_layers(IPv6, AH, nh=socket.IPPROTO_AH)
+
+#------------------------------------------------------------------------------
class ESP(Packet):
    """
    Encapsulated Security Payload

    See https://tools.ietf.org/rfc/rfc4303.txt
    """
    name = 'ESP'

    fields_desc = [
        XIntField('spi', 0x0),   # Security Parameters Index
        IntField('seq', 0),      # anti-replay sequence number
        StrField('data', None),  # IV || ciphertext || ICV (opaque on the wire)
    ]

    # Inserting ESP after any of these layers rewrites their next-header to 50.
    overload_fields = {
        IP: {'proto': socket.IPPROTO_ESP},
        IPv6: {'nh': socket.IPPROTO_ESP},
        IPv6ExtHdrHopByHop: {'nh': socket.IPPROTO_ESP},
        IPv6ExtHdrDestOpt: {'nh': socket.IPPROTO_ESP},
        IPv6ExtHdrRouting: {'nh': socket.IPPROTO_ESP},
    }
+
+bind_layers(IP, ESP, proto=socket.IPPROTO_ESP)
+bind_layers(IPv6, ESP, nh=socket.IPPROTO_ESP)
+bind_layers(UDP, ESP, dport=4500) # NAT-Traversal encapsulation
+bind_layers(UDP, ESP, sport=4500) # NAT-Traversal encapsulation
+
+#------------------------------------------------------------------------------
class _ESPPlain(Packet):
    """
    Internal class to represent unencrypted ESP packets.
    """
    name = 'ESP'

    fields_desc = [
        XIntField('spi', 0x0),
        IntField('seq', 0),

        StrField('iv', ''),
        PacketField('data', '', Raw),
        StrField('padding', ''),

        ByteField('padlen', 0),
        ByteEnumField('nh', 0, IP_PROTOS),
        StrField('icv', ''),
    ]

    def data_for_encryption(self):
        """Return the region covered by encryption:
        payload || padding || padlen || next-header (RFC 4303 trailer)."""
        # BUGFIX: chr(x).encode('ascii') raises UnicodeEncodeError for any
        # value > 127 (e.g. nh=135, the Mobility Header); bytes([...])
        # correctly handles the full 0-255 range of both one-byte fields.
        return bytes(self.data) + self.padding + bytes([self.padlen, self.nh])
+
+#------------------------------------------------------------------------------
+try:
+ from Crypto.Cipher import AES
+ from Crypto.Cipher import DES
+ from Crypto.Cipher import DES3
+ from Crypto.Cipher import CAST
+ from Crypto.Cipher import Blowfish
+ from Crypto.Util import Counter
+ from Crypto import Random
+except ImportError:
+ # no error if pycrypto is not available but encryption won't be supported
+ AES = None
+ DES = None
+ DES3 = None
+ CAST = None
+ Blowfish = None
+ Random = None
+
+#------------------------------------------------------------------------------
+def _lcm(a, b):
+ """
+ Least Common Multiple between 2 integers.
+ """
+ if a == 0 or b == 0:
+ return 0
+ else:
+ return abs(a * b) // fractions.gcd(a, b)
+
class CryptAlgo(object):
    """
    IPSec encryption algorithm
    """

    def __init__(self, name, cipher, mode, block_size=None, iv_size=None, key_size=None):
        """
        @param name: the name of this encryption algorithm
        @param cipher: a Cipher module
        @param mode: the mode used with the cipher module
        @param block_size: the length a block for this algo. Defaults to the
                           `block_size` of the cipher.
        @param iv_size: the length of the initialization vector of this algo.
                        Defaults to the `block_size` of the cipher.
        @param key_size: an integer or list/tuple of integers. If specified,
                         force the secret keys length to one of the values.
                         Defaults to the `key_size` of the cipher.
        """
        self.name = name
        self.cipher = cipher
        self.mode = mode

        if block_size is not None:
            self.block_size = block_size
        elif cipher is not None:
            self.block_size = cipher.block_size
        else:
            self.block_size = 1

        if iv_size is None:
            self.iv_size = self.block_size
        else:
            self.iv_size = iv_size

        if key_size is not None:
            self.key_size = key_size
        elif cipher is not None:
            self.key_size = cipher.key_size
        else:
            self.key_size = None

    def check_key(self, key):
        """
        Check that the key length is valid.

        @param key: a byte string
        @raise TypeError: if the key length is not allowed by key_size
        """
        if not self.key_size:
            return
        # BUGFIX: 'len(key) in self.key_size' raised a bare TypeError from
        # the 'in' operator when key_size is a single integer; normalize
        # to a tuple so the intended error message is always produced.
        valid = (self.key_size,) if isinstance(self.key_size, int) else self.key_size
        if len(key) not in valid:
            raise TypeError('invalid key size %s, must be %s' %
                            (len(key), self.key_size))

    def generate_iv(self):
        """
        Generate a random initialization vector. If pycrypto is not available,
        return a buffer of the correct length filled with only zero bytes.
        """
        if Random:
            return Random.get_random_bytes(self.iv_size)
        else:
            # BUGFIX: chr(0) is a str on Python 3; an IV must be bytes.
            return b'\x00' * self.iv_size

    def new_cipher(self, key, iv):
        """
        @param key: the secret key, a byte string
        @param iv: the initialization vector, a byte string
        @return: an initialized cipher object for this algo
        """
        if type(key) is str:
            key = key.encode('ascii')
        if (hasattr(self.cipher, 'MODE_CTR') and self.mode == self.cipher.MODE_CTR
            or hasattr(self.cipher, 'MODE_GCM') and self.mode == self.cipher.MODE_GCM):
            # in counter mode, the "iv" must be incremented for each block
            # it is calculated like this:
            # +---------+------------------+---------+
            # |  nonce  |        IV        | counter |
            # +---------+------------------+---------+
            #   m bytes       n bytes        4 bytes
            # <-------------------------------------->
            #               block_size
            nonce_size = self.cipher.block_size - self.iv_size - 4

            # instead of asking for an extra parameter, we extract the last
            # nonce_size bytes of the key and use them as the nonce.
            # +----------------------------+---------+
            # |        cipher key          |  nonce  |
            # +----------------------------+---------+
            #                              <--------->
            #                               nonce_size
            cipher_key, nonce = key[:-nonce_size], key[-nonce_size:]

            return self.cipher.new(cipher_key, self.mode,
                                   counter=Counter.new(4 * 8, prefix=nonce + iv))
        else:
            return self.cipher.new(key, self.mode, iv)

    def pad(self, esp):
        """
        Add the correct amount of padding so that the data to encrypt is
        exactly a multiple of the algorithm's block size.

        Also, make sure that the total ESP packet length is a multiple of 4 or
        8 bytes with IP or IPv6 respectively.

        @param esp: an unencrypted _ESPPlain packet
        @return: the same packet with padding/padlen filled in
        """
        from math import gcd

        # 2 extra bytes for padlen and nh
        data_len = len(esp.data) + 2

        # according to the RFC4303, section 2.4. Padding (for Encryption)
        # the size of the ESP payload must be a multiple of 32 bits:
        # align on lcm(block_size, 4)
        align = (self.block_size * 4) // gcd(self.block_size, 4)

        # pad for block size
        esp.padlen = -data_len % align

        # padding must be an array of bytes starting from 1 to padlen
        # BUGFIX: the previous str accumulator ('' += bytes([b])) raised
        # TypeError on Python 3; build the bytes object directly.
        esp.padding = bytes(range(1, esp.padlen + 1))

        # If the following test fails, it means that this algo does not comply
        # with the RFC
        payload_len = len(esp.iv) + len(esp.data) + len(esp.padding) + 2
        if payload_len % 4 != 0:
            raise ValueError('The size of the ESP data is not aligned to 32 bits after padding.')

        return esp

    def encrypt(self, esp, key):
        """
        Encrypt an ESP packet

        @param esp: an unencrypted _ESPPlain packet with valid padding
        @param key: the secret key used for encryption

        @return: a valid ESP packet encrypted with this algorithm
        """
        data = esp.data_for_encryption()

        if self.cipher:
            self.check_key(key)
            cipher = self.new_cipher(key, esp.iv)
            data = cipher.encrypt(data)

        return ESP(spi=esp.spi, seq=esp.seq, data=esp.iv + data)

    def decrypt(self, esp, key, icv_size=0):
        """
        Decrypt an ESP packet

        @param esp: an encrypted ESP packet
        @param key: the secret key used for encryption
        @param icv_size: the length of the icv used for integrity check

        @return: a valid _ESPPlain packet decrypted with this algorithm
        """
        self.check_key(key)

        iv = esp.data[:self.iv_size]
        data = esp.data[self.iv_size:len(esp.data) - icv_size]
        icv = esp.data[len(esp.data) - icv_size:]

        if self.cipher:
            cipher = self.new_cipher(key, iv)
            data = cipher.decrypt(data)

        # extract padlen and nh from the two trailer bytes
        padlen = (data[-2])
        nh = (data[-1])

        # BUGFIX: the padding must be sliced out of the *full* plaintext
        # BEFORE 'data' is truncated; the old code truncated first and then
        # sliced padding from the already-shortened buffer, yielding garbage.
        padding = data[len(data) - padlen - 2: len(data) - 2]
        data = data[:len(data) - padlen - 2]

        return _ESPPlain(spi=esp.spi,
                         seq=esp.seq,
                         iv=iv,
                         data=data,
                         padding=padding,
                         padlen=padlen,
                         nh=nh,
                         icv=icv)
+
+#------------------------------------------------------------------------------
+# The names of the encryption algorithms are the same than in scapy.contrib.ikev2
+# see http://www.iana.org/assignments/ikev2-parameters/ikev2-parameters.xhtml
+
+CRYPT_ALGOS = {
+ 'NULL': CryptAlgo('NULL', cipher=None, mode=None, iv_size=0),
+}
+
+if AES:
+ CRYPT_ALGOS['AES-CBC'] = CryptAlgo('AES-CBC',
+ cipher=AES,
+ mode=AES.MODE_CBC)
+ # specific case for counter mode:
+ # the last 4 bytes of the key are used to carry the nonce of the counter
+ CRYPT_ALGOS['AES-CTR'] = CryptAlgo('AES-CTR',
+ cipher=AES,
+ mode=AES.MODE_CTR,
+ block_size=1,
+ iv_size=8,
+ key_size=(16 + 4, 24 + 4, 32 + 4))
+if DES:
+ CRYPT_ALGOS['DES'] = CryptAlgo('DES',
+ cipher=DES,
+ mode=DES.MODE_CBC)
+if Blowfish:
+ CRYPT_ALGOS['Blowfish'] = CryptAlgo('Blowfish',
+ cipher=Blowfish,
+ mode=Blowfish.MODE_CBC)
+if DES3:
+ CRYPT_ALGOS['3DES'] = CryptAlgo('3DES',
+ cipher=DES3,
+ mode=DES3.MODE_CBC)
+if CAST:
+ CRYPT_ALGOS['CAST'] = CryptAlgo('CAST',
+ cipher=CAST,
+ mode=CAST.MODE_CBC)
+
+#------------------------------------------------------------------------------
try:
    from Crypto.Hash import HMAC
    from Crypto.Hash import SHA
    from Crypto.Hash import MD5
    from Crypto.Hash import SHA256
    from Crypto.Hash import SHA384
    from Crypto.Hash import SHA512
except ImportError:
    # no error if pycrypto is not available but authentication won't be supported
    HMAC = None
    SHA = None
    MD5 = None
    SHA256 = None
    SHA384 = None
    # BUGFIX: SHA512 was not reset here, so 'if SHA512:' further down raised
    # NameError whenever pycrypto was missing.
    SHA512 = None
try:
    from Crypto.Hash import XCBCMAC
except ImportError:
    XCBCMAC = None
+
+#------------------------------------------------------------------------------
class IPSecIntegrityError(Exception):
    """
    Error risen when the integrity check fails.
    """
+
class AuthAlgo(object):
    """
    IPSec integrity algorithm
    """

    def __init__(self, name, mac, digestmod, icv_size, key_size=None):
        """
        @param name: the name of this integrity algorithm
        @param mac: a Message Authentication Code module
        @param digestmod: a Hash or Cipher module
        @param icv_size: the length of the integrity check value of this algo
        @param key_size: an integer or list/tuple of integers. If specified,
                         force the secret keys length to one of the values.
                         Defaults to the `key_size` of the cipher.
        """
        self.name = name
        self.mac = mac
        self.digestmod = digestmod
        self.icv_size = icv_size
        self.key_size = key_size

    def check_key(self, key):
        """
        Check that the key length is valid.

        @param key: a byte string
        @raise TypeError: if the key length is not one of key_size
        """
        if self.key_size and len(key) not in self.key_size:
            raise TypeError('invalid key size %s, must be one of %s' %
                            (len(key), self.key_size))

    def new_mac(self, key):
        """
        @param key: a byte string
        @return: an initialized mac object for this algo
        """
        if type(key) is str:
            key = key.encode('ascii')
        if self.mac is XCBCMAC:
            # specific case here, ciphermod instead of digestmod
            return self.mac.new(key, ciphermod=self.digestmod)
        else:
            # BUGFIX: removed a stray debug 'print(self.mac)' left over from
            # development; it polluted stdout on every signature/verification.
            return self.mac.new(key, digestmod=self.digestmod)

    def sign(self, pkt, key):
        """
        Sign an IPSec (ESP or AH) packet with this algo.

        @param pkt: a packet that contains a valid encrypted ESP or AH layer
        @param key: the authentication key, a byte string

        @return: the signed packet
        """
        if not self.mac:
            return pkt

        self.check_key(key)

        mac = self.new_mac(key)

        if pkt.haslayer(ESP):
            # ESP: the ICV is appended after the encrypted payload.
            mac.update(bytes(pkt[ESP]))
            pkt[ESP].data += mac.digest()[:self.icv_size]

        elif pkt.haslayer(AH):
            # AH: the ICV covers the whole packet with mutable fields zeroed.
            clone = zero_mutable_fields(pkt.copy(), sending=True)
            mac.update(bytes(clone))
            pkt[AH].icv = mac.digest()[:self.icv_size]

        return pkt

    def verify(self, pkt, key):
        """
        Check that the integrity check value (icv) of a packet is valid.

        @param pkt: a packet that contains a valid encrypted ESP or AH layer
        @param key: the authentication key, a byte string

        @raise IPSecIntegrityError: if the integrity check fails
        """
        if not self.mac or self.icv_size == 0:
            return

        self.check_key(key)

        mac = self.new_mac(key)

        pkt_icv = 'not found'
        computed_icv = 'not computed'

        if isinstance(pkt, ESP):
            pkt_icv = pkt.data[len(pkt.data) - self.icv_size:]

            # recompute the ICV over the packet without its trailing ICV
            pkt = pkt.copy()
            pkt.data = pkt.data[:len(pkt.data) - self.icv_size]
            mac.update(bytes(pkt))
            computed_icv = mac.digest()[:self.icv_size]

        elif pkt.haslayer(AH):
            pkt_icv = pkt[AH].icv[:self.icv_size]

            clone = zero_mutable_fields(pkt.copy(), sending=False)
            mac.update(bytes(clone))
            computed_icv = mac.digest()[:self.icv_size]

        if pkt_icv != computed_icv:
            raise IPSecIntegrityError('pkt_icv=%r, computed_icv=%r' %
                                      (pkt_icv, computed_icv))
+
+#------------------------------------------------------------------------------
+# The names of the integrity algorithms are the same than in scapy.contrib.ikev2
+# see http://www.iana.org/assignments/ikev2-parameters/ikev2-parameters.xhtml
+
+AUTH_ALGOS = {
+ 'NULL': AuthAlgo('NULL', mac=None, digestmod=None, icv_size=0),
+}
+
+if HMAC:
+ if SHA:
+ AUTH_ALGOS['HMAC-SHA1-96'] = AuthAlgo('HMAC-SHA1-96',
+ mac=HMAC,
+ digestmod=SHA,
+ icv_size=12)
+ if SHA256:
+ AUTH_ALGOS['SHA2-256-128'] = AuthAlgo('SHA2-256-128',
+ mac=HMAC,
+ digestmod=SHA256,
+ icv_size=16)
+ if SHA384:
+ AUTH_ALGOS['SHA2-384-192'] = AuthAlgo('SHA2-384-192',
+ mac=HMAC,
+ digestmod=SHA384,
+ icv_size=24)
+ if SHA512:
+ AUTH_ALGOS['SHA2-512-256'] = AuthAlgo('SHA2-512-256',
+ mac=HMAC,
+ digestmod=SHA512,
+ icv_size=32)
+ if MD5:
+ AUTH_ALGOS['HMAC-MD5-96'] = AuthAlgo('HMAC-MD5-96',
+ mac=HMAC,
+ digestmod=MD5,
+ icv_size=12)
+if AES and XCBCMAC:
+ AUTH_ALGOS['AES-XCBC-96'] = AuthAlgo('AES-XCBC-96',
+ mac=XCBCMAC,
+ digestmod=AES,
+ icv_size=12,
+ key_size=(16,))
+
+#------------------------------------------------------------------------------
+
+
+#------------------------------------------------------------------------------
def split_for_transport(orig_pkt, transport_proto):
    """
    Split an IP(v6) packet in the correct location to insert an ESP or AH
    header.

    @param orig_pkt: the packet to split. Must be an IP or IPv6 packet
    @param transport_proto: the IPSec protocol number that will be inserted
                            at the split position.
    @return: a tuple (header, nh, payload) where nh is the protocol number of
             payload.
    """
    header = orig_pkt.copy()
    next_hdr = header.payload
    nh = None

    if header.version == 4:
        nh = header.proto
        header.proto = transport_proto
        header.remove_payload()
        # force recomputation of checksum and total length on rebuild
        del header.chksum
        del header.len

        return header, nh, next_hdr
    else:
        found_rt_hdr = False
        prev = header

        # Since the RFC 4302 is vague about where the ESP/AH headers should be
        # inserted in IPv6, I chose to follow the linux implementation.
        while isinstance(next_hdr, (IPv6ExtHdrHopByHop, IPv6ExtHdrRouting, IPv6ExtHdrDestOpt)):
            if isinstance(next_hdr, IPv6ExtHdrHopByHop):
                # hop-by-hop options always stay before the IPSec header
                pass
            if isinstance(next_hdr, IPv6ExtHdrRouting):
                found_rt_hdr = True
            elif isinstance(next_hdr, IPv6ExtHdrDestOpt) and found_rt_hdr:
                # a destination-options header that follows a routing header
                # marks the insertion point
                break

            prev = next_hdr
            next_hdr = next_hdr.payload

        nh = prev.nh
        prev.nh = transport_proto
        prev.remove_payload()
        # force recomputation of payload length on rebuild
        del header.plen

        return header, nh, next_hdr
+
+#------------------------------------------------------------------------------
+# see RFC 4302 - Appendix A. Mutability of IP Options/Extension Headers
+IMMUTABLE_IPV4_OPTIONS = (
+ 0, # End Of List
+ 1, # No OPeration
+ 2, # Security
+ 5, # Extended Security
+ 6, # Commercial Security
+ 20, # Router Alert
+ 21, # Sender Directed Multi-Destination Delivery
+)
+def zero_mutable_fields(pkt, sending=False):
+ """
+ When using AH, all "mutable" fields must be "zeroed" before calculating
+ the ICV. See RFC 4302, Section 3.3.3.1. Handling Mutable Fields.
+
+ @param pkt: an IP(v6) packet containing an AH layer.
+ NOTE: The packet will be modified
+ @param sending: if true, ipv6 routing headers will not be reordered
+ """
+
+ if pkt.haslayer(AH):
+ pkt[AH].icv = chr(0) * len(pkt[AH].icv)
+ else:
+ raise TypeError('no AH layer found')
+
+ if pkt.version == 4:
+ # the tos field has been replaced by DSCP and ECN
+ # Routers may rewrite the DS field as needed to provide a
+ # desired local or end-to-end service
+ pkt.tos = 0
+ # an intermediate router might set the DF bit, even if the source
+ # did not select it.
+ pkt.flags = 0
+ # changed en route as a normal course of processing by routers
+ pkt.ttl = 0
+ # will change if any of these other fields change
+ pkt.chksum = 0
+
+ immutable_opts = []
+ for opt in pkt.options:
+ if opt.option in IMMUTABLE_IPV4_OPTIONS:
+ immutable_opts.append(opt)
+ else:
+ immutable_opts.append(Raw(chr(0) * len(opt)))
+ pkt.options = immutable_opts
+
+ else:
+ # holds DSCP and ECN
+ pkt.tc = 0
+ # The flow label described in AHv1 was mutable, and in RFC 2460 [DH98]
+ # was potentially mutable. To retain compatibility with existing AH
+ # implementations, the flow label is not included in the ICV in AHv2.
+ pkt.fl = 0
+ # same as ttl
+ pkt.hlim = 0
+
+ next_hdr = pkt.payload
+
+ while isinstance(next_hdr, (IPv6ExtHdrHopByHop, IPv6ExtHdrRouting, IPv6ExtHdrDestOpt)):
+ if isinstance(next_hdr, (IPv6ExtHdrHopByHop, IPv6ExtHdrDestOpt)):
+ for opt in next_hdr.options:
+ if opt.otype & 0x20:
+ # option data can change en-route and must be zeroed
+ opt.optdata = chr(0) * opt.optlen
+ elif isinstance(next_hdr, IPv6ExtHdrRouting) and sending:
+ # The sender must order the field so that it appears as it
+ # will at the receiver, prior to performing the ICV computation.
+ next_hdr.segleft = 0
+ if next_hdr.addresses:
+ final = next_hdr.addresses.pop()
+ next_hdr.addresses.insert(0, pkt.dst)
+ pkt.dst = final
+ else:
+ break
+
+ next_hdr = next_hdr.payload
+
+ return pkt
+
+#------------------------------------------------------------------------------
+class SecurityAssociation(object):
+ """
+ This class is responsible of "encryption" and "decryption" of IPSec packets.
+ """
+
+ SUPPORTED_PROTOS = (IP, IPv6)
+
+ def __init__(self, proto, spi, seq_num=1, crypt_algo=None, crypt_key=None,
+ auth_algo=None, auth_key=None, tunnel_header=None, nat_t_header=None):
+ """
+ @param proto: the IPSec proto to use (ESP or AH)
+ @param spi: the Security Parameters Index of this SA
+ @param seq_num: the initial value for the sequence number on encrypted
+ packets
+ @param crypt_algo: the encryption algorithm name (only used with ESP)
+ @param crypt_key: the encryption key (only used with ESP)
+ @param auth_algo: the integrity algorithm name
+ @param auth_key: the integrity key
+ @param tunnel_header: an instance of a IP(v6) header that will be used
+ to encapsulate the encrypted packets.
+ @param nat_t_header: an instance of a UDP header that will be used
+ for NAT-Traversal.
+ """
+
+ if proto not in (ESP, AH, ESP.name, AH.name):
+ raise ValueError("proto must be either ESP or AH")
+ if isinstance(proto, str):
+ self.proto = eval(proto)
+ else:
+ self.proto = proto
+
+ self.spi = spi
+ self.seq_num = seq_num
+
+ if crypt_algo:
+ if crypt_algo not in CRYPT_ALGOS:
+ raise TypeError('unsupported encryption algo %r, try %r' %
+ (crypt_algo, CRYPT_ALGOS.keys()))
+ self.crypt_algo = CRYPT_ALGOS[crypt_algo]
+ self.crypt_algo.check_key(crypt_key)
+ self.crypt_key = crypt_key
+ else:
+ self.crypt_algo = CRYPT_ALGOS['NULL']
+ self.crypt_key = None
+
+ if auth_algo:
+ if auth_algo not in AUTH_ALGOS:
+ raise TypeError('unsupported integrity algo %r, try %r' %
+ (auth_algo, AUTH_ALGOS.keys()))
+ self.auth_algo = AUTH_ALGOS[auth_algo]
+ self.auth_algo.check_key(auth_key)
+ self.auth_key = auth_key
+ else:
+ self.auth_algo = AUTH_ALGOS['NULL']
+ self.auth_key = None
+
+ if tunnel_header and not isinstance(tunnel_header, (IP, IPv6)):
+ raise TypeError('tunnel_header must be %s or %s' % (IP.name, IPv6.name))
+ self.tunnel_header = tunnel_header
+
+ if nat_t_header:
+ if proto is not ESP:
+ raise TypeError('nat_t_header is only allowed with ESP')
+ if not isinstance(nat_t_header, UDP):
+ raise TypeError('nat_t_header must be %s' % UDP.name)
+ self.nat_t_header = nat_t_header
+
+ def check_spi(self, pkt):
+ if pkt.spi != self.spi:
+ raise TypeError('packet spi=0x%x does not match the SA spi=0x%x' %
+ (pkt.spi, self.spi))
+
+ def _encrypt_esp(self, pkt, seq_num=None, iv=None):
+
+ if iv is None:
+ iv = self.crypt_algo.generate_iv()
+ else:
+ if len(iv) != self.crypt_algo.iv_size:
+ raise TypeError('iv length must be %s' % self.crypt_algo.iv_size)
+
+ esp = _ESPPlain(spi=self.spi, seq=seq_num or self.seq_num, iv=iv)
+
+ if self.tunnel_header:
+ tunnel = self.tunnel_header.copy()
+
+ if tunnel.version == 4:
+ del tunnel.proto
+ del tunnel.len
+ del tunnel.chksum
+ else:
+ del tunnel.nh
+ del tunnel.plen
+
+ pkt = tunnel.__class__(bytes(tunnel / pkt))
+
+ ip_header, nh, payload = split_for_transport(pkt, socket.IPPROTO_ESP)
+ esp.data = payload
+ esp.nh = nh
+
+ esp = self.crypt_algo.pad(esp)
+ esp = self.crypt_algo.encrypt(esp, self.crypt_key)
+
+ self.auth_algo.sign(esp, self.auth_key)
+
+ if self.nat_t_header:
+ nat_t_header = self.nat_t_header.copy()
+ nat_t_header.chksum = 0
+ del nat_t_header.len
+ if ip_header.version == 4:
+ del ip_header.proto
+ else:
+ del ip_header.nh
+ ip_header /= nat_t_header
+
+ if ip_header.version == 4:
+ ip_header.len = len(ip_header) + len(esp)
+ del ip_header.chksum
+ ip_header = ip_header.__class__(bytes(ip_header))
+ else:
+ ip_header.plen = len(ip_header.payload) + len(esp)
+
+ # sequence number must always change, unless specified by the user
+ if seq_num is None:
+ self.seq_num += 1
+
+ return ip_header / esp
+
+ def _encrypt_ah(self, pkt, seq_num=None):
+
+ ah = AH(spi=self.spi, seq=seq_num or self.seq_num,
+ icv=chr(0) * self.auth_algo.icv_size)
+
+ if self.tunnel_header:
+ tunnel = self.tunnel_header.copy()
+
+ if tunnel.version == 4:
+ del tunnel.proto
+ del tunnel.len
+ del tunnel.chksum
+ else:
+ del tunnel.nh
+ del tunnel.plen
+
+ pkt = tunnel.__class__(bytes(tunnel / pkt))
+
+ ip_header, nh, payload = split_for_transport(pkt, socket.IPPROTO_AH)
+ ah.nh = nh
+
+ if ip_header.version == 6 and len(ah) % 8 != 0:
+ # For IPv6, the total length of the header must be a multiple of
+ # 8-octet units.
+ ah.padding = chr(0) * (-len(ah) % 8)
+ elif len(ah) % 4 != 0:
+ # For IPv4, the total length of the header must be a multiple of
+ # 4-octet units.
+ ah.padding = chr(0) * (-len(ah) % 4)
+
+ # RFC 4302 - Section 2.2. Payload Length
+ # This 8-bit field specifies the length of AH in 32-bit words (4-byte
+ # units), minus "2".
+ ah.payloadlen = len(ah) // 4 - 2
+
+ if ip_header.version == 4:
+ ip_header.len = len(ip_header) + len(ah) + len(payload)
+ del ip_header.chksum
+ ip_header = ip_header.__class__(bytes(ip_header))
+ else:
+ ip_header.plen = len(ip_header.payload) + len(ah) + len(payload)
+
+ signed_pkt = self.auth_algo.sign(ip_header / ah / payload, self.auth_key)
+
+ # sequence number must always change, unless specified by the user
+ if seq_num is None:
+ self.seq_num += 1
+
+ return signed_pkt
+
+ def encrypt(self, pkt, seq_num=None, iv=None):
+ """
+ Encrypt (and encapsulate) an IP(v6) packet with ESP or AH according
+ to this SecurityAssociation.
+
+ @param pkt: the packet to encrypt
+ @param seq_num: if specified, use this sequence number instead of the
+ generated one
+ @param iv: if specified, use this initialization vector for
+ encryption instead of a random one.
+
+ @return: the encrypted/encapsulated packet
+ """
+ if not isinstance(pkt, self.SUPPORTED_PROTOS):
+ raise TypeError('cannot encrypt %s, supported protos are %s'
+ % (pkt.__class__, self.SUPPORTED_PROTOS))
+ if self.proto is ESP:
+ return self._encrypt_esp(pkt, seq_num=seq_num, iv=iv)
+ else:
+ return self._encrypt_ah(pkt, seq_num=seq_num)
+
+ def _decrypt_esp(self, pkt, verify=True):
+
+ encrypted = pkt[ESP]
+
+ if verify:
+ self.check_spi(pkt)
+ self.auth_algo.verify(encrypted, self.auth_key)
+
+ esp = self.crypt_algo.decrypt(encrypted, self.crypt_key,
+ self.auth_algo.icv_size)
+
+ if self.tunnel_header:
+ # drop the tunnel header and return the payload untouched
+
+ pkt.remove_payload()
+ if pkt.version == 4:
+ pkt.proto = esp.nh
+ else:
+ pkt.nh = esp.nh
+ cls = pkt.guess_payload_class(esp.data)
+
+ return cls(esp.data)
+ else:
+ ip_header = pkt
+
+ if ip_header.version == 4:
+ ip_header.proto = esp.nh
+ del ip_header.chksum
+ ip_header.remove_payload()
+ ip_header.len = len(ip_header) + len(esp.data)
+ # recompute checksum
+ ip_header = ip_header.__class__(bytes(ip_header))
+ else:
+ encrypted.underlayer.nh = esp.nh
+ encrypted.underlayer.remove_payload()
+ ip_header.plen = len(ip_header.payload) + len(esp.data)
+
+ cls = ip_header.guess_payload_class(esp.data)
+
+ # reassemble the ip_header with the ESP payload
+ return ip_header / cls(esp.data)
+
+ def _decrypt_ah(self, pkt, verify=True):
+
+ if verify:
+ self.check_spi(pkt)
+ self.auth_algo.verify(pkt, self.auth_key)
+
+ ah = pkt[AH]
+ payload = ah.payload
+ payload.remove_underlayer(None) # useless argument...
+
+ if self.tunnel_header:
+ return payload
+ else:
+ ip_header = pkt
+
+ if ip_header.version == 4:
+ ip_header.proto = ah.nh
+ del ip_header.chksum
+ ip_header.remove_payload()
+ ip_header.len = len(ip_header) + len(payload)
+ # recompute checksum
+ ip_header = ip_header.__class__(bytes(ip_header))
+ else:
+ ah.underlayer.nh = ah.nh
+ ah.underlayer.remove_payload()
+ ip_header.plen = len(ip_header.payload) + len(payload)
+
+ # reassemble the ip_header with the AH payload
+ return ip_header / payload
+
+ def decrypt(self, pkt, verify=True):
+ """
+ Decrypt (and decapsulate) an IP(v6) packet containing ESP or AH.
+
+ @param pkt: the packet to decrypt
+ @param verify: if False, do not perform the integrity check
+
+ @return: the decrypted/decapsulated packet
+ @raise IPSecIntegrityError: if the integrity check fails
+ """
+ if not isinstance(pkt, self.SUPPORTED_PROTOS):
+ raise TypeError('cannot decrypt %s, supported protos are %s'
+ % (pkt.__class__, self.SUPPORTED_PROTOS))
+
+ if self.proto is ESP and pkt.haslayer(ESP):
+ return self._decrypt_esp(pkt, verify=verify)
+ elif self.proto is AH and pkt.haslayer(AH):
+ return self._decrypt_ah(pkt, verify=verify)
+ else:
+ raise TypeError('%s has no %s layer' % (pkt, self.proto.name))
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/ir.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/ir.py
new file mode 100644
index 00000000..90935aa3
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/ir.py
@@ -0,0 +1,44 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+IrDA infrared data communication.
+"""
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.l2 import CookedLinux
+
+
+
+# IR
+
+class IrLAPHead(Packet):
+ name = "IrDA Link Access Protocol Header"
+ fields_desc = [ XBitField("Address", 0x7f, 7),
+ BitEnumField("Type", 1, 1, {"Response":0,
+ "Command":1})]
+
+class IrLAPCommand(Packet):
+ name = "IrDA Link Access Protocol Command"
+ fields_desc = [ XByteField("Control", 0),
+ XByteField("Format identifier", 0),
+ XIntField("Source address", 0),
+ XIntField("Destination address", 0xffffffff),
+ XByteField("Discovery flags", 0x1),
+ ByteEnumField("Slot number", 255, {"final":255}),
+ XByteField("Version", 0)]
+
+
+class IrLMP(Packet):
+ name = "IrDA Link Management Protocol"
+ fields_desc = [ XShortField("Service hints", 0),
+ XByteField("Character set", 0),
+ StrField("Device name", "") ]
+
+
+bind_layers( CookedLinux, IrLAPHead, proto=23)
+bind_layers( IrLAPHead, IrLAPCommand, Type=1)
+bind_layers( IrLAPCommand, IrLMP, )
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/isakmp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/isakmp.py
new file mode 100644
index 00000000..97def8f5
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/isakmp.py
@@ -0,0 +1,355 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+ISAKMP (Internet Security Association and Key Management Protocol).
+"""
+
+import struct
+from scapy.packet import *
+from scapy.fields import *
+from scapy.ansmachine import *
+from scapy.layers.inet import IP,UDP
+from scapy.sendrecv import sr
+
+
+# see http://www.iana.org/assignments/ipsec-registry for details
+ISAKMPAttributeTypes= { "Encryption": (1, { "DES-CBC" : 1,
+ "IDEA-CBC" : 2,
+ "Blowfish-CBC" : 3,
+ "RC5-R16-B64-CBC" : 4,
+ "3DES-CBC" : 5,
+ "CAST-CBC" : 6,
+ "AES-CBC" : 7,
+ "CAMELLIA-CBC" : 8, }, 0),
+ "Hash": (2, { "MD5": 1,
+ "SHA": 2,
+ "Tiger": 3,
+ "SHA2-256": 4,
+ "SHA2-384": 5,
+ "SHA2-512": 6,}, 0),
+ "Authentication":(3, { "PSK": 1,
+ "DSS": 2,
+ "RSA Sig": 3,
+ "RSA Encryption": 4,
+ "RSA Encryption Revised": 5,
+ "ElGamal Encryption": 6,
+ "ElGamal Encryption Revised": 7,
+ "ECDSA Sig": 8,
+ "HybridInitRSA": 64221,
+ "HybridRespRSA": 64222,
+ "HybridInitDSS": 64223,
+ "HybridRespDSS": 64224,
+ "XAUTHInitPreShared": 65001,
+ "XAUTHRespPreShared": 65002,
+ "XAUTHInitDSS": 65003,
+ "XAUTHRespDSS": 65004,
+ "XAUTHInitRSA": 65005,
+ "XAUTHRespRSA": 65006,
+ "XAUTHInitRSAEncryption": 65007,
+ "XAUTHRespRSAEncryption": 65008,
+ "XAUTHInitRSARevisedEncryption": 65009,
+ "XAUTHRespRSARevisedEncryptio": 65010, }, 0),
+ "GroupDesc": (4, { "768MODPgr" : 1,
+ "1024MODPgr" : 2,
+ "EC2Ngr155" : 3,
+ "EC2Ngr185" : 4,
+ "1536MODPgr" : 5,
+ "2048MODPgr" : 14,
+ "3072MODPgr" : 15,
+ "4096MODPgr" : 16,
+ "6144MODPgr" : 17,
+ "8192MODPgr" : 18, }, 0),
+ "GroupType": (5, {"MODP": 1,
+ "ECP": 2,
+ "EC2N": 3}, 0),
+ "GroupPrime": (6, {}, 1),
+ "GroupGenerator1":(7, {}, 1),
+ "GroupGenerator2":(8, {}, 1),
+ "GroupCurveA": (9, {}, 1),
+ "GroupCurveB": (10, {}, 1),
+ "LifeType": (11, {"Seconds": 1,
+ "Kilobytes": 2, }, 0),
+ "LifeDuration": (12, {}, 1),
+ "PRF": (13, {}, 0),
+ "KeyLength": (14, {}, 0),
+ "FieldSize": (15, {}, 0),
+ "GroupOrder": (16, {}, 1),
+ }
+
+# the name 'ISAKMPTransformTypes' is actually a misnomer (since the table
+# holds info for all ISAKMP Attribute types, not just transforms, but we'll
+# keep it for backwards compatibility... for now at least
+ISAKMPTransformTypes = ISAKMPAttributeTypes
+
+ISAKMPTransformNum = {}
+for n in ISAKMPTransformTypes:
+ val = ISAKMPTransformTypes[n]
+ tmp = {}
+ for e in val[1]:
+ tmp[val[1][e]] = e
+ ISAKMPTransformNum[val[0]] = (n,tmp, val[2])
+del(n)
+del(e)
+del(tmp)
+del(val)
+
+
+class ISAKMPTransformSetField(StrLenField):
+ islist=1
+ #def type2num(self, (typ,val)):
+ def type2num(self, typval):
+ typ = typval[0]
+ val = typval[1]
+ type_val,enc_dict,tlv = ISAKMPTransformTypes.get(typval[0], (typval[0],{},0))
+ val = enc_dict.get(val, val)
+ s = b""
+ if (val & ~0xffff):
+ if not tlv:
+ warning("%r should not be TLV but is too big => using TLV encoding" % typval[0])
+ n = 0
+ while val:
+ s = bytes([(val&0xff)])+s
+ val >>= 8
+ n += 1
+ val = n
+ else:
+ type_val |= 0x8000
+ return struct.pack("!HH",type_val, val)+s
+ def num2type(self, typ, enc):
+ val = ISAKMPTransformNum.get(typ,(typ,{}))
+ enc = val[1].get(enc,enc)
+ return (val[0],enc)
+ def i2m(self, pkt, i):
+ if i is None:
+ return b""
+ i = map(self.type2num, i)
+ return b"".join(i)
+ def m2i(self, pkt, m):
+ # I try to ensure that we don't read off the end of our packet based
+ # on bad length fields we're provided in the packet. There are still
+ # conditions where struct.unpack() may not get enough packet data, but
+ # worst case that should result in broken attributes (which would
+ # be expected). (wam)
+ lst = []
+ while len(m) >= 4:
+ trans_type, = struct.unpack("!H", m[:2])
+ is_tlv = not (trans_type & 0x8000)
+ if is_tlv:
+ # We should probably check to make sure the attribute type we
+ # are looking at is allowed to have a TLV format and issue a
+ # warning if we're given an TLV on a basic attribute.
+ value_len, = struct.unpack("!H", m[2:4])
+ if value_len+4 > len(m):
+ warning("Bad length for ISAKMP tranform type=%#6x" % trans_type)
+ value = m[4:4+value_len]
+ r = 0
+ for i in struct.unpack("!%s" % ("B"*len(value),), value):
+ r = (r << 8) | i
+ value = r
+ #value = reduce(lambda x,y: (x<<8)|y, struct.unpack("!%s" % ("B"*len(value),), value),0)
+ else:
+ trans_type &= 0x7fff
+ value_len=0
+ value, = struct.unpack("!H", m[2:4])
+ m=m[4+value_len:]
+ lst.append(self.num2type(trans_type, value))
+ if len(m) > 0:
+ warning("Extra bytes after ISAKMP transform dissection [%r]" % m)
+ return lst
+
+
+ISAKMP_payload_type = ["None","SA","Proposal","Transform","KE","ID","CERT","CR","Hash",
+ "SIG","Nonce","Notification","Delete","VendorID"]
+
+ISAKMP_exchange_type = ["None","base","identity prot.",
+ "auth only", "aggressive", "info"]
+
+
+class ISAKMP_class(Packet):
+ def guess_payload_class(self, payload):
+ np = self.next_payload
+ if np == 0:
+ return conf.raw_layer
+ elif np < len(ISAKMP_payload_type):
+ pt = ISAKMP_payload_type[np]
+ return globals().get("ISAKMP_payload_%s" % pt, ISAKMP_payload)
+ else:
+ return ISAKMP_payload
+
+
+class ISAKMP(ISAKMP_class): # rfc2408
+ name = "ISAKMP"
+ fields_desc = [
+ StrFixedLenField("init_cookie","",8),
+ StrFixedLenField("resp_cookie","",8),
+ ByteEnumField("next_payload",0,ISAKMP_payload_type),
+ XByteField("version",0x10),
+ ByteEnumField("exch_type",0,ISAKMP_exchange_type),
+ FlagsField("flags",0, 8, ["encryption","commit","auth_only","res3","res4","res5","res6","res7"]), # XXX use a Flag field
+ IntField("id",0),
+ IntField("length",None)
+ ]
+
+ def guess_payload_class(self, payload):
+ if self.flags & 1:
+ return conf.raw_layer
+ return ISAKMP_class.guess_payload_class(self, payload)
+
+ def answers(self, other):
+ if isinstance(other, ISAKMP):
+ if other.init_cookie == self.init_cookie:
+ return 1
+ return 0
+ def post_build(self, p, pay):
+ p += pay
+ if self.length is None:
+ p = p[:24]+struct.pack("!I",len(p))+p[28:]
+ return p
+
+
+
+
+class ISAKMP_payload_Transform(ISAKMP_class):
+ name = "IKE Transform"
+ fields_desc = [
+ ByteEnumField("next_payload",None,ISAKMP_payload_type),
+ ByteField("res",0),
+# ShortField("len",None),
+ ShortField("length",None),
+ ByteField("num",None),
+ ByteEnumField("id",1,{1:"KEY_IKE"}),
+ ShortField("res2",0),
+ ISAKMPTransformSetField("transforms",None,length_from=lambda x:x.length-8)
+# XIntField("enc",0x80010005L),
+# XIntField("hash",0x80020002L),
+# XIntField("auth",0x80030001L),
+# XIntField("group",0x80040002L),
+# XIntField("life_type",0x800b0001L),
+# XIntField("durationh",0x000c0004L),
+# XIntField("durationl",0x00007080L),
+ ]
+ def post_build(self, p, pay):
+ if self.length is None:
+ l = len(p)
+ p = p[:2]+bytes([((l>>8)&0xff),(l&0xff)])+p[4:]
+ p += pay
+ return p
+
+
+
+
+class ISAKMP_payload_Proposal(ISAKMP_class):
+ name = "IKE proposal"
+# ISAKMP_payload_type = 0
+ fields_desc = [
+ ByteEnumField("next_payload",None,ISAKMP_payload_type),
+ ByteField("res",0),
+ FieldLenField("length",None,"trans","H", adjust=lambda pkt,x:x+8),
+ ByteField("proposal",1),
+ ByteEnumField("proto",1,{1:"ISAKMP"}),
+ FieldLenField("SPIsize",None,"SPI","B"),
+ ByteField("trans_nb",None),
+ StrLenField("SPI","",length_from=lambda x:x.SPIsize),
+ PacketLenField("trans",conf.raw_layer(),ISAKMP_payload_Transform,length_from=lambda x:x.length-8),
+ ]
+
+
+class ISAKMP_payload(ISAKMP_class):
+ name = "ISAKMP payload"
+ fields_desc = [
+ ByteEnumField("next_payload",None,ISAKMP_payload_type),
+ ByteField("res",0),
+ FieldLenField("length",None,"load","H", adjust=lambda pkt,x:x+4),
+ StrLenField("load","",length_from=lambda x:x.length-4),
+ ]
+
+
+class ISAKMP_payload_VendorID(ISAKMP_class):
+ name = "ISAKMP Vendor ID"
+ overload_fields = { ISAKMP: { "next_payload":13 }}
+ fields_desc = [
+ ByteEnumField("next_payload",None,ISAKMP_payload_type),
+ ByteField("res",0),
+ FieldLenField("length",None,"vendorID","H", adjust=lambda pkt,x:x+4),
+ StrLenField("vendorID","",length_from=lambda x:x.length-4),
+ ]
+
+class ISAKMP_payload_SA(ISAKMP_class):
+ name = "ISAKMP SA"
+ overload_fields = { ISAKMP: { "next_payload":1 }}
+ fields_desc = [
+ ByteEnumField("next_payload",None,ISAKMP_payload_type),
+ ByteField("res",0),
+ FieldLenField("length",None,"prop","H", adjust=lambda pkt,x:x+12),
+ IntEnumField("DOI",1,{1:"IPSEC"}),
+ IntEnumField("situation",1,{1:"identity"}),
+ PacketLenField("prop",conf.raw_layer(),ISAKMP_payload_Proposal,length_from=lambda x:x.length-12),
+ ]
+
+class ISAKMP_payload_Nonce(ISAKMP_class):
+ name = "ISAKMP Nonce"
+ overload_fields = { ISAKMP: { "next_payload":10 }}
+ fields_desc = [
+ ByteEnumField("next_payload",None,ISAKMP_payload_type),
+ ByteField("res",0),
+ FieldLenField("length",None,"load","H", adjust=lambda pkt,x:x+4),
+ StrLenField("load","",length_from=lambda x:x.length-4),
+ ]
+
+class ISAKMP_payload_KE(ISAKMP_class):
+ name = "ISAKMP Key Exchange"
+ overload_fields = { ISAKMP: { "next_payload":4 }}
+ fields_desc = [
+ ByteEnumField("next_payload",None,ISAKMP_payload_type),
+ ByteField("res",0),
+ FieldLenField("length",None,"load","H", adjust=lambda pkt,x:x+4),
+ StrLenField("load","",length_from=lambda x:x.length-4),
+ ]
+
+class ISAKMP_payload_ID(ISAKMP_class):
+ name = "ISAKMP Identification"
+ overload_fields = { ISAKMP: { "next_payload":5 }}
+ fields_desc = [
+ ByteEnumField("next_payload",None,ISAKMP_payload_type),
+ ByteField("res",0),
+ FieldLenField("length",None,"load","H",adjust=lambda pkt,x:x+8),
+ ByteEnumField("IDtype",1,{1:"IPv4_addr", 11:"Key"}),
+ ByteEnumField("ProtoID",0,{0:"Unused"}),
+ ShortEnumField("Port",0,{0:"Unused"}),
+# IPField("IdentData","127.0.0.1"),
+ StrLenField("load","",length_from=lambda x:x.length-8),
+ ]
+
+
+
+class ISAKMP_payload_Hash(ISAKMP_class):
+ name = "ISAKMP Hash"
+ overload_fields = { ISAKMP: { "next_payload":8 }}
+ fields_desc = [
+ ByteEnumField("next_payload",None,ISAKMP_payload_type),
+ ByteField("res",0),
+ FieldLenField("length",None,"load","H",adjust=lambda pkt,x:x+4),
+ StrLenField("load","",length_from=lambda x:x.length-4),
+ ]
+
+
+
+ISAKMP_payload_type_overload = {}
+for i in range(len(ISAKMP_payload_type)):
+ name = "ISAKMP_payload_%s" % ISAKMP_payload_type[i]
+ if name in globals():
+ ISAKMP_payload_type_overload[globals()[name]] = {"next_payload":i}
+
+del(i)
+del(name)
+ISAKMP_class.overload_fields = ISAKMP_payload_type_overload.copy()
+
+
+bind_layers( UDP, ISAKMP, dport=500, sport=500)
+def ikescan(ip):
+ return sr(IP(dst=ip)/UDP()/ISAKMP(init_cookie=RandString(8),
+ exch_type=2)/ISAKMP_payload_SA(prop=ISAKMP_payload_Proposal()))
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/l2.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/l2.py
new file mode 100644
index 00000000..0d0a1c78
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/l2.py
@@ -0,0 +1,543 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Classes and functions for layer 2 protocols.
+"""
+
+import os,struct,time
+from scapy.base_classes import Net
+from scapy.config import conf
+from scapy.packet import *
+from scapy.ansmachine import *
+from scapy.plist import SndRcvList
+from scapy.fields import *
+from scapy.sendrecv import srp,srp1
+from scapy.arch import get_if_hwaddr
+
+
+
+
+#################
+## Tools ##
+#################
+
+
+class Neighbor:
+ def __init__(self):
+ self.resolvers = {}
+
+ def register_l3(self, l2, l3, resolve_method):
+ self.resolvers[l2,l3]=resolve_method
+
+ def resolve(self, l2inst, l3inst):
+ k = l2inst.__class__,l3inst.__class__
+ if k in self.resolvers:
+ return self.resolvers[k](l2inst,l3inst)
+
+ def __repr__(self):
+ return "\n".join("%-15s -> %-15s" % (l2.__name__, l3.__name__) for l2,l3 in self.resolvers)
+
+conf.neighbor = Neighbor()
+
+conf.netcache.new_cache("arp_cache", 120) # cache entries expire after 120s
+
+
+@conf.commands.register
+def getmacbyip(ip, chainCC=0):
+ """Return MAC address corresponding to a given IP address"""
+ if isinstance(ip,Net):
+ ip = next(iter(ip))
+ ip = inet_ntoa(inet_aton(ip))
+ tmp = inet_aton(ip)
+ if (tmp[0] & 0xf0) == 0xe0: # mcast @
+ return "01:00:5e:%.2x:%.2x:%.2x" % (tmp[1]&0x7f,tmp[2],tmp[3])
+ iff,a,gw = conf.route.route(ip)
+ if ( (iff == "lo") or (ip == conf.route.get_if_bcast(iff)) ):
+ return "ff:ff:ff:ff:ff:ff"
+ if gw != "0.0.0.0":
+ ip = gw
+
+ mac = conf.netcache.arp_cache.get(ip)
+ if mac:
+ return mac
+
+ res = srp1(Ether(dst=ETHER_BROADCAST)/ARP(op="who-has", pdst=ip),
+ type=ETH_P_ARP,
+ iface = iff,
+ timeout=2,
+ verbose=0,
+ chainCC=chainCC,
+ nofilter=1)
+ if res is not None:
+ mac = res.payload.hwsrc
+ conf.netcache.arp_cache[ip] = mac
+ return mac
+ return None
+
+
+
+### Fields
+
+class DestMACField(MACField):
+ def __init__(self, name):
+ MACField.__init__(self, name, None)
+ def i2h(self, pkt, x):
+ if x is None:
+ x = conf.neighbor.resolve(pkt,pkt.payload)
+ if x is None:
+ x = "ff:ff:ff:ff:ff:ff"
+ warning("Mac address to reach destination not found. Using broadcast.")
+ return MACField.i2h(self, pkt, x)
+ def i2m(self, pkt, x):
+ return MACField.i2m(self, pkt, self.i2h(pkt, x))
+
+class SourceMACField(MACField):
+ def __init__(self, name):
+ MACField.__init__(self, name, None)
+ def i2h(self, pkt, x):
+ if x is None:
+ iff,a,gw = pkt.payload.route()
+ if iff:
+ try:
+ x = get_if_hwaddr(iff)
+ except:
+ pass
+ if x is None:
+ x = "00:00:00:00:00:00"
+ return MACField.i2h(self, pkt, x)
+ def i2m(self, pkt, x):
+ return MACField.i2m(self, pkt, self.i2h(pkt, x))
+
+class ARPSourceMACField(MACField):
+ def __init__(self, name):
+ MACField.__init__(self, name, None)
+ def i2h(self, pkt, x):
+ if x is None:
+ iff,a,gw = pkt.route()
+ if iff:
+ try:
+ x = get_if_hwaddr(iff)
+ except:
+ pass
+ if x is None:
+ x = "00:00:00:00:00:00"
+ return MACField.i2h(self, pkt, x)
+ def i2m(self, pkt, x):
+ return MACField.i2m(self, pkt, self.i2h(pkt, x))
+
+
+
+### Layers
+
+
+class Ether(Packet):
+ name = "Ethernet"
+ fields_desc = [ MACField("dst","00:00:00:01:00:00"),
+ MACField("src","00:00:00:02:00:00"),
+ XShortEnumField("type", 0x9000, ETHER_TYPES) ]
+ def hashret(self):
+ return struct.pack("H",self.type)+self.payload.hashret()
+ def answers(self, other):
+ if isinstance(other,Ether):
+ if self.type == other.type:
+ return self.payload.answers(other.payload)
+ return 0
+ def mysummary(self):
+ return self.sprintf("%src% > %dst% (%type%)")
+ @classmethod
+ def dispatch_hook(cls, _pkt=None, *args, **kargs):
+ if _pkt and len(_pkt) >= 14:
+ if struct.unpack("!H", _pkt[12:14])[0] <= 1500:
+ return Dot3
+ return cls
+
+
+class Dot3(Packet):
+ name = "802.3"
+ fields_desc = [ DestMACField("dst"),
+ MACField("src", ETHER_ANY),
+ LenField("len", None, "H") ]
+ def extract_padding(self,s):
+ l = self.len
+ return s[:l],s[l:]
+ def answers(self, other):
+ if isinstance(other,Dot3):
+ return self.payload.answers(other.payload)
+ return 0
+ def mysummary(self):
+ return "802.3 %s > %s" % (self.src, self.dst)
+ @classmethod
+ def dispatch_hook(cls, _pkt=None, *args, **kargs):
+ if _pkt and len(_pkt) >= 14:
+ if struct.unpack("!H", _pkt[12:14])[0] > 1500:
+ return Ether
+ return cls
+
+
+class LLC(Packet):
+ name = "LLC"
+ fields_desc = [ XByteField("dsap", 0x00),
+ XByteField("ssap", 0x00),
+ ByteField("ctrl", 0) ]
+
+conf.neighbor.register_l3(Ether, LLC, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
+conf.neighbor.register_l3(Dot3, LLC, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
+
+
+class CookedLinux(Packet):
+ name = "cooked linux"
+ fields_desc = [ ShortEnumField("pkttype",0, {0: "unicast",
+ 4:"sent-by-us"}), #XXX incomplete
+ XShortField("lladdrtype",512),
+ ShortField("lladdrlen",0),
+ StrFixedLenField("src","",8),
+ XShortEnumField("proto",0x800,ETHER_TYPES) ]
+
+
+
+class SNAP(Packet):
+ name = "SNAP"
+ fields_desc = [ X3BytesField("OUI",0x000000),
+ XShortEnumField("code", 0x000, ETHER_TYPES) ]
+
+conf.neighbor.register_l3(Dot3, SNAP, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
+
+
+class Dot1Q(Packet):
+ name = "802.1Q"
+ aliastypes = [ Ether ]
+ fields_desc = [ BitField("prio", 0, 3),
+ BitField("id", 0, 1),
+ BitField("vlan", 1, 12),
+ XShortEnumField("type", 0x0000, ETHER_TYPES) ]
+ def answers(self, other):
+ if isinstance(other,Dot1Q):
+ if ( (self.type == other.type) and
+ (self.vlan == other.vlan) ):
+ return self.payload.answers(other.payload)
+ else:
+ return self.payload.answers(other)
+ return 0
+ def default_payload_class(self, pay):
+ if self.type <= 1500:
+ return LLC
+ return conf.raw_layer
+ def extract_padding(self,s):
+ if self.type <= 1500:
+ return s[:self.type],s[self.type:]
+ return s,None
+ def mysummary(self):
+ if isinstance(self.underlayer, Ether):
+ return self.underlayer.sprintf("802.1q %Ether.src% > %Ether.dst% (%Dot1Q.type%) vlan %Dot1Q.vlan%")
+ else:
+ return self.sprintf("802.1q (%Dot1Q.type%) vlan %Dot1Q.vlan%")
+
+
+conf.neighbor.register_l3(Ether, Dot1Q, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
+
+class STP(Packet):
+ name = "Spanning Tree Protocol"
+ fields_desc = [ ShortField("proto", 0),
+ ByteField("version", 0),
+ ByteField("bpdutype", 0),
+ ByteField("bpduflags", 0),
+ ShortField("rootid", 0),
+ MACField("rootmac", ETHER_ANY),
+ IntField("pathcost", 0),
+ ShortField("bridgeid", 0),
+ MACField("bridgemac", ETHER_ANY),
+ ShortField("portid", 0),
+ BCDFloatField("age", 1),
+ BCDFloatField("maxage", 20),
+ BCDFloatField("hellotime", 2),
+ BCDFloatField("fwddelay", 15) ]
+
+
+class EAPOL(Packet):
+ name = "EAPOL"
+ fields_desc = [ ByteField("version", 1),
+ ByteEnumField("type", 0, ["EAP_PACKET", "START", "LOGOFF", "KEY", "ASF"]),
+ LenField("len", None, "H") ]
+
+ EAP_PACKET= 0
+ START = 1
+ LOGOFF = 2
+ KEY = 3
+ ASF = 4
+ def extract_padding(self, s):
+ l = self.len
+ return s[:l],s[l:]
+ def hashret(self):
+ #return chr(self.type)+self.payload.hashret()
+ return bytes([self.type])+self.payload.hashret()
+ def answers(self, other):
+ if isinstance(other,EAPOL):
+ if ( (self.type == self.EAP_PACKET) and
+ (other.type == self.EAP_PACKET) ):
+ return self.payload.answers(other.payload)
+ return 0
+ def mysummary(self):
+ return self.sprintf("EAPOL %EAPOL.type%")
+
+
+class EAP(Packet):
+ name = "EAP"
+ fields_desc = [ ByteEnumField("code", 4, {1:"REQUEST",2:"RESPONSE",3:"SUCCESS",4:"FAILURE"}),
+ ByteField("id", 0),
+ ShortField("len",None),
+ ConditionalField(ByteEnumField("type",0, {1:"ID",4:"MD5"}), lambda pkt:pkt.code not in [EAP.SUCCESS, EAP.FAILURE])
+
+ ]
+
+ REQUEST = 1
+ RESPONSE = 2
+ SUCCESS = 3
+ FAILURE = 4
+ TYPE_ID = 1
+ TYPE_MD5 = 4
+ def answers(self, other):
+ if isinstance(other,EAP):
+ if self.code == self.REQUEST:
+ return 0
+ elif self.code == self.RESPONSE:
+ if ( (other.code == self.REQUEST) and
+ (other.type == self.type) ):
+ return 1
+ elif other.code == self.RESPONSE:
+ return 1
+ return 0
+
+ def post_build(self, p, pay):
+ if self.len is None:
+ l = len(p)+len(pay)
+ p = p[:2]+bytes([((l>>8)&0xff),(l&0xff)])+p[4:]
+ return p+pay
+
+
+class ARP(Packet):
+ name = "ARP"
+ fields_desc = [ XShortField("hwtype", 0x0001),
+ XShortEnumField("ptype", 0x0800, ETHER_TYPES),
+ ByteField("hwlen", 6),
+ ByteField("plen", 4),
+ ShortEnumField("op", 1, {"who-has":1, "is-at":2, "RARP-req":3, "RARP-rep":4, "Dyn-RARP-req":5, "Dyn-RAR-rep":6, "Dyn-RARP-err":7, "InARP-req":8, "InARP-rep":9}),
+ ARPSourceMACField("hwsrc"),
+ SourceIPField("psrc","pdst"),
+ MACField("hwdst", ETHER_ANY),
+ IPField("pdst", "0.0.0.0") ]
+ who_has = 1
+ is_at = 2
+ def answers(self, other):
+ if isinstance(other,ARP):
+ if ( (self.op == self.is_at) and
+ (other.op == self.who_has) and
+ (self.psrc == other.pdst) ):
+ return 1
+ return 0
+ def route(self):
+ dst = self.pdst
+ if isinstance(dst,Gen):
+ dst = next(iter(dst))
+ return conf.route.route(dst)
+ def extract_padding(self, s):
+ return b"",s
+ def mysummary(self):
+ if self.op == self.is_at:
+ return self.sprintf("ARP is at %hwsrc% says %psrc%")
+ elif self.op == self.who_has:
+ return self.sprintf("ARP who has %pdst% says %psrc%")
+ else:
+ return self.sprintf("ARP %op% %psrc% > %pdst%")
+
+conf.neighbor.register_l3(Ether, ARP, lambda l2,l3: getmacbyip(l3.pdst))
+
+class GRErouting(Packet):
+ name = "GRE routing informations"
+ fields_desc = [ ShortField("address_family",0),
+ ByteField("SRE_offset", 0),
+ FieldLenField("SRE_len", None, "routing_info", "B"),
+ StrLenField("routing_info", "", "SRE_len"),
+ ]
+
+
+class GRE(Packet):
+ name = "GRE"
+ fields_desc = [ BitField("chksum_present",0,1),
+ BitField("routing_present",0,1),
+ BitField("key_present",0,1),
+ BitField("seqnum_present",0,1),
+ BitField("strict_route_source",0,1),
+ BitField("recursion_control",0,3),
+ BitField("flags",0,5),
+ BitField("version",0,3),
+ XShortEnumField("proto", 0x0000, ETHER_TYPES),
+ ConditionalField(XShortField("chksum",None), lambda pkt:pkt.chksum_present==1 or pkt.routing_present==1),
+ ConditionalField(XShortField("offset",None), lambda pkt:pkt.chksum_present==1 or pkt.routing_present==1),
+ ConditionalField(XIntField("key",None), lambda pkt:pkt.key_present==1),
+ ConditionalField(XIntField("seqence_number",None), lambda pkt:pkt.seqnum_present==1),
+ ]
+ def post_build(self, p, pay):
+ p += pay
+ if self.chksum_present and self.chksum is None:
+ c = checksum(p)
+ p = p[:4]+bytes([((c>>8)&0xff),(c&0xff)])+p[6:]
+ return p
+
+
+
+
+bind_layers( Dot3, LLC, )
+bind_layers( Ether, LLC, type=122)
+bind_layers( Ether, Dot1Q, type=33024)
+bind_layers( Ether, Ether, type=1)
+bind_layers( Ether, ARP, type=2054)
+bind_layers( Ether, EAPOL, type=34958)
+bind_layers( Ether, EAPOL, dst='01:80:c2:00:00:03', type=34958)
+bind_layers( CookedLinux, LLC, proto=122)
+bind_layers( CookedLinux, Dot1Q, proto=33024)
+bind_layers( CookedLinux, Ether, proto=1)
+bind_layers( CookedLinux, ARP, proto=2054)
+bind_layers( CookedLinux, EAPOL, proto=34958)
+bind_layers( GRE, LLC, proto=122)
+bind_layers( GRE, Dot1Q, proto=33024)
+bind_layers( GRE, Ether, proto=1)
+bind_layers( GRE, ARP, proto=2054)
+bind_layers( GRE, EAPOL, proto=34958)
+bind_layers( GRE, GRErouting, { "routing_present" : 1 } )
+bind_layers( GRErouting, conf.raw_layer,{ "address_family" : 0, "SRE_len" : 0 })
+bind_layers( GRErouting, GRErouting, { } )
+bind_layers( EAPOL, EAP, type=0)
+bind_layers( LLC, STP, dsap=66, ssap=66, ctrl=3)
+bind_layers( LLC, SNAP, dsap=170, ssap=170, ctrl=3)
+bind_layers( SNAP, Dot1Q, code=33024)
+bind_layers( SNAP, Ether, code=1)
+bind_layers( SNAP, ARP, code=2054)
+bind_layers( SNAP, EAPOL, code=34958)
+bind_layers( SNAP, STP, code=267)
+
+conf.l2types.register(ARPHDR_ETHER, Ether)
+conf.l2types.register_num2layer(ARPHDR_METRICOM, Ether)
+conf.l2types.register_num2layer(ARPHDR_LOOPBACK, Ether)
+conf.l2types.register_layer2num(ARPHDR_ETHER, Dot3)
+conf.l2types.register(144, CookedLinux) # called LINUX_IRDA, similar to CookedLinux
+conf.l2types.register(113, CookedLinux)
+
+conf.l3types.register(ETH_P_ARP, ARP)
+
+
+
+
+### Technics
+
+
+
+@conf.commands.register
+def arpcachepoison(target, victim, interval=60):
+ """Poison target's cache with (your MAC,victim's IP) couple
+arpcachepoison(target, victim, [interval=60]) -> None
+"""
+ tmac = getmacbyip(target)
+ p = Ether(dst=tmac)/ARP(op="who-has", psrc=victim, pdst=target)
+ try:
+ while 1:
+ sendp(p, iface_hint=target)
+ if conf.verb > 1:
+ os.write(1,b".")
+ time.sleep(interval)
+ except KeyboardInterrupt:
+ pass
+
+
+class ARPingResult(SndRcvList):
+ def __init__(self, res=None, name="ARPing", stats=None):
+ SndRcvList.__init__(self, res, name, stats)
+
+ def show(self):
+ for s,r in self.res:
+ print(r.sprintf("%19s,Ether.src% %ARP.psrc%"))
+
+
+
+@conf.commands.register
+def arping(net, timeout=2, cache=0, verbose=None, **kargs):
+ """Send ARP who-has requests to determine which hosts are up
+arping(net, [cache=0,] [iface=conf.iface,] [verbose=conf.verb]) -> None
+Set cache=True if you want arping to modify internal ARP-Cache"""
+ if verbose is None:
+ verbose = conf.verb
+ ans,unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=net), verbose=verbose,
+ filter="arp and arp[7] = 2", timeout=timeout, iface_hint=net, **kargs)
+ ans = ARPingResult(ans.res)
+
+ if cache and ans is not None:
+ for pair in ans:
+ conf.netcache.arp_cache[pair[1].psrc] = (pair[1].hwsrc, time.time())
+ if verbose:
+ ans.show()
+ return ans,unans
+
+@conf.commands.register
+def is_promisc(ip, fake_bcast="ff:ff:00:00:00:00",**kargs):
+ """Try to guess if target is in Promisc mode. The target is provided by its ip."""
+
+ responses = srp1(Ether(dst=fake_bcast) / ARP(op="who-has", pdst=ip),type=ETH_P_ARP, iface_hint=ip, timeout=1, verbose=0,**kargs)
+
+ return responses is not None
+
+@conf.commands.register
+def promiscping(net, timeout=2, fake_bcast="ff:ff:ff:ff:ff:fe", **kargs):
+ """Send ARP who-has requests to determine which hosts are in promiscuous mode
+ promiscping(net, iface=conf.iface)"""
+ ans,unans = srp(Ether(dst=fake_bcast)/ARP(pdst=net),
+ filter="arp and arp[7] = 2", timeout=timeout, iface_hint=net, **kargs)
+ ans = ARPingResult(ans.res, name="PROMISCPing")
+
+ ans.display()
+ return ans,unans
+
+
+class ARP_am(AnsweringMachine):
+ function_name="farpd"
+ filter = "arp"
+ send_function = staticmethod(sendp)
+
+ def parse_options(self, IP_addr=None, iface=None, ARP_addr=None):
+ self.IP_addr=IP_addr
+ self.iface=iface
+ self.ARP_addr=ARP_addr
+
+ def is_request(self, req):
+ return (req.haslayer(ARP) and
+ req.getlayer(ARP).op == 1 and
+ (self.IP_addr == None or self.IP_addr == req.getlayer(ARP).pdst))
+
+ def make_reply(self, req):
+ ether = req.getlayer(Ether)
+ arp = req.getlayer(ARP)
+ iff,a,gw = conf.route.route(arp.psrc)
+ if self.iface != None:
+ iff = iface
+ ARP_addr = self.ARP_addr
+ IP_addr = arp.pdst
+ resp = Ether(dst=ether.src,
+ src=ARP_addr)/ARP(op="is-at",
+ hwsrc=ARP_addr,
+ psrc=IP_addr,
+ hwdst=arp.hwsrc,
+ pdst=arp.pdst)
+ return resp
+
+ def sniff(self):
+ sniff(iface=self.iface, **self.optsniff)
+
+@conf.commands.register
+def etherleak(target, **kargs):
+ """Exploit Etherleak flaw"""
+ return srpflood(Ether()/ARP(pdst=target),
+ prn=lambda a: conf.padding_layer in a[1] and hexstr(a[1][conf.padding_layer].load),
+ filter="arp", **kargs)
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/l2tp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/l2tp.py
new file mode 100644
index 00000000..0b56db21
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/l2tp.py
@@ -0,0 +1,36 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+L2TP (Layer 2 Tunneling Protocol) for VPNs.
+
+[RFC 2661]
+"""
+
+import struct
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.inet import UDP
+from scapy.layers.ppp import PPP
+
+class L2TP(Packet):
+ fields_desc = [ ShortEnumField("pkt_type",2,{2:"data"}),
+ ShortField("len", None),
+ ShortField("tunnel_id", 0),
+ ShortField("session_id", 0),
+ ShortField("ns", 0),
+ ShortField("nr", 0),
+ ShortField("offset", 0) ]
+
+ def post_build(self, pkt, pay):
+ if self.len is None:
+ l = len(pkt)+len(pay)
+ pkt = pkt[:2]+struct.pack("!H", l)+pkt[4:]
+ return pkt+pay
+
+
+bind_layers( UDP, L2TP, sport=1701, dport=1701)
+bind_layers( L2TP, PPP, )
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/llmnr.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/llmnr.py
new file mode 100644
index 00000000..65ecad41
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/llmnr.py
@@ -0,0 +1,65 @@
+from scapy.fields import *
+from scapy.packet import *
+from scapy.layers.inet import UDP
+from scapy.layers.dns import DNSQRField, DNSRRField, DNSRRCountField
+
+"""
+LLMNR (Link Local Multicast Node Resolution).
+
+[RFC 4795]
+"""
+
+#############################################################################
+### LLMNR (RFC4795) ###
+#############################################################################
+# LLMNR is based on the DNS packet format (RFC1035 Section 4)
+# RFC also envisions LLMNR over TCP. Like vista, we don't support it -- arno
+
+_LLMNR_IPv6_mcast_Addr = "FF02:0:0:0:0:0:1:3"
+_LLMNR_IPv4_mcast_addr = "224.0.0.252"
+
+class LLMNRQuery(Packet):
+ name = "Link Local Multicast Node Resolution - Query"
+ fields_desc = [ ShortField("id", 0),
+ BitField("qr", 0, 1),
+ BitEnumField("opcode", 0, 4, { 0:"QUERY" }),
+ BitField("c", 0, 1),
+ BitField("tc", 0, 2),
+ BitField("z", 0, 4),
+ BitEnumField("rcode", 0, 4, { 0:"ok" }),
+ DNSRRCountField("qdcount", None, "qd"),
+ DNSRRCountField("ancount", None, "an"),
+ DNSRRCountField("nscount", None, "ns"),
+ DNSRRCountField("arcount", None, "ar"),
+ DNSQRField("qd", "qdcount"),
+ DNSRRField("an", "ancount"),
+ DNSRRField("ns", "nscount"),
+ DNSRRField("ar", "arcount",0)]
+ overload_fields = {UDP: {"sport": 5355, "dport": 5355 }}
+ def hashret(self):
+ return struct.pack("!H", self.id)
+
+class LLMNRResponse(LLMNRQuery):
+ name = "Link Local Multicast Node Resolution - Response"
+ qr = 1
+ def answers(self, other):
+ return (isinstance(other, LLMNRQuery) and
+ self.id == other.id and
+ self.qr == 1 and
+ other.qr == 0)
+
+def _llmnr_dispatcher(x, *args, **kargs):
+ cls = conf.raw_layer
+ if len(x) >= 3:
+ if (ord(x[4]) & 0x80): # Response
+ cls = LLMNRResponse
+ else: # Query
+ cls = LLMNRQuery
+ return cls(x, *args, **kargs)
+
+bind_bottom_up(UDP, _llmnr_dispatcher, { "dport": 5355 })
+bind_bottom_up(UDP, _llmnr_dispatcher, { "sport": 5355 })
+
+# LLMNRQuery(id=RandShort(), qd=DNSQR(qname="vista.")))
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/mgcp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/mgcp.py
new file mode 100644
index 00000000..5d8a064e
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/mgcp.py
@@ -0,0 +1,45 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+MGCP (Media Gateway Control Protocol)
+
+[RFC 2805]
+"""
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.inet import UDP
+
+class MGCP(Packet):
+ name = "MGCP"
+ longname = "Media Gateway Control Protocol"
+ fields_desc = [ StrStopField("verb","AUEP"," ", -1),
+ StrFixedLenField("sep1"," ",1),
+ StrStopField("transaction_id","1234567"," ", -1),
+ StrFixedLenField("sep2"," ",1),
+ StrStopField("endpoint","dummy@dummy.net"," ", -1),
+ StrFixedLenField("sep3"," ",1),
+ StrStopField("version","MGCP 1.0 NCS 1.0","\x0a", -1),
+ StrFixedLenField("sep4","\x0a",1),
+ ]
+
+
+#class MGCP(Packet):
+# name = "MGCP"
+# longname = "Media Gateway Control Protocol"
+# fields_desc = [ ByteEnumField("type",0, ["request","response","others"]),
+# ByteField("code0",0),
+# ByteField("code1",0),
+# ByteField("code2",0),
+# ByteField("code3",0),
+# ByteField("code4",0),
+# IntField("trasid",0),
+# IntField("req_time",0),
+# ByteField("is_duplicate",0),
+# ByteField("req_available",0) ]
+#
+bind_layers( UDP, MGCP, dport=2727)
+bind_layers( UDP, MGCP, sport=2727)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/mobileip.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/mobileip.py
new file mode 100644
index 00000000..bbaa8ce7
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/mobileip.py
@@ -0,0 +1,47 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Mobile IP.
+"""
+
+from scapy.fields import *
+from scapy.packet import *
+from scapy.layers.inet import IP,UDP
+
+
+class MobileIP(Packet):
+ name = "Mobile IP (RFC3344)"
+ fields_desc = [ ByteEnumField("type", 1, {1:"RRQ", 3:"RRP"}) ]
+
+class MobileIPRRQ(Packet):
+ name = "Mobile IP Registration Request (RFC3344)"
+ fields_desc = [ XByteField("flags", 0),
+ ShortField("lifetime", 180),
+ IPField("homeaddr", "0.0.0.0"),
+ IPField("haaddr", "0.0.0.0"),
+ IPField("coaddr", "0.0.0.0"),
+ LongField("id", 0), ]
+
+class MobileIPRRP(Packet):
+ name = "Mobile IP Registration Reply (RFC3344)"
+ fields_desc = [ ByteField("code", 0),
+ ShortField("lifetime", 180),
+ IPField("homeaddr", "0.0.0.0"),
+ IPField("haaddr", "0.0.0.0"),
+ LongField("id", 0), ]
+
+class MobileIPTunnelData(Packet):
+ name = "Mobile IP Tunnel Data Message (RFC3519)"
+ fields_desc = [ ByteField("nexthdr", 4),
+ ShortField("res", 0) ]
+
+
+bind_layers( UDP, MobileIP, sport=434)
+bind_layers( UDP, MobileIP, dport=434)
+bind_layers( MobileIP, MobileIPRRQ, type=1)
+bind_layers( MobileIP, MobileIPRRP, type=3)
+bind_layers( MobileIP, MobileIPTunnelData, type=4)
+bind_layers( MobileIPTunnelData, IP, nexthdr=4)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/netbios.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/netbios.py
new file mode 100644
index 00000000..f06e9307
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/netbios.py
@@ -0,0 +1,222 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+NetBIOS over TCP/IP
+
+[RFC 1001/1002]
+"""
+
+import struct
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.inet import UDP,TCP
+from scapy.layers.l2 import SourceMACField
+
+class NetBIOS_DS(Packet):
+ name = "NetBIOS datagram service"
+ fields_desc = [
+ ByteEnumField("type",17, {17:"direct_group"}),
+ ByteField("flags",0),
+ XShortField("id",0),
+ IPField("src","127.0.0.1"),
+ ShortField("sport",138),
+ ShortField("len",None),
+ ShortField("ofs",0),
+ NetBIOSNameField("srcname",""),
+ NetBIOSNameField("dstname",""),
+ ]
+ def post_build(self, p, pay):
+ p += pay
+ if self.len is None:
+ l = len(p)-14
+ p = p[:10]+struct.pack("!H", l)+p[12:]
+ return p
+
+# ShortField("length",0),
+# ShortField("Delimitor",0),
+# ByteField("command",0),
+# ByteField("data1",0),
+# ShortField("data2",0),
+# ShortField("XMIt",0),
+# ShortField("RSPCor",0),
+# StrFixedLenField("dest","",16),
+# StrFixedLenField("source","",16),
+#
+# ]
+#
+
+#NetBIOS
+
+
+# Name Query Request
+# Node Status Request
+class NBNSQueryRequest(Packet):
+ name="NBNS query request"
+ fields_desc = [ShortField("NAME_TRN_ID",0),
+ ShortField("FLAGS", 0x0110),
+ ShortField("QDCOUNT",1),
+ ShortField("ANCOUNT",0),
+ ShortField("NSCOUNT",0),
+ ShortField("ARCOUNT",0),
+ NetBIOSNameField("QUESTION_NAME","windows"),
+ ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
+ ByteField("NULL",0),
+ ShortEnumField("QUESTION_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
+ ShortEnumField("QUESTION_CLASS",1,{1:"INTERNET"})]
+
+# Name Registration Request
+# Name Refresh Request
+# Name Release Request or Demand
+class NBNSRequest(Packet):
+ name="NBNS request"
+ fields_desc = [ShortField("NAME_TRN_ID",0),
+ ShortField("FLAGS", 0x2910),
+ ShortField("QDCOUNT",1),
+ ShortField("ANCOUNT",0),
+ ShortField("NSCOUNT",0),
+ ShortField("ARCOUNT",1),
+ NetBIOSNameField("QUESTION_NAME","windows"),
+ ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
+ ByteField("NULL",0),
+ ShortEnumField("QUESTION_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
+ ShortEnumField("QUESTION_CLASS",1,{1:"INTERNET"}),
+ ShortEnumField("RR_NAME",0xC00C,{0xC00C:"Label String Pointer to QUESTION_NAME"}),
+ ShortEnumField("RR_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
+ ShortEnumField("RR_CLASS",1,{1:"INTERNET"}),
+ IntField("TTL", 0),
+ ShortField("RDLENGTH", 6),
+ BitEnumField("G",0,1,{0:"Unique name",1:"Group name"}),
+ BitEnumField("OWNER_NODE_TYPE",00,2,{0:"B node",1:"P node",2:"M node",3:"H node"}),
+ BitEnumField("UNUSED",0,13,{0:"Unused"}),
+ IPField("NB_ADDRESS", "127.0.0.1")]
+
+# Name Query Response
+# Name Registration Response
+class NBNSQueryResponse(Packet):
+ name="NBNS query response"
+ fields_desc = [ShortField("NAME_TRN_ID",0),
+ ShortField("FLAGS", 0x8500),
+ ShortField("QDCOUNT",0),
+ ShortField("ANCOUNT",1),
+ ShortField("NSCOUNT",0),
+ ShortField("ARCOUNT",0),
+ NetBIOSNameField("RR_NAME","windows"),
+ ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
+ ByteField("NULL",0),
+ ShortEnumField("QUESTION_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
+ ShortEnumField("QUESTION_CLASS",1,{1:"INTERNET"}),
+ IntField("TTL", 0x493e0),
+ ShortField("RDLENGTH", 6),
+ ShortField("NB_FLAGS", 0),
+ IPField("NB_ADDRESS", "127.0.0.1")]
+
+# Name Query Response (negative)
+# Name Release Response
+class NBNSQueryResponseNegative(Packet):
+ name="NBNS query response (negative)"
+ fields_desc = [ShortField("NAME_TRN_ID",0),
+ ShortField("FLAGS", 0x8506),
+ ShortField("QDCOUNT",0),
+ ShortField("ANCOUNT",1),
+ ShortField("NSCOUNT",0),
+ ShortField("ARCOUNT",0),
+ NetBIOSNameField("RR_NAME","windows"),
+ ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
+ ByteField("NULL",0),
+ ShortEnumField("RR_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
+ ShortEnumField("RR_CLASS",1,{1:"INTERNET"}),
+ IntField("TTL",0),
+ ShortField("RDLENGTH",6),
+ BitEnumField("G",0,1,{0:"Unique name",1:"Group name"}),
+ BitEnumField("OWNER_NODE_TYPE",00,2,{0:"B node",1:"P node",2:"M node",3:"H node"}),
+ BitEnumField("UNUSED",0,13,{0:"Unused"}),
+ IPField("NB_ADDRESS", "127.0.0.1")]
+
+# Node Status Response
+class NBNSNodeStatusResponse(Packet):
+ name="NBNS Node Status Response"
+ fields_desc = [ShortField("NAME_TRN_ID",0),
+ ShortField("FLAGS", 0x8500),
+ ShortField("QDCOUNT",0),
+ ShortField("ANCOUNT",1),
+ ShortField("NSCOUNT",0),
+ ShortField("ARCOUNT",0),
+ NetBIOSNameField("RR_NAME","windows"),
+ ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
+ ByteField("NULL",0),
+ ShortEnumField("RR_TYPE",0x21, {0x20:"NB",0x21:"NBSTAT"}),
+ ShortEnumField("RR_CLASS",1,{1:"INTERNET"}),
+ IntField("TTL",0),
+ ShortField("RDLENGTH",83),
+ ByteField("NUM_NAMES",1)]
+
+# Service for Node Status Response
+class NBNSNodeStatusResponseService(Packet):
+ name="NBNS Node Status Response Service"
+ fields_desc = [StrFixedLenField("NETBIOS_NAME","WINDOWS ",15),
+ ByteEnumField("SUFFIX",0,{0:"workstation",0x03:"messenger service",0x20:"file server service",0x1b:"domain master browser",0x1c:"domain controller", 0x1e:"browser election service"}),
+ ByteField("NAME_FLAGS",0x4),
+ ByteEnumField("UNUSED",0,{0:"unused"})]
+
+# End of Node Status Response packet
+class NBNSNodeStatusResponseEnd(Packet):
+ name="NBNS Node Status Response"
+ fields_desc = [SourceMACField("MAC_ADDRESS"),
+ BitField("STATISTICS",0,57*8)]
+
+# Wait for Acknowledgement Response
+class NBNSWackResponse(Packet):
+ name="NBNS Wait for Acknowledgement Response"
+ fields_desc = [ShortField("NAME_TRN_ID",0),
+ ShortField("FLAGS", 0xBC07),
+ ShortField("QDCOUNT",0),
+ ShortField("ANCOUNT",1),
+ ShortField("NSCOUNT",0),
+ ShortField("ARCOUNT",0),
+ NetBIOSNameField("RR_NAME","windows"),
+ ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
+ ByteField("NULL",0),
+ ShortEnumField("RR_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
+ ShortEnumField("RR_CLASS",1,{1:"INTERNET"}),
+ IntField("TTL", 2),
+ ShortField("RDLENGTH",2),
+ BitField("RDATA",10512,16)] #10512=0010100100010000
+
+class NBTDatagram(Packet):
+ name="NBT Datagram Packet"
+ fields_desc= [ByteField("Type", 0x10),
+ ByteField("Flags", 0x02),
+ ShortField("ID", 0),
+ IPField("SourceIP", "127.0.0.1"),
+ ShortField("SourcePort", 138),
+ ShortField("Length", 272),
+ ShortField("Offset", 0),
+ NetBIOSNameField("SourceName",b"windows"),
+ ShortEnumField("SUFFIX1",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
+ ByteField("NULL",0),
+ NetBIOSNameField("DestinationName",b"windows"),
+ ShortEnumField("SUFFIX2",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
+ ByteField("NULL",0)]
+
+
+class NBTSession(Packet):
+ name="NBT Session Packet"
+ fields_desc= [ByteEnumField("TYPE",0,{0x00:"Session Message",0x81:"Session Request",0x82:"Positive Session Response",0x83:"Negative Session Response",0x84:"Retarget Session Response",0x85:"Session Keepalive"}),
+ BitField("RESERVED",0x00,7),
+ BitField("LENGTH",0,17)]
+
+bind_layers( UDP, NBNSQueryRequest, dport=137)
+bind_layers( UDP, NBNSRequest, dport=137)
+bind_layers( UDP, NBNSQueryResponse, sport=137)
+bind_layers( UDP, NBNSQueryResponseNegative, sport=137)
+bind_layers( UDP, NBNSNodeStatusResponse, sport=137)
+bind_layers( NBNSNodeStatusResponse, NBNSNodeStatusResponseService, )
+bind_layers( NBNSNodeStatusResponse, NBNSNodeStatusResponseService, )
+bind_layers( NBNSNodeStatusResponseService, NBNSNodeStatusResponseService, )
+bind_layers( NBNSNodeStatusResponseService, NBNSNodeStatusResponseEnd, )
+bind_layers( UDP, NBNSWackResponse, sport=137)
+bind_layers( UDP, NBTDatagram, dport=138)
+bind_layers( TCP, NBTSession, dport=139)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/netflow.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/netflow.py
new file mode 100644
index 00000000..44567737
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/netflow.py
@@ -0,0 +1,48 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Cisco NetFlow protocol v1
+"""
+
+
+from scapy.fields import *
+from scapy.packet import *
+
+# Cisco Netflow Protocol version 1
+class NetflowHeader(Packet):
+ name = "Netflow Header"
+ fields_desc = [ ShortField("version", 1) ]
+
+class NetflowHeaderV1(Packet):
+ name = "Netflow Header V1"
+ fields_desc = [ ShortField("count", 0),
+ IntField("sysUptime", 0),
+ IntField("unixSecs", 0),
+ IntField("unixNanoSeconds", 0) ]
+
+
+class NetflowRecordV1(Packet):
+ name = "Netflow Record"
+ fields_desc = [ IPField("ipsrc", "0.0.0.0"),
+ IPField("ipdst", "0.0.0.0"),
+ IPField("nexthop", "0.0.0.0"),
+ ShortField("inputIfIndex", 0),
+ ShortField("outpuIfIndex", 0),
+ IntField("dpkts", 0),
+ IntField("dbytes", 0),
+ IntField("starttime", 0),
+ IntField("endtime", 0),
+ ShortField("srcport", 0),
+ ShortField("dstport", 0),
+ ShortField("padding", 0),
+ ByteField("proto", 0),
+ ByteField("tos", 0),
+ IntField("padding1", 0),
+ IntField("padding2", 0) ]
+
+
+bind_layers( NetflowHeader, NetflowHeaderV1, version=1)
+bind_layers( NetflowHeaderV1, NetflowRecordV1, )
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/ntp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/ntp.py
new file mode 100644
index 00000000..6d11966c
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/ntp.py
@@ -0,0 +1,77 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+NTP (Network Time Protocol).
+"""
+
+import time
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.inet import UDP
+
+
+# seconds between 01-01-1900 and 01-01-1970
+_NTP_BASETIME = 2208988800
+
+class TimeStampField(FixedPointField):
+ def __init__(self, name, default):
+ FixedPointField.__init__(self, name, default, 64, 32)
+
+ def i2repr(self, pkt, val):
+ if val is None:
+ return "--"
+ val = self.i2h(pkt,val)
+ if val < _NTP_BASETIME:
+ return val
+ return time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(val-_NTP_BASETIME))
+
+ def any2i(self, pkt, val):
+ if type(val) is str:
+ return int(time.mktime(time.strptime(val))) + _NTP_BASETIME + 3600 # XXX
+ return FixedPointField.any2i(self,pkt,val)
+
+ def i2m(self, pkt, val):
+ if val is None:
+ val = FixedPointField.any2i(self, pkt, time.time()+_NTP_BASETIME)
+ return FixedPointField.i2m(self, pkt, val)
+
+
+
+class NTP(Packet):
+ # RFC 1769
+ name = "NTP"
+ fields_desc = [
+ BitEnumField('leap', 0, 2,
+ { 0: 'nowarning',
+ 1: 'longminute',
+ 2: 'shortminute',
+ 3: 'notsync'}),
+ BitField('version', 3, 3),
+ BitEnumField('mode', 3, 3,
+ { 0: 'reserved',
+ 1: 'sym_active',
+ 2: 'sym_passive',
+ 3: 'client',
+ 4: 'server',
+ 5: 'broadcast',
+ 6: 'control',
+ 7: 'private'}),
+ BitField('stratum', 2, 8),
+ BitField('poll', 0xa, 8), ### XXX : it's a signed int
+ BitField('precision', 0, 8), ### XXX : it's a signed int
+ FixedPointField('delay', 0, size=32, frac_bits=16),
+ FixedPointField('dispersion', 0, size=32, frac_bits=16),
+ IPField('id', "127.0.0.1"),
+ TimeStampField('ref', 0),
+ TimeStampField('orig', None), # None means current time
+ TimeStampField('recv', 0),
+ TimeStampField('sent', None)
+ ]
+ def mysummary(self):
+ return self.sprintf("NTP v%ir,NTP.version%, %NTP.mode%")
+
+
+bind_layers( UDP, NTP, dport=123, sport=123)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/pflog.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/pflog.py
new file mode 100644
index 00000000..a8fc9fe0
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/pflog.py
@@ -0,0 +1,59 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+PFLog: OpenBSD PF packet filter logging.
+"""
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.inet import IP
+if conf.ipv6_enabled:
+ from scapy.layers.inet6 import IPv6
+from scapy.config import conf
+
+class PFLog(Packet):
+ name = "PFLog"
+ # from OpenBSD src/sys/net/pfvar.h and src/sys/net/if_pflog.h
+ fields_desc = [ ByteField("hdrlen", 0),
+ ByteEnumField("addrfamily", 2, {socket.AF_INET: "IPv4",
+ socket.AF_INET6: "IPv6"}),
+ ByteEnumField("action", 1, {0: "pass", 1: "drop",
+ 2: "scrub", 3: "no-scrub",
+ 4: "nat", 5: "no-nat",
+ 6: "binat", 7: "no-binat",
+ 8: "rdr", 9: "no-rdr",
+ 10: "syn-proxy-drop" }),
+ ByteEnumField("reason", 0, {0: "match", 1: "bad-offset",
+ 2: "fragment", 3: "short",
+ 4: "normalize", 5: "memory",
+ 6: "bad-timestamp",
+ 7: "congestion",
+ 8: "ip-options",
+ 9: "proto-cksum",
+ 10: "state-mismatch",
+ 11: "state-insert",
+ 12: "state-limit",
+ 13: "src-limit",
+ 14: "syn-proxy" }),
+ StrFixedLenField("iface", "", 16),
+ StrFixedLenField("ruleset", "", 16),
+ SignedIntField("rulenumber", 0),
+ SignedIntField("subrulenumber", 0),
+ SignedIntField("uid", 0),
+ IntField("pid", 0),
+ SignedIntField("ruleuid", 0),
+ IntField("rulepid", 0),
+ ByteEnumField("direction", 255, {0: "inout", 1: "in",
+ 2:"out", 255: "unknown"}),
+ StrFixedLenField("pad", "\x00\x00\x00", 3 ) ]
+ def mysummary(self):
+ return self.sprintf("%PFLog.addrfamily% %PFLog.action% on %PFLog.iface% by rule %PFLog.rulenumber%")
+
+bind_layers(PFLog, IP, addrfamily=socket.AF_INET)
+if conf.ipv6_enabled:
+ bind_layers(PFLog, IPv6, addrfamily=socket.AF_INET6)
+
+conf.l2types.register(117, PFLog)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/ppp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/ppp.py
new file mode 100644
index 00000000..08cf62cd
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/ppp.py
@@ -0,0 +1,349 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+PPP (Point to Point Protocol)
+
+[RFC 1661]
+"""
+
+import struct
+from scapy.packet import *
+from scapy.layers.l2 import *
+from scapy.layers.inet import *
+from scapy.fields import *
+
+class PPPoE(Packet):
+ name = "PPP over Ethernet"
+ fields_desc = [ BitField("version", 1, 4),
+ BitField("type", 1, 4),
+ ByteEnumField("code", 0, {0:"Session"}),
+ XShortField("sessionid", 0x0),
+ ShortField("len", None) ]
+
+ def post_build(self, p, pay):
+ p += pay
+ if self.len is None:
+ l = len(p)-6
+ p = p[:4]+struct.pack("!H", l)+p[6:]
+ return p
+
+class PPPoED(PPPoE):
+ name = "PPP over Ethernet Discovery"
+ fields_desc = [ BitField("version", 1, 4),
+ BitField("type", 1, 4),
+ ByteEnumField("code", 0x09, {0x09:"PADI",0x07:"PADO",0x19:"PADR",0x65:"PADS",0xa7:"PADT"}),
+ XShortField("sessionid", 0x0),
+ ShortField("len", None) ]
+
+
+_PPP_proto = { 0x0001: "Padding Protocol",
+ 0x0003: "ROHC small-CID [RFC3095]",
+ 0x0005: "ROHC large-CID [RFC3095]",
+ 0x0021: "Internet Protocol version 4",
+ 0x0023: "OSI Network Layer",
+ 0x0025: "Xerox NS IDP",
+ 0x0027: "DECnet Phase IV",
+ 0x0029: "Appletalk",
+ 0x002b: "Novell IPX",
+ 0x002d: "Van Jacobson Compressed TCP/IP",
+ 0x002f: "Van Jacobson Uncompressed TCP/IP",
+ 0x0031: "Bridging PDU",
+ 0x0033: "Stream Protocol (ST-II)",
+ 0x0035: "Banyan Vines",
+ 0x0037: "reserved (until 1993) [Typo in RFC1172]",
+ 0x0039: "AppleTalk EDDP",
+ 0x003b: "AppleTalk SmartBuffered",
+ 0x003d: "Multi-Link [RFC1717]",
+ 0x003f: "NETBIOS Framing",
+ 0x0041: "Cisco Systems",
+ 0x0043: "Ascom Timeplex",
+ 0x0045: "Fujitsu Link Backup and Load Balancing (LBLB)",
+ 0x0047: "DCA Remote Lan",
+ 0x0049: "Serial Data Transport Protocol (PPP-SDTP)",
+ 0x004b: "SNA over 802.2",
+ 0x004d: "SNA",
+ 0x004f: "IPv6 Header Compression",
+ 0x0051: "KNX Bridging Data [ianp]",
+ 0x0053: "Encryption [Meyer]",
+ 0x0055: "Individual Link Encryption [Meyer]",
+ 0x0057: "Internet Protocol version 6 [Hinden]",
+ 0x0059: "PPP Muxing [RFC3153]",
+ 0x005b: "Vendor-Specific Network Protocol (VSNP) [RFC3772]",
+ 0x0061: "RTP IPHC Full Header [RFC3544]",
+ 0x0063: "RTP IPHC Compressed TCP [RFC3544]",
+ 0x0065: "RTP IPHC Compressed Non TCP [RFC3544]",
+ 0x0067: "RTP IPHC Compressed UDP 8 [RFC3544]",
+ 0x0069: "RTP IPHC Compressed RTP 8 [RFC3544]",
+ 0x006f: "Stampede Bridging",
+ 0x0071: "Reserved [Fox]",
+ 0x0073: "MP+ Protocol [Smith]",
+ 0x007d: "reserved (Control Escape) [RFC1661]",
+ 0x007f: "reserved (compression inefficient [RFC1662]",
+ 0x0081: "Reserved Until 20-Oct-2000 [IANA]",
+ 0x0083: "Reserved Until 20-Oct-2000 [IANA]",
+ 0x00c1: "NTCITS IPI [Ungar]",
+ 0x00cf: "reserved (PPP NLID)",
+ 0x00fb: "single link compression in multilink [RFC1962]",
+ 0x00fd: "compressed datagram [RFC1962]",
+ 0x00ff: "reserved (compression inefficient)",
+ 0x0201: "802.1d Hello Packets",
+ 0x0203: "IBM Source Routing BPDU",
+ 0x0205: "DEC LANBridge100 Spanning Tree",
+ 0x0207: "Cisco Discovery Protocol [Sastry]",
+ 0x0209: "Netcs Twin Routing [Korfmacher]",
+ 0x020b: "STP - Scheduled Transfer Protocol [Segal]",
+ 0x020d: "EDP - Extreme Discovery Protocol [Grosser]",
+ 0x0211: "Optical Supervisory Channel Protocol (OSCP)[Prasad]",
+ 0x0213: "Optical Supervisory Channel Protocol (OSCP)[Prasad]",
+ 0x0231: "Luxcom",
+ 0x0233: "Sigma Network Systems",
+ 0x0235: "Apple Client Server Protocol [Ridenour]",
+ 0x0281: "MPLS Unicast [RFC3032] ",
+ 0x0283: "MPLS Multicast [RFC3032]",
+ 0x0285: "IEEE p1284.4 standard - data packets [Batchelder]",
+ 0x0287: "ETSI TETRA Network Protocol Type 1 [Nieminen]",
+ 0x0289: "Multichannel Flow Treatment Protocol [McCann]",
+ 0x2063: "RTP IPHC Compressed TCP No Delta [RFC3544]",
+ 0x2065: "RTP IPHC Context State [RFC3544]",
+ 0x2067: "RTP IPHC Compressed UDP 16 [RFC3544]",
+ 0x2069: "RTP IPHC Compressed RTP 16 [RFC3544]",
+ 0x4001: "Cray Communications Control Protocol [Stage]",
+ 0x4003: "CDPD Mobile Network Registration Protocol [Quick]",
+ 0x4005: "Expand accelerator protocol [Rachmani]",
+ 0x4007: "ODSICP NCP [Arvind]",
+ 0x4009: "DOCSIS DLL [Gaedtke]",
+ 0x400B: "Cetacean Network Detection Protocol [Siller]",
+ 0x4021: "Stacker LZS [Simpson]",
+ 0x4023: "RefTek Protocol [Banfill]",
+ 0x4025: "Fibre Channel [Rajagopal]",
+ 0x4027: "EMIT Protocols [Eastham]",
+ 0x405b: "Vendor-Specific Protocol (VSP) [RFC3772]",
+ 0x8021: "Internet Protocol Control Protocol",
+ 0x8023: "OSI Network Layer Control Protocol",
+ 0x8025: "Xerox NS IDP Control Protocol",
+ 0x8027: "DECnet Phase IV Control Protocol",
+ 0x8029: "Appletalk Control Protocol",
+ 0x802b: "Novell IPX Control Protocol",
+ 0x802d: "reserved",
+ 0x802f: "reserved",
+ 0x8031: "Bridging NCP",
+ 0x8033: "Stream Protocol Control Protocol",
+ 0x8035: "Banyan Vines Control Protocol",
+ 0x8037: "reserved (until 1993)",
+ 0x8039: "reserved",
+ 0x803b: "reserved",
+ 0x803d: "Multi-Link Control Protocol",
+ 0x803f: "NETBIOS Framing Control Protocol",
+ 0x8041: "Cisco Systems Control Protocol",
+ 0x8043: "Ascom Timeplex",
+ 0x8045: "Fujitsu LBLB Control Protocol",
+ 0x8047: "DCA Remote Lan Network Control Protocol (RLNCP)",
+ 0x8049: "Serial Data Control Protocol (PPP-SDCP)",
+ 0x804b: "SNA over 802.2 Control Protocol",
+ 0x804d: "SNA Control Protocol",
+ 0x804f: "IP6 Header Compression Control Protocol",
+ 0x8051: "KNX Bridging Control Protocol [ianp]",
+ 0x8053: "Encryption Control Protocol [Meyer]",
+ 0x8055: "Individual Link Encryption Control Protocol [Meyer]",
+ 0x8057: "IPv6 Control Protovol [Hinden]",
+ 0x8059: "PPP Muxing Control Protocol [RFC3153]",
+ 0x805b: "Vendor-Specific Network Control Protocol (VSNCP) [RFC3772]",
+ 0x806f: "Stampede Bridging Control Protocol",
+ 0x8073: "MP+ Control Protocol [Smith]",
+ 0x8071: "Reserved [Fox]",
+ 0x807d: "Not Used - reserved [RFC1661]",
+ 0x8081: "Reserved Until 20-Oct-2000 [IANA]",
+ 0x8083: "Reserved Until 20-Oct-2000 [IANA]",
+ 0x80c1: "NTCITS IPI Control Protocol [Ungar]",
+ 0x80cf: "Not Used - reserved [RFC1661]",
+ 0x80fb: "single link compression in multilink control [RFC1962]",
+ 0x80fd: "Compression Control Protocol [RFC1962]",
+ 0x80ff: "Not Used - reserved [RFC1661]",
+ 0x8207: "Cisco Discovery Protocol Control [Sastry]",
+ 0x8209: "Netcs Twin Routing [Korfmacher]",
+ 0x820b: "STP - Control Protocol [Segal]",
+ 0x820d: "EDPCP - Extreme Discovery Protocol Ctrl Prtcl [Grosser]",
+ 0x8235: "Apple Client Server Protocol Control [Ridenour]",
+ 0x8281: "MPLSCP [RFC3032]",
+ 0x8285: "IEEE p1284.4 standard - Protocol Control [Batchelder]",
+ 0x8287: "ETSI TETRA TNP1 Control Protocol [Nieminen]",
+ 0x8289: "Multichannel Flow Treatment Protocol [McCann]",
+ 0xc021: "Link Control Protocol",
+ 0xc023: "Password Authentication Protocol",
+ 0xc025: "Link Quality Report",
+ 0xc027: "Shiva Password Authentication Protocol",
+ 0xc029: "CallBack Control Protocol (CBCP)",
+ 0xc02b: "BACP Bandwidth Allocation Control Protocol [RFC2125]",
+ 0xc02d: "BAP [RFC2125]",
+ 0xc05b: "Vendor-Specific Authentication Protocol (VSAP) [RFC3772]",
+ 0xc081: "Container Control Protocol [KEN]",
+ 0xc223: "Challenge Handshake Authentication Protocol",
+ 0xc225: "RSA Authentication Protocol [Narayana]",
+ 0xc227: "Extensible Authentication Protocol [RFC2284]",
+ 0xc229: "Mitsubishi Security Info Exch Ptcl (SIEP) [Seno]",
+ 0xc26f: "Stampede Bridging Authorization Protocol",
+ 0xc281: "Proprietary Authentication Protocol [KEN]",
+ 0xc283: "Proprietary Authentication Protocol [Tackabury]",
+ 0xc481: "Proprietary Node ID Authentication Protocol [KEN]"}
+
+
+class HDLC(Packet):
+ fields_desc = [ XByteField("address",0xff),
+ XByteField("control",0x03) ]
+
+class PPP(Packet):
+ name = "PPP Link Layer"
+ fields_desc = [ ShortEnumField("proto", 0x0021, _PPP_proto) ]
+ @classmethod
+ def dispatch_hook(cls, _pkt=None, *args, **kargs):
+ if _pkt and _pkt[0] == 0xff:
+ cls = HDLC
+ return cls
+
+_PPP_conftypes = { 1:"Configure-Request",
+ 2:"Configure-Ack",
+ 3:"Configure-Nak",
+ 4:"Configure-Reject",
+ 5:"Terminate-Request",
+ 6:"Terminate-Ack",
+ 7:"Code-Reject",
+ 8:"Protocol-Reject",
+ 9:"Echo-Request",
+ 10:"Echo-Reply",
+ 11:"Discard-Request",
+ 14:"Reset-Request",
+ 15:"Reset-Ack",
+ }
+
+
+### PPP IPCP stuff (RFC 1332)
+
+# All IPCP options are defined below (names and associated classes)
+_PPP_ipcpopttypes = { 1:"IP-Addresses (Deprecated)",
+ 2:"IP-Compression-Protocol",
+ 3:"IP-Address",
+ 4:"Mobile-IPv4", # not implemented, present for completeness
+ 129:"Primary-DNS-Address",
+ 130:"Primary-NBNS-Address",
+ 131:"Secondary-DNS-Address",
+ 132:"Secondary-NBNS-Address"}
+
+
+class PPP_IPCP_Option(Packet):
+ name = "PPP IPCP Option"
+ fields_desc = [ ByteEnumField("type" , None , _PPP_ipcpopttypes),
+ FieldLenField("len", None, length_of="data", fmt="B", adjust=lambda p,x:x+2),
+ StrLenField("data", b"", length_from=lambda p:max(0,p.len-2)) ]
+ def extract_padding(self, pay):
+ return b"",pay
+
+ registered_options = {}
+ @classmethod
+ def register_variant(cls):
+ cls.registered_options[cls.type.default] = cls
+ @classmethod
+ def dispatch_hook(cls, _pkt=None, *args, **kargs):
+ if _pkt:
+ #o = ord(_pkt[0])
+ o = (_pkt[0])
+ return cls.registered_options.get(o, cls)
+ return cls
+
+
+class PPP_IPCP_Option_IPAddress(PPP_IPCP_Option):
+ name = "PPP IPCP Option: IP Address"
+ fields_desc = [ ByteEnumField("type" , 3 , _PPP_ipcpopttypes),
+ FieldLenField("len", None, length_of="data", fmt="B", adjust=lambda p,x:x+2),
+ IPField("data","0.0.0.0"),
+ ConditionalField(StrLenField("garbage","", length_from=lambda pkt:pkt.len-6), lambda p:p.len!=6) ]
+
+class PPP_IPCP_Option_DNS1(PPP_IPCP_Option):
+ name = "PPP IPCP Option: DNS1 Address"
+ fields_desc = [ ByteEnumField("type" , 129 , _PPP_ipcpopttypes),
+ FieldLenField("len", None, length_of="data", fmt="B", adjust=lambda p,x:x+2),
+ IPField("data","0.0.0.0"),
+ ConditionalField(StrLenField("garbage","", length_from=lambda pkt:pkt.len-6), lambda p:p.len!=6) ]
+
+class PPP_IPCP_Option_DNS2(PPP_IPCP_Option):
+ name = "PPP IPCP Option: DNS2 Address"
+ fields_desc = [ ByteEnumField("type" , 131 , _PPP_ipcpopttypes),
+ FieldLenField("len", None, length_of="data", fmt="B", adjust=lambda p,x:x+2),
+ IPField("data","0.0.0.0"),
+ ConditionalField(StrLenField("garbage","", length_from=lambda pkt:pkt.len-6), lambda p:p.len!=6) ]
+
+class PPP_IPCP_Option_NBNS1(PPP_IPCP_Option):
+ name = "PPP IPCP Option: NBNS1 Address"
+ fields_desc = [ ByteEnumField("type" , 130 , _PPP_ipcpopttypes),
+ FieldLenField("len", None, length_of="data", fmt="B", adjust=lambda p,x:x+2),
+ IPField("data","0.0.0.0"),
+ ConditionalField(StrLenField("garbage","", length_from=lambda pkt:pkt.len-6), lambda p:p.len!=6) ]
+
+class PPP_IPCP_Option_NBNS2(PPP_IPCP_Option):
+ name = "PPP IPCP Option: NBNS2 Address"
+ fields_desc = [ ByteEnumField("type" , 132 , _PPP_ipcpopttypes),
+ FieldLenField("len", None, length_of="data", fmt="B", adjust=lambda p,x:x+2),
+ IPField("data","0.0.0.0"),
+ ConditionalField(StrLenField("garbage","", length_from=lambda pkt:pkt.len-6), lambda p:p.len!=6) ]
+
+
+class PPP_IPCP(Packet):
+ fields_desc = [ ByteEnumField("code" , 1, _PPP_conftypes),
+ XByteField("id", 0 ),
+ FieldLenField("len" , None, fmt="H", length_of="options", adjust=lambda p,x:x+4 ),
+ PacketListField("options", [], PPP_IPCP_Option, length_from=lambda p:p.len-4,) ]
+
+
+### ECP
+
+_PPP_ecpopttypes = { 0:"OUI",
+ 1:"DESE", }
+
+class PPP_ECP_Option(Packet):
+ name = "PPP ECP Option"
+ fields_desc = [ ByteEnumField("type" , None , _PPP_ecpopttypes),
+ FieldLenField("len", None, length_of="data", fmt="B", adjust=lambda p,x:x+2),
+ StrLenField("data", "", length_from=lambda p:max(0,p.len-2)) ]
+ def extract_padding(self, pay):
+ return b"",pay
+
+ registered_options = {}
+ @classmethod
+ def register_variant(cls):
+ cls.registered_options[cls.type.default] = cls
+ @classmethod
+ def dispatch_hook(cls, _pkt=None, *args, **kargs):
+ if _pkt:
+ #o = ord(_pkt[0])
+ o = (_pkt[0])
+ return cls.registered_options.get(o, cls)
+ return cls
+
+class PPP_ECP_Option_OUI(PPP_ECP_Option):
+ fields_desc = [ ByteEnumField("type" , 0 , _PPP_ecpopttypes),
+ FieldLenField("len", None, length_of="data", fmt="B", adjust=lambda p,x:x+6),
+ StrFixedLenField("oui","",3),
+ ByteField("subtype",0),
+ StrLenField("data", "", length_from=lambda p:p.len-6) ]
+
+
+
+class PPP_ECP(Packet):
+ fields_desc = [ ByteEnumField("code" , 1, _PPP_conftypes),
+ XByteField("id", 0 ),
+ FieldLenField("len" , None, fmt="H", length_of="options", adjust=lambda p,x:x+4 ),
+ PacketListField("options", [], PPP_ECP_Option, length_from=lambda p:p.len-4,) ]
+
+bind_layers( Ether, PPPoED, type=0x8863)
+bind_layers( Ether, PPPoE, type=0x8864)
+bind_layers( CookedLinux, PPPoED, proto=0x8863)
+bind_layers( CookedLinux, PPPoE, proto=0x8864)
+bind_layers( PPPoE, PPP, code=0)
+bind_layers( HDLC, PPP, )
+bind_layers( PPP, IP, proto=33)
+bind_layers( PPP, PPP_IPCP, proto=0x8021)
+bind_layers( PPP, PPP_ECP, proto=0x8053)
+bind_layers( Ether, PPP_IPCP, type=0x8021)
+bind_layers( Ether, PPP_ECP, type=0x8053)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/radius.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/radius.py
new file mode 100644
index 00000000..13239603
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/radius.py
@@ -0,0 +1,65 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+RADIUS (Remote Authentication Dial In User Service)
+"""
+
+import struct
+from scapy.packet import *
+from scapy.fields import *
+
+class Radius(Packet):
+ name = "Radius"
+ fields_desc = [ ByteEnumField("code", 1, {1: "Access-Request",
+ 2: "Access-Accept",
+ 3: "Access-Reject",
+ 4: "Accounting-Request",
+ 5: "Accounting-Accept",
+ 6: "Accounting-Status",
+ 7: "Password-Request",
+ 8: "Password-Ack",
+ 9: "Password-Reject",
+ 10: "Accounting-Message",
+ 11: "Access-Challenge",
+ 12: "Status-Server",
+ 13: "Status-Client",
+ 21: "Resource-Free-Request",
+ 22: "Resource-Free-Response",
+ 23: "Resource-Query-Request",
+ 24: "Resource-Query-Response",
+ 25: "Alternate-Resource-Reclaim-Request",
+ 26: "NAS-Reboot-Request",
+ 27: "NAS-Reboot-Response",
+ 29: "Next-Passcode",
+ 30: "New-Pin",
+ 31: "Terminate-Session",
+ 32: "Password-Expired",
+ 33: "Event-Request",
+ 34: "Event-Response",
+ 40: "Disconnect-Request",
+ 41: "Disconnect-ACK",
+ 42: "Disconnect-NAK",
+ 43: "CoA-Request",
+ 44: "CoA-ACK",
+ 45: "CoA-NAK",
+ 50: "IP-Address-Allocate",
+ 51: "IP-Address-Release",
+ 253: "Experimental-use",
+ 254: "Reserved",
+ 255: "Reserved"} ),
+ ByteField("id", 0),
+ ShortField("len", None),
+ StrFixedLenField("authenticator","",16) ]
+ def post_build(self, p, pay):
+ p += pay
+ l = self.len
+ if l is None:
+ l = len(p)
+ p = p[:2]+struct.pack("!H",l)+p[4:]
+ return p
+
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/rip.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/rip.py
new file mode 100644
index 00000000..1507fe5c
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/rip.py
@@ -0,0 +1,74 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+RIP (Routing Information Protocol).
+"""
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.inet import UDP
+
+class RIP(Packet):
+ name = "RIP header"
+ fields_desc = [
+ ByteEnumField("cmd", 1, {1:"req", 2:"resp", 3:"traceOn", 4:"traceOff",
+ 5:"sun", 6:"trigReq", 7:"trigResp", 8:"trigAck",
+ 9:"updateReq", 10:"updateResp", 11:"updateAck"}),
+ ByteField("version", 1),
+ ShortField("null", 0),
+ ]
+
+ def guess_payload_class(self, payload):
+ if payload[:2] == b"\xff\xff":
+ return RIPAuth
+ else:
+ return Packet.guess_payload_class(self, payload)
+
+class RIPEntry(RIP):
+ name = "RIP entry"
+ fields_desc = [
+ ShortEnumField("AF", 2, {2:"IP"}),
+ ShortField("RouteTag", 0),
+ IPField("addr", "0.0.0.0"),
+ IPField("mask", "0.0.0.0"),
+ IPField("nextHop", "0.0.0.0"),
+ IntEnumField("metric", 1, {16:"Unreach"}),
+ ]
+
+class RIPAuth(Packet):
+ name = "RIP authentication"
+ fields_desc = [
+ ShortEnumField("AF", 0xffff, {0xffff:"Auth"}),
+ ShortEnumField("authtype", 2, {1:"md5authdata", 2:"simple", 3:"md5"}),
+ ConditionalField(StrFixedLenField("password", None, 16),
+ lambda pkt: pkt.authtype == 2),
+ ConditionalField(ShortField("digestoffset", 0),
+ lambda pkt: pkt.authtype == 3),
+ ConditionalField(ByteField("keyid", 0),
+ lambda pkt: pkt.authtype == 3),
+ ConditionalField(ByteField("authdatalen", 0),
+ lambda pkt: pkt.authtype == 3),
+ ConditionalField(IntField("seqnum", 0),
+ lambda pkt: pkt.authtype == 3),
+ ConditionalField(StrFixedLenField("zeropad", None, 8),
+ lambda pkt: pkt.authtype == 3),
+ ConditionalField(StrLenField("authdata", None,
+ length_from=lambda pkt: pkt.md5datalen),
+ lambda pkt: pkt.authtype == 1)
+ ]
+
+ def pre_dissect(self, s):
+ if s[2:4] == b"\x00\x01":
+ self.md5datalen = len(s) - 4
+
+ return s
+
+
+bind_layers( UDP, RIP, sport=520)
+bind_layers( UDP, RIP, dport=520)
+bind_layers( RIP, RIPEntry, )
+bind_layers( RIPEntry, RIPEntry, )
+bind_layers( RIPAuth, RIPEntry, )
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/rtp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/rtp.py
new file mode 100644
index 00000000..629dccdd
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/rtp.py
@@ -0,0 +1,40 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+RTP (Real-time Transport Protocol).
+"""
+
+from scapy.packet import *
+from scapy.fields import *
+
+_rtp_payload_types = {
+ # http://www.iana.org/assignments/rtp-parameters
+ 0: 'G.711 PCMU', 3: 'GSM',
+ 4: 'G723', 5: 'DVI4',
+ 6: 'DVI4', 7: 'LPC',
+ 8: 'PCMA', 9: 'G722',
+ 10: 'L16', 11: 'L16',
+ 12: 'QCELP', 13: 'CN',
+ 14: 'MPA', 15: 'G728',
+ 16: 'DVI4', 17: 'DVI4',
+ 18: 'G729', 25: 'CelB',
+ 26: 'JPEG', 28: 'nv',
+ 31: 'H261', 32: 'MPV',
+ 33: 'MP2T', 34: 'H263' }
+
+class RTP(Packet):
+ name="RTP"
+ fields_desc = [ BitField('version', 2, 2),
+ BitField('padding', 0, 1),
+ BitField('extension', 0, 1),
+ BitFieldLenField('numsync', None, 4, count_of='sync'),
+ BitField('marker', 0, 1),
+ BitEnumField('payload', 0, 7, _rtp_payload_types),
+ ShortField('sequence', 0),
+ IntField('timestamp', 0),
+ IntField('sourcesync', 0),
+ FieldListField('sync', [], IntField("id",0), count_from=lambda pkt:pkt.numsync) ]
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/sctp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/sctp.py
new file mode 100644
index 00000000..57712112
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/sctp.py
@@ -0,0 +1,439 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## Copyright (C) 6WIND <olivier.matz@6wind.com>
+## This program is published under a GPLv2 license
+
+"""
+SCTP (Stream Control Transmission Protocol).
+"""
+
+import struct
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.inet import IP
+from scapy.layers.inet6 import IP6Field
+
+IPPROTO_SCTP=132
+
+# crc32-c (Castagnoli) (crc32c_poly=0x1EDC6F41)
+crc32c_table = [
+ 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
+ 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
+ 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
+ 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
+ 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
+ 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
+ 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
+ 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
+ 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
+ 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
+ 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
+ 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
+ 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
+ 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
+ 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
+ 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
+ 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
+ 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
+ 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
+ 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
+ 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
+ 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
+ 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
+ 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
+ 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
+ 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
+ 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
+ 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
+ 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
+ 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
+ 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
+ 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
+ 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
+ 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
+ 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
+ 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
+ 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
+ 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
+ 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
+ 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
+ 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
+ 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
+ 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
+ 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
+ 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
+ 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
+ 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
+ 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
+ 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
+ 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
+ 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
+ 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
+ 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
+ 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
+ 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
+ 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
+ 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
+ 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
+ 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
+ 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
+ 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
+ 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
+ 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
+ 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351,
+ ]
+
+def crc32c(buf):
+ crc = 0xffffffff
+ for c in buf:
+ #crc = (crc>>8) ^ crc32c_table[(crc^(ord(c))) & 0xFF]
+ crc = (crc>>8) ^ crc32c_table[(crc^(c)) & 0xFF]
+ crc = (~crc) & 0xffffffff
+ # reverse endianness
+ return struct.unpack(">I",struct.pack("<I", crc))[0]
+
+# old checksum (RFC2960)
+"""
+BASE = 65521 # largest prime smaller than 65536
+def update_adler32(adler, buf):
+ s1 = adler & 0xffff
+ s2 = (adler >> 16) & 0xffff
+ print(s1,s2)
+
+ for c in buf:
+ print(ord(c))
+ s1 = (s1 + ord(c)) % BASE
+ s2 = (s2 + s1) % BASE
+ print(s1,s2)
+ return (s2 << 16) + s1
+
+def sctp_checksum(buf):
+ return update_adler32(1, buf)
+"""
+
+sctpchunktypescls = {
+ 0 : "SCTPChunkData",
+ 1 : "SCTPChunkInit",
+ 2 : "SCTPChunkInitAck",
+ 3 : "SCTPChunkSACK",
+ 4 : "SCTPChunkHeartbeatReq",
+ 5 : "SCTPChunkHeartbeatAck",
+ 6 : "SCTPChunkAbort",
+ 7 : "SCTPChunkShutdown",
+ 8 : "SCTPChunkShutdownAck",
+ 9 : "SCTPChunkError",
+ 10 : "SCTPChunkCookieEcho",
+ 11 : "SCTPChunkCookieAck",
+ 14 : "SCTPChunkShutdownComplete",
+ }
+
+sctpchunktypes = {
+ 0 : "data",
+ 1 : "init",
+ 2 : "init-ack",
+ 3 : "sack",
+ 4 : "heartbeat-req",
+ 5 : "heartbeat-ack",
+ 6 : "abort",
+ 7 : "shutdown",
+ 8 : "shutdown-ack",
+ 9 : "error",
+ 10 : "cookie-echo",
+ 11 : "cookie-ack",
+ 14 : "shutdown-complete",
+ }
+
+sctpchunkparamtypescls = {
+ 1 : "SCTPChunkParamHearbeatInfo",
+ 5 : "SCTPChunkParamIPv4Addr",
+ 6 : "SCTPChunkParamIPv6Addr",
+ 7 : "SCTPChunkParamStateCookie",
+ 8 : "SCTPChunkParamUnrocognizedParam",
+ 9 : "SCTPChunkParamCookiePreservative",
+ 11 : "SCTPChunkParamHostname",
+ 12 : "SCTPChunkParamSupportedAddrTypes",
+ 32768 : "SCTPChunkParamECNCapable",
+ 49152 : "SCTPChunkParamFwdTSN",
+ 49158 : "SCTPChunkParamAdaptationLayer",
+ }
+
+sctpchunkparamtypes = {
+ 1 : "heartbeat-info",
+ 5 : "IPv4",
+ 6 : "IPv6",
+ 7 : "state-cookie",
+ 8 : "unrecognized-param",
+ 9 : "cookie-preservative",
+ 11 : "hostname",
+ 12 : "addrtypes",
+ 32768 : "ecn-capable",
+ 49152 : "fwd-tsn-supported",
+ 49158 : "adaptation-layer",
+ }
+
+############## SCTP header
+
+# Dummy class to guess payload type (variable parameters)
+class _SCTPChunkGuessPayload:
+ def default_payload_class(self,p):
+ if len(p) < 4:
+ return conf.padding_layer
+ else:
+ t = p[0]
+ return globals().get(sctpchunktypescls.get(t, "Raw"), conf.raw_layer)
+
+
+class SCTP(_SCTPChunkGuessPayload, Packet):
+ fields_desc = [ ShortField("sport", None),
+ ShortField("dport", None),
+ XIntField("tag", None),
+ XIntField("chksum", None), ]
+ def answers(self, other):
+ if not isinstance(other, SCTP):
+ return 0
+ if conf.checkIPsrc:
+ if not ((self.sport == other.dport) and
+ (self.dport == other.sport)):
+ return 0
+ return 1
+ def post_build(self, p, pay):
+ p += pay
+ if self.chksum is None:
+ crc = crc32c(bytes(p))
+ p = p[:8]+struct.pack(">I", crc)+p[12:]
+ return p
+
+############## SCTP Chunk variable params
+
+class ChunkParamField(PacketListField):
+ islist = 1
+ holds_packets=1
+ def __init__(self, name, default, count_from=None, length_from=None):
+ PacketListField.__init__(self, name, default, conf.raw_layer, count_from=count_from, length_from=length_from)
+ def m2i(self, p, m):
+ cls = conf.raw_layer
+ if len(m) >= 4:
+ #t = ord(m[0]) * 256 + ord(m[1])
+ t = (m[0]) * 256 + (m[1])
+ cls = globals().get(sctpchunkparamtypescls.get(t, "Raw"), conf.raw_layer)
+ return cls(m)
+
+# dummy class to avoid Raw() after Chunk params
+class _SCTPChunkParam:
+ def extract_padding(self, s):
+ return b"",s[:]
+
+class SCTPChunkParamHearbeatInfo(_SCTPChunkParam, Packet):
+ fields_desc = [ ShortEnumField("type", 1, sctpchunkparamtypes),
+ FieldLenField("len", None, length_of="data",
+ adjust = lambda pkt,x:x+4),
+ PadField(StrLenField("data", b"",
+ length_from=lambda pkt: pkt.len-4),
+ 4, padwith=b"\x00"),]
+
+class SCTPChunkParamIPv4Addr(_SCTPChunkParam, Packet):
+ fields_desc = [ ShortEnumField("type", 5, sctpchunkparamtypes),
+ ShortField("len", 8),
+ IPField("addr","127.0.0.1"), ]
+
+class SCTPChunkParamIPv6Addr(_SCTPChunkParam, Packet):
+ fields_desc = [ ShortEnumField("type", 6, sctpchunkparamtypes),
+ ShortField("len", 20),
+ IP6Field("addr","::1"), ]
+
+class SCTPChunkParamStateCookie(_SCTPChunkParam, Packet):
+ fields_desc = [ ShortEnumField("type", 7, sctpchunkparamtypes),
+ FieldLenField("len", None, length_of="cookie",
+ adjust = lambda pkt,x:x+4),
+ PadField(StrLenField("cookie", b"",
+ length_from=lambda pkt: pkt.len-4),
+ 4, padwith=b"\x00"),]
+
+class SCTPChunkParamUnrocognizedParam(_SCTPChunkParam, Packet):
+ fields_desc = [ ShortEnumField("type", 8, sctpchunkparamtypes),
+ FieldLenField("len", None, length_of="param",
+ adjust = lambda pkt,x:x+4),
+ PadField(StrLenField("param", b"",
+ length_from=lambda pkt: pkt.len-4),
+ 4, padwith=b"\x00"),]
+
+class SCTPChunkParamCookiePreservative(_SCTPChunkParam, Packet):
+ fields_desc = [ ShortEnumField("type", 9, sctpchunkparamtypes),
+ ShortField("len", 8),
+ XIntField("sug_cookie_inc", None), ]
+
+class SCTPChunkParamHostname(_SCTPChunkParam, Packet):
+ fields_desc = [ ShortEnumField("type", 11, sctpchunkparamtypes),
+ FieldLenField("len", None, length_of="hostname",
+ adjust = lambda pkt,x:x+4),
+ PadField(StrLenField("hostname", b"",
+ length_from=lambda pkt: pkt.len-4),
+ 4, padwith=b"\x00"), ]
+
+class SCTPChunkParamSupportedAddrTypes(_SCTPChunkParam, Packet):
+ fields_desc = [ ShortEnumField("type", 12, sctpchunkparamtypes),
+ FieldLenField("len", None, length_of="addr_type_list",
+ adjust = lambda pkt,x:x+4),
+ PadField(FieldListField("addr_type_list", [ "IPv4" ],
+ ShortEnumField("addr_type", 5, sctpchunkparamtypes),
+ length_from=lambda pkt: pkt.len-4),
+ 4, padwith=b"\x00"), ]
+
+class SCTPChunkParamECNCapable(_SCTPChunkParam, Packet):
+ fields_desc = [ ShortEnumField("type", 32768, sctpchunkparamtypes),
+ ShortField("len", 4), ]
+
+class SCTPChunkParamFwdTSN(_SCTPChunkParam, Packet):
+ fields_desc = [ ShortEnumField("type", 49152, sctpchunkparamtypes),
+ ShortField("len", 4), ]
+
+class SCTPChunkParamAdaptationLayer(_SCTPChunkParam, Packet):
+ fields_desc = [ ShortEnumField("type", 49158, sctpchunkparamtypes),
+ ShortField("len", 8),
+ XIntField("indication", None), ]
+
+############## SCTP Chunks
+
+class SCTPChunkData(_SCTPChunkGuessPayload, Packet):
+ fields_desc = [ ByteEnumField("type", 0, sctpchunktypes),
+ BitField("reserved", None, 4),
+ BitField("delay_sack", 0, 1),
+ BitField("unordered", 0, 1),
+ BitField("beginning", 0, 1),
+ BitField("ending", 0, 1),
+ FieldLenField("len", None, length_of="data", adjust = lambda pkt,x:x+16),
+ XIntField("tsn", None),
+ XShortField("stream_id", None),
+ XShortField("stream_seq", None),
+ XIntField("proto_id", None),
+ PadField(StrLenField("data", None, length_from=lambda pkt: pkt.len-16),
+ 4, padwith=b"\x00"),
+ ]
+
+class SCTPChunkInit(_SCTPChunkGuessPayload, Packet):
+ fields_desc = [ ByteEnumField("type", 1, sctpchunktypes),
+ XByteField("flags", None),
+ FieldLenField("len", None, length_of="params", adjust = lambda pkt,x:x+20),
+ XIntField("init_tag", None),
+ IntField("a_rwnd", None),
+ ShortField("n_out_streams", None),
+ ShortField("n_in_streams", None),
+ XIntField("init_tsn", None),
+ ChunkParamField("params", None, length_from=lambda pkt:pkt.len-20),
+ ]
+
+class SCTPChunkInitAck(_SCTPChunkGuessPayload, Packet):
+ fields_desc = [ ByteEnumField("type", 2, sctpchunktypes),
+ XByteField("flags", None),
+ FieldLenField("len", None, length_of="params", adjust = lambda pkt,x:x+20),
+ XIntField("init_tag", None),
+ IntField("a_rwnd", None),
+ ShortField("n_out_streams", None),
+ ShortField("n_in_streams", None),
+ XIntField("init_tsn", None),
+ ChunkParamField("params", None, length_from=lambda pkt:pkt.len-20),
+ ]
+
+class GapAckField(Field):
+ def __init__(self, name, default):
+ Field.__init__(self, name, default, "4s")
+ def i2m(self, pkt, x):
+ if x is None:
+ return b"\0\0\0\0"
+ sta, end = map(int, x.split(":"))
+ args = tuple([">HH", sta, end])
+ return struct.pack(*args)
+ def m2i(self, pkt, x):
+ return "%d:%d"%(struct.unpack(">HH", x))
+ def any2i(self, pkt, x):
+ if type(x) is tuple and len(x) == 2:
+ return "%d:%d"%(x)
+ return x
+
+class SCTPChunkSACK(_SCTPChunkGuessPayload, Packet):
+ fields_desc = [ ByteEnumField("type", 3, sctpchunktypes),
+ XByteField("flags", None),
+ ShortField("len", None),
+ XIntField("cumul_tsn_ack", None),
+ IntField("a_rwnd", None),
+ FieldLenField("n_gap_ack", None, count_of="gap_ack_list"),
+ FieldLenField("n_dup_tsn", None, count_of="dup_tsn_list"),
+ FieldListField("gap_ack_list", [ ], GapAckField("gap_ack", None), count_from=lambda pkt:pkt.n_gap_ack),
+ FieldListField("dup_tsn_list", [ ], XIntField("dup_tsn", None), count_from=lambda pkt:pkt.n_dup_tsn),
+ ]
+
+ def post_build(self, p, pay):
+ if self.len is None:
+ p = p[:2] + struct.pack(">H", len(p)) + p[4:]
+ return p+pay
+
+
+class SCTPChunkHeartbeatReq(_SCTPChunkGuessPayload, Packet):
+ fields_desc = [ ByteEnumField("type", 4, sctpchunktypes),
+ XByteField("flags", None),
+ FieldLenField("len", None, length_of="params", adjust = lambda pkt,x:x+4),
+ ChunkParamField("params", None, length_from=lambda pkt:pkt.len-4),
+ ]
+
+class SCTPChunkHeartbeatAck(_SCTPChunkGuessPayload, Packet):
+ fields_desc = [ ByteEnumField("type", 5, sctpchunktypes),
+ XByteField("flags", None),
+ FieldLenField("len", None, length_of="params", adjust = lambda pkt,x:x+4),
+ ChunkParamField("params", None, length_from=lambda pkt:pkt.len-4),
+ ]
+
+class SCTPChunkAbort(_SCTPChunkGuessPayload, Packet):
+ fields_desc = [ ByteEnumField("type", 6, sctpchunktypes),
+ BitField("reserved", None, 7),
+ BitField("TCB", 0, 1),
+ FieldLenField("len", None, length_of="error_causes", adjust = lambda pkt,x:x+4),
+ PadField(StrLenField("error_causes", b"", length_from=lambda pkt: pkt.len-4),
+ 4, padwith=b"\x00"),
+ ]
+
+class SCTPChunkShutdown(_SCTPChunkGuessPayload, Packet):
+ fields_desc = [ ByteEnumField("type", 7, sctpchunktypes),
+ XByteField("flags", None),
+ ShortField("len", 8),
+ XIntField("cumul_tsn_ack", None),
+ ]
+
+class SCTPChunkShutdownAck(_SCTPChunkGuessPayload, Packet):
+ fields_desc = [ ByteEnumField("type", 8, sctpchunktypes),
+ XByteField("flags", None),
+ ShortField("len", 4),
+ ]
+
+class SCTPChunkError(_SCTPChunkGuessPayload, Packet):
+ fields_desc = [ ByteEnumField("type", 9, sctpchunktypes),
+ XByteField("flags", None),
+ FieldLenField("len", None, length_of="error_causes", adjust = lambda pkt,x:x+4),
+ PadField(StrLenField("error_causes", b"", length_from=lambda pkt: pkt.len-4),
+ 4, padwith=b"\x00"),
+ ]
+
+class SCTPChunkCookieEcho(_SCTPChunkGuessPayload, Packet):
+ fields_desc = [ ByteEnumField("type", 10, sctpchunktypes),
+ XByteField("flags", None),
+ FieldLenField("len", None, length_of="cookie", adjust = lambda pkt,x:x+4),
+ PadField(StrLenField("cookie", b"", length_from=lambda pkt: pkt.len-4),
+ 4, padwith=b"\x00"),
+ ]
+
+class SCTPChunkCookieAck(_SCTPChunkGuessPayload, Packet):
+ fields_desc = [ ByteEnumField("type", 11, sctpchunktypes),
+ XByteField("flags", None),
+ ShortField("len", 4),
+ ]
+
+class SCTPChunkShutdownComplete(_SCTPChunkGuessPayload, Packet):
+ fields_desc = [ ByteEnumField("type", 14, sctpchunktypes),
+ BitField("reserved", None, 7),
+ BitField("TCB", 0, 1),
+ ShortField("len", 4),
+ ]
+
+bind_layers( IP, SCTP, proto=IPPROTO_SCTP)
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/sebek.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/sebek.py
new file mode 100644
index 00000000..c54e6728
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/sebek.py
@@ -0,0 +1,109 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Sebek: Linux kernel module for data collection on honeypots.
+"""
+
+from scapy.fields import *
+from scapy.packet import *
+from scapy.layers.inet import UDP
+
+
+### SEBEK
+
+
+class SebekHead(Packet):
+ name = "Sebek header"
+ fields_desc = [ XIntField("magic", 0xd0d0d0),
+ ShortField("version", 1),
+ ShortEnumField("type", 0, {"read":0, "write":1,
+ "socket":2, "open":3}),
+ IntField("counter", 0),
+ IntField("time_sec", 0),
+ IntField("time_usec", 0) ]
+ def mysummary(self):
+ return self.sprintf("Sebek Header v%SebekHead.version% %SebekHead.type%")
+
+# we need this because Sebek headers differ between v1 and v3, and
+# between v3 type socket and v3 others
+
+class SebekV1(Packet):
+ name = "Sebek v1"
+ fields_desc = [ IntField("pid", 0),
+ IntField("uid", 0),
+ IntField("fd", 0),
+ StrFixedLenField("command", "", 12),
+ FieldLenField("data_length", None, "data",fmt="I"),
+ StrLenField("data", "", length_from=lambda x:x.data_length) ]
+ def mysummary(self):
+ if isinstance(self.underlayer, SebekHead):
+ return self.underlayer.sprintf("Sebek v1 %SebekHead.type% (%SebekV1.command%)")
+ else:
+ return self.sprintf("Sebek v1 (%SebekV1.command%)")
+
+class SebekV3(Packet):
+ name = "Sebek v3"
+ fields_desc = [ IntField("parent_pid", 0),
+ IntField("pid", 0),
+ IntField("uid", 0),
+ IntField("fd", 0),
+ IntField("inode", 0),
+ StrFixedLenField("command", "", 12),
+ FieldLenField("data_length", None, "data",fmt="I"),
+ StrLenField("data", "", length_from=lambda x:x.data_length) ]
+ def mysummary(self):
+ if isinstance(self.underlayer, SebekHead):
+ return self.underlayer.sprintf("Sebek v%SebekHead.version% %SebekHead.type% (%SebekV3.command%)")
+ else:
+ return self.sprintf("Sebek v3 (%SebekV3.command%)")
+
+class SebekV2(SebekV3):
+ def mysummary(self):
+ if isinstance(self.underlayer, SebekHead):
+ return self.underlayer.sprintf("Sebek v%SebekHead.version% %SebekHead.type% (%SebekV2.command%)")
+ else:
+ return self.sprintf("Sebek v2 (%SebekV2.command%)")
+
+class SebekV3Sock(Packet):
+ name = "Sebek v2 socket"
+ fields_desc = [ IntField("parent_pid", 0),
+ IntField("pid", 0),
+ IntField("uid", 0),
+ IntField("fd", 0),
+ IntField("inode", 0),
+ StrFixedLenField("command", "", 12),
+ IntField("data_length", 15),
+ IPField("dip", "127.0.0.1"),
+ ShortField("dport", 0),
+ IPField("sip", "127.0.0.1"),
+ ShortField("sport", 0),
+ ShortEnumField("call", 0, { "bind":2,
+ "connect":3, "listen":4,
+ "accept":5, "sendmsg":16,
+ "recvmsg":17, "sendto":11,
+ "recvfrom":12}),
+ ByteEnumField("proto", 0, IP_PROTOS) ]
+ def mysummary(self):
+ if isinstance(self.underlayer, SebekHead):
+ return self.underlayer.sprintf("Sebek v%SebekHead.version% %SebekHead.type% (%SebekV3Sock.command%)")
+ else:
+ return self.sprintf("Sebek v3 socket (%SebekV3Sock.command%)")
+
+class SebekV2Sock(SebekV3Sock):
+ def mysummary(self):
+ if isinstance(self.underlayer, SebekHead):
+ return self.underlayer.sprintf("Sebek v%SebekHead.version% %SebekHead.type% (%SebekV2Sock.command%)")
+ else:
+ return self.sprintf("Sebek v2 socket (%SebekV2Sock.command%)")
+
+bind_layers( UDP, SebekHead, sport=1101)
+bind_layers( UDP, SebekHead, dport=1101)
+bind_layers( UDP, SebekHead, dport=1101, sport=1101)
+bind_layers( SebekHead, SebekV1, version=1)
+bind_layers( SebekHead, SebekV2Sock, version=2, type=2)
+bind_layers( SebekHead, SebekV2, version=2)
+bind_layers( SebekHead, SebekV3Sock, version=3, type=2)
+bind_layers( SebekHead, SebekV3, version=3)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/skinny.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/skinny.py
new file mode 100644
index 00000000..9fb6ac06
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/skinny.py
@@ -0,0 +1,161 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Cisco Skinny protocol.
+"""
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.inet import TCP
+
+# shamelessly ripped from Ethereal dissector
+skinny_messages = {
+# Station -> Callmanager
+ 0x0000: "KeepAliveMessage",
+ 0x0001: "RegisterMessage",
+ 0x0002: "IpPortMessage",
+ 0x0003: "KeypadButtonMessage",
+ 0x0004: "EnblocCallMessage",
+ 0x0005: "StimulusMessage",
+ 0x0006: "OffHookMessage",
+ 0x0007: "OnHookMessage",
+ 0x0008: "HookFlashMessage",
+ 0x0009: "ForwardStatReqMessage",
+ 0x000A: "SpeedDialStatReqMessage",
+ 0x000B: "LineStatReqMessage",
+ 0x000C: "ConfigStatReqMessage",
+ 0x000D: "TimeDateReqMessage",
+ 0x000E: "ButtonTemplateReqMessage",
+ 0x000F: "VersionReqMessage",
+ 0x0010: "CapabilitiesResMessage",
+ 0x0011: "MediaPortListMessage",
+ 0x0012: "ServerReqMessage",
+ 0x0020: "AlarmMessage",
+ 0x0021: "MulticastMediaReceptionAck",
+ 0x0022: "OpenReceiveChannelAck",
+ 0x0023: "ConnectionStatisticsRes",
+ 0x0024: "OffHookWithCgpnMessage",
+ 0x0025: "SoftKeySetReqMessage",
+ 0x0026: "SoftKeyEventMessage",
+ 0x0027: "UnregisterMessage",
+ 0x0028: "SoftKeyTemplateReqMessage",
+ 0x0029: "RegisterTokenReq",
+ 0x002A: "MediaTransmissionFailure",
+ 0x002B: "HeadsetStatusMessage",
+ 0x002C: "MediaResourceNotification",
+ 0x002D: "RegisterAvailableLinesMessage",
+ 0x002E: "DeviceToUserDataMessage",
+ 0x002F: "DeviceToUserDataResponseMessage",
+ 0x0030: "UpdateCapabilitiesMessage",
+ 0x0031: "OpenMultiMediaReceiveChannelAckMessage",
+ 0x0032: "ClearConferenceMessage",
+ 0x0033: "ServiceURLStatReqMessage",
+ 0x0034: "FeatureStatReqMessage",
+ 0x0035: "CreateConferenceResMessage",
+ 0x0036: "DeleteConferenceResMessage",
+ 0x0037: "ModifyConferenceResMessage",
+ 0x0038: "AddParticipantResMessage",
+ 0x0039: "AuditConferenceResMessage",
+ 0x0040: "AuditParticipantResMessage",
+ 0x0041: "DeviceToUserDataVersion1Message",
+# Callmanager -> Station */
+ 0x0081: "RegisterAckMessage",
+ 0x0082: "StartToneMessage",
+ 0x0083: "StopToneMessage",
+ 0x0085: "SetRingerMessage",
+ 0x0086: "SetLampMessage",
+ 0x0087: "SetHkFDetectMessage",
+ 0x0088: "SetSpeakerModeMessage",
+ 0x0089: "SetMicroModeMessage",
+ 0x008A: "StartMediaTransmission",
+ 0x008B: "StopMediaTransmission",
+ 0x008C: "StartMediaReception",
+ 0x008D: "StopMediaReception",
+ 0x008F: "CallInfoMessage",
+ 0x0090: "ForwardStatMessage",
+ 0x0091: "SpeedDialStatMessage",
+ 0x0092: "LineStatMessage",
+ 0x0093: "ConfigStatMessage",
+ 0x0094: "DefineTimeDate",
+ 0x0095: "StartSessionTransmission",
+ 0x0096: "StopSessionTransmission",
+ 0x0097: "ButtonTemplateMessage",
+ 0x0098: "VersionMessage",
+ 0x0099: "DisplayTextMessage",
+ 0x009A: "ClearDisplay",
+ 0x009B: "CapabilitiesReqMessage",
+ 0x009C: "EnunciatorCommandMessage",
+ 0x009D: "RegisterRejectMessage",
+ 0x009E: "ServerResMessage",
+ 0x009F: "Reset",
+ 0x0100: "KeepAliveAckMessage",
+ 0x0101: "StartMulticastMediaReception",
+ 0x0102: "StartMulticastMediaTransmission",
+ 0x0103: "StopMulticastMediaReception",
+ 0x0104: "StopMulticastMediaTransmission",
+ 0x0105: "OpenReceiveChannel",
+ 0x0106: "CloseReceiveChannel",
+ 0x0107: "ConnectionStatisticsReq",
+ 0x0108: "SoftKeyTemplateResMessage",
+ 0x0109: "SoftKeySetResMessage",
+ 0x0110: "SelectSoftKeysMessage",
+ 0x0111: "CallStateMessage",
+ 0x0112: "DisplayPromptStatusMessage",
+ 0x0113: "ClearPromptStatusMessage",
+ 0x0114: "DisplayNotifyMessage",
+ 0x0115: "ClearNotifyMessage",
+ 0x0116: "ActivateCallPlaneMessage",
+ 0x0117: "DeactivateCallPlaneMessage",
+ 0x0118: "UnregisterAckMessage",
+ 0x0119: "BackSpaceReqMessage",
+ 0x011A: "RegisterTokenAck",
+ 0x011B: "RegisterTokenReject",
+ 0x0042: "DeviceToUserDataResponseVersion1Message",
+ 0x011C: "StartMediaFailureDetection",
+ 0x011D: "DialedNumberMessage",
+ 0x011E: "UserToDeviceDataMessage",
+ 0x011F: "FeatureStatMessage",
+ 0x0120: "DisplayPriNotifyMessage",
+ 0x0121: "ClearPriNotifyMessage",
+ 0x0122: "StartAnnouncementMessage",
+ 0x0123: "StopAnnouncementMessage",
+ 0x0124: "AnnouncementFinishMessage",
+ 0x0127: "NotifyDtmfToneMessage",
+ 0x0128: "SendDtmfToneMessage",
+ 0x0129: "SubscribeDtmfPayloadReqMessage",
+ 0x012A: "SubscribeDtmfPayloadResMessage",
+ 0x012B: "SubscribeDtmfPayloadErrMessage",
+ 0x012C: "UnSubscribeDtmfPayloadReqMessage",
+ 0x012D: "UnSubscribeDtmfPayloadResMessage",
+ 0x012E: "UnSubscribeDtmfPayloadErrMessage",
+ 0x012F: "ServiceURLStatMessage",
+ 0x0130: "CallSelectStatMessage",
+ 0x0131: "OpenMultiMediaChannelMessage",
+ 0x0132: "StartMultiMediaTransmission",
+ 0x0133: "StopMultiMediaTransmission",
+ 0x0134: "MiscellaneousCommandMessage",
+ 0x0135: "FlowControlCommandMessage",
+ 0x0136: "CloseMultiMediaReceiveChannel",
+ 0x0137: "CreateConferenceReqMessage",
+ 0x0138: "DeleteConferenceReqMessage",
+ 0x0139: "ModifyConferenceReqMessage",
+ 0x013A: "AddParticipantReqMessage",
+ 0x013B: "DropParticipantReqMessage",
+ 0x013C: "AuditConferenceReqMessage",
+ 0x013D: "AuditParticipantReqMessage",
+ 0x013F: "UserToDeviceDataVersion1Message",
+ }
+
+
+
+class Skinny(Packet):
+ name="Skinny"
+ fields_desc = [ LEIntField("len",0),
+ LEIntField("res",0),
+ LEIntEnumField("msg",0,skinny_messages) ]
+
+bind_layers( TCP, Skinny, dport=2000)
+bind_layers( TCP, Skinny, sport=2000)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/smb.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/smb.py
new file mode 100644
index 00000000..f8e0da7a
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/smb.py
@@ -0,0 +1,354 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+SMB (Server Message Block), also known as CIFS.
+"""
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.netbios import NBTSession
+
+
+# SMB NetLogon Response Header
+class SMBNetlogon_Protocol_Response_Header(Packet):
+ name="SMBNetlogon Protocol Response Header"
+ fields_desc = [StrFixedLenField("Start","\xffSMB",4),
+ ByteEnumField("Command",0x25,{0x25:"Trans"}),
+ ByteField("Error_Class",0x02),
+ ByteField("Reserved",0),
+ LEShortField("Error_code",4),
+ ByteField("Flags",0),
+ LEShortField("Flags2",0x0000),
+ LEShortField("PIDHigh",0x0000),
+ LELongField("Signature",0x0),
+ LEShortField("Unused",0x0),
+ LEShortField("TID",0),
+ LEShortField("PID",0),
+ LEShortField("UID",0),
+ LEShortField("MID",0),
+ ByteField("WordCount",17),
+ LEShortField("TotalParamCount",0),
+ LEShortField("TotalDataCount",112),
+ LEShortField("MaxParamCount",0),
+ LEShortField("MaxDataCount",0),
+ ByteField("MaxSetupCount",0),
+ ByteField("unused2",0),
+ LEShortField("Flags3",0),
+ ByteField("TimeOut1",0xe8),
+ ByteField("TimeOut2",0x03),
+ LEShortField("unused3",0),
+ LEShortField("unused4",0),
+ LEShortField("ParamCount2",0),
+ LEShortField("ParamOffset",0),
+ LEShortField("DataCount",112),
+ LEShortField("DataOffset",92),
+ ByteField("SetupCount", 3),
+ ByteField("unused5", 0)]
+
+# SMB MailSlot Protocol
+class SMBMailSlot(Packet):
+ name = "SMB Mail Slot Protocol"
+ fields_desc = [LEShortField("opcode", 1),
+ LEShortField("priority", 1),
+ LEShortField("class", 2),
+ LEShortField("size", 135),
+ StrNullField("name","\\MAILSLOT\\NET\\GETDC660")]
+
+# SMB NetLogon Protocol Response Tail SAM
+class SMBNetlogon_Protocol_Response_Tail_SAM(Packet):
+ name = "SMB Netlogon Protocol Response Tail SAM"
+ fields_desc = [ByteEnumField("Command", 0x17, {0x12:"SAM logon request", 0x17:"SAM Active directory Response"}),
+ ByteField("unused", 0),
+ ShortField("Data1", 0),
+ ShortField("Data2", 0xfd01),
+ ShortField("Data3", 0),
+ ShortField("Data4", 0xacde),
+ ShortField("Data5", 0x0fe5),
+ ShortField("Data6", 0xd10a),
+ ShortField("Data7", 0x374c),
+ ShortField("Data8", 0x83e2),
+ ShortField("Data9", 0x7dd9),
+ ShortField("Data10", 0x3a16),
+ ShortField("Data11", 0x73ff),
+ ByteField("Data12", 0x04),
+ StrFixedLenField("Data13", "rmff", 4),
+ ByteField("Data14", 0x0),
+ ShortField("Data16", 0xc018),
+ ByteField("Data18", 0x0a),
+ StrFixedLenField("Data20", "rmff-win2k", 10),
+ ByteField("Data21", 0xc0),
+ ShortField("Data22", 0x18c0),
+ ShortField("Data23", 0x180a),
+ StrFixedLenField("Data24", "RMFF-WIN2K", 10),
+ ShortField("Data25", 0),
+ ByteField("Data26", 0x17),
+ StrFixedLenField("Data27", "Default-First-Site-Name", 23),
+ ShortField("Data28", 0x00c0),
+ ShortField("Data29", 0x3c10),
+ ShortField("Data30", 0x00c0),
+ ShortField("Data31", 0x0200),
+ ShortField("Data32", 0x0),
+ ShortField("Data33", 0xac14),
+ ShortField("Data34", 0x0064),
+ ShortField("Data35", 0x0),
+ ShortField("Data36", 0x0),
+ ShortField("Data37", 0x0),
+ ShortField("Data38", 0x0),
+ ShortField("Data39", 0x0d00),
+ ShortField("Data40", 0x0),
+ ShortField("Data41", 0xffff)]
+
+# SMB NetLogon Protocol Response Tail LM2.0
+class SMBNetlogon_Protocol_Response_Tail_LM20(Packet):
+ name = "SMB Netlogon Protocol Response Tail LM20"
+ fields_desc = [ByteEnumField("Command",0x06,{0x06:"LM 2.0 Response to logon request"}),
+ ByteField("unused", 0),
+ StrFixedLenField("DblSlash", "\\\\", 2),
+ StrNullField("ServerName","WIN"),
+ LEShortField("LM20Token", 0xffff)]
+
+# SMBNegociate Protocol Request Header
+class SMBNegociate_Protocol_Request_Header(Packet):
+ name="SMBNegociate Protocol Request Header"
+ fields_desc = [StrFixedLenField("Start","\xffSMB",4),
+ ByteEnumField("Command",0x72,{0x72:"SMB_COM_NEGOTIATE"}),
+ ByteField("Error_Class",0),
+ ByteField("Reserved",0),
+ LEShortField("Error_code",0),
+ ByteField("Flags",0x18),
+ LEShortField("Flags2",0x0000),
+ LEShortField("PIDHigh",0x0000),
+ LELongField("Signature",0x0),
+ LEShortField("Unused",0x0),
+ LEShortField("TID",0),
+ LEShortField("PID",1),
+ LEShortField("UID",0),
+ LEShortField("MID",2),
+ ByteField("WordCount",0),
+ LEShortField("ByteCount",12)]
+
+# SMB Negociate Protocol Request Tail
+class SMBNegociate_Protocol_Request_Tail(Packet):
+ name="SMB Negociate Protocol Request Tail"
+ fields_desc=[ByteField("BufferFormat",0x02),
+ StrNullField("BufferData","NT LM 0.12")]
+
+# SMBNegociate Protocol Response Advanced Security
+class SMBNegociate_Protocol_Response_Advanced_Security(Packet):
+ name="SMBNegociate Protocol Response Advanced Security"
+ fields_desc = [StrFixedLenField("Start","\xffSMB",4),
+ ByteEnumField("Command",0x72,{0x72:"SMB_COM_NEGOTIATE"}),
+ ByteField("Error_Class",0),
+ ByteField("Reserved",0),
+ LEShortField("Error_Code",0),
+ ByteField("Flags",0x98),
+ LEShortField("Flags2",0x0000),
+ LEShortField("PIDHigh",0x0000),
+ LELongField("Signature",0x0),
+ LEShortField("Unused",0x0),
+ LEShortField("TID",0),
+ LEShortField("PID",1),
+ LEShortField("UID",0),
+ LEShortField("MID",2),
+ ByteField("WordCount",17),
+ LEShortField("DialectIndex",7),
+ ByteField("SecurityMode",0x03),
+ LEShortField("MaxMpxCount",50),
+ LEShortField("MaxNumberVC",1),
+ LEIntField("MaxBufferSize",16144),
+ LEIntField("MaxRawSize",65536),
+ LEIntField("SessionKey",0x0000),
+ LEShortField("ServerCapabilities",0xf3f9),
+ BitField("UnixExtensions",0,1),
+ BitField("Reserved2",0,7),
+ BitField("ExtendedSecurity",1,1),
+ BitField("CompBulk",0,2),
+ BitField("Reserved3",0,5),
+# There have been 127490112000000000 tenths of micro-seconds between 1st january 1601 and 1st january 2005. 127490112000000000=0x1C4EF94D6228000, so ServerTimeHigh=0xD6228000 and ServerTimeLow=0x1C4EF94.
+ LEIntField("ServerTimeHigh",0xD6228000),
+ LEIntField("ServerTimeLow",0x1C4EF94),
+ LEShortField("ServerTimeZone",0x3c),
+ ByteField("EncryptionKeyLength",0),
+ LEFieldLenField("ByteCount", None, "SecurityBlob", adjust=lambda pkt,x:x-16),
+ BitField("GUID",0,128),
+ StrLenField("SecurityBlob", "", length_from=lambda x:x.ByteCount+16)]
+
+# SMBNegociate Protocol Response No Security
+# When using no security, with EncryptionKeyLength=8, you must have an EncryptionKey before the DomainName
+class SMBNegociate_Protocol_Response_No_Security(Packet):
+ name="SMBNegociate Protocol Response No Security"
+ fields_desc = [StrFixedLenField("Start","\xffSMB",4),
+ ByteEnumField("Command",0x72,{0x72:"SMB_COM_NEGOTIATE"}),
+ ByteField("Error_Class",0),
+ ByteField("Reserved",0),
+ LEShortField("Error_Code",0),
+ ByteField("Flags",0x98),
+ LEShortField("Flags2",0x0000),
+ LEShortField("PIDHigh",0x0000),
+ LELongField("Signature",0x0),
+ LEShortField("Unused",0x0),
+ LEShortField("TID",0),
+ LEShortField("PID",1),
+ LEShortField("UID",0),
+ LEShortField("MID",2),
+ ByteField("WordCount",17),
+ LEShortField("DialectIndex",7),
+ ByteField("SecurityMode",0x03),
+ LEShortField("MaxMpxCount",50),
+ LEShortField("MaxNumberVC",1),
+ LEIntField("MaxBufferSize",16144),
+ LEIntField("MaxRawSize",65536),
+ LEIntField("SessionKey",0x0000),
+ LEShortField("ServerCapabilities",0xf3f9),
+ BitField("UnixExtensions",0,1),
+ BitField("Reserved2",0,7),
+ BitField("ExtendedSecurity",0,1),
+ FlagsField("CompBulk",0,2,"CB"),
+ BitField("Reserved3",0,5),
+ # There have been 127490112000000000 tenths of micro-seconds between 1st january 1601 and 1st january 2005. 127490112000000000=0x1C4EF94D6228000, so ServerTimeHigh=0xD6228000 and ServerTimeLow=0x1C4EF94.
+ LEIntField("ServerTimeHigh",0xD6228000),
+ LEIntField("ServerTimeLow",0x1C4EF94),
+ LEShortField("ServerTimeZone",0x3c),
+ ByteField("EncryptionKeyLength",8),
+ LEShortField("ByteCount",24),
+ BitField("EncryptionKey",0,64),
+ StrNullField("DomainName","WORKGROUP"),
+ StrNullField("ServerName","RMFF1")]
+
+# SMBNegociate Protocol Response No Security No Key
+class SMBNegociate_Protocol_Response_No_Security_No_Key(Packet):
+ namez="SMBNegociate Protocol Response No Security No Key"
+ fields_desc = [StrFixedLenField("Start","\xffSMB",4),
+ ByteEnumField("Command",0x72,{0x72:"SMB_COM_NEGOTIATE"}),
+ ByteField("Error_Class",0),
+ ByteField("Reserved",0),
+ LEShortField("Error_Code",0),
+ ByteField("Flags",0x98),
+ LEShortField("Flags2",0x0000),
+ LEShortField("PIDHigh",0x0000),
+ LELongField("Signature",0x0),
+ LEShortField("Unused",0x0),
+ LEShortField("TID",0),
+ LEShortField("PID",1),
+ LEShortField("UID",0),
+ LEShortField("MID",2),
+ ByteField("WordCount",17),
+ LEShortField("DialectIndex",7),
+ ByteField("SecurityMode",0x03),
+ LEShortField("MaxMpxCount",50),
+ LEShortField("MaxNumberVC",1),
+ LEIntField("MaxBufferSize",16144),
+ LEIntField("MaxRawSize",65536),
+ LEIntField("SessionKey",0x0000),
+ LEShortField("ServerCapabilities",0xf3f9),
+ BitField("UnixExtensions",0,1),
+ BitField("Reserved2",0,7),
+ BitField("ExtendedSecurity",0,1),
+ FlagsField("CompBulk",0,2,"CB"),
+ BitField("Reserved3",0,5),
+ # There have been 127490112000000000 tenths of micro-seconds between 1st january 1601 and 1st january 2005. 127490112000000000=0x1C4EF94D6228000, so ServerTimeHigh=0xD6228000 and ServerTimeLow=0x1C4EF94.
+ LEIntField("ServerTimeHigh",0xD6228000),
+ LEIntField("ServerTimeLow",0x1C4EF94),
+ LEShortField("ServerTimeZone",0x3c),
+ ByteField("EncryptionKeyLength",0),
+ LEShortField("ByteCount",16),
+ StrNullField("DomainName","WORKGROUP"),
+ StrNullField("ServerName","RMFF1")]
+
+# Session Setup AndX Request
+class SMBSession_Setup_AndX_Request(Packet):
+ name="Session Setup AndX Request"
+ fields_desc=[StrFixedLenField("Start","\xffSMB",4),
+ ByteEnumField("Command",0x73,{0x73:"SMB_COM_SESSION_SETUP_ANDX"}),
+ ByteField("Error_Class",0),
+ ByteField("Reserved",0),
+ LEShortField("Error_Code",0),
+ ByteField("Flags",0x18),
+ LEShortField("Flags2",0x0001),
+ LEShortField("PIDHigh",0x0000),
+ LELongField("Signature",0x0),
+ LEShortField("Unused",0x0),
+ LEShortField("TID",0),
+ LEShortField("PID",1),
+ LEShortField("UID",0),
+ LEShortField("MID",2),
+ ByteField("WordCount",13),
+ ByteEnumField("AndXCommand",0x75,{0x75:"SMB_COM_TREE_CONNECT_ANDX"}),
+ ByteField("Reserved2",0),
+ LEShortField("AndXOffset",96),
+ LEShortField("MaxBufferS",2920),
+ LEShortField("MaxMPXCount",50),
+ LEShortField("VCNumber",0),
+ LEIntField("SessionKey",0),
+ LEFieldLenField("ANSIPasswordLength",None,"ANSIPassword"),
+ LEShortField("UnicodePasswordLength",0),
+ LEIntField("Reserved3",0),
+ LEShortField("ServerCapabilities",0x05),
+ BitField("UnixExtensions",0,1),
+ BitField("Reserved4",0,7),
+ BitField("ExtendedSecurity",0,1),
+ BitField("CompBulk",0,2),
+ BitField("Reserved5",0,5),
+ LEShortField("ByteCount",35),
+ StrLenField("ANSIPassword", "Pass",length_from=lambda x:x.ANSIPasswordLength),
+ StrNullField("Account","GUEST"),
+ StrNullField("PrimaryDomain", ""),
+ StrNullField("NativeOS","Windows 4.0"),
+ StrNullField("NativeLanManager","Windows 4.0"),
+ ByteField("WordCount2",4),
+ ByteEnumField("AndXCommand2",0xFF,{0xFF:"SMB_COM_NONE"}),
+ ByteField("Reserved6",0),
+ LEShortField("AndXOffset2",0),
+ LEShortField("Flags3",0x2),
+ LEShortField("PasswordLength",0x1),
+ LEShortField("ByteCount2",18),
+ ByteField("Password",0),
+ StrNullField("Path","\\\\WIN2K\\IPC$"),
+ StrNullField("Service","IPC")]
+
+# Session Setup AndX Response
+class SMBSession_Setup_AndX_Response(Packet):
+ name="Session Setup AndX Response"
+ fields_desc=[StrFixedLenField("Start","\xffSMB",4),
+ ByteEnumField("Command",0x73,{0x73:"SMB_COM_SESSION_SETUP_ANDX"}),
+ ByteField("Error_Class",0),
+ ByteField("Reserved",0),
+ LEShortField("Error_Code",0),
+ ByteField("Flags",0x90),
+ LEShortField("Flags2",0x1001),
+ LEShortField("PIDHigh",0x0000),
+ LELongField("Signature",0x0),
+ LEShortField("Unused",0x0),
+ LEShortField("TID",0),
+ LEShortField("PID",1),
+ LEShortField("UID",0),
+ LEShortField("MID",2),
+ ByteField("WordCount",3),
+ ByteEnumField("AndXCommand",0x75,{0x75:"SMB_COM_TREE_CONNECT_ANDX"}),
+ ByteField("Reserved2",0),
+ LEShortField("AndXOffset",66),
+ LEShortField("Action",0),
+ LEShortField("ByteCount",25),
+ StrNullField("NativeOS","Windows 4.0"),
+ StrNullField("NativeLanManager","Windows 4.0"),
+ StrNullField("PrimaryDomain",""),
+ ByteField("WordCount2",3),
+ ByteEnumField("AndXCommand2",0xFF,{0xFF:"SMB_COM_NONE"}),
+ ByteField("Reserved3",0),
+ LEShortField("AndXOffset2",80),
+ LEShortField("OptionalSupport",0x01),
+ LEShortField("ByteCount2",5),
+ StrNullField("Service","IPC"),
+ StrNullField("NativeFileSystem","")]
+
+bind_layers( NBTSession, SMBNegociate_Protocol_Request_Header, )
+bind_layers( NBTSession, SMBNegociate_Protocol_Response_Advanced_Security, ExtendedSecurity=1)
+bind_layers( NBTSession, SMBNegociate_Protocol_Response_No_Security, ExtendedSecurity=0, EncryptionKeyLength=8)
+bind_layers( NBTSession, SMBNegociate_Protocol_Response_No_Security_No_Key, ExtendedSecurity=0, EncryptionKeyLength=0)
+bind_layers( NBTSession, SMBSession_Setup_AndX_Request, )
+bind_layers( NBTSession, SMBSession_Setup_AndX_Response, )
+bind_layers( SMBNegociate_Protocol_Request_Header, SMBNegociate_Protocol_Request_Tail, )
+bind_layers( SMBNegociate_Protocol_Request_Tail, SMBNegociate_Protocol_Request_Tail, )
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/snmp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/snmp.py
new file mode 100644
index 00000000..dddd4e27
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/snmp.py
@@ -0,0 +1,255 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+SNMP (Simple Network Management Protocol).
+"""
+
+from scapy.asn1packet import *
+from scapy.asn1fields import *
+from scapy.layers.inet import UDP
+
+##########
+## SNMP ##
+##########
+
+######[ ASN1 class ]######
+
+class ASN1_Class_SNMP(ASN1_Class_UNIVERSAL):
+ name="SNMP"
+ PDU_GET = 0xa0
+ PDU_NEXT = 0xa1
+ PDU_RESPONSE = 0xa2
+ PDU_SET = 0xa3
+ PDU_TRAPv1 = 0xa4
+ PDU_BULK = 0xa5
+ PDU_INFORM = 0xa6
+ PDU_TRAPv2 = 0xa7
+
+
+class ASN1_SNMP_PDU_GET(ASN1_SEQUENCE):
+ tag = ASN1_Class_SNMP.PDU_GET
+
+class ASN1_SNMP_PDU_NEXT(ASN1_SEQUENCE):
+ tag = ASN1_Class_SNMP.PDU_NEXT
+
+class ASN1_SNMP_PDU_RESPONSE(ASN1_SEQUENCE):
+ tag = ASN1_Class_SNMP.PDU_RESPONSE
+
+class ASN1_SNMP_PDU_SET(ASN1_SEQUENCE):
+ tag = ASN1_Class_SNMP.PDU_SET
+
+class ASN1_SNMP_PDU_TRAPv1(ASN1_SEQUENCE):
+ tag = ASN1_Class_SNMP.PDU_TRAPv1
+
+class ASN1_SNMP_PDU_BULK(ASN1_SEQUENCE):
+ tag = ASN1_Class_SNMP.PDU_BULK
+
+class ASN1_SNMP_PDU_INFORM(ASN1_SEQUENCE):
+ tag = ASN1_Class_SNMP.PDU_INFORM
+
+class ASN1_SNMP_PDU_TRAPv2(ASN1_SEQUENCE):
+ tag = ASN1_Class_SNMP.PDU_TRAPv2
+
+
+######[ BER codecs ]#######
+
+class BERcodec_SNMP_PDU_GET(BERcodec_SEQUENCE):
+ tag = ASN1_Class_SNMP.PDU_GET
+
+class BERcodec_SNMP_PDU_NEXT(BERcodec_SEQUENCE):
+ tag = ASN1_Class_SNMP.PDU_NEXT
+
+class BERcodec_SNMP_PDU_RESPONSE(BERcodec_SEQUENCE):
+ tag = ASN1_Class_SNMP.PDU_RESPONSE
+
+class BERcodec_SNMP_PDU_SET(BERcodec_SEQUENCE):
+ tag = ASN1_Class_SNMP.PDU_SET
+
+class BERcodec_SNMP_PDU_TRAPv1(BERcodec_SEQUENCE):
+ tag = ASN1_Class_SNMP.PDU_TRAPv1
+
+class BERcodec_SNMP_PDU_BULK(BERcodec_SEQUENCE):
+ tag = ASN1_Class_SNMP.PDU_BULK
+
+class BERcodec_SNMP_PDU_INFORM(BERcodec_SEQUENCE):
+ tag = ASN1_Class_SNMP.PDU_INFORM
+
+class BERcodec_SNMP_PDU_TRAPv2(BERcodec_SEQUENCE):
+ tag = ASN1_Class_SNMP.PDU_TRAPv2
+
+
+
+######[ ASN1 fields ]######
+
+class ASN1F_SNMP_PDU_GET(ASN1F_SEQUENCE):
+ ASN1_tag = ASN1_Class_SNMP.PDU_GET
+
+class ASN1F_SNMP_PDU_NEXT(ASN1F_SEQUENCE):
+ ASN1_tag = ASN1_Class_SNMP.PDU_NEXT
+
+class ASN1F_SNMP_PDU_RESPONSE(ASN1F_SEQUENCE):
+ ASN1_tag = ASN1_Class_SNMP.PDU_RESPONSE
+
+class ASN1F_SNMP_PDU_SET(ASN1F_SEQUENCE):
+ ASN1_tag = ASN1_Class_SNMP.PDU_SET
+
+class ASN1F_SNMP_PDU_TRAPv1(ASN1F_SEQUENCE):
+ ASN1_tag = ASN1_Class_SNMP.PDU_TRAPv1
+
+class ASN1F_SNMP_PDU_BULK(ASN1F_SEQUENCE):
+ ASN1_tag = ASN1_Class_SNMP.PDU_BULK
+
+class ASN1F_SNMP_PDU_INFORM(ASN1F_SEQUENCE):
+ ASN1_tag = ASN1_Class_SNMP.PDU_INFORM
+
+class ASN1F_SNMP_PDU_TRAPv2(ASN1F_SEQUENCE):
+ ASN1_tag = ASN1_Class_SNMP.PDU_TRAPv2
+
+
+
+######[ SNMP Packet ]######
+
+SNMP_error = { 0: "no_error",
+ 1: "too_big",
+ 2: "no_such_name",
+ 3: "bad_value",
+ 4: "read_only",
+ 5: "generic_error",
+ 6: "no_access",
+ 7: "wrong_type",
+ 8: "wrong_length",
+ 9: "wrong_encoding",
+ 10: "wrong_value",
+ 11: "no_creation",
+ 12: "inconsistent_value",
+ 13: "ressource_unavailable",
+ 14: "commit_failed",
+ 15: "undo_failed",
+ 16: "authorization_error",
+ 17: "not_writable",
+ 18: "inconsistent_name",
+ }
+
+SNMP_trap_types = { 0: "cold_start",
+ 1: "warm_start",
+ 2: "link_down",
+ 3: "link_up",
+ 4: "auth_failure",
+ 5: "egp_neigh_loss",
+ 6: "enterprise_specific",
+ }
+
+class SNMPvarbind(ASN1_Packet):
+ ASN1_codec = ASN1_Codecs.BER
+ ASN1_root = ASN1F_SEQUENCE( ASN1F_OID("oid","1.3"),
+ ASN1F_field("value",ASN1_NULL(0))
+ )
+
+
+class SNMPget(ASN1_Packet):
+ ASN1_codec = ASN1_Codecs.BER
+ ASN1_root = ASN1F_SNMP_PDU_GET( ASN1F_INTEGER("id",0),
+ ASN1F_enum_INTEGER("error",0, SNMP_error),
+ ASN1F_INTEGER("error_index",0),
+ ASN1F_SEQUENCE_OF("varbindlist", [], SNMPvarbind)
+ )
+
+class SNMPnext(ASN1_Packet):
+ ASN1_codec = ASN1_Codecs.BER
+ ASN1_root = ASN1F_SNMP_PDU_NEXT( ASN1F_INTEGER("id",0),
+ ASN1F_enum_INTEGER("error",0, SNMP_error),
+ ASN1F_INTEGER("error_index",0),
+ ASN1F_SEQUENCE_OF("varbindlist", [], SNMPvarbind)
+ )
+
+class SNMPresponse(ASN1_Packet):
+ ASN1_codec = ASN1_Codecs.BER
+ ASN1_root = ASN1F_SNMP_PDU_RESPONSE( ASN1F_INTEGER("id",0),
+ ASN1F_enum_INTEGER("error",0, SNMP_error),
+ ASN1F_INTEGER("error_index",0),
+ ASN1F_SEQUENCE_OF("varbindlist", [], SNMPvarbind)
+ )
+
+class SNMPset(ASN1_Packet):
+ ASN1_codec = ASN1_Codecs.BER
+ ASN1_root = ASN1F_SNMP_PDU_SET( ASN1F_INTEGER("id",0),
+ ASN1F_enum_INTEGER("error",0, SNMP_error),
+ ASN1F_INTEGER("error_index",0),
+ ASN1F_SEQUENCE_OF("varbindlist", [], SNMPvarbind)
+ )
+
+class SNMPtrapv1(ASN1_Packet):
+ ASN1_codec = ASN1_Codecs.BER
+ ASN1_root = ASN1F_SNMP_PDU_TRAPv1( ASN1F_OID("enterprise", "1.3"),
+ ASN1F_IPADDRESS("agent_addr","0.0.0.0"),
+ ASN1F_enum_INTEGER("generic_trap", 0, SNMP_trap_types),
+ ASN1F_INTEGER("specific_trap", 0),
+ ASN1F_TIME_TICKS("time_stamp", IntAutoTime()),
+ ASN1F_SEQUENCE_OF("varbindlist", [], SNMPvarbind)
+ )
+
+class SNMPbulk(ASN1_Packet):
+ ASN1_codec = ASN1_Codecs.BER
+ ASN1_root = ASN1F_SNMP_PDU_BULK( ASN1F_INTEGER("id",0),
+ ASN1F_INTEGER("non_repeaters",0),
+ ASN1F_INTEGER("max_repetitions",0),
+ ASN1F_SEQUENCE_OF("varbindlist", [], SNMPvarbind)
+ )
+
+class SNMPinform(ASN1_Packet):
+ ASN1_codec = ASN1_Codecs.BER
+ ASN1_root = ASN1F_SNMP_PDU_INFORM( ASN1F_INTEGER("id",0),
+ ASN1F_enum_INTEGER("error",0, SNMP_error),
+ ASN1F_INTEGER("error_index",0),
+ ASN1F_SEQUENCE_OF("varbindlist", [], SNMPvarbind)
+ )
+
+class SNMPtrapv2(ASN1_Packet):
+ ASN1_codec = ASN1_Codecs.BER
+ ASN1_root = ASN1F_SNMP_PDU_TRAPv2( ASN1F_INTEGER("id",0),
+ ASN1F_enum_INTEGER("error",0, SNMP_error),
+ ASN1F_INTEGER("error_index",0),
+ ASN1F_SEQUENCE_OF("varbindlist", [], SNMPvarbind)
+ )
+
+
+class SNMP(ASN1_Packet):
+ ASN1_codec = ASN1_Codecs.BER
+ ASN1_root = ASN1F_SEQUENCE(
+ ASN1F_enum_INTEGER("version", 1, {0:"v1", 1:"v2c", 2:"v2", 3:"v3"}),
+ ASN1F_STRING("community",b"public"),
+ ASN1F_CHOICE("PDU", SNMPget(),
+ SNMPget, SNMPnext, SNMPresponse, SNMPset,
+ SNMPtrapv1, SNMPbulk, SNMPinform, SNMPtrapv2)
+ )
+ def answers(self, other):
+ return ( isinstance(self.PDU, SNMPresponse) and
+ ( isinstance(other.PDU, SNMPget) or
+ isinstance(other.PDU, SNMPnext) or
+ isinstance(other.PDU, SNMPset) ) and
+ self.PDU.id == other.PDU.id )
+
+bind_layers( UDP, SNMP, sport=161)
+bind_layers( UDP, SNMP, dport=161)
+bind_layers( UDP, SNMP, sport=162)
+bind_layers( UDP, SNMP, dport=162)
+
+def snmpwalk(dst, oid="1", community=b"public"):
+ try:
+ while 1:
+ r = sr1(IP(dst=dst)/UDP(sport=RandShort())/SNMP(community=community, PDU=SNMPnext(varbindlist=[SNMPvarbind(oid=oid)])),timeout=2, chainCC=1, verbose=0, retry=2)
+ if ICMP in r:
+ print(repr(r))
+ break
+ if r is None:
+ print("No answers")
+ break
+ print("%-40s: %r" % (r[SNMPvarbind].oid.val,r[SNMPvarbind].value))
+ oid = r[SNMPvarbind].oid
+
+ except KeyboardInterrupt:
+ pass
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/tftp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/tftp.py
new file mode 100644
index 00000000..1535e99c
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/tftp.py
@@ -0,0 +1,477 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+TFTP (Trivial File Transfer Protocol).
+"""
+
+import os,random
+from scapy.packet import *
+from scapy.fields import *
+from scapy.automaton import *
+from scapy.layers.inet import UDP
+
+
+
+TFTP_operations = { 1:"RRQ",2:"WRQ",3:"DATA",4:"ACK",5:"ERROR",6:"OACK" }
+
+
+class TFTP(Packet):
+ name = "TFTP opcode"
+ fields_desc = [ ShortEnumField("op", 1, TFTP_operations), ]
+
+
+
+class TFTP_RRQ(Packet):
+ name = "TFTP Read Request"
+ fields_desc = [ StrNullField("filename", ""),
+ StrNullField("mode", "octet") ]
+ def answers(self, other):
+ return 0
+ def mysummary(self):
+ return self.sprintf("RRQ %filename%"),[UDP]
+
+
+class TFTP_WRQ(Packet):
+ name = "TFTP Write Request"
+ fields_desc = [ StrNullField("filename", ""),
+ StrNullField("mode", "octet") ]
+ def answers(self, other):
+ return 0
+ def mysummary(self):
+ return self.sprintf("WRQ %filename%"),[UDP]
+
+class TFTP_DATA(Packet):
+ name = "TFTP Data"
+ fields_desc = [ ShortField("block", 0) ]
+ def answers(self, other):
+ return self.block == 1 and isinstance(other, TFTP_RRQ)
+ def mysummary(self):
+ return self.sprintf("DATA %block%"),[UDP]
+
+class TFTP_Option(Packet):
+ fields_desc = [ StrNullField("oname",""),
+ StrNullField("value","") ]
+ def extract_padding(self, pkt):
+ return "",pkt
+
+class TFTP_Options(Packet):
+ fields_desc = [ PacketListField("options", [], TFTP_Option, length_from=lambda x:None) ]
+
+
+class TFTP_ACK(Packet):
+ name = "TFTP Ack"
+ fields_desc = [ ShortField("block", 0) ]
+ def answers(self, other):
+ if isinstance(other, TFTP_DATA):
+ return self.block == other.block
+ elif isinstance(other, TFTP_RRQ) or isinstance(other, TFTP_WRQ) or isinstance(other, TFTP_OACK):
+ return self.block == 0
+ return 0
+ def mysummary(self):
+ return self.sprintf("ACK %block%"),[UDP]
+
+TFTP_Error_Codes = { 0: "Not defined",
+ 1: "File not found",
+ 2: "Access violation",
+ 3: "Disk full or allocation exceeded",
+ 4: "Illegal TFTP operation",
+ 5: "Unknown transfer ID",
+ 6: "File already exists",
+ 7: "No such user",
+ 8: "Terminate transfer due to option negotiation",
+ }
+
+class TFTP_ERROR(Packet):
+ name = "TFTP Error"
+ fields_desc = [ ShortEnumField("errorcode", 0, TFTP_Error_Codes),
+ StrNullField("errormsg", "")]
+ def answers(self, other):
+ return (isinstance(other, TFTP_DATA) or
+ isinstance(other, TFTP_RRQ) or
+ isinstance(other, TFTP_WRQ) or
+ isinstance(other, TFTP_ACK))
+ def mysummary(self):
+ return self.sprintf("ERROR %errorcode%: %errormsg%"),[UDP]
+
+
+class TFTP_OACK(Packet):
+ name = "TFTP Option Ack"
+ fields_desc = [ ]
+ def answers(self, other):
+ return isinstance(other, TFTP_WRQ) or isinstance(other, TFTP_RRQ)
+
+
+bind_layers(UDP, TFTP, dport=69)
+bind_layers(TFTP, TFTP_RRQ, op=1)
+bind_layers(TFTP, TFTP_WRQ, op=2)
+bind_layers(TFTP, TFTP_DATA, op=3)
+bind_layers(TFTP, TFTP_ACK, op=4)
+bind_layers(TFTP, TFTP_ERROR, op=5)
+bind_layers(TFTP, TFTP_OACK, op=6)
+bind_layers(TFTP_RRQ, TFTP_Options)
+bind_layers(TFTP_WRQ, TFTP_Options)
+bind_layers(TFTP_OACK, TFTP_Options)
+
+
+class TFTP_read(Automaton):
+    def parse_args(self, filename, server, sport = None, port=69, **kargs):
+        Automaton.parse_args(self, **kargs)
+        self.filename = filename
+        self.server = server
+        self.port = port
+        self.sport = sport
+
+
+    def master_filter(self, pkt):
+        return ( IP in pkt and pkt[IP].src == self.server and UDP in pkt
+                 and pkt[UDP].dport == self.my_tid
+                 and (self.server_tid is None or pkt[UDP].sport == self.server_tid) )
+
+    # BEGIN
+    @ATMT.state(initial=1)
+    def BEGIN(self):
+        self.blocksize=512
+        self.my_tid = self.sport or RandShort()._fix()
+        bind_bottom_up(UDP, TFTP, dport=self.my_tid)
+        self.server_tid = None
+        self.res = b""  # accumulated file content: bytes, since DATA payloads are bytes on Python 3
+
+        self.l3 = IP(dst=self.server)/UDP(sport=self.my_tid, dport=self.port)/TFTP()
+        self.last_packet = self.l3/TFTP_RRQ(filename=self.filename, mode="octet")
+        self.send(self.last_packet)
+        self.awaiting=1
+
+        raise self.WAITING()
+
+    # WAITING
+    @ATMT.state()
+    def WAITING(self):
+        pass
+
+
+    @ATMT.receive_condition(WAITING)
+    def receive_data(self, pkt):
+        if TFTP_DATA in pkt and pkt[TFTP_DATA].block == self.awaiting:
+            if self.server_tid is None:
+                self.server_tid = pkt[UDP].sport
+            self.l3[UDP].dport = self.server_tid
+            raise self.RECEIVING(pkt)
+
+    @ATMT.receive_condition(WAITING, prio=1)
+    def receive_error(self, pkt):
+        if TFTP_ERROR in pkt:
+            raise self.ERROR(pkt)
+
+
+    @ATMT.timeout(WAITING, 3)
+    def timeout_waiting(self):
+        raise self.WAITING()
+    @ATMT.action(timeout_waiting)
+    def retransmit_last_packet(self):
+        self.send(self.last_packet)
+
+    @ATMT.action(receive_data)
+#    @ATMT.action(receive_error)
+    def send_ack(self):
+        self.last_packet = self.l3 / TFTP_ACK(block = self.awaiting)
+        self.send(self.last_packet)
+
+
+    # RECEIVED
+    @ATMT.state()
+    def RECEIVING(self, pkt):
+        if conf.raw_layer in pkt:
+            recvd = pkt[conf.raw_layer].load
+        else:
+            recvd = b""  # bytes, so 'self.res += recvd' cannot mix str and bytes (TypeError on Python 3)
+        self.res += recvd
+        self.awaiting += 1
+        if len(recvd) == self.blocksize:
+            raise self.WAITING()
+        raise self.END()
+
+    # ERROR
+    @ATMT.state(error=1)
+    def ERROR(self,pkt):
+        split_bottom_up(UDP, TFTP, dport=self.my_tid)
+        return pkt[TFTP_ERROR].summary()
+
+    #END
+    @ATMT.state(final=1)
+    def END(self):
+        split_bottom_up(UDP, TFTP, dport=self.my_tid)
+        return self.res
+
+
+
+
+class TFTP_write(Automaton):
+    def parse_args(self, filename, data, server, sport=None, port=69,**kargs):
+        Automaton.parse_args(self, **kargs)
+        self.filename = filename
+        self.server = server
+        self.port = port
+        self.sport = sport
+        self.blocksize = 512
+        self.origdata = data
+
+    def master_filter(self, pkt):
+        return ( IP in pkt and pkt[IP].src == self.server and UDP in pkt
+                 and pkt[UDP].dport == self.my_tid
+                 and (self.server_tid is None or pkt[UDP].sport == self.server_tid) )
+
+
+    # BEGIN
+    @ATMT.state(initial=1)
+    def BEGIN(self):
+        self.data = [ self.origdata[i*self.blocksize:(i+1)*self.blocksize]
+                      for i in range( len(self.origdata)//self.blocksize+1) ]  # // : range() requires an int on Python 3
+        self.my_tid = self.sport or RandShort()._fix()
+        bind_bottom_up(UDP, TFTP, dport=self.my_tid)
+        self.server_tid = None
+
+        self.l3 = IP(dst=self.server)/UDP(sport=self.my_tid, dport=self.port)/TFTP()
+        self.last_packet = self.l3/TFTP_WRQ(filename=self.filename, mode="octet")
+        self.send(self.last_packet)
+        self.res = ""
+        self.awaiting=0
+
+        raise self.WAITING_ACK()
+
+    # WAITING_ACK
+    @ATMT.state()
+    def WAITING_ACK(self):
+        pass
+
+    @ATMT.receive_condition(WAITING_ACK)
+    def received_ack(self,pkt):
+        if TFTP_ACK in pkt and pkt[TFTP_ACK].block == self.awaiting:
+            if self.server_tid is None:
+                self.server_tid = pkt[UDP].sport
+            self.l3[UDP].dport = self.server_tid
+            raise self.SEND_DATA()
+
+    @ATMT.receive_condition(WAITING_ACK)
+    def received_error(self, pkt):
+        if TFTP_ERROR in pkt:
+            raise self.ERROR(pkt)
+
+    @ATMT.timeout(WAITING_ACK, 3)
+    def timeout_waiting(self):
+        raise self.WAITING_ACK()
+    @ATMT.action(timeout_waiting)
+    def retransmit_last_packet(self):
+        self.send(self.last_packet)
+
+    # SEND_DATA
+    @ATMT.state()
+    def SEND_DATA(self):
+        self.awaiting += 1
+        self.last_packet = self.l3/TFTP_DATA(block=self.awaiting)/self.data.pop(0)
+        self.send(self.last_packet)
+        if self.data:
+            raise self.WAITING_ACK()
+        raise self.END()
+
+
+    # ERROR
+    @ATMT.state(error=1)
+    def ERROR(self,pkt):
+        split_bottom_up(UDP, TFTP, dport=self.my_tid)
+        return pkt[TFTP_ERROR].summary()
+
+    # END
+    @ATMT.state(final=1)
+    def END(self):
+        split_bottom_up(UDP, TFTP, dport=self.my_tid)
+
+
+class TFTP_WRQ_server(Automaton):
+
+    def parse_args(self, ip=None, sport=None, *args, **kargs):
+        Automaton.parse_args(self, *args, **kargs)
+        self.ip = ip
+        self.sport = sport
+
+    def master_filter(self, pkt):
+        return TFTP in pkt and (not self.ip or pkt[IP].dst == self.ip)
+
+    @ATMT.state(initial=1)
+    def BEGIN(self):
+        self.blksize=512
+        self.blk=1
+        self.filedata=b""  # bytes: DATA payloads appended below are bytes on Python 3
+        self.my_tid = self.sport or random.randint(10000,65500)
+        bind_bottom_up(UDP, TFTP, dport=self.my_tid)
+
+    @ATMT.receive_condition(BEGIN)
+    def receive_WRQ(self,pkt):
+        if TFTP_WRQ in pkt:
+            raise self.WAIT_DATA().action_parameters(pkt)
+
+    @ATMT.action(receive_WRQ)
+    def ack_WRQ(self, pkt):
+        ip = pkt[IP]
+        self.ip = ip.dst
+        self.dst = ip.src
+        self.filename = pkt[TFTP_WRQ].filename
+        options = pkt[TFTP_Options]
+        self.l3 = IP(src=ip.dst, dst=ip.src)/UDP(sport=self.my_tid, dport=pkt.sport)/TFTP()
+        if options is None:
+            self.last_packet = self.l3/TFTP_ACK(block=0)
+            self.send(self.last_packet)
+        else:
+            opt = [x for x in options.options if x.oname.upper() == "BLKSIZE"]
+            if opt:
+                self.blksize = int(opt[0].value)
+                self.debug(2,"Negotiated new blksize at %i" % self.blksize)
+            self.last_packet = self.l3/TFTP_OACK()/TFTP_Options(options=opt)
+            self.send(self.last_packet)
+
+    @ATMT.state()
+    def WAIT_DATA(self):
+        pass
+
+    @ATMT.timeout(WAIT_DATA, 1)
+    def resend_ack(self):
+        self.send(self.last_packet)
+        raise self.WAIT_DATA()
+
+    @ATMT.receive_condition(WAIT_DATA)
+    def receive_data(self, pkt):
+        if TFTP_DATA in pkt:
+            data = pkt[TFTP_DATA]
+            if data.block == self.blk:
+                raise self.DATA(data)
+
+    @ATMT.action(receive_data)
+    def ack_data(self):
+        self.last_packet = self.l3/TFTP_ACK(block = self.blk)
+        self.send(self.last_packet)
+
+    @ATMT.state()
+    def DATA(self, data):
+        self.filedata += data.load
+        if len(data.load) < self.blksize:
+            raise self.END()
+        self.blk += 1
+        raise self.WAIT_DATA()
+
+    @ATMT.state(final=1)
+    def END(self):
+        split_bottom_up(UDP, TFTP, dport=self.my_tid)  # unbind first: this was dead code after the return
+        return self.filename,self.filedata
+
+
+class TFTP_RRQ_server(Automaton):
+    def parse_args(self, store=None, joker=None, dir=None, ip=None, sport=None, serve_one=False, **kargs):
+        Automaton.parse_args(self,**kargs)
+        if store is None:
+            store = {}
+        if dir is not None:
+            self.dir = os.path.join(os.path.abspath(dir),"")
+        else:
+            self.dir = None
+        self.store = store
+        self.joker = joker
+        self.ip = ip
+        self.sport = sport
+        self.serve_one = serve_one
+        self.my_tid = self.sport or random.randint(10000,65500)
+        bind_bottom_up(UDP, TFTP, dport=self.my_tid)
+
+    def master_filter(self, pkt):
+        return TFTP in pkt and (not self.ip or pkt[IP].dst == self.ip)
+
+    @ATMT.state(initial=1)
+    def WAIT_RRQ(self):
+        self.blksize=512
+        self.blk=0
+
+    @ATMT.receive_condition(WAIT_RRQ)
+    def receive_rrq(self, pkt):
+        if TFTP_RRQ in pkt:
+            raise self.RECEIVED_RRQ(pkt)
+
+
+    @ATMT.state()
+    def RECEIVED_RRQ(self, pkt):
+        ip = pkt[IP]
+        options = pkt[TFTP_Options]
+        self.l3 = IP(src=ip.dst, dst=ip.src)/UDP(sport=self.my_tid, dport=ip.sport)/TFTP()
+        self.filename = pkt[TFTP_RRQ].filename
+        self.blk=1
+        self.data = None
+        if self.filename in self.store:
+            self.data = self.store[self.filename]
+        elif self.dir is not None:
+            fn = os.path.abspath(os.path.join(self.dir, self.filename))
+            if fn.startswith(self.dir): # Check we're still in the server's directory
+                try:
+                    self.data=open(fn, "rb").read()  # binary mode: text mode breaks on non-UTF-8 files under Python 3
+                except IOError:
+                    pass
+        if self.data is None:
+            self.data = self.joker
+
+        if options:
+            opt = [x for x in options.options if x.oname.upper() == "BLKSIZE"]
+            if opt:
+                self.blksize = int(opt[0].value)
+                self.debug(2,"Negotiated new blksize at %i" % self.blksize)
+            self.last_packet = self.l3/TFTP_OACK()/TFTP_Options(options=opt)
+            self.send(self.last_packet)
+
+
+
+
+    @ATMT.condition(RECEIVED_RRQ)
+    def file_in_store(self):
+        if self.data is not None:
+            self.blknb = len(self.data)//self.blksize+1  # // : keep the block count an int on Python 3
+            raise self.SEND_FILE()
+
+    @ATMT.condition(RECEIVED_RRQ)
+    def file_not_found(self):
+        if self.data is None:
+            raise self.WAIT_RRQ()
+    @ATMT.action(file_not_found)
+    def send_error(self):
+        self.send(self.l3/TFTP_ERROR(errorcode=1, errormsg=TFTP_Error_Codes[1]))
+
+    @ATMT.state()
+    def SEND_FILE(self):
+        self.send(self.l3/TFTP_DATA(block=self.blk)/self.data[(self.blk-1)*self.blksize:self.blk*self.blksize])
+
+    @ATMT.timeout(SEND_FILE, 3)
+    def timeout_waiting_ack(self):
+        raise self.SEND_FILE()
+
+    @ATMT.receive_condition(SEND_FILE)
+    def received_ack(self, pkt):
+        if TFTP_ACK in pkt and pkt[TFTP_ACK].block == self.blk:
+            raise self.RECEIVED_ACK()
+    @ATMT.state()
+    def RECEIVED_ACK(self):
+        self.blk += 1
+
+    @ATMT.condition(RECEIVED_ACK)
+    def no_more_data(self):
+        if self.blk > self.blknb:
+            if self.serve_one:
+                raise self.END()
+            raise self.WAIT_RRQ()
+    @ATMT.condition(RECEIVED_ACK, prio=2)
+    def data_remaining(self):
+        raise self.SEND_FILE()
+
+    @ATMT.state(final=1)
+    def END(self):
+        split_bottom_up(UDP, TFTP, dport=self.my_tid)
+
+
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/vrrp.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/vrrp.py
new file mode 100644
index 00000000..e2818381
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/vrrp.py
@@ -0,0 +1,39 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## Copyright (C) 6WIND <olivier.matz@6wind.com>
+## This program is published under a GPLv2 license
+
+"""
+VRRP (Virtual Router Redundancy Protocol).
+"""
+
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.inet import IP
+
+IPPROTO_VRRP=112
+
+# RFC 3768 - Virtual Router Redundancy Protocol (VRRP)
+class VRRP(Packet):
+ fields_desc = [
+ BitField("version" , 2, 4),
+ BitField("type" , 1, 4),
+ ByteField("vrid", 1),
+ ByteField("priority", 100),
+ FieldLenField("ipcount", None, count_of="addrlist", fmt="B"),
+ ByteField("authtype", 0),
+ ByteField("adv", 1),
+ XShortField("chksum", None),
+ FieldListField("addrlist", [], IPField("", "0.0.0.0"),
+ count_from = lambda pkt: pkt.ipcount),
+ IntField("auth1", 0),
+ IntField("auth2", 0) ]
+
+ def post_build(self, p, pay):
+ if self.chksum is None:
+ ck = checksum(p)
+ p = p[:6]+bytes([(ck>>8),(ck&0xff)])+p[8:]
+ return p
+
+bind_layers( IP, VRRP, proto=IPPROTO_VRRP)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/x509.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/x509.py
new file mode 100644
index 00000000..18aaa5e3
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/x509.py
@@ -0,0 +1,108 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+X.509 certificates.
+"""
+
+from scapy.asn1packet import *
+from scapy.asn1fields import *
+
+##########
+## X509 ##
+##########
+
+######[ ASN1 class ]######
+
+class ASN1_Class_X509(ASN1_Class_UNIVERSAL):
+ name="X509"
+ CONT0 = 0xa0
+ CONT1 = 0xa1
+ CONT2 = 0xa2
+ CONT3 = 0xa3
+
+class ASN1_X509_CONT0(ASN1_SEQUENCE):
+ tag = ASN1_Class_X509.CONT0
+
+class ASN1_X509_CONT1(ASN1_SEQUENCE):
+ tag = ASN1_Class_X509.CONT1
+
+class ASN1_X509_CONT2(ASN1_SEQUENCE):
+ tag = ASN1_Class_X509.CONT2
+
+class ASN1_X509_CONT3(ASN1_SEQUENCE):
+ tag = ASN1_Class_X509.CONT3
+
+######[ BER codecs ]#######
+
+class BERcodec_X509_CONT0(BERcodec_SEQUENCE):
+ tag = ASN1_Class_X509.CONT0
+
+class BERcodec_X509_CONT1(BERcodec_SEQUENCE):
+ tag = ASN1_Class_X509.CONT1
+
+class BERcodec_X509_CONT2(BERcodec_SEQUENCE):
+ tag = ASN1_Class_X509.CONT2
+
+class BERcodec_X509_CONT3(BERcodec_SEQUENCE):
+ tag = ASN1_Class_X509.CONT3
+
+######[ ASN1 fields ]######
+
+class ASN1F_X509_CONT0(ASN1F_SEQUENCE):
+ ASN1_tag = ASN1_Class_X509.CONT0
+
+class ASN1F_X509_CONT1(ASN1F_SEQUENCE):
+ ASN1_tag = ASN1_Class_X509.CONT1
+
+class ASN1F_X509_CONT2(ASN1F_SEQUENCE):
+ ASN1_tag = ASN1_Class_X509.CONT2
+
+class ASN1F_X509_CONT3(ASN1F_SEQUENCE):
+ ASN1_tag = ASN1_Class_X509.CONT3
+
+######[ X509 packets ]######
+
+class X509RDN(ASN1_Packet):
+ ASN1_codec = ASN1_Codecs.BER
+ ASN1_root = ASN1F_SET(
+ ASN1F_SEQUENCE( ASN1F_OID("oid","2.5.4.6"),
+ ASN1F_PRINTABLE_STRING("value","")
+ )
+ )
+
+class X509v3Ext(ASN1_Packet):
+ ASN1_codec = ASN1_Codecs.BER
+ ASN1_root = ASN1F_field("val",ASN1_NULL(0))
+
+
+class X509Cert(ASN1_Packet):
+ ASN1_codec = ASN1_Codecs.BER
+ ASN1_root = ASN1F_SEQUENCE(
+ ASN1F_SEQUENCE(
+ ASN1F_optionnal(ASN1F_X509_CONT0(ASN1F_INTEGER("version",3))),
+ ASN1F_INTEGER("sn",1),
+ ASN1F_SEQUENCE(ASN1F_OID("sign_algo","1.2.840.113549.1.1.5"),
+ ASN1F_field("sa_value",ASN1_NULL(0))),
+ ASN1F_SEQUENCE_OF("issuer",[],X509RDN),
+ ASN1F_SEQUENCE(ASN1F_UTC_TIME("not_before",ZuluTime(-600)), # ten minutes ago
+ ASN1F_UTC_TIME("not_after",ZuluTime(+86400))), # for 24h
+ ASN1F_SEQUENCE_OF("subject",[],X509RDN),
+ ASN1F_SEQUENCE(
+ ASN1F_SEQUENCE(ASN1F_OID("pubkey_algo","1.2.840.113549.1.1.1"),
+ ASN1F_field("pk_value",ASN1_NULL(0))),
+ ASN1F_BIT_STRING("pubkey","")
+ ),
+ ASN1F_optionnal(ASN1F_X509_CONT3(ASN1F_SEQUENCE_OF("x509v3ext",[],X509v3Ext))),
+
+ ),
+ ASN1F_SEQUENCE(ASN1F_OID("sign_algo2","1.2.840.113549.1.1.5"),
+ ASN1F_field("sa2_value",ASN1_NULL(0))),
+ ASN1F_BIT_STRING("signature","")
+ )
+
+
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/main.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/main.py
new file mode 100644
index 00000000..47805443
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/main.py
@@ -0,0 +1,380 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Main module for interactive startup.
+"""
+
+import os,sys,socket
+import glob
+import builtins
+import types
+import gzip
+from .error import *
+from . import utils
+
+
+def _probe_config_file(cf):
+ cf_path = os.path.join(os.path.expanduser("~"), cf)
+ try:
+ os.stat(cf_path)
+ except OSError:
+ return None
+ else:
+ return cf_path
+
+def _read_config_file(cf):
+ log_loading.debug("Loading config file [%s]" % cf)
+ try:
+ exec(open(cf).read())
+ except IOError as e:
+ log_loading.warning("Cannot read config file [%s] [%s]" % (cf,e))
+ except Exception as e:
+ log_loading.exception("Error during evaluation of config file [%s]" % cf)
+
+
+DEFAULT_PRESTART_FILE = _probe_config_file(".scapy_prestart.py")
+DEFAULT_STARTUP_FILE = _probe_config_file(".scapy_startup.py")
+
+def _usage():
+ print("""Usage: scapy.py [-s sessionfile] [-c new_startup_file] [-p new_prestart_file] [-C] [-P]
+ -C: do not read startup file
+ -P: do not read pre-startup file""")
+ sys.exit(0)
+
+
+from .config import conf
+from .themes import DefaultTheme
+
+
+######################
+## Extension system ##
+######################
+
+
+def _load(module):
+ try:
+ mod = __import__(module,globals(),locals(),".")
+ builtins.__dict__.update(mod.__dict__)
+ except Exception as e:
+ log_interactive.error(e)
+
+def load_module(name):
+ _load("scapy.modules."+name)
+
+def load_layer(name):
+ _load("scapy.layers."+name)
+
+def load_contrib(name):
+ _load("scapy.contrib."+name)
+
+def list_contrib(name=None):
+ if name is None:
+ name="*.py"
+ elif "*" not in name and "?" not in name and not name.endswith(".py"):
+ name += ".py"
+ name = os.path.join(os.path.dirname(__file__), "contrib", name)
+ for f in glob.glob(name):
+ mod = os.path.basename(f)
+ if mod.startswith("__"):
+ continue
+ if mod.endswith(".py"):
+ mod = mod[:-3]
+ desc = { "description":"-", "status":"?", "name":mod }
+ for l in open(f):
+ p = l.find("scapy.contrib.")
+ if p >= 0:
+ p += 14
+ q = l.find("=", p)
+ key = l[p:q].strip()
+ value = l[q+1:].strip()
+ desc[key] = value
+ print("%(name)-20s: %(description)-40s status=%(status)s" % desc)
+
+
+
+
+
+
+##############################
+## Session saving/restoring ##
+##############################
+
+
+def save_session(fname=None, session=None, pickleProto=4):
+ import dill as pickle
+
+ if fname is None:
+ fname = conf.session
+ if not fname:
+ conf.session = fname = utils.get_temp_file(keep=True)
+ log_interactive.info("Use [%s] as session file" % fname)
+ if session is None:
+ session = builtins.__dict__["scapy_session"]
+
+ to_be_saved = session.copy()
+
+ for k in list(to_be_saved.keys()):
+ if k in ["__builtins__", "In", "Out", "conf"] or k.startswith("_") or \
+ (hasattr(to_be_saved[k], "__module__") and str(to_be_saved[k].__module__).startswith('IPython')):
+ del(to_be_saved[k])
+ continue
+ if type(to_be_saved[k]) in [type, types.ModuleType, types.MethodType]:
+ log_interactive.info("[%s] (%s) can't be saved." % (k, type(to_be_saved[k])))
+ del(to_be_saved[k])
+
+ try:
+ os.rename(fname, fname+".bak")
+ except OSError:
+ pass
+ f=gzip.open(fname,"wb")
+ for i in to_be_saved.keys():
+ #d = {i: to_be_saved[i]}
+ #pickle.dump(d, f, pickleProto)
+ pickle.dump(to_be_saved, f, pickleProto)
+ f.close()
+
+def load_session(fname=None):
+ if conf.interactive_shell.lower() == "ipython":
+ log_interactive.error("There are issues with load_session in ipython. Use python for interactive shell, or use -s parameter to load session")
+ return
+
+ import dill as pickle
+
+ if fname is None:
+ fname = conf.session
+ try:
+ s = pickle.load(gzip.open(fname,"rb"))
+ except IOError:
+ s = pickle.load(open(fname,"rb"))
+ scapy_session = builtins.__dict__["scapy_session"]
+ scapy_session.clear()
+ scapy_session.update(s)
+
+def update_session(fname=None):
+ import dill as pickle
+ if fname is None:
+ fname = conf.session
+ try:
+ s = pickle.load(gzip.open(fname,"rb"))
+ except IOError:
+ s = pickle.load(open(fname,"rb"))
+ scapy_session = builtins.__dict__["scapy_session"]
+ scapy_session.update(s)
+
+
+################
+##### Main #####
+################
+
+def scapy_delete_temp_files():
+ for f in conf.temp_files:
+ try:
+ os.unlink(f)
+ except:
+ pass
+
+def scapy_write_history_file(readline):
+ if conf.histfile:
+ try:
+ readline.write_history_file(conf.histfile)
+ except IOError as e:
+ try:
+ warning("Could not write history to [%s]\n\t (%s)" % (conf.histfile,e))
+ tmp = utils.get_temp_file(keep=True)
+ readline.write_history_file(tmp)
+ warning("Wrote history to [%s]" % tmp)
+ except:
+ warning("Cound not write history to [%s]. Discarded" % tmp)
+
+
+def interact(mydict=None,argv=None,mybanner=None,loglevel=20):
+ global session
+ import code,sys,pickle,os,getopt,re
+ from .config import conf
+ conf.interactive = True
+ if loglevel is not None:
+ conf.logLevel=loglevel
+
+ the_banner = "Welcome to Scapy (%s)"
+ if mybanner is not None:
+ the_banner += "\n"
+ the_banner += mybanner
+
+ if argv is None:
+ argv = sys.argv
+
+ import atexit
+ try:
+ import rlcompleter,readline
+ except ImportError:
+ log_loading.info("Can't load Python libreadline or completer")
+ READLINE=0
+ else:
+ READLINE=1
+ class ScapyCompleter(rlcompleter.Completer):
+ def global_matches(self, text):
+ matches = []
+ n = len(text)
+ for lst in [dir(builtins), session.keys()]:
+ for word in lst:
+ if word[:n] == text and word != "__builtins__":
+ matches.append(word)
+ return matches
+
+
+ def attr_matches(self, text):
+ m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
+ if not m:
+ return
+ expr, attr = m.group(1, 3)
+ try:
+ object = eval(expr)
+ except:
+ object = eval(expr, session)
+ if isinstance(object, Packet) or isinstance(object, Packet_metaclass):
+ #words = filter(lambda x: x[0]!="_",dir(object))
+ words = [ x for x in dir(object) if x[0]!="_" ]
+ words += [x.name for x in object.fields_desc]
+ else:
+ words = dir(object)
+ if hasattr( object,"__class__" ):
+ words = words + rlcompleter.get_class_members(object.__class__)
+ matches = []
+ n = len(attr)
+ for word in words:
+ if word[:n] == attr and word != "__builtins__":
+ matches.append("%s.%s" % (expr, word))
+ return matches
+
+ readline.set_completer(ScapyCompleter().complete)
+ readline.parse_and_bind("C-o: operate-and-get-next")
+ readline.parse_and_bind("tab: complete")
+
+
+ session=None
+ session_name=""
+ STARTUP_FILE = DEFAULT_STARTUP_FILE
+ PRESTART_FILE = DEFAULT_PRESTART_FILE
+
+
+ iface = None
+ try:
+ opts=getopt.getopt(argv[1:], "hs:Cc:Pp:d")
+ for opt, parm in opts[0]:
+ if opt == "-h":
+ _usage()
+ elif opt == "-s":
+ session_name = parm
+ elif opt == "-c":
+ STARTUP_FILE = parm
+ elif opt == "-C":
+ STARTUP_FILE = None
+ elif opt == "-p":
+ PRESTART_FILE = parm
+ elif opt == "-P":
+ PRESTART_FILE = None
+ elif opt == "-d":
+ conf.logLevel = max(1,conf.logLevel-10)
+
+ if len(opts[1]) > 0:
+ raise getopt.GetoptError("Too many parameters : [%s]" % " ".join(opts[1]))
+
+
+ except getopt.GetoptError as msg:
+ log_loading.error(msg)
+ sys.exit(1)
+
+ if PRESTART_FILE:
+ _read_config_file(PRESTART_FILE)
+
+ scapy_builtins = __import__("scapy.all",globals(),locals(),".").__dict__
+ builtins.__dict__.update(scapy_builtins)
+ globkeys = list(scapy_builtins.keys())
+ globkeys.append("scapy_session")
+ scapy_builtins=None # XXX replace with "with" statement
+ if mydict is not None:
+ builtins.__dict__.update(mydict)
+ globkeys += mydict.keys()
+
+
+ conf.color_theme = DefaultTheme()
+ if STARTUP_FILE:
+ _read_config_file(STARTUP_FILE)
+
+ if session_name:
+ try:
+ os.stat(session_name)
+ except OSError:
+ log_loading.info("New session [%s]" % session_name)
+ else:
+ try:
+ try:
+ session = pickle.load(gzip.open(session_name,"rb"))
+ except IOError:
+ session = pickle.load(open(session_name,"rb"))
+ log_loading.info("Using session [%s]" % session_name)
+ except EOFError:
+ log_loading.error("Error opening session [%s]" % session_name)
+ except AttributeError:
+ log_loading.error("Error opening session [%s]. Attribute missing" % session_name)
+
+ if session:
+ if "conf" in session:
+ conf.configure(session["conf"])
+ session["conf"] = conf
+ else:
+ conf.session = session_name
+ session={"conf":conf}
+
+ else:
+ session={"conf": conf}
+
+ builtins.__dict__["scapy_session"] = session
+
+
+ if READLINE:
+ if conf.histfile:
+ try:
+ readline.read_history_file(conf.histfile)
+ except IOError:
+ pass
+ atexit.register(scapy_write_history_file,readline)
+
+ atexit.register(scapy_delete_temp_files)
+
+ IPYTHON=False
+ if conf.interactive_shell.lower() == "ipython":
+ try:
+ import IPython
+ IPYTHON=True
+ except ImportError as e:
+ log_loading.warning("IPython not available. Using standard Python shell instead.")
+ IPYTHON=False
+
+ if IPYTHON:
+ banner = the_banner % (conf.version) + " using IPython %s" % IPython.__version__
+
+ if conf.ipython_embedded:
+ IPython.embed(user_ns=session, banner2=banner)
+ else:
+ IPython.start_ipython(argv=[], user_ns=session)
+
+ else:
+ code.interact(banner = the_banner % (conf.version),
+ local=session, readfunc=conf.readfunc)
+
+ if conf.session:
+ save_session(conf.session, session)
+
+
+ for k in globkeys:
+ try:
+ del(builtins.__dict__[k])
+ except:
+ pass
+
+if __name__ == "__main__":
+ interact()
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/__init__.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/__init__.py
new file mode 100644
index 00000000..6303dad0
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/__init__.py
@@ -0,0 +1,8 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Package of extension modules that have to be loaded explicitly.
+"""
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/geoip.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/geoip.py
new file mode 100644
index 00000000..7396fe96
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/geoip.py
@@ -0,0 +1,77 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+GeoIP: find out the geographical location of IP addresses
+"""
+
+from scapy.data import KnowledgeBase
+from scapy.config import conf
+
+conf.IPCountry_base = "GeoIPCountry4Scapy.gz"
+conf.countryLoc_base = "countryLoc.csv"
+
+##########################
+## IP location database ##
+##########################
+
+class IPCountryKnowledgeBase(KnowledgeBase):
+ """
+How to generate the base :
+db = []
+for l in open("GeoIPCountryWhois.csv").readlines():
+ s,e,c = l.split(",")[2:5]
+ db.append((int(s[1:-1]),int(e[1:-1]),c[1:-1]))
+cPickle.dump(gzip.open("xxx","w"),db)
+"""
+ def lazy_init(self):
+ self.base = load_object(self.filename)
+
+
+class CountryLocKnowledgeBase(KnowledgeBase):
+ def lazy_init(self):
+ f=open(self.filename)
+ self.base = {}
+ while 1:
+ l = f.readline()
+ if not l:
+ break
+ l = l.strip().split(",")
+ if len(l) != 3:
+ continue
+ c,lat,long = l
+
+ self.base[c] = (float(long),float(lat))
+ f.close()
+
+
+
+@conf.commands.register
+def locate_ip(ip):
+    """Get geographic coordinates from IP using geoip database"""
+    ip=list(map(int,ip.split(".")))  # list(): a map object is not subscriptable on Python 3
+    ip = ip[3]+(ip[2]<<8)+(ip[1]<<16)+(ip[0]<<24)
+
+    cloc = country_loc_kdb.get_base()
+    db = IP_country_kdb.get_base()
+
+    guess = d = 0  # predefine guess: the loop below never runs when len(db) <= 2
+    f=len(db)-1
+    while (f-d) > 1:
+        guess = (d+f)//2  # // : list indices must be ints on Python 3
+        if ip > db[guess][0]:
+            d = guess
+        else:
+            f = guess
+    s,e,c = db[guess]
+    if s <= ip and ip <= e:
+        return cloc.get(c,None)
+
+
+
+
+
+conf.IP_country_kdb = IPCountryKnowledgeBase(conf.IPCountry_base)
+conf.country_loc_kdb = CountryLocKnowledgeBase(conf.countryLoc_base)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/nmap.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/nmap.py
new file mode 100644
index 00000000..07ec7a93
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/nmap.py
@@ -0,0 +1,215 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Clone of Nmap's first generation OS fingerprinting.
+"""
+
+import os
+
+from scapy.data import KnowledgeBase
+from scapy.config import conf
+from scapy.arch import WINDOWS
+
+
+if WINDOWS:
+ conf.nmap_base=os.environ["ProgramFiles"] + "\\nmap\\nmap-os-fingerprints"
+else:
+ conf.nmap_base ="/usr/share/nmap/nmap-os-fingerprints"
+
+
+######################
+## nmap OS fp stuff ##
+######################
+
+
+class NmapKnowledgeBase(KnowledgeBase):
+ def lazy_init(self):
+ try:
+ f=open(self.filename)
+ except IOError:
+ return
+
+ self.base = []
+ name = None
+ try:
+ for l in f:
+ l = l.strip()
+ if not l or l[0] == "#":
+ continue
+ if l[:12] == "Fingerprint ":
+ if name is not None:
+ self.base.append((name,sig))
+ name = l[12:].strip()
+ sig={}
+ p = self.base
+ continue
+ elif l[:6] == "Class ":
+ continue
+ op = l.find("(")
+ cl = l.find(")")
+ if op < 0 or cl < 0:
+ warning("error reading nmap os fp base file")
+ continue
+ test = l[:op]
+ s = map(lambda x: x.split("="), l[op+1:cl].split("%"))
+ si = {}
+ for n,v in s:
+ si[n] = v
+ sig[test]=si
+ if name is not None:
+ self.base.append((name,sig))
+ except:
+ self.base = None
+ warning("Can't read nmap database [%s](new nmap version ?)" % self.filename)
+ f.close()
+
+nmap_kdb = NmapKnowledgeBase(conf.nmap_base)
+
+def TCPflags2str(f):
+ fl="FSRPAUEC"
+ s=""
+ for i in range(len(fl)):
+ if f & 1:
+ s = fl[i]+s
+ f >>= 1
+ return s
+
+def nmap_tcppacket_sig(pkt):
+ r = {}
+ if pkt is not None:
+# r["Resp"] = "Y"
+ r["DF"] = (pkt.flags & 2) and "Y" or "N"
+ r["W"] = "%X" % pkt.window
+ r["ACK"] = pkt.ack==2 and "S++" or pkt.ack==1 and "S" or "O"
+ r["Flags"] = TCPflags2str(pkt.payload.flags)
+ r["Ops"] = "".join(map(lambda x: x[0][0],pkt.payload.options))
+ else:
+ r["Resp"] = "N"
+ return r
+
+
+def nmap_udppacket_sig(S,T):
+ r={}
+ if T is None:
+ r["Resp"] = "N"
+ else:
+ r["DF"] = (T.flags & 2) and "Y" or "N"
+ r["TOS"] = "%X" % T.tos
+ r["IPLEN"] = "%X" % T.len
+ r["RIPTL"] = "%X" % T.payload.payload.len
+ r["RID"] = S.id == T.payload.payload.id and "E" or "F"
+ r["RIPCK"] = S.chksum == T.getlayer(IPerror).chksum and "E" or T.getlayer(IPerror).chksum == 0 and "0" or "F"
+ r["UCK"] = S.payload.chksum == T.getlayer(UDPerror).chksum and "E" or T.getlayer(UDPerror).chksum ==0 and "0" or "F"
+ r["ULEN"] = "%X" % T.getlayer(UDPerror).len
+ r["DAT"] = T.getlayer(conf.raw_layer) is None and "E" or S.getlayer(conf.raw_layer).load == T.getlayer(conf.raw_layer).load and "E" or "F"
+ return r
+
+
+
+def nmap_match_one_sig(seen, ref):
+ c = 0
+ for k in seen.keys():
+ if k in ref:
+ if seen[k] in ref[k].split("|"):
+ c += 1
+ if c == 0 and seen.get("Resp") == "N":
+ return 0.7
+ else:
+ return 1.0*c/len(seen.keys())
+
+
+def nmap_sig(target, oport=80, cport=81, ucport=1):
+ res = {}
+
+ tcpopt = [ ("WScale", 10),
+ ("NOP",None),
+ ("MSS", 256),
+ ("Timestamp",(123,0)) ]
+ tests = [ IP(dst=target, id=1)/TCP(seq=1, sport=5001, dport=oport, options=tcpopt, flags="CS"),
+ IP(dst=target, id=1)/TCP(seq=1, sport=5002, dport=oport, options=tcpopt, flags=0),
+ IP(dst=target, id=1)/TCP(seq=1, sport=5003, dport=oport, options=tcpopt, flags="SFUP"),
+ IP(dst=target, id=1)/TCP(seq=1, sport=5004, dport=oport, options=tcpopt, flags="A"),
+ IP(dst=target, id=1)/TCP(seq=1, sport=5005, dport=cport, options=tcpopt, flags="S"),
+ IP(dst=target, id=1)/TCP(seq=1, sport=5006, dport=cport, options=tcpopt, flags="A"),
+ IP(dst=target, id=1)/TCP(seq=1, sport=5007, dport=cport, options=tcpopt, flags="FPU"),
+ IP(str(IP(dst=target)/UDP(sport=5008,dport=ucport)/(300*"i"))) ]
+
+ ans, unans = sr(tests, timeout=2)
+ ans += map(lambda x: (x,None), unans)
+
+ for S,T in ans:
+ if S.sport == 5008:
+ res["PU"] = nmap_udppacket_sig(S,T)
+ else:
+ t = "T%i" % (S.sport-5000)
+ if T is not None and T.haslayer(ICMP):
+ warning("Test %s answered by an ICMP" % t)
+ T=None
+ res[t] = nmap_tcppacket_sig(T)
+
+ return res
+
+def nmap_probes2sig(tests):
+ tests=tests.copy()
+ res = {}
+ if "PU" in tests:
+ res["PU"] = nmap_udppacket_sig(*tests["PU"])
+ del(tests["PU"])
+ for k in tests:
+ res[k] = nmap_tcppacket_sig(tests[k])
+ return res
+
+
+def nmap_search(sigs):
+ guess = 0,[]
+ for os,fp in nmap_kdb.get_base():
+ c = 0.0
+ for t in sigs.keys():
+ if t in fp:
+ c += nmap_match_one_sig(sigs[t], fp[t])
+ c /= len(sigs.keys())
+ if c > guess[0]:
+ guess = c,[ os ]
+ elif c == guess[0]:
+ guess[1].append(os)
+ return guess
+
+
+@conf.commands.register
+def nmap_fp(target, oport=80, cport=81):
+ """nmap fingerprinting
+nmap_fp(target, [oport=80,] [cport=81,]) -> list of best guesses with accuracy
+"""
+ sigs = nmap_sig(target, oport, cport)
+ return nmap_search(sigs)
+
+
+@conf.commands.register
+def nmap_sig2txt(sig):
+ torder = ["TSeq","T1","T2","T3","T4","T5","T6","T7","PU"]
+ korder = ["Class", "gcd", "SI", "IPID", "TS",
+ "Resp", "DF", "W", "ACK", "Flags", "Ops",
+ "TOS", "IPLEN", "RIPTL", "RID", "RIPCK", "UCK", "ULEN", "DAT" ]
+ txt=[]
+ for i in sig.keys():
+ if i not in torder:
+ torder.append(i)
+ for t in torder:
+ sl = sig.get(t)
+ if sl is None:
+ continue
+ s = []
+ for k in korder:
+ v = sl.get(k)
+ if v is None:
+ continue
+ s.append("%s=%s"%(k,v))
+ txt.append("%s(%s)" % (t, "%".join(s)))
+ return "\n".join(txt)
+
+
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/p0f.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/p0f.py
new file mode 100644
index 00000000..289ef531
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/p0f.py
@@ -0,0 +1,549 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Clone of p0f passive OS fingerprinting
+"""
+
+from scapy.data import KnowledgeBase
+from scapy.config import conf
+from scapy.error import warning
+from scapy.layers.inet import IP, TCP, TCPOptions
+from scapy.packet import NoPayload
+
+conf.p0f_base ="/etc/p0f/p0f.fp"
+conf.p0fa_base ="/etc/p0f/p0fa.fp"
+conf.p0fr_base ="/etc/p0f/p0fr.fp"
+#conf.p0fo_base ="/etc/p0f/p0fo.fp"
+
+
+###############
+## p0f stuff ##
+###############
+
+# File format (according to p0f.fp) :
+#
+# wwww:ttt:D:ss:OOO...:QQ:OS:Details
+#
+# wwww - window size
+# ttt - initial TTL
+# D - don't fragment bit (0=unset, 1=set)
+# ss - overall SYN packet size
+# OOO - option value and order specification
+# QQ - quirks list
+# OS - OS genre
+# details - OS description
+
+class p0fKnowledgeBase(KnowledgeBase):
+ def __init__(self, filename):
+ KnowledgeBase.__init__(self, filename)
+ #self.ttl_range=[255]
+ def lazy_init(self):
+ try:
+ f=open(self.filename)
+ except IOError:
+ warning("Can't open base %s" % self.filename)
+ return
+ try:
+ self.base = []
+ for l in f:
+ if l[0] in ["#","\n"]:
+ continue
+ l = tuple(l.split(":"))
+ if len(l) < 8:
+ continue
+ def a2i(x):
+ if x.isdigit():
+ return int(x)
+ return x
+ li = [ a2i(i) for i in l[1:4] ]
+ #if li[0] not in self.ttl_range:
+ # self.ttl_range.append(li[0])
+ # self.ttl_range.sort()
+ self.base.append((l[0], li[0], li[1], li[2], l[4], l[5], l[6], l[7][:-1]))
+ except:
+ warning("Can't parse p0f database (new p0f version ?)")
+ self.base = None
+ f.close()
+
+p0f_kdb = p0fKnowledgeBase(conf.p0f_base)
+p0fa_kdb = p0fKnowledgeBase(conf.p0fa_base)
+p0fr_kdb = p0fKnowledgeBase(conf.p0fr_base)
+#p0fo_kdb = p0fKnowledgeBase(conf.p0fo_base)
+
+def p0f_selectdb(flags):
+ # tested flags: S, R, A
+ if flags & 0x16 == 0x2:
+ # SYN
+ return p0f_kdb
+ elif flags & 0x16 == 0x12:
+ # SYN/ACK
+ return p0fa_kdb
+ elif flags & 0x16 in [ 0x4, 0x14 ]:
+ # RST RST/ACK
+ return p0fr_kdb
+# elif flags & 0x16 == 0x10:
+ # ACK
+# return p0fo_kdb
+ else:
+ return None
+
+def packet2p0f(pkt):
+ pkt = pkt.copy()
+ pkt = pkt.__class__(bytes(pkt))
+ while pkt.haslayer(IP) and pkt.haslayer(TCP):
+ pkt = pkt.getlayer(IP)
+ if isinstance(pkt.payload, TCP):
+ break
+ pkt = pkt.payload
+
+ if not isinstance(pkt, IP) or not isinstance(pkt.payload, TCP):
+ raise TypeError("Not a TCP/IP packet")
+ #if pkt.payload.flags & 0x7 != 0x02: #S,!F,!R
+ # raise TypeError("Not a SYN or SYN/ACK packet")
+
+ db = p0f_selectdb(pkt.payload.flags)
+
+ #t = p0f_kdb.ttl_range[:]
+ #t += [pkt.ttl]
+ #t.sort()
+ #ttl=t[t.index(pkt.ttl)+1]
+ ttl = pkt.ttl
+
+ df = (pkt.flags & 2) / 2
+ ss = len(pkt)
+ # from p0f/config.h : PACKET_BIG = 100
+ if ss > 100:
+ if db == p0fr_kdb:
+ # p0fr.fp: "Packet size may be wildcarded. The meaning of
+ # wildcard is, however, hardcoded as 'size >
+ # PACKET_BIG'"
+ ss = '*'
+ else:
+ ss = 0
+# if db == p0fo_kdb:
+ # p0fo.fp: "Packet size MUST be wildcarded."
+# ss = '*'
+
+ ooo = ""
+ mss = -1
+ qqT = False
+ qqP = False
+ #qqBroken = False
+ ilen = (pkt.payload.dataofs << 2) - 20 # from p0f.c
+ for option in pkt.payload.options:
+ ilen -= 1
+ if option[0] == "MSS":
+ ooo += "M" + str(option[1]) + ","
+ mss = option[1]
+ # FIXME: qqBroken
+ ilen -= 3
+ elif option[0] == "WScale":
+ ooo += "W" + str(option[1]) + ","
+ # FIXME: qqBroken
+ ilen -= 2
+ elif option[0] == "Timestamp":
+ if option[1][0] == 0:
+ ooo += "T0,"
+ else:
+ ooo += "T,"
+ if option[1][1] != 0:
+ qqT = True
+ ilen -= 9
+ elif option[0] == "SAckOK":
+ ooo += "S,"
+ ilen -= 1
+ elif option[0] == "NOP":
+ ooo += "N,"
+ elif option[0] == "EOL":
+ ooo += "E,"
+ if ilen > 0:
+ qqP = True
+ else:
+ if type(option[0]) is str:
+ ooo += "?%i," % TCPOptions[1][option[0]]
+ else:
+ ooo += "?%i," % option[0]
+ # FIXME: ilen
+ ooo = ooo[:-1]
+ if ooo == "": ooo = "."
+
+ win = pkt.payload.window
+ if mss != -1:
+ if mss != 0 and win % mss == 0:
+ win = "S" + str(win/mss)
+ elif win % (mss + 40) == 0:
+ win = "T" + str(win/(mss+40))
+ win = str(win)
+
+ qq = ""
+
+ if db == p0fr_kdb:
+ if pkt.payload.flags & 0x10 == 0x10:
+ # p0fr.fp: "A new quirk, 'K', is introduced to denote
+ # RST+ACK packets"
+ qq += "K"
+ # The two next cases should also be only for p0f*r*, but although
+ # it's not documented (or I have not noticed), p0f seems to
+ # support the '0' and 'Q' quirks on any databases (or at the least
+ # "classical" p0f.fp).
+ if pkt.payload.seq == pkt.payload.ack:
+ # p0fr.fp: "A new quirk, 'Q', is used to denote SEQ number
+ # equal to ACK number."
+ qq += "Q"
+ if pkt.payload.seq == 0:
+ # p0fr.fp: "A new quirk, '0', is used to denote packets
+ # with SEQ number set to 0."
+ qq += "0"
+ if qqP:
+ qq += "P"
+ if pkt.id == 0:
+ qq += "Z"
+ if pkt.options != []:
+ qq += "I"
+ if pkt.payload.urgptr != 0:
+ qq += "U"
+ if pkt.payload.reserved != 0:
+ qq += "X"
+ if pkt.payload.ack != 0:
+ qq += "A"
+ if qqT:
+ qq += "T"
+# if db == p0fo_kdb:
+# if pkt.payload.flags & 0x20 != 0:
+ # U
+ # p0fo.fp: "PUSH flag is excluded from 'F' quirk checks"
+# qq += "F"
+# else:
+# if pkt.payload.flags & 0x28 != 0:
+ # U or P
+ qq += "F"
+ #if db != p0fo_kdb and not isinstance(pkt.payload.payload, NoPayload):
+ if not isinstance(pkt.payload.payload, NoPayload):
+ # p0fo.fp: "'D' quirk is not checked for."
+ qq += "D"
+ # FIXME : "!" - broken options segment: not handled yet
+
+ if qq == "":
+ qq = "."
+
+ return (db, (win, ttl, df, ss, ooo, qq))
+
+def p0f_correl(x,y):
+ d = 0
+ # wwww can be "*" or "%nn". "Tnn" and "Snn" should work fine with
+ # the x[0] == y[0] test.
+ d += (x[0] == y[0] or y[0] == "*" or (y[0][0] == "%" and x[0].isdigit() and (int(x[0]) % int(y[0][1:])) == 0))
+ # ttl
+ d += (y[1] >= x[1] and y[1] - x[1] < 32)
+ for i in [2, 5]:
+ d += (x[i] == y[i] or y[i] == '*')
+ # '*' has a special meaning for ss
+ d += x[3] == y[3]
+ xopt = x[4].split(",")
+ yopt = y[4].split(",")
+ if len(xopt) == len(yopt):
+ same = True
+ for i in range(len(xopt)):
+ if not (xopt[i] == yopt[i] or
+ (len(yopt[i]) == 2 and len(xopt[i]) > 1 and
+ yopt[i][1] == "*" and xopt[i][0] == yopt[i][0]) or
+ (len(yopt[i]) > 2 and len(xopt[i]) > 1 and
+ yopt[i][1] == "%" and xopt[i][0] == yopt[i][0] and
+ int(xopt[i][1:]) % int(yopt[i][2:]) == 0)):
+ same = False
+ break
+ if same:
+ d += len(xopt)
+ return d
+
+
+@conf.commands.register
+def p0f(pkt):
+ """Passive OS fingerprinting: which OS emitted this TCP packet ?
+p0f(packet) -> accuracy, [list of guesses]
+"""
+ db, sig = packet2p0f(pkt)
+ if db:
+ pb = db.get_base()
+ else:
+ pb = []
+ if not pb:
+ warning("p0f base empty.")
+ return []
+ #s = len(pb[0][0])
+ r = []
+ max = len(sig[4].split(",")) + 5
+ for b in pb:
+ d = p0f_correl(sig,b)
+ if d == max:
+ r.append((b[6], b[7], b[1] - pkt[IP].ttl))
+ return r
+
+def prnp0f(pkt):
+ # we should print which DB we use
+ try:
+ r = p0f(pkt)
+ except:
+ return
+ if r == []:
+ r = ("UNKNOWN", "[" + ":".join([ str(i) for i in packet2p0f(pkt)[1]]) + ":?:?]", None)
+ else:
+ r = r[0]
+ uptime = None
+ try:
+ uptime = pkt2uptime(pkt)
+ except:
+ pass
+ if uptime == 0:
+ uptime = None
+ res = pkt.sprintf("%IP.src%:%TCP.sport% - " + r[0] + " " + r[1])
+ if uptime is not None:
+ res += pkt.sprintf(" (up: " + str(uptime//3600) + " hrs)\n -> %IP.dst%:%TCP.dport% (%TCP.flags%)")
+ else:
+ res += pkt.sprintf("\n -> %IP.dst%:%TCP.dport% (%TCP.flags%)")
+ if r[2] is not None:
+ res += " (distance " + str(r[2]) + ")"
+ print(res)
+
+@conf.commands.register
+def pkt2uptime(pkt, HZ=100):
+ """Calculate the date the machine which emitted the packet booted using TCP timestamp
+pkt2uptime(pkt, [HZ=100])"""
+ if not isinstance(pkt, Packet):
+ raise TypeError("Not a TCP packet")
+ if isinstance(pkt,NoPayload):
+ raise TypeError("Not a TCP packet")
+ if not isinstance(pkt, TCP):
+ return pkt2uptime(pkt.payload)
+ for opt in pkt.options:
+ if opt[0] == "Timestamp":
+ #t = pkt.time - opt[1][0] * 1.0/HZ
+ #return time.ctime(t)
+ t = opt[1][0] / HZ
+ return t
+ raise TypeError("No timestamp option")
+
+def p0f_impersonate(pkt, osgenre=None, osdetails=None, signature=None,
+ extrahops=0, mtu=1500, uptime=None):
+ """Modifies pkt so that p0f will think it has been sent by a
+specific OS. If osdetails is None, then we randomly pick up a
+personality matching osgenre. If osgenre and signature are also None,
+we use a local signature (using p0f_getlocalsigs). If signature is
+specified (as a tuple), we use the signature.
+
+For now, only TCP Syn packets are supported.
+Some specifications of the p0f.fp file are not (yet) implemented."""
+ pkt = pkt.copy()
+ #pkt = pkt.__class__(str(pkt))
+ while pkt.haslayer(IP) and pkt.haslayer(TCP):
+ pkt = pkt.getlayer(IP)
+ if isinstance(pkt.payload, TCP):
+ break
+ pkt = pkt.payload
+
+ if not isinstance(pkt, IP) or not isinstance(pkt.payload, TCP):
+ raise TypeError("Not a TCP/IP packet")
+
+ if uptime is None:
+ uptime = random.randint(120,100*60*60*24*365)
+
+ db = p0f_selectdb(pkt.payload.flags)
+ if osgenre:
+ pb = db.get_base()
+ if pb is None:
+ pb = []
+ #pb = filter(lambda x: x[6] == osgenre, pb)
+ pb = [ x for x in pb if x[6] == osgenre ]
+ if osdetails:
+ #pb = filter(lambda x: x[7] == osdetails, pb)
+ pb = [ x for x in pb if x[7] == osdetails ]
+ elif signature:
+ pb = [signature]
+ else:
+ pb = p0f_getlocalsigs()[db]
+ if db == p0fr_kdb:
+ # 'K' quirk <=> RST+ACK
+ if pkt.payload.flags & 0x4 == 0x4:
+ #pb = filter(lambda x: 'K' in x[5], pb)
+ pb = [ x for x in pb if 'K' in x[5] ]
+ else:
+ #pb = filter(lambda x: 'K' not in x[5], pb)
+ pb = [ x for x in pb if 'K' not in x[5] ]
+ if not pb:
+ raise Scapy_Exception("No match in the p0f database")
+ pers = pb[random.randint(0, len(pb) - 1)]
+
+ # options (we start with options because of MSS)
+ ## TODO: let the options already set if they are valid
+ options = []
+ if pers[4] != '.':
+ for opt in pers[4].split(','):
+ if opt[0] == 'M':
+ # MSS might have a maximum size because of window size
+ # specification
+ if pers[0][0] == 'S':
+ maxmss = (2**16-1) / int(pers[0][1:])
+ else:
+ maxmss = (2**16-1)
+ # If we have to randomly pick up a value, we cannot use
+ # scapy RandXXX() functions, because the value has to be
+ # set in case we need it for the window size value. That's
+ # why we use random.randint()
+ if opt[1:] == '*':
+ options.append(('MSS', random.randint(1,maxmss)))
+ elif opt[1] == '%':
+ coef = int(opt[2:])
+ options.append(('MSS', coef*random.randint(1,maxmss/coef)))
+ else:
+ options.append(('MSS', int(opt[1:])))
+ elif opt[0] == 'W':
+ if opt[1:] == '*':
+ options.append(('WScale', RandByte()))
+ elif opt[1] == '%':
+ coef = int(opt[2:])
+ options.append(('WScale', coef*RandNum(min=1,
+ max=(2**8-1)/coef)))
+ else:
+ options.append(('WScale', int(opt[1:])))
+ elif opt == 'T0':
+ options.append(('Timestamp', (0, 0)))
+ elif opt == 'T':
+ if 'T' in pers[5]:
+ # FIXME: RandInt() here does not work (bug (?) in
+ # TCPOptionsField.m2i often raises "OverflowError:
+ # long int too large to convert to int" in:
+ # oval = struct.pack(ofmt, *oval)"
+ # Actually, this is enough to often raise the error:
+ # struct.pack('I', RandInt())
+ options.append(('Timestamp', (uptime, random.randint(1,2**32-1))))
+ else:
+ options.append(('Timestamp', (uptime, 0)))
+ elif opt == 'S':
+ options.append(('SAckOK', ''))
+ elif opt == 'N':
+ options.append(('NOP', None))
+ elif opt == 'E':
+ options.append(('EOL', None))
+ elif opt[0] == '?':
+ if int(opt[1:]) in TCPOptions[0]:
+ optname = TCPOptions[0][int(opt[1:])][0]
+ optstruct = TCPOptions[0][int(opt[1:])][1]
+ options.append((optname,
+ struct.unpack(optstruct,
+ RandString(struct.calcsize(optstruct))._fix())))
+ else:
+ options.append((int(opt[1:]), ''))
+ ## FIXME: qqP not handled
+ else:
+ warning("unhandled TCP option " + opt)
+ pkt.payload.options = options
+
+ # window size
+ if pers[0] == '*':
+ pkt.payload.window = RandShort()
+ elif pers[0].isdigit():
+ pkt.payload.window = int(pers[0])
+ elif pers[0][0] == '%':
+ coef = int(pers[0][1:])
+ pkt.payload.window = coef * RandNum(min=1,max=(2**16-1)/coef)
+ elif pers[0][0] == 'T':
+ pkt.payload.window = mtu * int(pers[0][1:])
+ elif pers[0][0] == 'S':
+ ## needs MSS set
+ #MSS = filter(lambda x: x[0] == 'MSS', options)
+ MSS = [ x for x in options if x[0] == 'MSS' ]
+ if not MSS:
+ raise Scapy_Exception("TCP window value requires MSS, and MSS option not set")
+ pkt.payload.window = MSS[0][1] * int(pers[0][1:])
+ else:
+ raise Scapy_Exception('Unhandled window size specification')
+
+ # ttl
+ pkt.ttl = pers[1]-extrahops
+ # DF flag
+ pkt.flags |= (2 * pers[2])
+ ## FIXME: ss (packet size) not handled (how ? may be with D quirk
+ ## if present)
+ # Quirks
+ if pers[5] != '.':
+ for qq in pers[5]:
+ ## FIXME: not handled: P, I, X, !
+ # T handled with the Timestamp option
+ if qq == 'Z': pkt.id = 0
+ elif qq == 'U': pkt.payload.urgptr = RandShort()
+ elif qq == 'A': pkt.payload.ack = RandInt()
+ elif qq == 'F':
+ #if db == p0fo_kdb:
+ # pkt.payload.flags |= 0x20 # U
+ #else:
+ pkt.payload.flags |= RandChoice(8, 32, 40) #P / U / PU
+ elif qq == 'D' and db != p0fo_kdb:
+ pkt /= conf.raw_layer(load=RandString(random.randint(1, 10))) # XXX p0fo.fp
+ elif qq == 'Q': pkt.payload.seq = pkt.payload.ack
+ #elif qq == '0': pkt.payload.seq = 0
+ #if db == p0fr_kdb:
+ # '0' quirk is actually not only for p0fr.fp (see
+ # packet2p0f())
+ if '0' in pers[5]:
+ pkt.payload.seq = 0
+ elif pkt.payload.seq == 0:
+ pkt.payload.seq = RandInt()
+
+ while pkt.underlayer:
+ pkt = pkt.underlayer
+ return pkt
+
+def p0f_getlocalsigs():
+ """This function returns a dictionary of signatures indexed by p0f
+db (e.g., p0f_kdb, p0fa_kdb, ...) for the local TCP/IP stack.
+
+You need to have your firewall at least accepting the TCP packets
+from/to a high port (30000 <= x <= 40000) on your loopback interface.
+
+Please note that the generated signatures come from the loopback
+interface and may (are likely to) be different than those generated on
+"normal" interfaces."""
+ pid = os.fork()
+ port = random.randint(30000, 40000)
+ if pid > 0:
+ # parent: sniff
+ result = {}
+ def addresult(res):
+ # TODO: wildcard window size in some cases? and maybe some
+ # other values?
+ if res[0] not in result:
+ result[res[0]] = [res[1]]
+ else:
+ if res[1] not in result[res[0]]:
+ result[res[0]].append(res[1])
+ # XXX could we try with a "normal" interface using other hosts
+ iface = conf.route.route('127.0.0.1')[0]
+ # each packet is seen twice: S + RA, S + SA + A + FA + A
+ # XXX are the packets also seen twice on non Linux systems ?
+ count=14
+ pl = sniff(iface=iface, filter='tcp and port ' + str(port), count = count, timeout=3)
+ map(addresult, map(packet2p0f, pl))
+ os.waitpid(pid,0)
+ elif pid < 0:
+ log_runtime.error("fork error")
+ else:
+ # child: send
+ # XXX erk
+ time.sleep(1)
+ s1 = socket.socket(socket.AF_INET, type = socket.SOCK_STREAM)
+ # S & RA
+ try:
+ s1.connect(('127.0.0.1', port))
+ except socket.error:
+ pass
+ # S, SA, A, FA, A
+ s1.bind(('127.0.0.1', port))
+ s1.connect(('127.0.0.1', port))
+ # howto: get an RST w/o ACK packet
+ s1.close()
+ os._exit(0)
+ return result
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/queso.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/queso.py
new file mode 100644
index 00000000..fbc7d06b
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/queso.py
@@ -0,0 +1,113 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Clone of queso OS fingerprinting
+"""
+
+from scapy.data import KnowledgeBase
+from scapy.config import conf
+from scapy.layers.inet import IP,TCP
+#from
+
+conf.queso_base ="/etc/queso.conf"
+
+
+#################
+## Queso stuff ##
+#################
+
+
+def quesoTCPflags(flags):
+ if flags == "-":
+ return "-"
+ flv = "FSRPAUXY"
+ v = 0
+ for i in flags:
+ v |= 2**flv.index(i)
+ return "%x" % v
+
+class QuesoKnowledgeBase(KnowledgeBase):
+ def lazy_init(self):
+ try:
+ f = open(self.filename)
+ except IOError:
+ return
+ self.base = {}
+ p = None
+ try:
+ for l in f:
+ l = l.strip()
+ if not l or l[0] == ';':
+ continue
+ if l[0] == '*':
+ if p is not None:
+ p[""] = name
+ name = l[1:].strip()
+ p = self.base
+ continue
+ if l[0] not in list("0123456"):
+ continue
+ res = l[2:].split()
+ res[-1] = quesoTCPflags(res[-1])
+ res = " ".join(res)
+ if not res in p:
+ p[res] = {}
+ p = p[res]
+ if p is not None:
+ p[""] = name
+ except:
+ self.base = None
+ warning("Can't load queso base [%s]", self.filename)
+ f.close()
+
+
+queso_kdb = QuesoKnowledgeBase(conf.queso_base)
+
+
+def queso_sig(target, dport=80, timeout=3):
+ p = queso_kdb.get_base()
+ ret = []
+ for flags in ["S", "SA", "F", "FA", "SF", "P", "SEC"]:
+ ans, unans = sr(IP(dst=target)/TCP(dport=dport,flags=flags,seq=RandInt()),
+ timeout=timeout, verbose=0)
+ if len(ans) == 0:
+ rs = "- - - -"
+ else:
+ s,r = ans[0]
+ rs = "%i" % (r.seq != 0)
+ if not r.ack:
+ r += " 0"
+ elif r.ack-s.seq > 666:
+ rs += " R" % 0
+ else:
+ rs += " +%i" % (r.ack-s.seq)
+ rs += " %X" % r.window
+ rs += " %x" % r.payload.flags
+ ret.append(rs)
+ return ret
+
+def queso_search(sig):
+ p = queso_kdb.get_base()
+ sig.reverse()
+ ret = []
+ try:
+ while sig:
+ s = sig.pop()
+ p = p[s]
+ if "" in p:
+ ret.append(p[""])
+ except KeyError:
+ pass
+ return ret
+
+
+@conf.commands.register
+def queso(*args,**kargs):
+ """Queso OS fingerprinting
+queso(target, dport=80, timeout=3)"""
+ return queso_search(queso_sig(*args, **kargs))
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/voip.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/voip.py
new file mode 100644
index 00000000..70000a54
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/modules/voip.py
@@ -0,0 +1,149 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+VoIP (Voice over IP) related functions
+"""
+
+import os
+###################
+## Testing stuff ##
+###################
+
+from fcntl import fcntl
+from scapy.sendrecv import sniff
+from scapy.layers.inet import IP,UDP
+from scapy.layers.rtp import RTP
+from scapy.utils import get_temp_file
+
+
+def merge(x,y,sample_size=2):
+ if len(x) > len(y):
+ y += "\x00"*(len(x)-len(y))
+ elif len(x) < len(y):
+ x += "\x00"*(len(y)-len(x))
+ m = ""
+ ss=sample_size
+ for i in range(len(x)/ss):
+ m += x[ss*i:ss*(i+1)]+y[ss*i:ss*(i+1)]
+ return m
+# return "".join(map(str.__add__, x, y))
+
+
+def voip_play(s1,list=None,**kargs):
+ FIFO=get_temp_file()
+ FIFO1=FIFO % 1
+ FIFO2=FIFO % 2
+
+ os.mkfifo(FIFO1)
+ os.mkfifo(FIFO2)
+ try:
+ os.system("soxmix -t .ul %s -t .ul %s -t ossdsp /dev/dsp &" % (FIFO1,FIFO2))
+
+ c1=open(FIFO1,"w", 4096)
+ c2=open(FIFO2,"w", 4096)
+ fcntl.fcntl(c1.fileno(),fcntl.F_SETFL, os.O_NONBLOCK)
+ fcntl.fcntl(c2.fileno(),fcntl.F_SETFL, os.O_NONBLOCK)
+
+ # dsp,rd = os.popen2("sox -t .ul -c 2 - -t ossdsp /dev/dsp")
+ def play(pkt,last=[]):
+ if not pkt:
+ return
+ if not pkt.haslayer(UDP):
+ return
+ ip=pkt.getlayer(IP)
+ if s1 in [ip.src, ip.dst]:
+ if not last:
+ last.append(pkt)
+ return
+ load=last.pop()
+ # x1 = load.load[12:]
+ c1.write(load.load[12:])
+ if load.getlayer(IP).src == ip.src:
+ # x2 = ""
+ c2.write("\x00"*len(load.load[12:]))
+ last.append(pkt)
+ else:
+ # x2 = pkt.load[:12]
+ c2.write(pkt.load[12:])
+ # dsp.write(merge(x1,x2))
+
+ if list is None:
+ sniff(store=0, prn=play, **kargs)
+ else:
+ for p in list:
+ play(p)
+ finally:
+ os.unlink(FIFO1)
+ os.unlink(FIFO2)
+
+
+
+def voip_play1(s1,list=None,**kargs):
+
+
+ dsp,rd = os.popen2("sox -t .ul - -t ossdsp /dev/dsp")
+ def play(pkt):
+ if not pkt:
+ return
+ if not pkt.haslayer(UDP):
+ return
+ ip=pkt.getlayer(IP)
+ if s1 in [ip.src, ip.dst]:
+ dsp.write(pkt.getlayer(conf.raw_layer).load[12:])
+ try:
+ if list is None:
+ sniff(store=0, prn=play, **kargs)
+ else:
+ for p in list:
+ play(p)
+ finally:
+ dsp.close()
+ rd.close()
+
+def voip_play2(s1,**kargs):
+ dsp,rd = os.popen2("sox -t .ul -c 2 - -t ossdsp /dev/dsp")
+ def play(pkt,last=[]):
+ if not pkt:
+ return
+ if not pkt.haslayer(UDP):
+ return
+ ip=pkt.getlayer(IP)
+ if s1 in [ip.src, ip.dst]:
+ if not last:
+ last.append(pkt)
+ return
+ load=last.pop()
+ x1 = load.load[12:]
+# c1.write(load.load[12:])
+ if load.getlayer(IP).src == ip.src:
+ x2 = ""
+# c2.write("\x00"*len(load.load[12:]))
+ last.append(pkt)
+ else:
+ x2 = pkt.load[:12]
+# c2.write(pkt.load[12:])
+ dsp.write(merge(x1,x2))
+
+ sniff(store=0, prn=play, **kargs)
+
+def voip_play3(lst=None,**kargs):
+ dsp,rd = os.popen2("sox -t .ul - -t ossdsp /dev/dsp")
+ try:
+ def play(pkt, dsp=dsp):
+ if pkt and pkt.haslayer(UDP) and pkt.haslayer(conf.raw_layer):
+ dsp.write(pkt.getlayer(RTP).load)
+ if lst is None:
+ sniff(store=0, prn=play, **kargs)
+ else:
+ for p in lst:
+ play(p)
+ finally:
+ try:
+ dsp.close()
+ rd.close()
+ except:
+ pass
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/packet.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/packet.py
new file mode 100644
index 00000000..c92a27b1
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/packet.py
@@ -0,0 +1,1360 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Packet class. Binding mechanism. fuzz() method.
+"""
+
+import time,itertools,os
+import sys,traceback
+import copy
+from .fields import StrField,ConditionalField,Emph,PacketListField
+from .config import conf
+from .base_classes import BasePacket,Gen,SetGen,Packet_metaclass,NewDefaultValues
+from .volatile import VolatileValue
+from .utils import import_hexcap,tex_escape,colgen,get_temp_file
+from .error import Scapy_Exception,log_runtime, warning
+import subprocess
+import pprint
+
+class CGlobal:
+ ONCE =False
+
+try:
+ import pyx
+except ImportError:
+ pass
+
+
+class RawVal:
+ def __init__(self, val=b""):
+ assert type(val) == bytes
+ self.val = val
+ def __str__(self):
+ return str(self.val)
+ def __repr__(self):
+ return "<RawVal [%r]>" % self.val
+ def bytes(self):
+ return self.val
+
+
+class CPacketRes:
+ pass;
+
+class Packet(BasePacket, metaclass = Packet_metaclass):
+ name=None
+
+ fields_desc = []
+
+ aliastypes = []
+ overload_fields = {}
+
+ underlayer = None
+
+ sent_time = None
+ payload_guess = []
+ initialized = 0
+ show_indent=1
+ explicit = 0
+ raw_packet_cache = None
+
+ @classmethod
+ def from_hexcap(cls):
+ return cls(import_hexcap())
+
+ @classmethod
+ def upper_bonds(self):
+ for fval,upper in self.payload_guess:
+ print("%-20s %s" % (upper.__name__, ", ".join("%-12s" % ("%s=%r"%i) for i in fval.items())))
+
+ @classmethod
+ def lower_bonds(self):
+ for lower,fval in self.overload_fields.items():
+ print("%-20s %s" % (lower.__name__, ", ".join("%-12s" % ("%s=%r"%i) for i in fval.items())))
+
+ def __init__(self, _pkt=b"", post_transform=None, _internal=0, _underlayer=None, **fields):
+ self.time = time.time()
+ self.sent_time = 0
+ if self.name is None:
+ self.name = self.__class__.__name__
+ self.aliastypes = [ self.__class__ ] + self.aliastypes
+ self.default_fields = {}
+ self.offset=0; # offset of the object
+ self.offset_fields = {} # ofsset of each field
+ self.overloaded_fields = {}
+ self.fields={}
+ self.fieldtype={}
+ self.packetfields=[]
+ self.__dict__["payload"] = NoPayload()
+ self.init_fields()
+ self.underlayer = _underlayer
+ self.initialized = 1
+ self.original = _pkt
+ if _pkt:
+ self.dissect(_pkt)
+ if not _internal:
+ self.dissection_done(self)
+ for f in fields.keys():
+ self.fields[f] = self.get_field(f).any2i(self,fields[f])
+ if type(post_transform) is list:
+ self.post_transforms = post_transform
+ elif post_transform is None:
+ self.post_transforms = []
+ else:
+ self.post_transforms = [post_transform]
+
+ def init_fields(self):
+ self.do_init_fields(self.fields_desc)
+
+ def do_init_fields(self, flist):
+ for f in flist:
+ self.default_fields[f.name] = copy.deepcopy(f.default)
+ self.fieldtype[f.name] = f
+ if f.holds_packets:
+ self.packetfields.append(f)
+
+ def dissection_done(self,pkt):
+ """DEV: will be called after a dissection is completed"""
+ self.post_dissection(pkt)
+ self.payload.dissection_done(pkt)
+
+ def post_dissection(self, pkt):
+ """DEV: is called after the dissection of the whole packet"""
+ pass
+
+ def get_field(self, fld):
+ """DEV: returns the field instance from the name of the field"""
+ return self.fieldtype[fld]
+
+ def add_payload(self, payload):
+ if payload is None:
+ return
+ elif not isinstance(self.payload, NoPayload):
+ self.payload.add_payload(payload)
+ else:
+ if isinstance(payload, Packet):
+ self.__dict__["payload"] = payload
+ payload.add_underlayer(self)
+ for t in self.aliastypes:
+ if t in payload.overload_fields:
+ self.overloaded_fields = payload.overload_fields[t]
+ break
+ #elif type(payload) is str:
+ elif type(payload) is bytes:
+ self.__dict__["payload"] = conf.raw_layer(load=payload)
+ else:
+ raise TypeError("payload must be either 'Packet' or 'bytes', not [%s]" % repr(payload))
+ def remove_payload(self):
+ self.payload.remove_underlayer(self)
+ self.__dict__["payload"] = NoPayload()
+ self.overloaded_fields = {}
+ def add_underlayer(self, underlayer):
+ self.underlayer = underlayer
+ def remove_underlayer(self,other):
+ self.underlayer = None
+ def copy(self):
+ """Returns a deep copy of the instance."""
+ clone = self.__class__()
+ clone.fields = self.fields.copy()
+ for k in clone.fields:
+ clone.fields[k] = self.get_field(k).do_copy(clone.fields[k])
+ clone.default_fields = self.default_fields.copy()
+ clone.overloaded_fields = self.overloaded_fields.copy()
+ clone.overload_fields = self.overload_fields.copy()
+ clone.offset=self.offset
+ clone.underlayer = self.underlayer
+ clone.explicit = self.explicit
+ clone.raw_packet_cache = self.raw_packet_cache
+ clone.post_transforms = self.post_transforms[:]
+ clone.__dict__["payload"] = self.payload.copy()
+ clone.payload.add_underlayer(clone)
+ return clone
+
+ def dump_offsets (self):
+ print ("obj-id {0} {1} {2}".format(id(self),self.name ,self.offset))
+ if self.payload:
+ self.payload.dump_offsets()
+
+ def getfieldval(self, attr):
+ if attr in self.fields:
+ return self.fields[attr]
+ if attr in self.overloaded_fields:
+ return self.overloaded_fields[attr]
+ if attr in self.default_fields:
+ return self.default_fields[attr]
+ return self.payload.getfieldval(attr)
+
+ def getbyteval(self, attr):
+ fld,v = self.getfield_and_val(attr)
+ return fld.i2b(self, v)
+
+ def getstrval(self, attr):
+ fld,v = self.getfield_and_val(attr)
+ return fld.i2repr(self, v)
+
+ def getfield_and_val(self, attr):
+ if attr in self.fields:
+ return self.get_field(attr),self.fields[attr]
+ if attr in self.overloaded_fields:
+ return self.get_field(attr),self.overloaded_fields[attr]
+ if attr in self.default_fields:
+ return self.get_field(attr),self.default_fields[attr]
+ return self.payload.getfield_and_val(attr)
+
+ def __getattr__(self, attr):
+ if self.initialized:
+ fld,v = self.getfield_and_val(attr)
+ if fld is not None:
+ return fld.i2h(self, v)
+ return v
+ raise AttributeError(attr)
+
+ def setfieldval(self, attr, val):
+ if attr in self.default_fields:
+ fld = self.get_field(attr)
+ if fld is None:
+ any2i = lambda x,y: y
+ else:
+ any2i = fld.any2i
+ self.fields[attr] = any2i(self, val)
+ self.explicit = 0
+ self.raw_packet_cache = None
+ elif attr == "payload":
+ self.remove_payload()
+ self.add_payload(val)
+ else:
+ self.payload.setfieldval(attr,val)
+
+ def __setattr__(self, attr, val):
+ if self.initialized:
+ try:
+ self.setfieldval(attr,val)
+ except AttributeError:
+ pass
+ else:
+ return
+ self.__dict__[attr] = val
+
+ def delfieldval(self, attr):
+ if attr in self.fields:
+ del(self.fields[attr])
+ self.explicit = 0 # in case a default value must be explicited
+ self.raw_packet_cache = None
+ elif attr in self.default_fields:
+ pass
+ elif attr == "payload":
+ self.remove_payload()
+ else:
+ self.payload.delfieldval(attr)
+
+ def __delattr__(self, attr):
+ if self.initialized:
+ try:
+ self.delfieldval(attr)
+ except AttributeError:
+ pass
+ else:
+ return
+ if attr in self.__dict__:
+ del(self.__dict__[attr])
+ else:
+ raise AttributeError(attr)
+
+ def __repr__(self):
+ s = ""
+ ct = conf.color_theme
+ for f in self.fields_desc:
+ if isinstance(f, ConditionalField) and not f._evalcond(self):
+ continue
+ if f.name in self.fields:
+ val = f.i2repr(self, self.fields[f.name])
+ elif f.name in self.overloaded_fields:
+ val = f.i2repr(self, self.overloaded_fields[f.name])
+ else:
+ continue
+ if isinstance(f, Emph) or f in conf.emph:
+ ncol = ct.emph_field_name
+ vcol = ct.emph_field_value
+ else:
+ ncol = ct.field_name
+ vcol = ct.field_value
+
+
+ s += " %s%s%s" % (ncol(f.name),
+ ct.punct("="),
+ vcol(val))
+ return "%s%s %s %s%s%s"% (ct.punct("<"),
+ ct.layer_name(self.__class__.__name__),
+ s,
+ ct.punct("|"),
+ repr(self.payload),
+ ct.punct(">"))
+ #def __str__(self):
+ #TODO3 FIX
+ def __str__(self):
+ warning("Unless called manually, this could indicate deprecated use. Should be changed to bytes(self)")
+ return repr(bytes(self))
+ def __bytes__(self):
+ return self.build()
+ def __div__(self, other):
+ if isinstance(other, Packet):
+ cloneA = self.copy()
+ cloneB = other.copy()
+ cloneA.add_payload(cloneB)
+ return cloneA
+ elif type(other) is str:
+ return self/conf.raw_layer(load=other.encode('ascii'))
+ elif type(other) is bytes:
+ return self/conf.raw_layer(load=other)
+ else:
+ return other.__rdiv__(self)
+ __truediv__ = __div__
+ def __rdiv__(self, other):
+ if type(other) is str:
+ return conf.raw_layer(load=other.encode('ascii'))/self
+ if type(other) is bytes:
+ return conf.raw_layer(load=other)/self
+ else:
+ raise TypeError
+ __rtruediv__ = __rdiv__
+ def __mul__(self, other):
+ if type(other) is int:
+ return [self]*other
+ else:
+ raise TypeError
+ def __rmul__(self,other):
+ return self.__mul__(other)
+
+ def __nonzero__(self):
+ return True
+
+ def __len__(self):
+ return len(bytes(self))
+
+ def dump_fields_offsets (self):
+ for f in self.fields_desc:
+ print ("field %-40s %02d %02d" % (f.name, f.offset,f.get_size_bytes () ) );
+
+ def self_build(self, field_pos_list=None):
+ if self.raw_packet_cache is not None:
+ return self.raw_packet_cache
+ p=b""
+ for f in self.fields_desc:
+ #print(f.name)
+ if type(p) is tuple :
+ f.offset=len(p[0])
+ else:
+ assert(type(p)==bytes)
+ f.offset=len(p)
+
+ val = self.getfieldval(f.name)
+ if isinstance(val, RawVal):
+ #sval = str(val)
+ sval = bytes(val)
+ p += sval
+ if field_pos_list is not None:
+ field_pos_list.append( (f.name, sval.encode("string_escape"), len(p), len(sval) ) )
+ f.offset= val
+ else:
+ p = f.addfield(self, p, val)
+ return p
+
+ def do_build_payload(self):
+ return self.payload.do_build(None)
+
+ def do_update_payload_offset(self,pkt):
+ #print "obj-id ",id(self)
+ #print "current offset ",self.name," ",self.offset
+ #print "current header size ",len(pkt)
+ self.payload.offset = self.offset + len(pkt)
+
+ def dump_layers_offset (self):
+ p=self;
+ while True:
+ print (p.name, "offset :",p.offset)
+ p=p.payload
+ if p ==None or isinstance(p,NoPayload):
+ break;
+
+
+ def do_build(self,result):
+ if not self.explicit:
+ self = next(self.__iter__())
+ pkt = self.self_build()
+ for t in self.post_transforms:
+ pkt = t(pkt)
+ self.do_update_payload_offset(pkt)
+ pay = self.do_build_payload()
+ p = self.post_build(pkt,pay)
+ if result != None:
+ result.pkt = self;
+ return p
+
+ def build_padding(self):
+ return self.payload.build_padding()
+
+ def update_build_info (self,other):
+ p=self;
+ o=other;
+ while True:
+ assert(p.aliastypes==o.aliastypes)
+ assert(type(p) == type(o) )
+
+ #copy
+ p.offset=o.offset
+
+ #next
+ p=p.payload
+ o=o.payload
+ if p ==None or isinstance(p,NoPayload):
+ break;
+
+ def build(self):
+ result = CPacketRes;
+ p = self.do_build(result)
+ p += self.build_padding()
+ p = self.build_done(p)
+ self.update_build_info (result.pkt)
+ return p
+
+ def post_build(self, pkt, pay):
+ """DEV: called right after the current layer is build."""
+ return pkt+pay
+
+ def build_done(self, p):
+ return self.payload.build_done(p)
+
+ def do_build_ps(self):
+ p=b""
+ pl = []
+ q=b""
+ for f in self.fields_desc:
+ if isinstance(f, ConditionalField) and not f._evalcond(self):
+ continue
+ p = f.addfield(self, p, self.getfieldval(f.name) )
+ if type(p) is bytes:
+ r = p[len(q):]
+ q = p
+ else:
+ r = b""
+ pl.append( (f, f.i2repr(self,self.getfieldval(f.name)), r) )
+
+ pkt,lst = self.payload.build_ps(internal=1)
+ p += pkt
+ lst.append( (self, pl) )
+
+ return p,lst
+
+ def build_ps(self,internal=0):
+ p,lst = self.do_build_ps()
+# if not internal:
+# pkt = self
+# while pkt.haslayer(conf.padding_layer):
+# pkt = pkt.getlayer(conf.padding_layer)
+# lst.append( (pkt, [ ("loakjkjd", pkt.load, pkt.load) ] ) )
+# p += pkt.load
+# pkt = pkt.payload
+ return p,lst
+
+
+ def psdump(self, filename=None, **kargs):
+ """psdump(filename=None, layer_shift=0, rebuild=1)
+Creates an EPS file describing a packet. If filename is not provided a temporary file is created and gs is called."""
+ canvas = self.canvas_dump(**kargs)
+ if filename is None:
+ fname = get_temp_file(autoext=".eps")
+ canvas.writeEPSfile(fname)
+ subprocess.Popen([conf.prog.psreader, fname+".eps"])
+ else:
+ canvas.writeEPSfile(filename)
+
+ def pdfdump(self, filename=None, **kargs):
+ """pdfdump(filename=None, layer_shift=0, rebuild=1)
+ Creates a PDF file describing a packet. If filename is not provided a temporary file is created and xpdf is called."""
+ canvas = self.canvas_dump(**kargs)
+ if filename is None:
+ fname = get_temp_file(autoext=".pdf")
+ canvas.writePDFfile(fname)
+ subprocess.Popen([conf.prog.pdfreader, fname+".pdf"])
+ else:
+ canvas.writePDFfile(filename)
+
+
+ def canvas_dump(self, layer_shift=0, rebuild=1):
+ canvas = pyx.canvas.canvas()
+ if rebuild:
+ p,t = self.__class__(bytes(self)).build_ps()
+ else:
+ p,t = self.build_ps()
+ YTXT=len(t)
+ for n,l in t:
+ YTXT += len(l)
+ YTXT = float(YTXT)
+ YDUMP=YTXT
+
+ XSTART = 1
+ XDSTART = 10
+ y = 0.0
+ yd = 0.0
+ xd = 0
+ XMUL= 0.55
+ YMUL = 0.4
+
+ backcolor=colgen(0.6, 0.8, 1.0, trans=pyx.color.rgb)
+ forecolor=colgen(0.2, 0.5, 0.8, trans=pyx.color.rgb)
+# backcolor=makecol(0.376, 0.729, 0.525, 1.0)
+
+
+ def hexstr(x):
+ s = []
+ for c in x:
+ s.append("%02x" % c)
+ return " ".join(s)
+
+
+ def make_dump_txt(x,y,txt):
+ return pyx.text.text(XDSTART+x*XMUL, (YDUMP-y)*YMUL, r"\tt{%s}"%hexstr(txt), [pyx.text.size.Large])
+
+ def make_box(o):
+ return pyx.box.rect(o.left(), o.bottom(), o.width(), o.height(), relcenter=(0.5,0.5))
+
+ def make_frame(lst):
+ if len(lst) == 1:
+ b = lst[0].bbox()
+ b.enlarge(pyx.unit.u_pt)
+ return b.path()
+ else:
+ fb = lst[0].bbox()
+ fb.enlarge(pyx.unit.u_pt)
+ lb = lst[-1].bbox()
+ lb.enlarge(pyx.unit.u_pt)
+ if len(lst) == 2 and fb.left() > lb.right():
+ return pyx.path.path(pyx.path.moveto(fb.right(), fb.top()),
+ pyx.path.lineto(fb.left(), fb.top()),
+ pyx.path.lineto(fb.left(), fb.bottom()),
+ pyx.path.lineto(fb.right(), fb.bottom()),
+ pyx.path.moveto(lb.left(), lb.top()),
+ pyx.path.lineto(lb.right(), lb.top()),
+ pyx.path.lineto(lb.right(), lb.bottom()),
+ pyx.path.lineto(lb.left(), lb.bottom()))
+ else:
+ # XXX
+ gb = lst[1].bbox()
+ if gb != lb:
+ gb.enlarge(pyx.unit.u_pt)
+ kb = lst[-2].bbox()
+ if kb != gb and kb != lb:
+ kb.enlarge(pyx.unit.u_pt)
+ return pyx.path.path(pyx.path.moveto(fb.left(), fb.top()),
+ pyx.path.lineto(fb.right(), fb.top()),
+ pyx.path.lineto(fb.right(), kb.bottom()),
+ pyx.path.lineto(lb.right(), kb.bottom()),
+ pyx.path.lineto(lb.right(), lb.bottom()),
+ pyx.path.lineto(lb.left(), lb.bottom()),
+ pyx.path.lineto(lb.left(), gb.top()),
+ pyx.path.lineto(fb.left(), gb.top()),
+ pyx.path.closepath(),)
+
+
+ def make_dump(s, shift=0, y=0, col=None, bkcol=None, larg=16):
+ c = pyx.canvas.canvas()
+ tlist = []
+ while s:
+ dmp,s = s[:larg-shift],s[larg-shift:]
+ txt = make_dump_txt(shift, y, dmp)
+ tlist.append(txt)
+ shift += len(dmp)
+ if shift >= 16:
+ shift = 0
+ y += 1
+ if col is None:
+ col = pyx.color.rgb.red
+ if bkcol is None:
+ col = pyx.color.rgb.white
+ c.stroke(make_frame(tlist),[col,pyx.deco.filled([bkcol]),pyx.style.linewidth.Thick])
+ for txt in tlist:
+ c.insert(txt)
+ return c, tlist[-1].bbox(), shift, y
+
+
+ last_shift,last_y=0,0.0
+ while t:
+ bkcol = next(backcolor)
+ proto,fields = t.pop()
+ y += 0.5
+ pt = pyx.text.text(XSTART, (YTXT-y)*YMUL, r"\font\cmssfont=cmss10\cmssfont{%s}" % proto.name, [ pyx.text.size.Large])
+ y += 1
+ ptbb=pt.bbox()
+ ptbb.enlarge(pyx.unit.u_pt*2)
+ canvas.stroke(ptbb.path(),[pyx.color.rgb.black, pyx.deco.filled([bkcol])])
+ canvas.insert(pt)
+ for fname, fval, fdump in fields:
+ col = next(forecolor)
+ ft = pyx.text.text(XSTART, (YTXT-y)*YMUL, r"\font\cmssfont=cmss10\cmssfont{%s}" % tex_escape(fname.name))
+ if isinstance(fval, str):
+ if len(fval) > 18:
+ fval = fval[:18]+"[...]"
+ else:
+ fval=""
+ vt = pyx.text.text(XSTART+3, (YTXT-y)*YMUL, r"\font\cmssfont=cmss10\cmssfont{%s}" % tex_escape(fval))
+ y += 1.0
+ if fdump:
+ dt,target,last_shift,last_y = make_dump(fdump, last_shift, last_y, col, bkcol)
+
+ dtb = dt.bbox()
+ dtb=target
+ vtb = vt.bbox()
+ bxvt = make_box(vtb)
+ bxdt = make_box(dtb)
+ dtb.enlarge(pyx.unit.u_pt)
+ try:
+ if yd < 0:
+ cnx = pyx.connector.curve(bxvt,bxdt,absangle1=0, absangle2=-90)
+ else:
+ cnx = pyx.connector.curve(bxvt,bxdt,absangle1=0, absangle2=90)
+ except:
+ pass
+ else:
+ canvas.stroke(cnx,[pyx.style.linewidth.thin,pyx.deco.earrow.small,col])
+
+ canvas.insert(dt)
+
+ canvas.insert(ft)
+ canvas.insert(vt)
+ last_y += layer_shift
+
+ return canvas
+
+
+
+ def extract_padding(self, s):
+ """DEV: to be overloaded to extract current layer's padding. Return a couple of strings (actual layer, padding)"""
+ return s,None
+
+ def post_dissect(self, s):
+ """DEV: is called right after the current layer has been dissected"""
+ return s
+
+ def pre_dissect(self, s):
+ """DEV: is called right before the current layer is dissected"""
+ return s
+
+ def do_dissect(self, s):
+ flist = self.fields_desc[:]
+ flist.reverse()
+ raw = s
+ while s and flist:
+ f = flist.pop()
+ #print(f, end = " = ")
+ s,fval = f.getfield(self, s)
+ #print('fval')
+ self.fields[f.name] = fval
+ assert(raw.endswith(s))
+ if s:
+ self.raw_packet_cache = raw[:-len(s)]
+ else:
+ self.raw_packet_cache = raw
+ self.explicit = 1
+ return s
+
+ def do_dissect_payload(self, s):
+ if s:
+ cls = self.guess_payload_class(s)
+ try:
+ p = cls(s, _internal=1, _underlayer=self)
+ except KeyboardInterrupt:
+ raise
+ except:
+ if conf.debug_dissector:
+ if isinstance(cls,type) and issubclass(cls,Packet):
+ log_runtime.error("%s dissector failed" % cls.name)
+ else:
+ log_runtime.error("%s.guess_payload_class() returned [%s]" % (self.__class__.__name__,repr(cls)))
+ if cls is not None:
+ raise
+ p = conf.raw_layer(s, _internal=1, _underlayer=self)
+ self.add_payload(p)
+
+ def dissect(self, s):
+ s = self.pre_dissect(s)
+
+ s = self.do_dissect(s)
+
+ s = self.post_dissect(s)
+
+ payl,pad = self.extract_padding(s)
+ self.do_dissect_payload(payl)
+ if pad and conf.padding:
+ self.add_payload(conf.padding_layer(pad))
+
+
+ def guess_payload_class(self, payload):
+ """DEV: Guesses the next payload class from layer bonds. Can be overloaded to use a different mechanism."""
+ for t in self.aliastypes:
+ for fval, cls in t.payload_guess:
+ ok = 1
+ for k in fval.keys():
+ if not hasattr(self, k) or fval[k] != self.getfieldval(k):
+ ok = 0
+ break
+ if ok:
+ return cls
+ return self.default_payload_class(payload)
+
+ def default_payload_class(self, payload):
+ """DEV: Returns the default payload class if nothing has been found by the guess_payload_class() method."""
+ return conf.raw_layer
+
+ def hide_defaults(self):
+ """Removes fields' values that are the same as default values."""
+ for k in list(self.fields.keys()):
+ if k in self.default_fields:
+ if self.default_fields[k] == self.fields[k]:
+ del(self.fields[k])
+ self.payload.hide_defaults()
+
+ def clone_with(self, payload=None, **kargs):
+ pkt = self.__class__()
+ pkt.explicit = 1
+ pkt.fields = kargs
+ pkt.offset=self.offset
+ pkt.time = self.time
+ pkt.underlayer = self.underlayer
+ pkt.overload_fields = self.overload_fields.copy()
+ pkt.post_transforms = self.post_transforms
+ if payload is not None:
+ pkt.add_payload(payload)
+ return pkt
+
+
+ def __iter__(self):
+ def loop(todo, done, self=self):
+ if todo:
+ eltname = todo.pop()
+ elt = self.getfieldval(eltname)
+ if not isinstance(elt, Gen):
+ if self.get_field(eltname).islist:
+ elt = SetGen([elt])
+ else:
+ elt = SetGen(elt)
+ for e in elt:
+ done[eltname]=e
+ for x in loop(todo[:], done):
+ yield x
+ else:
+ if isinstance(self.payload,NoPayload):
+ payloads = [None]
+ else:
+ payloads = self.payload
+ for payl in payloads:
+ done2=done.copy()
+ for k in done2:
+ if isinstance(done2[k], VolatileValue):
+ done2[k] = done2[k]._fix()
+ pkt = self.clone_with(payload=payl, **done2)
+ yield pkt
+
+ if self.explicit:
+ todo = []
+ done = self.fields
+ else:
+ todo = [ k for (k,v) in itertools.chain(self.default_fields.items(),
+ self.overloaded_fields.items())
+ if isinstance(v, VolatileValue) ] + list(self.fields.keys())
+ done = {}
+ return loop(todo, done)
+
+ def __gt__(self, other):
+ """True if other is an answer from self (self ==> other)."""
+ if isinstance(other, Packet):
+ return other < self
+ elif type(other) is str:
+ return 1
+ else:
+ raise TypeError((self, other))
+ def __lt__(self, other):
+ """True if self is an answer from other (other ==> self)."""
+ if isinstance(other, Packet):
+ return self.answers(other)
+ elif type(other) is str:
+ return 1
+ else:
+ raise TypeError((self, other))
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return False
+ for f in self.fields_desc:
+ if f not in other.fields_desc:
+ return False
+ if self.getfieldval(f.name) != other.getfieldval(f.name):
+ return False
+ return self.payload == other.payload
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def hashret(self):
+ """DEV: returns a string that has the same value for a request and its answer."""
+ return self.payload.hashret()
+ def answers(self, other):
+ """DEV: true if self is an answer from other"""
+ if other.__class__ == self.__class__:
+ return self.payload.answers(other.payload)
+ return 0
+
+ def haslayer(self, cls):
+ """true if self has a layer that is an instance of cls. Superseded by "cls in self" syntax."""
+ if self.__class__ == cls or self.__class__.__name__ == cls:
+ return 1
+ for f in self.packetfields:
+ fvalue_gen = self.getfieldval(f.name)
+ if fvalue_gen is None:
+ continue
+ if not f.islist:
+ fvalue_gen = SetGen(fvalue_gen,_iterpacket=0)
+ for fvalue in fvalue_gen:
+ if isinstance(fvalue, Packet):
+ ret = fvalue.haslayer(cls)
+ if ret:
+ return ret
+ return self.payload.haslayer(cls)
+ def getlayer(self, cls, nb=1, _track=None):
+ """Return the nb^th layer that is an instance of cls."""
+ if type(cls) is int:
+ nb = cls+1
+ cls = None
+ if type(cls) is str and "." in cls:
+ ccls,fld = cls.split(".",1)
+ else:
+ ccls,fld = cls,None
+ if cls is None or self.__class__ == cls or self.__class__.name == ccls:
+ if nb == 1:
+ if fld is None:
+ return self
+ else:
+ return self.getfieldval(fld)
+ else:
+ nb -=1
+ for f in self.packetfields:
+ fvalue_gen = self.getfieldval(f.name)
+ if fvalue_gen is None:
+ continue
+ if not f.islist:
+ fvalue_gen = SetGen(fvalue_gen,_iterpacket=0)
+ for fvalue in fvalue_gen:
+ if isinstance(fvalue, Packet):
+ track=[]
+ ret = fvalue.getlayer(cls, nb, _track=track)
+ if ret is not None:
+ return ret
+ nb = track[0]
+ return self.payload.getlayer(cls,nb,_track=_track)
+
+ def firstlayer(self):
+ q = self
+ while q.underlayer is not None:
+ q = q.underlayer
+ return q
+
+ def __getitem__(self, cls):
+ if type(cls) is slice:
+ lname = cls.start
+ if cls.stop:
+ ret = self.getlayer(cls.start, cls.stop)
+ else:
+ ret = self.getlayer(cls.start)
+ if ret is None and cls.step is not None:
+ ret = cls.step
+ else:
+ lname=cls
+ ret = self.getlayer(cls)
+ if ret is None:
+ if type(lname) is Packet_metaclass:
+ lname = lname.__name__
+ elif type(lname) is not str:
+ lname = repr(lname)
+ raise IndexError("Layer [%s] not found" % lname)
+ return ret
+
+ def __delitem__(self, cls):
+ del(self[cls].underlayer.payload)
+
+ def __setitem__(self, cls, val):
+ self[cls].underlayer.payload = val
+
+ def __contains__(self, cls):
+ """"cls in self" returns true if self has a layer which is an instance of cls."""
+ return self.haslayer(cls)
+
+ def route(self):
+ return (None,None,None)
+
+ def fragment(self, *args, **kargs):
+ return self.payload.fragment(*args, **kargs)
+
+
+ def display(self,*args,**kargs): # Deprecated. Use show()
+ """Deprecated. Use show() method."""
+ self.show(*args,**kargs)
+ def show(self, indent=3, lvl="", label_lvl=""):
+ """Prints a hierarchical view of the packet. "indent" gives the size of indentation for each layer."""
+ ct = conf.color_theme
+ print("%s%s %s %s" % (label_lvl,
+ ct.punct("###["),
+ ct.layer_name(self.name),
+ ct.punct("]###")))
+ for f in self.fields_desc:
+ if isinstance(f, ConditionalField) and not f._evalcond(self):
+ continue
+ if isinstance(f, Emph) or f in conf.emph:
+ ncol = ct.emph_field_name
+ vcol = ct.emph_field_value
+ else:
+ ncol = ct.field_name
+ vcol = ct.field_value
+ fvalue = self.getfieldval(f.name)
+ if isinstance(fvalue, Packet) or (f.islist and f.holds_packets and type(fvalue) is list):
+ print("%s \\%-10s\\" % (label_lvl+lvl, ncol(f.name)))
+ fvalue_gen = SetGen(fvalue,_iterpacket=0)
+ for fvalue in fvalue_gen:
+ fvalue.show(indent=indent, label_lvl=label_lvl+lvl+" |")
+ else:
+ begn = "%s %-10s%s " % (label_lvl+lvl,
+ ncol(f.name),
+ ct.punct("="),)
+ reprval = f.i2repr(self,fvalue)
+ if type(reprval) is str:
+ reprval = reprval.replace("\n", "\n"+" "*(len(label_lvl)
+ +len(lvl)
+ +len(f.name)
+ +4))
+ print("%s%s" % (begn,vcol(reprval)))
+ self.payload.show(indent=indent, lvl=lvl+(" "*indent*self.show_indent), label_lvl=label_lvl)
+ def show2(self):
+ """Prints a hierarchical view of an assembled version of the packet, so that automatic fields are calculated (checksums, etc.)"""
+ self.__class__(bytes(self)).show()
+
+ def sprintf(self, fmt, relax=1):
+ """sprintf(format, [relax=1]) -> str
+where format is a string that can include directives. A directive begins and
+ends by % and has the following format %[fmt[r],][cls[:nb].]field%.
+
+fmt is a classic printf directive, "r" can be appended for raw substitution
+(ex: IP.flags=0x18 instead of SA), nb is the number of the layer we want
+(ex: for IP/IP packets, IP:2.src is the src of the upper IP layer).
+Special case : "%.time%" is the creation time.
+Ex : p.sprintf("%.time% %-15s,IP.src% -> %-15s,IP.dst% %IP.chksum% "
+ "%03xr,IP.proto% %r,TCP.flags%")
+
+Moreover, the format string can include conditionnal statements. A conditionnal
+statement looks like : {layer:string} where layer is a layer name, and string
+is the string to insert in place of the condition if it is true, i.e. if layer
+is present. If layer is preceded by a "!", the result si inverted. Conditions
+can be imbricated. A valid statement can be :
+ p.sprintf("This is a{TCP: TCP}{UDP: UDP}{ICMP:n ICMP} packet")
+ p.sprintf("{IP:%IP.dst% {ICMP:%ICMP.type%}{TCP:%TCP.dport%}}")
+
+A side effect is that, to obtain "{" and "}" characters, you must use
+"%(" and "%)".
+"""
+
+ escape = { "%": "%",
+ "(": "{",
+ ")": "}" }
+
+
+ # Evaluate conditions
+ while "{" in fmt:
+ i = fmt.rindex("{")
+ j = fmt[i+1:].index("}")
+ cond = fmt[i+1:i+j+1]
+ k = cond.find(":")
+ if k < 0:
+ raise Scapy_Exception("Bad condition in format string: [%s] (read sprintf doc!)"%cond)
+ cond,format = cond[:k],cond[k+1:]
+ res = False
+ if cond[0] == "!":
+ res = True
+ cond = cond[1:]
+ if self.haslayer(cond):
+ res = not res
+ if not res:
+ format = ""
+ fmt = fmt[:i]+format+fmt[i+j+2:]
+
+ # Evaluate directives
+ s = ""
+ while "%" in fmt:
+ i = fmt.index("%")
+ s += fmt[:i]
+ fmt = fmt[i+1:]
+ if fmt and fmt[0] in escape:
+ s += escape[fmt[0]]
+ fmt = fmt[1:]
+ continue
+ try:
+ i = fmt.index("%")
+ sfclsfld = fmt[:i]
+ fclsfld = sfclsfld.split(",")
+ if len(fclsfld) == 1:
+ f = "s"
+ clsfld = fclsfld[0]
+ elif len(fclsfld) == 2:
+ f,clsfld = fclsfld
+ else:
+ raise Scapy_Exception
+ if "." in clsfld:
+ cls,fld = clsfld.split(".")
+ else:
+ cls = self.__class__.__name__
+ fld = clsfld
+ num = 1
+ if ":" in cls:
+ cls,num = cls.split(":")
+ num = int(num)
+ fmt = fmt[i+1:]
+ except:
+ raise Scapy_Exception("Bad format string [%%%s%s]" % (fmt[:25], fmt[25:] and "..."))
+ else:
+ if fld == "time":
+ val = time.strftime("%H:%M:%S.%%06i", time.localtime(self.time)) % int((self.time-int(self.time))*1000000)
+ elif cls == self.__class__.__name__ and hasattr(self, fld):
+ if num > 1:
+ val = self.payload.sprintf("%%%s,%s:%s.%s%%" % (f,cls,num-1,fld), relax)
+ f = "s"
+ elif f[-1] == "r": # Raw field value
+ val = getattr(self,fld)
+ f = f[:-1]
+ if not f:
+ f = "s"
+ else:
+ val = getattr(self,fld)
+ if fld in self.fieldtype:
+ val = self.fieldtype[fld].i2repr(self,val)
+ else:
+ val = self.payload.sprintf("%%%s%%" % sfclsfld, relax)
+ f = "s"
+ s += ("%"+f) % val
+
+ s += fmt
+ return s
+
+ def mysummary(self):
+ """DEV: can be overloaded to return a string that summarizes the layer.
+ Only one mysummary() is used in a whole packet summary: the one of the upper layer,
+ except if a mysummary() also returns (as a couple) a list of layers whose
+ mysummary() must be called if they are present."""
+ return ""
+
+ def _do_summary(self):
+ found,s,needed = self.payload._do_summary()
+ if s:
+ s = " / "+s
+ ret = ""
+ if not found or self.__class__ in needed:
+ ret = self.mysummary()
+ if type(ret) is tuple:
+ ret,n = ret
+ needed += n
+ if ret or needed:
+ found = 1
+ if not ret:
+ ret = self.__class__.__name__
+ if self.__class__ in conf.emph:
+ impf = []
+ for f in self.fields_desc:
+ if f in conf.emph:
+ impf.append("%s=%s" % (f.name, f.i2repr(self, self.getfieldval(f.name))))
+ ret = "%s [%s]" % (ret," ".join(impf))
+ ret = "%s%s" % (ret,s)
+ return found,ret,needed
+
+ def summary(self, intern=0):
+ """Prints a one line summary of a packet."""
+ found,s,needed = self._do_summary()
+ return s
+
+
+ def lastlayer(self,layer=None):
+ """Returns the uppest layer of the packet"""
+ return self.payload.lastlayer(self)
+
+ def decode_payload_as(self,cls):
+ """Reassembles the payload and decode it using another packet class"""
+ s = bytes(self.payload)
+ self.payload = cls(s, _internal=1, _underlayer=self)
+ pp = self
+ while pp.underlayer is not None:
+ pp = pp.underlayer
+ self.payload.dissection_done(pp)
+
+ def libnet(self):
+ """Not ready yet. Should give the necessary C code that interfaces with libnet to recreate the packet"""
+ print("libnet_build_%s(" % self.__class__.name.lower())
+ det = self.__class__(str(self))
+ for f in self.fields_desc:
+ val = det.getfieldval(f.name)
+ if val is None:
+ val = 0
+ elif type(val) is int:
+ val = str(val)
+ else:
+ val = '"%s"' % str(val)
+ print("\t%s, \t\t/* %s */" % (val,f.name))
+ print(");")
+ def command(self):
+ """Returns a string representing the command you have to type to obtain the same packet"""
+ f = []
+ for fn,fv in self.fields.items():
+ fld = self.get_field(fn)
+ if isinstance(fv, Packet):
+ fv = fv.command()
+ elif fld.islist and fld.holds_packets and type(fv) is list:
+ #fv = "[%s]" % ",".join( map(Packet.command, fv))
+ fv = "[%s]" % ",".join([ Packet.command(i) for i in fv ])
+ else:
+ fv = repr(fv)
+ f.append("%s=%s" % (fn, fv))
+ c = "%s(%s)" % (self.__class__.__name__, ", ".join(f))
+ pc = self.payload.command()
+ if pc:
+ c += "/"+pc
+ return c
+
+class NoPayload(Packet):
+ def __new__(cls, *args, **kargs):
+ singl = cls.__dict__.get("__singl__")
+ if singl is None:
+ cls.__singl__ = singl = Packet.__new__(cls)
+ Packet.__init__(singl)
+ return singl
+ def __init__(self, *args, **kargs):
+ pass
+ def dissection_done(self,pkt):
+ return
+ def add_payload(self, payload):
+ raise Scapy_Exception("Can't add payload to NoPayload instance")
+ def remove_payload(self):
+ pass
+ def add_underlayer(self,underlayer):
+ pass
+ def remove_underlayer(self,other):
+ pass
+ def copy(self):
+ return self
+ def __repr__(self):
+ return ""
+ def __str__(self):
+ return ""
+ def __nonzero__(self):
+ return False
+ def do_build(self,result):
+ return b""
+ def build(self):
+ return b""
+ def build_padding(self):
+ return b""
+ def build_done(self, p):
+ return p
+ def build_ps(self, internal=0):
+ return b"",[]
+ def getfieldval(self, attr):
+ raise AttributeError(attr)
+ def getfield_and_val(self, attr):
+ raise AttributeError(attr)
+ def setfieldval(self, attr, val):
+ raise AttributeError(attr)
+ def delfieldval(self, attr):
+ raise AttributeError(attr)
+ def __getattr__(self, attr):
+ if attr in self.__dict__:
+ return self.__dict__[attr]
+ elif attr in self.__class__.__dict__:
+ return self.__class__.__dict__[attr]
+ else:
+ raise AttributeError(attr)
+ def hide_defaults(self):
+ pass
+ def __iter__(self):
+ return iter([])
+ def __eq__(self, other):
+ if isinstance(other, NoPayload):
+ return True
+ return False
+ def hashret(self):
+ return b""
+ def answers(self, other):
+ return isinstance(other, NoPayload) or isinstance(other, conf.padding_layer)
+ def haslayer(self, cls):
+ return 0
+ def getlayer(self, cls, nb=1, _track=None):
+ if _track is not None:
+ _track.append(nb)
+ return None
+ def fragment(self, *args, **kargs):
+ raise Scapy_Exception("cannot fragment this packet")
+ def show(self, indent=3, lvl="", label_lvl=""):
+ pass
+ def sprintf(self, fmt, relax):
+ if relax:
+ return "??"
+ else:
+ raise Scapy_Exception("Format not found [%s]"%fmt)
+ def _do_summary(self):
+ return 0,"",[]
+ def lastlayer(self,layer):
+ return layer
+ def command(self):
+ return ""
+
+####################
+## packet classes ##
+####################
+
+
+class Raw(Packet):
+ name = "Raw"
+ fields_desc = [ StrField("load", b"") ]
+ def answers(self, other):
+ return 1
+# s = str(other)
+# t = self.load
+# l = min(len(s), len(t))
+# return s[:l] == t[:l]
+ def mysummary(self):
+ cs = conf.raw_summary
+ if cs:
+ if callable(cs):
+ return "Raw %s" % cs(self.load)
+ else:
+ return "Raw %r" % self.load
+ return Packet.mysummary(self)
+
+class Padding(Raw):
+ name = "Padding"
+ def self_build(self):
+ return b""
+ def build_padding(self):
+ return (self.getbyteval("load") if self.raw_packet_cache is None
+ else self.raw_packet_cache) + self.payload.build_padding()
+
+conf.raw_layer = Raw
+conf.padding_layer = Padding
+if conf.default_l2 is None:
+ conf.default_l2 = Raw
+
+#################
+## Bind layers ##
+#################
+
+
+def bind_bottom_up(lower, upper, __fval=None, **fval):
+ if __fval is not None:
+ fval.update(__fval)
+ lower.payload_guess = lower.payload_guess[:]
+ lower.payload_guess.append((fval, upper))
+
+
+def bind_top_down(lower, upper, __fval=None, **fval):
+ if __fval is not None:
+ fval.update(__fval)
+ upper.overload_fields = upper.overload_fields.copy()
+ upper.overload_fields[lower] = fval
+
+@conf.commands.register
+def bind_layers(lower, upper, __fval=None, **fval):
+ """Bind 2 layers on some specific fields' values"""
+ if __fval is not None:
+ fval.update(__fval)
+ bind_top_down(lower, upper, **fval)
+ bind_bottom_up(lower, upper, **fval)
+
+def split_bottom_up(lower, upper, __fval=None, **fval):
+ if __fval is not None:
+ fval.update(__fval)
+ #def do_filter((f,u),upper=upper,fval=fval):
+ def do_filter(s,upper=upper,fval=fval):
+ if s[1] != upper:
+ return True
+ for k in fval:
+ if k not in s[0] or s[0][k] != fval[k]:
+ return True
+ return False
+ lower.payload_guess = list(filter(do_filter, lower.payload_guess))
+
+def split_top_down(lower, upper, __fval=None, **fval):
+ if __fval is not None:
+ fval.update(__fval)
+ if lower in upper.overload_fields:
+ ofval = upper.overload_fields[lower]
+ for k in fval:
+ if k not in ofval or ofval[k] != fval[k]:
+ return
+ upper.overload_fields = upper.overload_fields.copy()
+ del(upper.overload_fields[lower])
+
+@conf.commands.register
+def split_layers(lower, upper, __fval=None, **fval):
+ """Split 2 layers previously bound"""
+ if __fval is not None:
+ fval.update(__fval)
+ split_bottom_up(lower, upper, **fval)
+ split_top_down(lower, upper, **fval)
+
+
+@conf.commands.register
+def ls(obj=None):
+ """List available layers, or infos on a given layer"""
+ if obj is None:
+
+ import builtins
+ all = builtins.__dict__.copy()
+ all.update(globals())
+ objlst = sorted(conf.layers, key=lambda x:x.__name__)
+ for o in objlst:
+ print("%-10s : %s" %(o.__name__,o.name))
+ else:
+ if isinstance(obj, type) and issubclass(obj, Packet):
+ for f in obj.fields_desc:
+ print("%-10s : %-20s = (%s)" % (f.name, f.__class__.__name__, repr(f.default)))
+ elif isinstance(obj, Packet):
+ for f in obj.fields_desc:
+ print("%-10s : %-20s = %-15s (%s)" % (f.name, f.__class__.__name__, repr(getattr(obj,f.name)), repr(f.default)))
+ if not isinstance(obj.payload, NoPayload):
+ print("--")
+ ls(obj.payload)
+
+
+ else:
+ print("Not a packet class. Type 'ls()' to list packet classes.")
+
+
+
+#############
+## Fuzzing ##
+#############
+
+@conf.commands.register
+def fuzz(p, _inplace=0):
+ """Transform a layer into a fuzzy layer by replacing some default values by random objects"""
+ if not _inplace:
+ p = p.copy()
+ q = p
+ while not isinstance(q, NoPayload):
+ for f in q.fields_desc:
+ if isinstance(f, PacketListField):
+ for r in getattr(q, f.name):
+ print("fuzzing", repr(r))
+ fuzz(r, _inplace=1)
+ elif f.default is not None:
+ rnd = f.randval()
+ if rnd is not None:
+ q.default_fields[f.name] = rnd
+ q = q.payload
+ return p
+
+
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/pipetool.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/pipetool.py
new file mode 100644
index 00000000..2dc28cb5
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/pipetool.py
@@ -0,0 +1,566 @@
+#! /usr/bin/env python
+
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+from __future__ import with_statement
+
+import scapy.utils
+from scapy.config import conf
+import os,_thread,select
+import subprocess
+import itertools
+import collections
+import time
+from scapy.error import log_interactive,warning
+import queue
+
+class PipeEngine:
+ pipes = {}
+ @classmethod
+ def list_pipes(cls):
+ for pn,pc in sorted(cls.pipes.items()):
+ doc = pc.__doc__ or ""
+ if doc:
+ doc = doc.splitlines()[0]
+ print("%20s: %s" % (pn, doc))
+ @classmethod
+ def list_pipes_detailed(cls):
+ for pn,pc in sorted(cls.pipes.items()):
+ if pc.__doc__:
+ print("###### %s\n %s" % (pn ,pc.__doc__))
+ else:
+ print("###### %s" % pn)
+
+ def __init__(self, *pipes):
+ self.active_pipes = set()
+ self.active_sources = set()
+ self.active_drains = set()
+ self.active_sinks = set()
+ self._add_pipes(*pipes)
+ self.thread_lock = _thread.allocate_lock()
+ self.command_lock = _thread.allocate_lock()
+ self.__fdr,self.__fdw = os.pipe()
+ self.threadid = None
+ def __getattr__(self, attr):
+ if attr.startswith("spawn_"):
+ dname = attr[6:]
+ if dname in self.pipes:
+ def f(*args, **kargs):
+ k = self.pipes[dname]
+ p = k(*args, **kargs)
+ self.add(p)
+ return p
+ return f
+ raise AttributeError(attr)
+
+ def add_one_pipe(self, pipe):
+ self.active_pipes.add(pipe)
+ if isinstance(pipe, Source):
+ self.active_sources.add(pipe)
+ if isinstance(pipe, Drain):
+ self.active_drains.add(pipe)
+ if isinstance(pipe, Sink):
+ self.active_sinks.add(pipe)
+
+ def get_pipe_list(self, pipe):
+ def flatten(p, l):
+ l.add(p)
+ for q in p.sources|p.sinks|p.high_sources|p.high_sinks:
+ if q not in l:
+ flatten(q, l)
+ pl = set()
+ flatten(pipe, pl)
+ return pl
+
+ def _add_pipes(self, *pipes):
+ pl = set()
+ for p in pipes:
+ pl |= self.get_pipe_list(p)
+ pl -= self.active_pipes
+ for q in pl:
+ self.add_one_pipe(q)
+ return pl
+
+
+ def run(self):
+ log_interactive.info("Pipe engine thread started.")
+ try:
+ for p in self.active_pipes:
+ p.start()
+ sources = self.active_sources
+ sources.add(self.__fdr)
+ exhausted = set([])
+ RUN=True
+ STOP_IF_EXHAUSTED = False
+ while RUN and (not STOP_IF_EXHAUSTED or len(sources) > 1):
+ fds,fdo,fde=select.select(sources,[],[])
+ for fd in fds:
+ if fd is self.__fdr:
+ cmd = os.read(self.__fdr,1)
+                        if cmd == b"X":
+ RUN=False
+ break
+                        elif cmd == b"B":
+ STOP_IF_EXHAUSTED = True
+                        elif cmd == b"A":
+ sources = self.active_sources-exhausted
+ sources.add(self.__fdr)
+ else:
+ warning("Unknown internal pipe engine command: %r. Ignoring." % cmd)
+ elif fd in sources:
+ try:
+ fd.deliver()
+ except Exception as e:
+ log_interactive.exception("piping from %s failed: %s" % (fd.name, e))
+ else:
+ if fd.exhausted():
+ exhausted.add(fd)
+ sources.remove(fd)
+ except KeyboardInterrupt:
+ pass
+ finally:
+ try:
+ for p in self.active_pipes:
+ p.stop()
+ finally:
+ self.thread_lock.release()
+ log_interactive.info("Pipe engine thread stopped.")
+
+ def start(self):
+ if self.thread_lock.acquire(0):
+ self.threadid = _thread.start_new_thread(self.run,())
+ else:
+ warning("Pipe engine already running")
+ def wait_and_stop(self):
+ self.stop(_cmd="B")
+ def stop(self, _cmd="X"):
+ try:
+ with self.command_lock:
+ if self.threadid is not None:
+                    os.write(self.__fdw, _cmd.encode("ascii"))
+ while not self.thread_lock.acquire(0):
+ time.sleep(0.01) # interruptible wait for thread to terminate
+ self.thread_lock.release() # (not using .join() because it needs 'threading' module)
+ else:
+ warning("Pipe engine thread not running")
+ except KeyboardInterrupt:
+ print("Interrupted by user.")
+
+ def add(self, *pipes):
+ pipes = self._add_pipes(*pipes)
+ with self.command_lock:
+ if self.threadid is not None:
+ for p in pipes:
+ p.start()
+                os.write(self.__fdw, b"A")
+
+ def graph(self,**kargs):
+ g=['digraph "pipe" {',"\tnode [shape=rectangle];",]
+ for p in self.active_pipes:
+ g.append('\t"%i" [label="%s"];' % (id(p), p.name))
+ g.append("")
+ g.append("\tedge [color=blue, arrowhead=vee];")
+ for p in self.active_pipes:
+ for q in p.sinks:
+ g.append('\t"%i" -> "%i";' % (id(p), id(q)))
+ g.append("")
+ g.append("\tedge [color=red, arrowhead=veevee];")
+ for p in self.active_pipes:
+ for q in p.high_sinks:
+ g.append('\t"%i" -> "%i" [color="red"];' % (id(p), id(q)))
+ g.append('}')
+ graph = "\n".join(g)
+ scapy.utils.do_graph(graph, **kargs)
+
+
+class _ConnectorLogic(object):
+ def __init__(self):
+ self.sources = set()
+ self.sinks = set()
+ self.high_sources = set()
+ self.high_sinks = set()
+
+ def __lt__(self, other):
+ other.sinks.add(self)
+ self.sources.add(other)
+ return other
+ def __gt__(self, other):
+ self.sinks.add(other)
+ other.sources.add(self)
+ return other
+ def __eq__(self, other):
+ self > other
+ other > self
+ return other
+
+ def __lshift__(self, other):
+ self.high_sources.add(other)
+ other.high_sinks.add(self)
+ return other
+ def __rshift__(self, other):
+ self.high_sinks.add(other)
+ other.high_sources.add(self)
+ return other
+ def __floordiv__(self, other):
+ self >> other
+ other >> self
+ return other
+
+
+class Pipe(_ConnectorLogic):
+ #TODO3 Move to new metaclass syntax
+ class __metaclass__(type):
+ def __new__(cls, name, bases, dct):
+ c = type.__new__(cls, name, bases, dct)
+ PipeEngine.pipes[name] = c
+ return c
+ def __init__(self, name=None):
+ _ConnectorLogic.__init__(self)
+ if name is None:
+ name = "%s" % (self.__class__.__name__)
+ self.name = name
+ def _send(self, msg):
+ for s in self.sinks:
+ s.push(msg)
+ def _high_send(self, msg):
+ for s in self.high_sinks:
+ s.high_push(msg)
+
+ def __repr__(self):
+ ct = conf.color_theme
+ s = "%s%s" % (ct.punct("<"), ct.layer_name(self.name))
+ if self.sources or self.sinks:
+ s+= " %s" % ct.punct("[")
+ if self.sources:
+ s+="%s%s" % (ct.punct(",").join(ct.field_name(s.name) for s in self.sources),
+ ct.field_value(">"))
+ s += ct.layer_name("#")
+ if self.sinks:
+ s+="%s%s" % (ct.field_value(">"),
+ ct.punct(",").join(ct.field_name(s.name) for s in self.sinks))
+ s += ct.punct("]")
+
+ if self.high_sources or self.high_sinks:
+ s+= " %s" % ct.punct("[")
+ if self.high_sources:
+ s+="%s%s" % (ct.punct(",").join(ct.field_name(s.name) for s in self.high_sources),
+ ct.field_value(">>"))
+ s += ct.layer_name("#")
+ if self.high_sinks:
+ s+="%s%s" % (ct.field_value(">>"),
+ ct.punct(",").join(ct.field_name(s.name) for s in self.high_sinks))
+ s += ct.punct("]")
+
+
+ s += ct.punct(">")
+ return s
+
+class Source(Pipe):
+ def __init__(self, name=None):
+ Pipe.__init__(self, name=name)
+ self.is_exhausted = False
+ def _read_message(self):
+ return Message()
+ def deliver(self):
+        msg = self._read_message()
+ self._send(msg)
+ def fileno(self):
+ return None
+ def exhausted(self):
+ return self.is_exhausted
+ def start(self):
+ pass
+ def stop(self):
+ pass
+
+class Drain(Pipe):
+ """Repeat messages from low/high entries to (resp.) low/high exits
+ +-------+
+ >>-|-------|->>
+ | |
+ >-|-------|->
+ +-------+
+"""
+ def push(self, msg):
+ self._send(msg)
+ def high_push(self, msg):
+ self._high_send(msg)
+ def start(self):
+ pass
+ def stop(self):
+ pass
+
+class Sink(Pipe):
+ def push(self, msg):
+ pass
+ def high_push(self, msg):
+ pass
+ def start(self):
+ pass
+ def stop(self):
+ pass
+
+
+class AutoSource(Source):
+ def __init__(self, name=None):
+ Source.__init__(self, name=name)
+ self.__fdr,self.__fdw = os.pipe()
+ self._queue = collections.deque()
+ def fileno(self):
+ return self.__fdr
+ def _gen_data(self, msg):
+ self._queue.append((msg,False))
+ self._wake_up()
+ def _gen_high_data(self, msg):
+ self._queue.append((msg,True))
+ self._wake_up()
+ def _wake_up(self):
+        os.write(self.__fdw, b"x")
+ def deliver(self):
+ os.read(self.__fdr,1)
+ try:
+ msg,high = self._queue.popleft()
+ except IndexError: #empty queue. Exhausted source
+ pass
+ else:
+ if high:
+ self._high_send(msg)
+ else:
+ self._send(msg)
+
+class ThreadGenSource(AutoSource):
+ def __init__(self, name=None):
+ AutoSource.__init__(self, name=name)
+ self.RUN = False
+ def generate(self):
+ pass
+ def start(self):
+ self.RUN = True
+ _thread.start_new_thread(self.generate,())
+ def stop(self):
+ self.RUN = False
+
+
+
+class ConsoleSink(Sink):
+ """Print messages on low and high entries
+ +-------+
+ >>-|--. |->>
+ | print |
+ >-|--' |->
+ +-------+
+"""
+ def push(self, msg):
+ print(">%r" % msg)
+ def high_push(self, msg):
+ print(">>%r" % msg)
+
+class RawConsoleSink(Sink):
+ """Print messages on low and high entries
+ +-------+
+ >>-|--. |->>
+ | write |
+ >-|--' |->
+ +-------+
+"""
+ def __init__(self, name=None, newlines=True):
+ Sink.__init__(self, name=name)
+ self.newlines = newlines
+ def push(self, msg):
+ if self.newlines:
+ msg += "\n"
+        os.write(1, str(msg).encode("utf-8"))
+ def high_push(self, msg):
+ if self.newlines:
+ msg += "\n"
+        os.write(1, str(msg).encode("utf-8"))
+
+class CLIFeeder(AutoSource):
+ """Send messages from python command line
+ +--------+
+ >>-| |->>
+ | send() |
+ >-| `----|->
+ +--------+
+"""
+ def send(self, msg):
+ self._gen_data(msg)
+ def close(self):
+ self.is_exhausted = True
+
+class CLIHighFeeder(CLIFeeder):
+ """Send messages from python command line to high output
+ +--------+
+ >>-| .----|->>
+ | send() |
+ >-| |->
+ +--------+
+"""
+ def send(self, msg):
+ self._gen_high_data(msg)
+
+
+class PeriodicSource(ThreadGenSource):
+ """Generage messages periodically on low exit
+ +-------+
+ >>-| |->>
+ | msg,T |
+ >-| `----|->
+ +-------+
+"""
+ def __init__(self, msg, period, period2=0, name=None):
+ ThreadGenSource.__init__(self,name=name)
+ if not hasattr(msg, "__iter__"):
+ msg=[msg]
+ self.msg = msg
+ self.period = period
+ self.period2 = period2
+ def generate(self):
+ while self.RUN:
+ empty_gen = True
+ for m in self.msg:
+ empty_gen = False
+ self._gen_data(m)
+ time.sleep(self.period)
+ if empty_gen:
+ self.is_exhausted = True
+ self._wake_up()
+ time.sleep(self.period2)
+
+class TermSink(Sink):
+ """Print messages on low and high entries on a separate terminal
+ +-------+
+ >>-|--. |->>
+ | print |
+ >-|--' |->
+ +-------+
+"""
+ def __init__(self, name=None, keepterm=True, newlines=True, openearly=True):
+ Sink.__init__(self, name=name)
+ self.keepterm = keepterm
+ self.newlines = newlines
+ self.openearly = openearly
+ self.opened = False
+ if self.openearly:
+ self.start()
+
+ def start(self):
+ if not self.opened:
+ self.opened = True
+ self.__r,self.__w = os.pipe()
+ cmd = ["xterm"]
+ if self.name is not None:
+ cmd.extend(["-title",self.name])
+ if self.keepterm:
+ cmd.append("-hold")
+ cmd.extend(["-e", "cat 0<&%i" % self.__r])
+ self.__p = subprocess.Popen(cmd)
+ os.close(self.__r)
+ def stop(self):
+ if not self.keepterm:
+ self.opened = False
+ os.close(self.__w)
+ self.__p.kill()
+ self.__p.wait()
+ def _print(self, s):
+ if self.newlines:
+ s+="\n"
+        os.write(self.__w, s.encode("utf-8"))
+
+ def push(self, msg):
+ self._print(str(msg))
+ def high_push(self, msg):
+ self._print(str(msg))
+
+
+class QueueSink(Sink):
+ """Collect messages from high and low entries and queue them. Messages are unqueued with the .recv() method.
+ +-------+
+ >>-|--. |->>
+ | queue |
+ >-|--' |->
+ +-------+
+"""
+ def __init__(self, name=None):
+ Sink.__init__(self, name=name)
+ self.q = queue.Queue()
+ def push(self, msg):
+ self.q.put(msg)
+ def high_push(self, msg):
+ self.q.put(msg)
+ def recv(self):
+ while True:
+ try:
+ return self.q.get(True, timeout=0.1)
+ except queue.Empty:
+ pass
+
+
+class TransformDrain(Drain):
+ """Apply a function to messages on low and high entry
+ +-------+
+ >>-|--[f]--|->>
+ | |
+ >-|--[f]--|->
+ +-------+
+"""
+ def __init__(self, f, name=None):
+ Drain.__init__(self, name=name)
+ self.f = f
+ def push(self, msg):
+ self._send(self.f(msg))
+ def high_push(self, msg):
+ self._high_send(self.f(msg))
+
+class UpDrain(Drain):
+ """Repeat messages from low entry to high exit
+ +-------+
+ >>-| ,--|->>
+ | / |
+ >-|--' |->
+ +-------+
+"""
+ def push(self, msg):
+ self._high_send(msg)
+ def high_push(self, msg):
+ pass
+
+class DownDrain(Drain):
+ """Repeat messages from high entry to low exit
+ +-------+
+ >>-|--. |->>
+ | \ |
+ >-| `--|->
+ +-------+
+"""
+ def push(self, msg):
+ pass
+ def high_push(self, msg):
+ self._send(msg)
+
+
+def _testmain():
+ s = PeriodicSource("hello", 1, name="src")
+ d1 = Drain(name="d1")
+ c = ConsoleSink(name="c")
+ tf = TransformDrain(lambda x:"Got %r" % x)
+ t = TermSink(name="t", keepterm=False)
+
+ s > d1 > c
+ d1 > tf > t
+
+ p = PipeEngine(s)
+
+ p.graph(type="png",target="> /tmp/pipe.png")
+
+ p.start()
+ print(p.threadid)
+ time.sleep(5)
+ p.stop()
+
+
+if __name__ == "__main__":
+ _testmain()
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/plist.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/plist.py
new file mode 100644
index 00000000..bdf0b757
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/plist.py
@@ -0,0 +1,517 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+PacketList: holds several packets and allows to do operations on them.
+"""
+
+
+import os,subprocess
+from .config import conf
+from .base_classes import BasePacket,BasePacketList
+from collections import defaultdict
+
+from .utils import do_graph,hexdump,make_table,make_lined_table,make_tex_table,get_temp_file
+
+from scapy.arch import NETWORKX
+if NETWORKX:
+ import networkx as nx
+
+
+#############
+## Results ##
+#############
+
+class PacketList(BasePacketList):
+ res = []
+ def __init__(self, res=None, name="PacketList", stats=None, vector_index = None):
+ """create a packet list from a list of packets
+ res: the list of packets
+ stats: a list of classes that will appear in the stats (defaults to [TCP,UDP,ICMP])"""
+ if stats is None:
+ stats = conf.stats_classic_protocols
+ self.stats = stats
+ if res is None:
+ res = []
+ if isinstance(res, PacketList):
+ res = res.res
+ self.res = res
+ self.listname = name
+ self.vector_index = vector_index
+ def __len__(self):
+ return len(self.res)
+ def _elt2pkt(self, elt):
+ if self.vector_index == None:
+ return elt
+ else:
+ return elt[self.vector_index]
+ def _elt2sum(self, elt):
+ if self.vector_index == None:
+ return elt.summary()
+ else:
+ return "%s ==> %s" % (elt[0].summary(),elt[1].summary())
+
+ def _elt2show(self, elt):
+ return self._elt2sum(elt)
+ def __repr__(self):
+ stats=dict.fromkeys(self.stats,0)
+ other = 0
+ for r in self.res:
+ f = 0
+ for p in stats:
+ if self._elt2pkt(r).haslayer(p):
+ stats[p] += 1
+ f = 1
+ break
+ if not f:
+ other += 1
+ s = ""
+ ct = conf.color_theme
+ for p in self.stats:
+ s += " %s%s%s" % (ct.packetlist_proto(p.name),
+ ct.punct(":"),
+ ct.packetlist_value(stats[p]))
+ s += " %s%s%s" % (ct.packetlist_proto("Other"),
+ ct.punct(":"),
+ ct.packetlist_value(other))
+ return "%s%s%s%s%s" % (ct.punct("<"),
+ ct.packetlist_name(self.listname),
+ ct.punct(":"),
+ s,
+ ct.punct(">"))
+ def __getattr__(self, attr):
+ return getattr(self.res, attr)
+ def __getitem__(self, item):
+ if isinstance(item,type) and issubclass(item,BasePacket):
+ #return self.__class__(filter(lambda x: item in self._elt2pkt(x),self.res),
+ return self.__class__([ x for x in self.res if item in self._elt2pkt(x) ],
+ name="%s from %s"%(item.__name__,self.listname))
+ if type(item) is slice:
+ return self.__class__(self.res.__getitem__(item),
+ name = "mod %s" % self.listname)
+ return self.res.__getitem__(item)
+ def __getslice__(self, *args, **kargs):
+ return self.__class__(self.res.__getslice__(*args, **kargs),
+ name="mod %s"%self.listname)
+ def __add__(self, other):
+ return self.__class__(self.res+other.res,
+ name="%s+%s"%(self.listname,other.listname))
+ def summary(self, prn=None, lfilter=None):
+ """prints a summary of each packet
+prn: function to apply to each packet instead of lambda x:x.summary()
+lfilter: truth function to apply to each packet to decide whether it will be displayed"""
+ for r in self.res:
+ if lfilter is not None:
+ if not lfilter(r):
+ continue
+ if prn is None:
+ print(self._elt2sum(r))
+ else:
+ print(prn(r))
+ def nsummary(self,prn=None, lfilter=None):
+ """prints a summary of each packet with the packet's number
+prn: function to apply to each packet instead of lambda x:x.summary()
+lfilter: truth function to apply to each packet to decide whether it will be displayed"""
+ for i, p in enumerate(self.res):
+ if lfilter is not None:
+ if not lfilter(p):
+ continue
+ print(conf.color_theme.id(i,fmt="%04i"), end = " ")
+ if prn is None:
+ print(self._elt2sum(p))
+ else:
+ print(prn(p))
+ def display(self): # Deprecated. Use show()
+ """deprecated. is show()"""
+ self.show()
+ def show(self, *args, **kargs):
+ """Best way to display the packet list. Defaults to nsummary() method"""
+ return self.nsummary(*args, **kargs)
+
+ def filter(self, func):
+ """Returns a packet list filtered by a truth function"""
+ return self.__class__(list(filter(func,self.res)),
+ name="filtered %s"%self.listname)
+
+ def plot(self, f, lfilter=None,**kargs):
+ """Applies a function to each packet to get a value that will be plotted with matplotlib. A matplotlib object is returned
+ lfilter: a truth function that decides whether a packet must be ploted"""
+
+ return plt.plot([ f(i) for i in self.res if not lfilter or lfilter(i) ], **kargs)
+
+ def diffplot(self, f, delay=1, lfilter=None, **kargs):
+ """diffplot(f, delay=1, lfilter=None)
+ Applies a function to couples (l[i],l[i+delay])"""
+
+ return plt.plot([ f(i, j) for i in self.res[:-delay] for j in self.res[delay:] if not lfilter or (lfilter(i) and lfilter(j))],
+ **kargs)
+
+ def multiplot(self, f, lfilter=None, **kargs):
+ """Uses a function that returns a label and a value for this label, then plots all the values label by label"""
+
+ d = defaultdict(list)
+ for i in self.res:
+ if lfilter and not lfilter(i):
+ continue
+ k, v = f(i)
+ d[k].append(v)
+
+ figure = plt.figure()
+ ax = figure.add_axes(plt.axes())
+ for i in d:
+ ax.plot(d[i], **kargs)
+ return figure
+
+
+ def rawhexdump(self):
+ """Prints an hexadecimal dump of each packet in the list"""
+ for p in self:
+ hexdump(self._elt2pkt(p))
+
+ def hexraw(self, lfilter=None):
+ """Same as nsummary(), except that if a packet has a Raw layer, it will be hexdumped
+ lfilter: a truth function that decides whether a packet must be displayed"""
+ for i,p in enumerate(self.res):
+ p1 = self._elt2pkt(p)
+ if lfilter is not None and not lfilter(p1):
+ continue
+ print("%s %s %s" % (conf.color_theme.id(i,fmt="%04i"),
+ p1.sprintf("%.time%"),
+ self._elt2sum(p)))
+ if p1.haslayer(conf.raw_layer):
+ hexdump(p1.getlayer(conf.raw_layer).load)
+
+ def hexdump(self, lfilter=None):
+ """Same as nsummary(), except that packets are also hexdumped
+ lfilter: a truth function that decides whether a packet must be displayed"""
+ for i,p in enumerate(self.res):
+ p1 = self._elt2pkt(p)
+ if lfilter is not None and not lfilter(p1):
+ continue
+ print("%s %s %s" % (conf.color_theme.id(i,fmt="%04i"),
+ p1.sprintf("%.time%"),
+ self._elt2sum(p)))
+ hexdump(p1)
+
+ def padding(self, lfilter=None):
+ """Same as hexraw(), for Padding layer"""
+ for i,p in enumerate(self.res):
+ p1 = self._elt2pkt(p)
+ if p1.haslayer(conf.padding_layer):
+ if lfilter is None or lfilter(p1):
+ print("%s %s %s" % (conf.color_theme.id(i,fmt="%04i"),
+ p1.sprintf("%.time%"),
+ self._elt2sum(p)))
+ hexdump(p1.getlayer(conf.padding_layer).load)
+
+ def nzpadding(self, lfilter=None):
+ """Same as padding() but only non null padding"""
+ for i,p in enumerate(self.res):
+ p1 = self._elt2pkt(p)
+ if p1.haslayer(conf.padding_layer):
+ pad = p1.getlayer(conf.padding_layer).load
+                if pad == pad[:1]*len(pad):
+ continue
+ if lfilter is None or lfilter(p1):
+ print("%s %s %s" % (conf.color_theme.id(i,fmt="%04i"),
+ p1.sprintf("%.time%"),
+ self._elt2sum(p)))
+ hexdump(p1.getlayer(conf.padding_layer).load)
+
+
+ def conversations(self, getsrcdst=None, draw = True, **kargs):
+ """Graphes a conversations between sources and destinations and display it
+ (using graphviz)
+ getsrcdst: a function that takes an element of the list and return the source and dest
+ by defaults, return source and destination IP
+ if networkx library is available returns a DiGraph, or draws it if draw = True otherwise graphviz is used
+ format: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option
+ target: output filename. If None, matplotlib is used to display
+ prog: which graphviz program to use"""
+ if getsrcdst is None:
+ getsrcdst = lambda x:(x['IP'].src, x['IP'].dst)
+ conv = {}
+ for p in self.res:
+ p = self._elt2pkt(p)
+ try:
+ c = getsrcdst(p)
+ except:
+ #XXX warning()
+ continue
+ conv[c] = conv.get(c,0)+1
+
+ if NETWORKX: # networkx is available
+ gr = nx.DiGraph()
+ for s,d in conv:
+ if s not in gr:
+ gr.add_node(s)
+ if d not in gr:
+ gr.add_node(d)
+ gr.add_edge(s, d)
+ if draw:
+ return do_graph(gr, **kargs)
+ else:
+ return gr
+ else:
+ gr = 'digraph "conv" {\n'
+ for s,d in conv:
+ gr += '\t "%s" -> "%s"\n' % (s,d)
+ gr += "}\n"
+ return do_graph(gr, **kargs)
+
+ def afterglow(self, src=None, event=None, dst=None, **kargs):
+ """Experimental clone attempt of http://sourceforge.net/projects/afterglow
+ each datum is reduced as src -> event -> dst and the data are graphed.
+ by default we have IP.src -> IP.dport -> IP.dst"""
+ if src is None:
+ src = lambda x: x['IP'].src
+ if event is None:
+ event = lambda x: x['IP'].dport
+ if dst is None:
+ dst = lambda x: x['IP'].dst
+ sl = {}
+ el = {}
+ dl = {}
+ for i in self.res:
+ try:
+ s,e,d = src(i),event(i),dst(i)
+ if s in sl:
+ n,l = sl[s]
+ n += 1
+ if e not in l:
+ l.append(e)
+ sl[s] = (n,l)
+ else:
+ sl[s] = (1,[e])
+ if e in el:
+ n,l = el[e]
+ n+=1
+ if d not in l:
+ l.append(d)
+ el[e] = (n,l)
+ else:
+ el[e] = (1,[d])
+ dl[d] = dl.get(d,0)+1
+ except:
+ continue
+
+ import math
+ def normalize(n):
+ return 2+math.log(n)/4.0
+
+ def minmax(x):
+ m,M = min(x),max(x)
+ if m == M:
+ m = 0
+ if M == 0:
+ M = 1
+ return m,M
+
+ #mins,maxs = minmax(map(lambda (x,y): x, sl.values()))
+ mins,maxs = minmax([ a[0] for a in sl.values()])
+ #mine,maxe = minmax(map(lambda (x,y): x, el.values()))
+ mine,maxe = minmax([ a[0] for a in el.values()])
+ mind,maxd = minmax(dl.values())
+
+ gr = 'digraph "afterglow" {\n\tedge [len=2.5];\n'
+
+ gr += "# src nodes\n"
+ for s in sl:
+ n,l = sl[s]; n = 1+(n-mins)/(maxs-mins)
+ gr += '"src.%s" [label = "%s", shape=box, fillcolor="#FF0000", style=filled, fixedsize=1, height=%.2f,width=%.2f];\n' % (repr(s),repr(s),n,n)
+ gr += "# event nodes\n"
+ for e in el:
+            n,l = el[e]; n = 1+(n-mine)/(maxe-mine)
+ gr += '"evt.%s" [label = "%s", shape=circle, fillcolor="#00FFFF", style=filled, fixedsize=1, height=%.2f, width=%.2f];\n' % (repr(e),repr(e),n,n)
+ for d in dl:
+            n = dl[d]; n = 1+(n-mind)/(maxd-mind)
+ gr += '"dst.%s" [label = "%s", shape=triangle, fillcolor="#0000ff", style=filled, fixedsize=1, height=%.2f, width=%.2f];\n' % (repr(d),repr(d),n,n)
+
+ gr += "###\n"
+ for s in sl:
+ n,l = sl[s]
+ for e in l:
+ gr += ' "src.%s" -> "evt.%s";\n' % (repr(s),repr(e))
+ for e in el:
+ n,l = el[e]
+ for d in l:
+ gr += ' "evt.%s" -> "dst.%s";\n' % (repr(e),repr(d))
+
+ gr += "}"
+ return do_graph(gr, **kargs)
+
+
+ def _dump_document(self, **kargs):
+ import pyx
+ d = pyx.document.document()
+ l = len(self.res)
+ for i in range(len(self.res)):
+ elt = self.res[i]
+ c = self._elt2pkt(elt).canvas_dump(**kargs)
+ cbb = c.bbox()
+ c.text(cbb.left(),cbb.top()+1,r"\font\cmssfont=cmss12\cmssfont{Frame %i/%i}" % (i,l),[pyx.text.size.LARGE])
+ if conf.verb >= 2:
+ os.write(1,b".")
+ d.append(pyx.document.page(c, paperformat=pyx.document.paperformat.A4,
+ margin=1*pyx.unit.t_cm,
+ fittosize=1))
+ return d
+
+
+
+ def psdump(self, filename = None, **kargs):
+ """Creates a multipage poscript file with a psdump of every packet
+ filename: name of the file to write to. If empty, a temporary file is used and
+ conf.prog.psreader is called"""
+ d = self._dump_document(**kargs)
+ if filename is None:
+ filename = get_temp_file(autoext=".ps")
+ d.writePSfile(filename)
+ subprocess.Popen([conf.prog.psreader, filename+".ps"])
+ else:
+ d.writePSfile(filename)
+            print()
+
+ def pdfdump(self, filename = None, **kargs):
+ """Creates a PDF file with a psdump of every packet
+ filename: name of the file to write to. If empty, a temporary file is used and
+ conf.prog.pdfreader is called"""
+ d = self._dump_document(**kargs)
+ if filename is None:
+ filename = get_temp_file(autoext=".pdf")
+ d.writePDFfile(filename)
+ subprocess.Popen([conf.prog.pdfreader, filename+".pdf"])
+ else:
+ d.writePDFfile(filename)
+            print()
+
+ def sr(self,multi=0):
+ """sr([multi=1]) -> (SndRcvList, PacketList)
+ Matches packets in the list and return ( (matched couples), (unmatched packets) )"""
+ remain = self.res[:]
+ sr = []
+ i = 0
+ while i < len(remain):
+ s = remain[i]
+ j = i
+ while j < len(remain)-1:
+ j += 1
+ r = remain[j]
+ if r.answers(s):
+ sr.append((s,r))
+ if multi:
+ remain[i]._answered=1
+ remain[j]._answered=2
+ continue
+ del(remain[j])
+ del(remain[i])
+ i -= 1
+ break
+ i += 1
+ if multi:
+            remain = list(filter(lambda x:not hasattr(x,"_answered"), remain))
+ return SndRcvList(sr),PacketList(remain)
+
+ def sessions(self, session_extractor=None):
+ if session_extractor is None:
+ def session_extractor(p):
+ sess = "Other"
+ if 'Ether' in p:
+ if 'IP' in p:
+ if 'TCP' in p:
+ sess = p.sprintf("TCP %IP.src%:%r,TCP.sport% > %IP.dst%:%r,TCP.dport%")
+ elif 'UDP' in p:
+ sess = p.sprintf("UDP %IP.src%:%r,UDP.sport% > %IP.dst%:%r,UDP.dport%")
+ elif 'ICMP' in p:
+ sess = p.sprintf("ICMP %IP.src% > %IP.dst% type=%r,ICMP.type% code=%r,ICMP.code% id=%ICMP.id%")
+ else:
+ sess = p.sprintf("IP %IP.src% > %IP.dst% proto=%IP.proto%")
+ elif 'ARP' in p:
+ sess = p.sprintf("ARP %ARP.psrc% > %ARP.pdst%")
+ else:
+ sess = p.sprintf("Ethernet type=%04xr,Ether.type%")
+ return sess
+ sessions = defaultdict(self.__class__)
+ for p in self.res:
+ sess = session_extractor(self._elt2pkt(p))
+ sessions[sess].append(p)
+ return dict(sessions)
+
+ def replace(self, *args, **kargs):
+ """
+ lst.replace(<field>,[<oldvalue>,]<newvalue>)
+ lst.replace( (fld,[ov],nv),(fld,[ov,]nv),...)
+ if ov is None, all values are replaced
+ ex:
+ lst.replace( IP.src, "192.168.1.1", "10.0.0.1" )
+ lst.replace( IP.ttl, 64 )
+ lst.replace( (IP.ttl, 64), (TCP.sport, 666, 777), )
+ """
+ delete_checksums = kargs.get("delete_checksums",False)
+ x=PacketList(name="Replaced %s" % self.listname)
+ if type(args[0]) is not tuple:
+ args = (args,)
+ for p in self.res:
+ p = self._elt2pkt(p)
+ copied = False
+ for scheme in args:
+ fld = scheme[0]
+ old = scheme[1] # not used if len(scheme) == 2
+ new = scheme[-1]
+ for o in fld.owners:
+ if o in p:
+ if len(scheme) == 2 or p[o].getfieldval(fld.name) == old:
+ if not copied:
+ p = p.copy()
+ if delete_checksums:
+ p.delete_checksums()
+ copied = True
+ setattr(p[o], fld.name, new)
+ x.append(p)
+ return x
+
+
+class SndRcvList(PacketList):
+ def __init__(self, res=None, name="Results", stats=None):
+ PacketList.__init__(self, res, name, stats, vector_index = 1)
+ def summary(self, prn=None, lfilter=None):
+ """prints a summary of each SndRcv packet pair
+prn: function to apply to each packet pair instead of lambda s, r: "%s ==> %s" % (s.summary(),r.summary())
+lfilter: truth function to apply to each packet pair to decide whether it will be displayed"""
+ for s, r in self.res:
+ if lfilter is not None:
+ if not lfilter(s, r):
+ continue
+ if prn is None:
+ print(self._elt2sum((s, r)))
+ else:
+ print(prn(s, r))
+ def nsummary(self,prn=None, lfilter=None):
+ """prints a summary of each SndRcv packet pair with the pair's number
+prn: function to apply to each packet pair instead of lambda s, r: "%s ==> %s" % (s.summary(),r.summary())
+lfilter: truth function to apply to each packet pair to decide whether it will be displayed"""
+ for i, (s, r) in enumerate(self.res):
+ if lfilter is not None:
+ if not lfilter(s, r):
+ continue
+ print(conf.color_theme.id(i,fmt="%04i"), end = " ")
+ if prn is None:
+ print(self._elt2sum((s, r)))
+ else:
+ print(prn(s, r))
+ def filter(self, func):
+ """Returns a SndRcv list filtered by a truth function"""
+ return self.__class__( [ i for i in self.res if func(*i) ], name='filtered %s'%self.listname)
+
+ def make_table(self, *args, **kargs):
+ """Prints a table using a function that returs for each packet its head column value, head row value and displayed value
+ ex: p.make_table(lambda s, r:(s[IP].dst, r[TCP].sport, s[TCP].sprintf("%flags%")) """
+ return make_table(self.res, *args, **kargs)
+ def make_lined_table(self, *args, **kargs):
+ """Same as make_table, but print a table with lines"""
+ return make_lined_table(self.res, *args, **kargs)
+ def make_tex_table(self, *args, **kargs):
+ """Same as make_table, but print a table with LaTeX syntax"""
+ return make_tex_table(self.res, *args, **kargs)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/pton_ntop.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/pton_ntop.py
new file mode 100644
index 00000000..1629edee
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/pton_ntop.py
@@ -0,0 +1,90 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Convert IPv6 addresses between textual representation and binary.
+
+These functions are missing when python is compiled
+without IPv6 support, on Windows for instance.
+"""
+
+import socket,struct
+
def inet_pton(af, addr):
    """Convert an IP address from text representation into binary form.

    af: address family (socket.AF_INET or socket.AF_INET6).
    addr: the address; for AF_INET6 this implementation expects a bytes
          object (e.g. b"2001:db8::1"), for AF_INET whatever
          socket.inet_aton accepts.
    Returns the packed address (4 bytes for IPv4, 16 for IPv6).
    Raises Exception on malformed input or unsupported family.
    """
    if af == socket.AF_INET:
        # Plain IPv4: delegate to the socket module.  (Previously called a
        # bare, undefined inet_aton -> NameError.)
        return socket.inet_aton(addr)
    elif af == socket.AF_INET6:
        # IPv6: "::" stands for one run of 16-bit zero groups.  Replace it
        # with a marker that survives the split on b":".
        JOKER = b"*"
        while b"::" in addr:
            addr = addr.replace(b"::", b":" + JOKER + b":")
        joker_pos = None

        # The last group of an IPv6 address may be a dotted IPv4 address
        ipv4_addr = None
        if b"." in addr:
            ipv4_addr = addr.split(b":")[-1]

        result = b""
        parts = addr.split(b":")
        for part in parts:
            if part == JOKER:
                # Wildcard is only allowed once
                if joker_pos is None:
                    joker_pos = len(result)
                else:
                    raise Exception("Illegal syntax for IP address")
            elif part == ipv4_addr: # FIXME: Make sure IPv4 can only be last part
                # FIXME: inet_aton allows IPv4 addresses with less than 4 octets
                result += socket.inet_aton(ipv4_addr)
            else:
                # Each group is 16 bits: left-pad to 4 hex digits and decode.
                # bytes.decode("hex") does not exist in Python 3; use
                # bytes.fromhex instead.
                try:
                    result += bytes.fromhex(part.rjust(4, b"0").decode("ascii"))
                except (ValueError, TypeError, UnicodeDecodeError):
                    raise Exception("Illegal syntax for IP address")

        # If there's a wildcard, fill up with zeros to reach 128bit (16 bytes)
        if JOKER in addr:
            result = (result[:joker_pos] + b"\x00" * (16 - len(result))
                      + result[joker_pos:])

        if len(result) != 16:
            raise Exception("Illegal syntax for IP address")
        return result
    else:
        raise Exception("Address family not supported")
+
+
def inet_ntop(af, addr):
    """Convert a packed binary IP address into text representation.

    af: address family (socket.AF_INET or socket.AF_INET6).
    addr: packed address (4 bytes for IPv4, 16 for IPv6).
    Returns str for AF_INET (socket.inet_ntoa) and bytes for AF_INET6,
    matching how the rest of this python3 port handles v6 addresses.
    Raises Exception on bad input length or unsupported family.
    """
    if af == socket.AF_INET:
        # Previously called a bare, undefined inet_ntoa -> NameError.
        return socket.inet_ntoa(addr)
    elif af == socket.AF_INET6:
        # IPv6 addresses have 128bits (16 bytes)
        if len(addr) != 16:
            raise Exception("Illegal syntax for IP address")
        parts = []
        for left in [0, 2, 4, 6, 8, 10, 12, 14]:
            try:
                value = struct.unpack("!H", addr[left:left+2])[0]
                hexstr = hex(value)[2:]
            except TypeError:
                raise Exception("Illegal syntax for IP address")
            # Encode each group to bytes so the b":".join() below works
            # (joining str items with a bytes separator is a TypeError).
            parts.append(hexstr.lstrip("0").lower().encode("ascii"))
        result = b":".join(parts)
        # Collapse the runs of colons left by all-zero groups into "::"
        while b":::" in result:
            result = result.replace(b":::", b"::")
        # Leaving out leading and trailing zeros is only allowed with ::
        if result.endswith(b":") and not result.endswith(b"::"):
            result = result + b"0"
        if result.startswith(b":") and not result.startswith(b"::"):
            result = b"0" + result
        return result
    else:
        raise Exception("Address family not supported yet")
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/route.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/route.py
new file mode 100644
index 00000000..bccc43a2
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/route.py
@@ -0,0 +1,175 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Routing and handling of network interfaces.
+"""
+
+import socket
+from scapy.arch import read_routes,get_if_addr,LOOPBACK_NAME
+from scapy.utils import atol,ltoa,itom
+from scapy.config import conf
+from scapy.error import Scapy_Exception,warning
+
+##############################
+## Routing/Interfaces stuff ##
+##############################
+
class Route:
    """IPv4 routing table mirror.

    Loads the kernel routing table via read_routes() and answers
    "which (iface, outgoing-ip, gateway) do I use to reach X" queries.
    Entries are 5-tuples (net, netmask, gateway, iface, output-ip) with
    net/netmask as integers.  Lookups are memoized in self.cache.
    """
    def __init__(self):
        # resync() also creates self.cache via invalidate_cache()
        self.resync()
        self.s=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.cache = {}

    def invalidate_cache(self):
        # Drop memoized route() results; must be called on any table change.
        self.cache = {}

    def resync(self):
        # Reload the routing table from the OS.
        self.invalidate_cache()
        self.routes = read_routes()

    def __repr__(self):
        rt = "Network         Netmask         Gateway         Iface           Output IP\n"
        for net,msk,gw,iface,addr in self.routes:
            rt += "%-15s %-15s %-15s %-15s %-15s\n" % (ltoa(net),
                                                       ltoa(msk),
                                                       gw,
                                                       iface,
                                                       addr)
        return rt

    def make_route(self, host=None, net=None, gw=None, dev=None):
        # Build a route 5-tuple from either a single host or a "net/plen"
        # string; resolves the output device/address when 'dev' is omitted.
        if host is not None:
            thenet,msk = host,32
        elif net is not None:
            thenet,msk = net.split("/")
            msk = int(msk)
        else:
            raise Scapy_Exception("make_route: Incorrect parameters. You should specify a host or a net")
        if gw is None:
            gw="0.0.0.0"
        if dev is None:
            if gw:
                nhop = gw
            else:
                nhop = thenet
            dev,ifaddr,x = self.route(nhop)
        else:
            ifaddr = get_if_addr(dev)
        return (atol(thenet), itom(msk), gw, dev, ifaddr)

    def add(self, *args, **kargs):
        """Ex:
        add(net="192.168.1.0/24",gw="1.2.3.4")
        """
        self.invalidate_cache()
        self.routes.append(self.make_route(*args,**kargs))


    def delt(self, *args, **kargs):
        """delt(host|net, gw|dev)"""
        # Rebuild the same 5-tuple and remove its first occurrence.
        self.invalidate_cache()
        route = self.make_route(*args,**kargs)
        try:
            i=self.routes.index(route)
            del(self.routes[i])
        except ValueError:
            warning("no matching route found")

    def ifchange(self, iff, addr):
        # Interface 'iff' changed address to 'addr' ("ip" or "ip/plen"):
        # rewrite its directly-connected route and update output addresses.
        self.invalidate_cache()
        the_addr,the_msk = (addr.split("/")+["32"])[:2]
        the_msk = itom(int(the_msk))
        the_rawaddr = atol(the_addr)
        the_net = the_rawaddr & the_msk


        for i in range(len(self.routes)):
            net,msk,gw,iface,addr = self.routes[i]
            if iface != iff:
                continue
            if gw == '0.0.0.0':
                # directly connected network: net/mask follow the new address
                self.routes[i] = (the_net,the_msk,gw,iface,the_addr)
            else:
                # routed via a gateway: only the output address changes
                self.routes[i] = (net,msk,gw,iface,the_addr)
        conf.netcache.flush()



    def ifdel(self, iff):
        # Remove every route going through interface 'iff'.
        self.invalidate_cache()
        new_routes=[]
        for rt in self.routes:
            if rt[3] != iff:
                new_routes.append(rt)
        self.routes=new_routes

    def ifadd(self, iff, addr):
        # Add the directly-connected route for interface 'iff' with
        # address 'addr' ("ip" or "ip/plen", plen defaults to 32).
        self.invalidate_cache()
        the_addr,the_msk = (addr.split("/")+["32"])[:2]
        the_msk = itom(int(the_msk))
        the_rawaddr = atol(the_addr)
        the_net = the_rawaddr & the_msk
        self.routes.append((the_net,the_msk,'0.0.0.0',iff,the_addr))


    def route(self,dest,verbose=None):
        """Return (iface, output-ip, gateway) for destination 'dest'.
        Falls back to the loopback interface when nothing matches."""
        if type(dest) is list and dest:
            dest = dest[0]
        if dest in self.cache:
            return self.cache[dest]
        if verbose is None:
            verbose=conf.verb
        # Transform "192.168.*.1-5" to one IP of the set
        dst = dest.split("/")[0]
        dst = dst.replace("*","0")
        while True:
            l = dst.find("-")
            if l < 0:
                break
            m = (dst[l:]+".").find(".")
            dst = dst[:l]+dst[l+m:]


        dst = atol(dst)
        pathes=[]
        for d,m,gw,i,a in self.routes:
            aa = atol(a)
            #Commented out after issue with virtual network with local address 0.0.0.0
            #if aa == dst:
            #    pathes.append((0xffffffff,(LOOPBACK_NAME,a,"0.0.0.0")))
            if (dst & m) == (d & m):
                pathes.append((m,(i,a,gw)))
        if not pathes:
            if verbose:
                warning("No route found (no default route?)")
            return LOOPBACK_NAME,"0.0.0.0","0.0.0.0" #XXX linux specific!
        # Choose the more specific route (greatest netmask).
        # XXX: we don't care about metrics
        pathes.sort()
        ret = pathes[-1][1]
        self.cache[dest] = ret
        return ret

    def get_if_bcast(self, iff):
        # Broadcast address of the (non-default) network attached to 'iff'.
        for net, msk, gw, iface, addr in self.routes:
            if (iff == iface and net != 0):
                bcast = atol(addr)|(~msk&0xffffffff); # FIXME: check error in atol()
                return ltoa(bcast);
        warning("No broadcast address found for iface %s\n" % iff);
+
# TRex disables route resolution at import time: no Route() instance is
# created, so there is no automatic output-interface detection here.
#conf.route=Route()

conf.route = None

#XXX use "with"
#_betteriface = conf.route.route("0.0.0.0", verbose=0)[0]
_betteriface = None

# Only override conf.iface when detection actually produced an interface.
# The old test "_betteriface != LOOPBACK_NAME" was also true for None and
# clobbered conf.iface with None.
if _betteriface is not None and _betteriface != LOOPBACK_NAME:
    conf.iface = _betteriface
del(_betteriface)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/route6.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/route6.py
new file mode 100644
index 00000000..44a66735
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/route6.py
@@ -0,0 +1,288 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+## Copyright (C) 2005 Guillaume Valadon <guedou@hongo.wide.ad.jp>
+## Arnaud Ebalard <arnaud.ebalard@eads.net>
+
+"""
+Routing and network interface handling for IPv6.
+"""
+
+#############################################################################
+#############################################################################
+### Routing/Interfaces stuff ###
+#############################################################################
+#############################################################################
+
+import socket
+from .config import conf
+from .utils6 import *
+from .arch import *
+
+
class Route6:
    """IPv6 routing table mirror.

    Entries are 5-tuples (prefix, plen, gateway, iface, src-candidates)
    with textual prefixes.  route() memoizes lookups in self.cache;
    every mutation must call invalidate_cache().
    """

    def __init__(self):
        self.invalidate_cache()
        self.resync()

    def invalidate_cache(self):
        # Drop memoized route() results.
        self.cache = {}

    def flush(self):
        self.invalidate_cache()
        self.routes = []

    def resync(self):
        # TODO : At the moment, resync will drop existing Teredo routes
        # if any. Change that ...
        self.invalidate_cache()
        self.routes = read_routes6()
        if self.routes == []:
            log_loading.info("No IPv6 support in kernel")

    def __repr__(self):
        rtlst = [('Destination', 'Next Hop', "iface", "src candidates")]

        for net, msk, gw, iface, cset in self.routes:
            rtlst.append(('%s/%i' % (net, msk), gw, iface, ", ".join(cset)))

        # Transpose with zip(*...) to size each column.  The previous
        # "zip(rtlst)" (no star) produced 1-tuples and exhausted the
        # iterator, so the rendered table came out empty/garbled.
        colwidth = [max(len(y) for y in x) for x in zip(*rtlst)]
        fmt = "  ".join("%%-%ds" % w for w in colwidth)
        return "\n".join(fmt % x for x in rtlst)


    # Unlike Scapy's Route.make_route() function, we do not have 'host' and 'net'
    # parameters. We only have a 'dst' parameter that accepts 'prefix' and
    # 'prefix/prefixlen' values.
    # WARNING: Providing a specific device will at the moment not work correctly.
    def make_route(self, dst, gw=None, dev=None):
        """Internal function : create a route for 'dst' via 'gw'.
        """
        prefix, plen = (dst.split("/")+["128"])[:2]
        plen = int(plen)

        if gw is None:
            gw = "::"
        if dev is None:
            dev, ifaddr, x = self.route(gw)
        else:
            # TODO: do better than that
            # replace that unique address by the list of all addresses
            lifaddr = in6_getifaddr()
            devaddrs = [ i for i in lifaddr if i[2] == dev]
            ifaddr = construct_source_candidate_set(prefix, plen, devaddrs, LOOPBACK_NAME)

        return (prefix, plen, gw, dev, ifaddr)


    def add(self, *args, **kargs):
        """Ex:
        add(dst="2001:db8:cafe:f000::/56")
        add(dst="2001:db8:cafe:f000::/56", gw="2001:db8:cafe::1")
        add(dst="2001:db8:cafe:f000::/64", gw="2001:db8:cafe::1", dev="eth0")
        """
        self.invalidate_cache()
        self.routes.append(self.make_route(*args, **kargs))


    def delt(self, dst, gw=None):
        """ Ex:
        delt(dst="::/0")
        delt(dst="2001:db8:cafe:f000::/56")
        delt(dst="2001:db8:cafe:f000::/56", gw="2001:db8:deca::1")
        """
        # 'dst' is textual (see examples); default prefix length is 128.
        # The previous code concatenated b"/128" to a str and raised
        # TypeError on any of the documented call forms.
        tmp = dst + "/128"
        dst, plen = tmp.split("/")[:2]
        dst = in6_ptop(dst)
        plen = int(plen)
        l = [ x for x in self.routes if in6_ptop(x[0]) == dst and x[1] == plen ]
        if gw:
            gw = in6_ptop(gw)
            # NOTE(review): this compares the route *destination* x[0] with
            # the gateway, as the original (and upstream) code did -- looks
            # suspicious (x[2] would be the gateway); confirm before changing.
            l = [ x for x in self.routes if in6_ptop(x[0]) == gw ]
        if len(l) == 0:
            warning("No matching route found")
        elif len(l) > 1:
            warning("Found more than one match. Aborting.")
        else:
            i = self.routes.index(l[0])
            self.invalidate_cache()
            del(self.routes[i])

    def ifchange(self, iff, addr):
        # Interface 'iff' changed address: rewrite its routes accordingly.
        the_addr, the_plen = (addr.split("/")+["128"])[:2]
        the_plen = int(the_plen)

        naddr = inet_pton(socket.AF_INET6, the_addr)
        nmask = in6_cidr2mask(the_plen)
        the_net = inet_ntop(socket.AF_INET6, in6_and(nmask,naddr))

        for i in range(len(self.routes)):
            net,plen,gw,iface,addr = self.routes[i]
            if iface != iff:
                continue
            if gw == '::':
                # directly connected prefix follows the new address
                self.routes[i] = (the_net,the_plen,gw,iface,the_addr)
            else:
                self.routes[i] = (net,the_plen,gw,iface,the_addr)
        self.invalidate_cache()
        ip6_neigh_cache.flush()

    def ifdel(self, iff):
        """ removes all route entries that uses 'iff' interface. """
        new_routes = []
        for rt in self.routes:
            if rt[3] != iff:
                new_routes.append(rt)
        self.invalidate_cache()
        self.routes = new_routes


    def ifadd(self, iff, addr):
        """
        Add an interface 'iff' with provided address into routing table.

        Ex: ifadd('eth0', '2001:bd8:cafe:1::1/64') will add following entry into
        Scapy6 internal routing table:

        Destination           Next Hop  iface  Def src @
        2001:bd8:cafe:1::/64  ::        eth0   2001:bd8:cafe:1::1

        prefix length value can be omitted. In that case, a value of 128
        will be used.
        """
        # Textual address handling; the previous code split a str on b"/"
        # and raised TypeError on the documented call form.
        addr, plen = (addr.split("/")+["128"])[:2]
        addr = in6_ptop(addr)
        plen = int(plen)
        naddr = inet_pton(socket.AF_INET6, addr)
        nmask = in6_cidr2mask(plen)
        prefix = inet_ntop(socket.AF_INET6, in6_and(nmask,naddr))
        self.invalidate_cache()
        self.routes.append((prefix,plen,'::',iff,[addr]))

    def route(self, dst, dev=None):
        """
        Provide best route to IPv6 destination address, based on Scapy6
        internal routing table content.

        When a set of address is passed (e.g. 2001:db8:cafe:*::1-5) an address
        of the set is used. Be aware of that behavior when using wildcards in
        upper parts of addresses !

        If 'dst' parameter is a FQDN, name resolution is performed and result
        is used.

        if optional 'dev' parameter is provided a specific interface, filtering
        is performed to limit search to route associated to that interface.
        """
        # Transform "2001:db8:cafe:*::1-5:0/120" to one IPv6 address of the set
        dst = dst.split("/")[0]
        savedst = dst # In case following inet_pton() fails
        dst = dst.replace("*","0")
        l = dst.find("-")
        while l >= 0:
            m = (dst[l:]+":").find(":")
            dst = dst[:l]+dst[l+m:]
            l = dst.find("-")

        try:
            inet_pton(socket.AF_INET6, dst)
        except socket.error:
            dst = socket.getaddrinfo(savedst, None, socket.AF_INET6)[0][-1][0]
            # TODO : Check if name resolution went well

        # Deal with dev-specific request for cache search
        k = dst
        if dev is not None:
            k = dst + "%%" + dev
        if k in self.cache:
            return self.cache[k]

        pathes = []

        # TODO : review all kinds of addresses (scope and *cast) to see
        # if we are able to cope with everything possible. I'm convinced
        # it's not the case.
        # -- arnaud
        for p, plen, gw, iface, cset in self.routes:
            if dev is not None and iface != dev:
                continue
            if in6_isincluded(dst, p, plen):
                pathes.append((plen, (iface, cset, gw)))
            elif (in6_ismlladdr(dst) and in6_islladdr(p) and in6_islladdr(cset[0])):
                pathes.append((plen, (iface, cset, gw)))

        if not pathes:
            warning("No route found for IPv6 destination %s (no default route?). This affects only IPv6" % dst)
            return (LOOPBACK_NAME, "::", "::") # XXX Linux specific

        # Sort with longest prefix first
        pathes.sort(reverse=True)

        best_plen = pathes[0][0]
        pathes = [ x for x in pathes if x[0] == best_plen ]

        res = []
        for p in pathes: # Here we select best source address for every route
            tmp = p[1]
            srcaddr = get_source_addr_from_candidate_set(dst, p[1][1])
            if srcaddr is not None:
                res.append((p[0], (tmp[0], srcaddr, tmp[2])))

        if res == []:
            warning("Found a route for IPv6 destination '%s', but no possible source address. This affects only IPv6" % dst)
            return (LOOPBACK_NAME, b"::", b"::") # XXX Linux specific

        # Symptom : 2 routes with same weight (our weight is plen)
        # Solution :
        #  - dst is unicast global. Check if it is 6to4 and we have a source
        #    6to4 address in those available
        #  - dst is link local (unicast or multicast) and multiple output
        #    interfaces are available. Take main one (conf.iface6)
        #  - if none of the previous or ambiguity persists, be lazy and keep
        #    first one
        #  XXX TODO : in a _near_ future, include metric in the game

        if len(res) > 1:
            tmp = []
            if in6_isgladdr(dst) and in6_isaddr6to4(dst):
                # TODO : see if taking the longest match between dst and
                #        every source addresses would provide better results
                tmp = [ x for x in res if in6_isaddr6to4(x[1][1]) ]
            elif in6_ismaddr(dst) or in6_islladdr(dst):
                # TODO : I'm sure we are not covering all addresses. Check that
                tmp = [ x for x in res if x[1][0] == conf.iface6 ]

            if tmp:
                res = tmp

        # Fill the cache (including dev-specific request)
        k = dst
        if dev is not None:
            k = dst + "%%" + dev
        self.cache[k] = res[0][1]

        return res[0][1]
+
conf.route6 = Route6()

# TRex disables default-route resolution at import time; _res stays None
# so the interface override below is skipped (the guard handles None).
#TBD-hhaim no need for that
#_res = conf.route6.route("::/0")
_res = None;

if _res:
    iff, gw, addr = _res
    conf.iface6 = iff
del(_res)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/scapypipes.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/scapypipes.py
new file mode 100644
index 00000000..aa67277e
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/scapypipes.py
@@ -0,0 +1,123 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
from .pipetool import Source,Drain,Sink
from .config import conf
# NOTE(review): the following names are referenced below but were never
# imported in this module (PcapReader/PcapWriter in RdpcapSource/WrpcapSink,
# IP/UDP in UDPDrain) and raised NameError at runtime.
from .utils import PcapReader, PcapWriter
from .layers.inet import IP, UDP
+
+
class SniffSource(Source):
    """Read packets from an interface and send them to low exit.
     +-----------+
  >>-|           |->>
     |           |
   >-|  [iface]--|->
     +-----------+
"""
    def __init__(self, iface=None, filter=None, name=None):
        # iface: interface to sniff on (None = scapy default)
        # filter: optional BPF filter string passed to the listen socket
        Source.__init__(self, name=name)
        self.iface = iface
        self.filter = filter
    def start(self):
        # Open the layer-2 listening socket lazily, when the pipe starts.
        self.s = conf.L2listen(iface=self.iface, filter=self.filter)
    def stop(self):
        self.s.close()
    def fileno(self):
        # Expose the socket fd so the pipe engine can select() on it.
        return self.s.fileno()
    def deliver(self):
        # Forward one sniffed packet to the low output.
        self._send(self.s.recv())
+
class RdpcapSource(Source):
    """Read packets from a PCAP file send them to low exit.
     +----------+
  >>-|          |->>
     |          |
   >-|  [pcap]--|->
     +----------+
"""
    def __init__(self, fname, name=None):
        Source.__init__(self, name=name)
        self.fname = fname
        self.f = PcapReader(self.fname)
    def start(self):
        # NOTE(review): the print() calls in start/stop/deliver look like
        # leftover debug output -- confirm before removing.
        print("start")
        # Reopen the file so each pipe run replays from the beginning.
        self.f = PcapReader(self.fname)
        self.is_exhausted = False
    def stop(self):
        print("stop")
        self.f.close()
    def fileno(self):
        return self.f.fileno()
    def deliver(self):
        # Read one packet; None marks end-of-file and flags exhaustion.
        p = self.f.recv()
        print("deliver %r" % p)
        if p is None:
            self.is_exhausted = True
        else:
            self._send(p)
+
+
class InjectSink(Sink):
    """Packets received on low input are injected to an interface
     +-----------+
  >>-|           |->>
     |           |
   >-|--[iface]  |->
     +-----------+
"""
    def __init__(self, iface=None, name=None):
        Sink.__init__(self, name=name)
        # Default to scapy's configured interface.
        if iface == None:
            iface = conf.iface
        self.iface = iface
    def start(self):
        # Layer-2 injection socket, opened when the pipe starts.
        self.s = conf.L2socket(iface=self.iface)
    def stop(self):
        self.s.close()
    def push(self, msg):
        # Inject every low-input message on the interface as-is.
        self.s.send(msg)
+
class Inject3Sink(InjectSink):
    # Same as InjectSink but injects at layer 3 (routing decides the link).
    def start(self):
        self.s = conf.L3socket(iface=self.iface)
+
+
class WrpcapSink(Sink):
    """Packets received on low input are written to PCA file
     +----------+
  >>-|          |->>
     |          |
   >-|--[pcap]  |->
     +----------+
"""
    def __init__(self, fname, name=None):
        Sink.__init__(self, name=name)
        self.f = PcapWriter(fname)
    def stop(self):
        # NOTE(review): only flushes, never closes the writer -- presumably
        # intentional so the pipe can be restarted; confirm.
        self.f.flush()
    def push(self, msg):
        # Append each low-input packet to the capture file.
        self.f.write(msg)
+
+
class UDPDrain(Drain):
    """Apply a function to messages on low and high entry
     +-------------+
  >>-|--[payload]--|->>
     |      X      |
   >-|----[UDP]----|->
     +-------------+
"""
    def __init__(self, ip="127.0.0.1", port=1234):
        # ip/port: destination used when re-wrapping high-input payloads.
        Drain.__init__(self)
        self.ip = ip
        self.port = port

    def push(self, msg):
        # Low input: extract the UDP payload of IPv4/UDP packets and emit
        # it on the high output (proto 17 == UDP).
        if IP in msg and msg[IP].proto == 17 and UDP in msg:
            payload = msg[UDP].payload
            self._high_send(str(payload))
    def high_push(self, msg):
        # High input: wrap the payload into a fresh IP/UDP packet and emit
        # it on the low output.
        p = IP(dst=self.ip)/UDP(sport=1234,dport=self.port)/msg
        self._send(p)
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/sendrecv.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/sendrecv.py
new file mode 100644
index 00000000..8649c14d
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/sendrecv.py
@@ -0,0 +1,678 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Functions to send and receive packets.
+"""
+
+import pickle,os,sys,time,subprocess,itertools
+from select import select
+from .data import *
+import scapy.arch
+from .config import conf
+from .packet import Gen
+from .utils import warning,get_temp_file,PcapReader,wrpcap
+from . import plist
+from .error import log_runtime,log_interactive
+from .base_classes import SetGen
+
+#################
+## Debug class ##
+#################
+
class debug:
    # Module-level capture of the last sndrcv() run, filled when
    # conf.debug_match is set: unmatched received packets, unanswered
    # sent packets, and matched (sent, received) pairs.
    recv=[]
    sent=[]
    match=[]
+
+
+####################
+## Send / Receive ##
+####################
+
+
+
+
def sndrcv(pks, pkt, timeout = None, inter = 0, verbose=None, chainCC=0, retry=0, multi=0):
    """Send stimuli from 'pkt' on supersocket 'pks' and match the answers.

    Forks: the child emits the packets while the parent listens on 'pks'
    and matches replies via hashret()/answers().  Returns a pair
    (SndRcvList of (sent, received) couples, PacketList of unanswered).
    timeout: seconds to keep listening after the last packet was sent
             (None or negative = wait forever)
    retry: >0 resend unanswered packets that many times; <0 keep retrying
           as long as progress is made
    multi: accept several answers for the same stimulus
    """
    if not isinstance(pkt, Gen):
        pkt = SetGen(pkt)

    if verbose is None:
        verbose = conf.verb
    debug.recv = plist.PacketList([],"Unanswered")
    debug.sent = plist.PacketList([],"Sent")
    debug.match = plist.SndRcvList([])
    nbrecv=0
    ans = []
    # do it here to fix random fields, so that parent and child have the same
    all_stimuli = tobesent = [p for p in pkt]
    notans = len(tobesent)

    hsent={}
    for i in tobesent:
        h = i.hashret()
        if h in hsent:
            hsent[h].append(i)
        else:
            hsent[h] = [i]
    if retry < 0:
        retry = -retry
        autostop=retry
    else:
        autostop=0


    while retry >= 0:
        found=0

        # A negative timeout means "no timeout".  Guard the comparison: in
        # Python 3 'None < 0' raises TypeError, which broke the default
        # timeout=None.
        if timeout is not None and timeout < 0:
            timeout = None

        rdpipe,wrpipe = os.pipe()
        rdpipe=os.fdopen(rdpipe, "rb")
        wrpipe=os.fdopen(wrpipe,"wb")

        pid=1
        try:
            pid = os.fork()
            if pid == 0:
                # Child process: emitter.
                try:
                    sys.stdin.close()
                    rdpipe.close()
                    try:
                        i = 0
                        if verbose:
                            print("Begin emission:")
                        for p in tobesent:
                            pks.send(p)
                            i += 1
                            time.sleep(inter)
                        if verbose:
                            print("Finished to send %i packets." % i)
                    except SystemExit:
                        pass
                    except KeyboardInterrupt:
                        pass
                    except:
                        log_runtime.exception("--- Error in child %i" % os.getpid())
                        log_runtime.info("--- Error in child %i" % os.getpid())
                finally:
                    try:
                        os.setpgrp() # Chance process group to avoid ctrl-C
                        # Hand the sent timestamps (and netcache) back to the
                        # parent through the pipe.
                        sent_times = [p.sent_time for p in all_stimuli if p.sent_time]
                        pickle.dump( (conf.netcache,sent_times), wrpipe )
                        wrpipe.close()
                    except:
                        pass
            elif pid < 0:
                log_runtime.error("fork error")
            else:
                # Parent process: receiver/matcher.
                wrpipe.close()
                stoptime = 0
                remaintime = None
                inmask = [rdpipe,pks]
                try:
                    try:
                        while 1:
                            if stoptime:
                                remaintime = stoptime-time.time()
                                if remaintime <= 0:
                                    break
                            r = None
                            if scapy.arch.FREEBSD or scapy.arch.DARWIN:
                                inp, out, err = select(inmask,[],[], 0.05)
                                if len(inp) == 0 or pks in inp:
                                    r = pks.nonblock_recv()
                            else:
                                inp, out, err = select(inmask,[],[], remaintime)
                                if len(inp) == 0:
                                    break
                                if pks in inp:
                                    r = pks.recv(MTU)
                            if rdpipe in inp:
                                # Child finished sending: start the timeout clock.
                                if timeout:
                                    stoptime = time.time()+timeout
                                del(inmask[inmask.index(rdpipe)])
                            if r is None:
                                continue
                            ok = 0
                            h = r.hashret()
                            if h in hsent:
                                hlst = hsent[h]
                                for i in range(len(hlst)):
                                    if r.answers(hlst[i]):
                                        ans.append((hlst[i],r))
                                        if verbose > 1:
                                            os.write(1, b"*")
                                        ok = 1
                                        if not multi:
                                            del(hlst[i])
                                            notans -= 1
                                        else:
                                            if not hasattr(hlst[i], '_answered'):
                                                notans -= 1
                                            hlst[i]._answered = 1
                                        break
                            if notans == 0 and not multi:
                                break
                            if not ok:
                                if verbose > 1:
                                    os.write(1, b".")
                                nbrecv += 1
                                if conf.debug_match:
                                    debug.recv.append(r)
                    except KeyboardInterrupt:
                        if chainCC:
                            raise
                finally:
                    try:
                        nc,sent_times = pickle.load(rdpipe)
                    except EOFError:
                        warning("Child died unexpectedly. Packets may have not been sent %i"%os.getpid())
                    else:
                        conf.netcache.update(nc)
                        for p,t in zip(all_stimuli, sent_times):
                            p.sent_time = t
                    os.waitpid(pid,0)
        finally:
            if pid == 0:
                os._exit(0)

        # Everything still in hsent was not answered.
        remain = list(itertools.chain(*[ i for i in hsent.values() ]))
        if multi:
            remain = [ p for p in remain if not hasattr(p, '_answered')]

        if autostop and len(remain) > 0 and len(remain) != len(tobesent):
            retry = autostop

        tobesent = remain
        if len(tobesent) == 0:
            break
        retry -= 1

    if conf.debug_match:
        debug.sent=plist.PacketList(remain[:],"Sent")
        debug.match=plist.SndRcvList(ans[:])

    # clean the ans list to delete the field _answered
    if (multi):
        for s,r in ans:
            if hasattr(s, '_answered'):
                del(s._answered)

    if verbose:
        print("\nReceived %i packets, got %i answers, remaining %i packets" % (nbrecv+len(ans), len(ans), notans))
    return plist.SndRcvList(ans),plist.PacketList(remain,"Unanswered")
+
+
def __gen_send(s, x, inter=0, loop=0, count=None, verbose=None, realtime=None, *args, **kargs):
    """Emit packet(s) 'x' on the already-open socket 's', then close it.
    inter: delay between packets; count: total packets to send;
    loop: repeat forever if truthy (count wins over loop);
    realtime: honor the packets' own timestamps while sending."""
    if type(x) is bytes:
        x = conf.raw_layer(load=x)
    if type(x) is str:
        x = conf.raw_layer(load=x.encode('ascii'))
    if not isinstance(x, Gen):
        x = SetGen(x)
    if verbose is None:
        verbose = conf.verb
    sent = 0
    # Encode the iteration budget in 'loop': negative values count upward
    # toward zero (finite), positive means endless.
    if count is not None:
        loop = -count
    elif not loop:
        loop = -1
    try:
        while loop:
            dt0 = None
            for p in x:
                if realtime:
                    now = time.time()
                    if dt0:
                        wait = dt0 + p.time - now
                        if wait > 0:
                            time.sleep(wait)
                    else:
                        dt0 = now - p.time
                s.send(p)
                sent += 1
                if verbose:
                    os.write(1, b".")
                time.sleep(inter)
            if loop < 0:
                loop += 1
    except KeyboardInterrupt:
        pass
    s.close()
    if verbose:
        print("\nSent %i packets." % sent)
+
@conf.commands.register
def send(x, inter=0, loop=0, count=None, verbose=None, realtime=None, *args, **kargs):
    """Send packets at layer 3
send(packets, [inter=0], [loop=0], [verbose=conf.verb]) -> None"""
    # Open a layer-3 socket and delegate; __gen_send closes the socket.
    __gen_send(conf.L3socket(*args, **kargs), x, inter=inter, loop=loop, count=count,verbose=verbose, realtime=realtime)
+
@conf.commands.register
def sendp(x, inter=0, loop=0, iface=None, iface_hint=None, count=None, verbose=None, realtime=None, *args, **kargs):
    """Send packets at layer 2
sendp(packets, [inter=0], [loop=0], [verbose=conf.verb]) -> None"""
    # If no interface is given, derive it from the route to 'iface_hint'.
    if iface is None and iface_hint is not None:
        iface = conf.route.route(iface_hint)[0]
    # Open a layer-2 socket and delegate; __gen_send closes the socket.
    __gen_send(conf.L2socket(iface=iface, *args, **kargs), x, inter=inter, loop=loop, count=count, verbose=verbose, realtime=realtime)
+
@conf.commands.register
def sendpfast(x, pps=None, mbps=None, realtime=None, loop=0, file_cache=False, iface=None, verbose=True):
    """Send packets at layer 2 using tcpreplay for performance
    pps: packets per second
    mbps: MBits per second
    realtime: use packet's timestamp, bending time with realtime value
    loop: number of times to process the packet list
    file_cache: cache packets in RAM instead of reading from disk at each iteration
    iface: output interface
    verbose: if False, discard tcpreplay output """
    if iface is None:
        iface = conf.iface
    # Build the tcpreplay command line; rate options are mutually exclusive.
    argv = [conf.prog.tcpreplay, "--intf1=%s" % iface ]
    if pps is not None:
        argv.append("--pps=%i" % pps)
    elif mbps is not None:
        argv.append("--mbps=%f" % mbps)
    elif realtime is not None:
        argv.append("--multiplier=%i" % realtime)
    else:
        argv.append("--topspeed")
    if not verbose:
        argv.append("-q")
    if loop:
        argv.append("--loop=%i" % loop)
    if file_cache:
        argv.append("--enable-file-cache")

    # Dump packets to a temporary pcap and hand it to tcpreplay; the file
    # is removed in the finally block even on failure or interruption.
    f = get_temp_file()
    argv.append(f)
    wrpcap(f, x)
    with open(os.devnull, "wb") as null:
        proc_output = null if not verbose else None
        try:
            subprocess.check_call(argv,
                                  stdout=proc_output,
                                  stderr=proc_output)
        except KeyboardInterrupt:
            log_interactive.info("Interrupted by user")
        except Exception as e:
            log_interactive.error("while trying to exec [%s]: %s" % (argv[0],e))
        finally:
            os.unlink(f)
+
+
+
+
+
@conf.commands.register
def sr(x,filter=None, iface=None, nofilter=0, *args,**kargs):
    """Send and receive packets at layer 3
nofilter: put 1 to avoid use of bpf filters
retry: if positive, how many times to resend unanswered packets
       if negative, how many times to retry when no more packets are answered
timeout: how much time to wait after the last packet has been sent
verbose: set verbosity level
multi: whether to accept multiple answers for the same stimulus
filter: provide a BPF filter
iface: listen answers only on the given interface"""
    # Default timeout of -1 makes sndrcv wait indefinitely after sending.
    if not "timeout" in kargs:
        kargs["timeout"] = -1
    s = conf.L3socket(filter=filter, iface=iface, nofilter=nofilter)
    a,b=sndrcv(s,x,*args,**kargs)
    s.close()
    return a,b
+
@conf.commands.register
def sr1(x,filter=None,iface=None, nofilter=0, *args,**kargs):
    """Send packets at layer 3 and return only the first answer
nofilter: put 1 to avoid use of bpf filters
retry: if positive, how many times to resend unanswered packets
       if negative, how many times to retry when no more packets are answered
timeout: how much time to wait after the last packet has been sent
verbose: set verbosity level
multi: whether to accept multiple answers for the same stimulus
filter: provide a BPF filter
iface: listen answers only on the given interface"""
    # Default timeout of -1 makes sndrcv wait indefinitely after sending.
    if not "timeout" in kargs:
        kargs["timeout"] = -1
    s=conf.L3socket(filter=filter, nofilter=nofilter, iface=iface)
    a,b=sndrcv(s,x,*args,**kargs)
    s.close()
    # Return the received half of the first answered pair, or None.
    if len(a) > 0:
        return a[0][1]
    else:
        return None
+
@conf.commands.register
def srp(x,iface=None, iface_hint=None, filter=None, nofilter=0, type=ETH_P_ALL, *args,**kargs):
    """Send and receive packets at layer 2
nofilter: put 1 to avoid use of bpf filters
retry: if positive, how many times to resend unanswered packets
       if negative, how many times to retry when no more packets are answered
timeout: how much time to wait after the last packet has been sent
verbose: set verbosity level
multi: whether to accept multiple answers for the same stimulus
filter: provide a BPF filter
iface: work only on the given interface"""
    # Default timeout of -1 makes sndrcv wait indefinitely after sending.
    if not "timeout" in kargs:
        kargs["timeout"] = -1
    # If no interface is given, derive it from the route to 'iface_hint'.
    if iface is None and iface_hint is not None:
        iface = conf.route.route(iface_hint)[0]
    s = conf.L2socket(iface=iface, filter=filter, nofilter=nofilter, type=type)
    a,b=sndrcv(s ,x,*args,**kargs)
    s.close()
    return a,b
+
@conf.commands.register
def srp1(*args,**kargs):
    """Send and receive packets at layer 2 and return only the first answer
nofilter: put 1 to avoid use of bpf filters
retry: if positive, how many times to resend unanswered packets
       if negative, how many times to retry when no more packets are answered
timeout: how much time to wait after the last packet has been sent
verbose: set verbosity level
multi: whether to accept multiple answers for the same stimulus
filter: provide a BPF filter
iface: work only on the given interface"""
    if not "timeout" in kargs:
        kargs["timeout"] = -1
    a,b=srp(*args,**kargs)
    # Return the received half of the first answered pair, or None.
    if len(a) > 0:
        return a[0][1]
    else:
        return None
+
def __sr_loop(srfunc, pkts, prn=lambda x:x[1].summary(), prnfail=lambda x:x.summary(), inter=1, timeout=None, count=None, verbose=None, store=1, *args, **kargs):
    # Shared driver for srloop()/srploop(): call 'srfunc' every 'inter'
    # seconds (at most 'count' times), printing answers with 'prn' and
    # failures with 'prnfail' in alternating theme colors; returns the
    # accumulated (SndRcvList, PacketList) when store is true.
    n = 0
    r = 0
    ct = conf.color_theme
    if verbose is None:
        verbose = conf.verb
    parity = 0
    ans=[]
    unans=[]
    if timeout is None:
        timeout = min(2*inter, 5)
    try:
        while 1:
            # Alternate row color on each iteration.
            parity ^= 1
            col = [ct.even,ct.odd][parity]
            if count is not None:
                if count == 0:
                    break
                count -= 1
            start = time.time()
            print("\rsend...\r", end = " ")
            res = srfunc(pkts, timeout=timeout, verbose=0, chainCC=1, *args, **kargs)
            n += len(res[0])+len(res[1])
            r += len(res[0])
            if verbose > 1 and prn and len(res[0]) > 0:
                msg = "RECV %i:" % len(res[0])
                print( "\r"+ct.success(msg), end = " ")
                for p in res[0]:
                    print(col(prn(p)))
                    print(" "*len(msg), end = " ")
            if verbose > 1 and prnfail and len(res[1]) > 0:
                msg = "fail %i:" % len(res[1])
                print("\r"+ct.fail(msg), end = " ")
                for p in res[1]:
                    print(col(prnfail(p)))
                    print(" "*len(msg), end = " ")
            if verbose > 1 and not (prn or prnfail):
                print("recv:%i fail:%i" % tuple(map(len, res[:2])))
            if store:
                ans += res[0]
                unans += res[1]
            # Pace the loop so iterations are at least 'inter' seconds apart.
            end=time.time()
            if end-start < inter:
                time.sleep(inter+start-end)
    except KeyboardInterrupt:
        pass

    if verbose and n>0:
        print(ct.normal("\nSent %i packets, received %i packets. %3.1f%% hits." % (n,r,100.0*r/n)))
    return plist.SndRcvList(ans),plist.PacketList(unans)
+
+@conf.commands.register
+def srloop(pkts, *args, **kargs):
+ """Send a packet at layer 3 in loop and print the answer each time
+srloop(pkts, [prn], [inter], [count], ...) --> None"""
+ return __sr_loop(sr, pkts, *args, **kargs)
+
+@conf.commands.register
+def srploop(pkts, *args, **kargs):
+ """Send a packet at layer 2 in loop and print the answer each time
+srloop(pkts, [prn], [inter], [count], ...) --> None"""
+ return __sr_loop(srp, pkts, *args, **kargs)
+
+
+#def sndrcvflood(pks, pkt, prn=lambda (s,r):r.summary(), chainCC=0, store=1, unique=0):
+def sndrcvflood(pks, pkt, prn=lambda a:a[1].summary(), chainCC=0, store=1, unique=0):
+ if not isinstance(pkt, Gen):
+ pkt = SetGen(pkt)
+ tobesent = [p for p in pkt]
+ received = plist.SndRcvList()
+ seen = {}
+
+ hsent={}
+ for i in tobesent:
+ h = i.hashret()
+ if h in hsent:
+ hsent[h].append(i)
+ else:
+ hsent[h] = [i]
+
+ def send_in_loop(tobesent):
+ while 1:
+ for p in tobesent:
+ yield p
+
+ packets_to_send = send_in_loop(tobesent)
+
+ ssock = rsock = pks.fileno()
+
+ try:
+ while 1:
+ readyr,readys,_ = select([rsock],[ssock],[])
+ if ssock in readys:
+ pks.send(next(packets_to_send))
+
+ if rsock in readyr:
+ p = pks.recv(MTU)
+ if p is None:
+ continue
+ h = p.hashret()
+ if h in hsent:
+ hlst = hsent[h]
+ for i in hlst:
+ if p.answers(i):
+ res = prn((i,p))
+ if unique:
+ if res in seen:
+ continue
+ seen[res] = None
+ if res is not None:
+ print(res)
+ if store:
+ received.append((i,p))
+ except KeyboardInterrupt:
+ if chainCC:
+ raise
+ return received
+
+@conf.commands.register
+def srflood(x,filter=None, iface=None, nofilter=None, *args,**kargs):
+ """Flood and receive packets at layer 3
+prn: function applied to packets received. Ret val is printed if not None
+store: if 1 (default), store answers and return them
+unique: only consider packets whose printed result is unique
+nofilter: put 1 to avoid use of bpf filters
+filter: provide a BPF filter
+iface: listen answers only on the given interface"""
+ s = conf.L3socket(filter=filter, iface=iface, nofilter=nofilter)
+ r=sndrcvflood(s,x,*args,**kargs)
+ s.close()
+ return r
+
+@conf.commands.register
+def srpflood(x,filter=None, iface=None, iface_hint=None, nofilter=None, *args,**kargs):
+ """Flood and receive packets at layer 2
+prn: function applied to packets received. Ret val is printed if not None
+store: if 1 (default), store answers and return them
+unique: only consider packets whose printed result is unique
+nofilter: put 1 to avoid use of bpf filters
+filter: provide a BPF filter
+iface: listen answers only on the given interface"""
+ if iface is None and iface_hint is not None:
+ iface = conf.route.route(iface_hint)[0]
+ s = conf.L2socket(filter=filter, iface=iface, nofilter=nofilter)
+ r=sndrcvflood(s,x,*args,**kargs)
+ s.close()
+ return r
+
+
+
+
+@conf.commands.register
+def sniff(count=0, store=1, offline=None, prn = None, lfilter=None, L2socket=None, timeout=None,
+ opened_socket=None, stop_filter=None, *arg, **karg):
+ """Sniff packets
+sniff([count=0,] [prn=None,] [store=1,] [offline=None,] [lfilter=None,] + L2ListenSocket args) -> list of packets
+
+ count: number of packets to capture. 0 means infinity
+    store: whether to store sniffed packets or discard them
+ prn: function to apply to each packet. If something is returned,
+ it is displayed. Ex:
+ ex: prn = lambda x: x.summary()
+lfilter: python function applied to each packet to determine
+ if further action may be done
+ ex: lfilter = lambda x: x.haslayer(Padding)
+offline: pcap file to read packets from, instead of sniffing them
+timeout: stop sniffing after a given time (default: None)
+L2socket: use the provided L2socket
+opened_socket: provide an object ready to use .recv() on
+stop_filter: python function applied to each packet to determine
+ if we have to stop the capture after this packet
+ ex: stop_filter = lambda x: x.haslayer(TCP)
+ """
+ c = 0
+
+ if opened_socket is not None:
+ s = opened_socket
+ else:
+ if offline is None:
+ if L2socket is None:
+ L2socket = conf.L2listen
+ s = L2socket(type=ETH_P_ALL, *arg, **karg)
+ else:
+ s = PcapReader(offline)
+
+ lst = []
+ if timeout is not None:
+ stoptime = time.time()+timeout
+ remain = None
+ try:
+ while 1:
+ if timeout is not None:
+ remain = stoptime-time.time()
+ if remain <= 0:
+ break
+ sel = select([s],[],[],remain)
+ if s in sel[0]:
+ p = s.recv(MTU)
+ if p is None:
+ break
+ if lfilter and not lfilter(p):
+ continue
+ if store:
+ lst.append(p)
+ c += 1
+ if prn:
+ r = prn(p)
+ if r is not None:
+ print(r)
+ if stop_filter and stop_filter(p):
+ break
+ if count > 0 and c >= count:
+ break
+ except KeyboardInterrupt:
+ pass
+ if opened_socket is None:
+ s.close()
+ return plist.PacketList(lst,"Sniffed")
+
+
+@conf.commands.register
+def bridge_and_sniff(if1, if2, count=0, store=1, offline=None, prn = None, lfilter=None, L2socket=None, timeout=None,
+ stop_filter=None, *args, **kargs):
+ """Forward traffic between two interfaces and sniff packets exchanged
+bridge_and_sniff([count=0,] [prn=None,] [store=1,] [offline=None,] [lfilter=None,] + L2Socket args) -> list of packets
+
+ count: number of packets to capture. 0 means infinity
+    store: whether to store sniffed packets or discard them
+ prn: function to apply to each packet. If something is returned,
+ it is displayed. Ex:
+ ex: prn = lambda x: x.summary()
+lfilter: python function applied to each packet to determine
+ if further action may be done
+ ex: lfilter = lambda x: x.haslayer(Padding)
+timeout: stop sniffing after a given time (default: None)
+L2socket: use the provided L2socket
+stop_filter: python function applied to each packet to determine
+ if we have to stop the capture after this packet
+ ex: stop_filter = lambda x: x.haslayer(TCP)
+ """
+ c = 0
+ if L2socket is None:
+ L2socket = conf.L2socket
+ s1 = L2socket(iface=if1)
+ s2 = L2socket(iface=if2)
+ peerof={s1:s2,s2:s1}
+ label={s1:if1, s2:if2}
+
+ lst = []
+ if timeout is not None:
+ stoptime = time.time()+timeout
+ remain = None
+ try:
+ while True:
+ if timeout is not None:
+ remain = stoptime-time.time()
+ if remain <= 0:
+ break
+ ins,outs,errs = select([s1,s2],[],[], remain)
+ for s in ins:
+ p = s.recv()
+ if p is not None:
+ peerof[s].send(p.original)
+ if lfilter and not lfilter(p):
+ continue
+ if store:
+ p.sniffed_on = label[s]
+ lst.append(p)
+ c += 1
+ if prn:
+ r = prn(p)
+ if r is not None:
+ print("%s: %s" % (label[s],r))
+ if stop_filter and stop_filter(p):
+ break
+ if count > 0 and c >= count:
+ break
+ except KeyboardInterrupt:
+ pass
+ finally:
+ return plist.PacketList(lst,"Sniffed")
+
+
+@conf.commands.register
+def tshark(*args,**kargs):
+ """Sniff packets and print them calling pkt.show(), a bit like text wireshark"""
+ sniff(prn=lambda x: x.display(),*args,**kargs)
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/supersocket.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/supersocket.py
new file mode 100644
index 00000000..b87f9c16
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/supersocket.py
@@ -0,0 +1,141 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more information
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+SuperSocket.
+"""
+
+import socket,time
+from .config import conf
+from .data import *
+from scapy.error import warning, log_runtime
+
+class _SuperSocket_metaclass(type):
+ def __repr__(self):
+ if self.desc is not None:
+ return "<%s: %s>" % (self.__name__,self.desc)
+ else:
+ return "<%s>" % self.__name__
+
+
+class SuperSocket(metaclass = _SuperSocket_metaclass):
+ desc = None
+ closed=0
+ def __init__(self, family=socket.AF_INET,type=socket.SOCK_STREAM, proto=0):
+ self.ins = socket.socket(family, type, proto)
+ self.outs = self.ins
+ self.promisc=None
+ def send(self, x):
+ sx = bytes(x)
+ if hasattr(x, "sent_time"):
+ x.sent_time = time.time()
+ return self.outs.send(sx)
+ def recv(self, x=MTU):
+ return conf.raw_layer(self.ins.recv(x))
+ def fileno(self):
+ return self.ins.fileno()
+ def close(self):
+ if self.closed:
+ return
+ self.closed=1
+ if self.ins != self.outs:
+ if self.outs and self.outs.fileno() != -1:
+ self.outs.close()
+ if self.ins and self.ins.fileno() != -1:
+ self.ins.close()
+ def sr(self, *args, **kargs):
+ return sendrecv.sndrcv(self, *args, **kargs)
+ def sr1(self, *args, **kargs):
+ a,b = sendrecv.sndrcv(self, *args, **kargs)
+ if len(a) > 0:
+ return a[0][1]
+ else:
+ return None
+ def sniff(self, *args, **kargs):
+ return sendrecv.sniff(opened_socket=self, *args, **kargs)
+
+class L3RawSocket(SuperSocket):
+ desc = "Layer 3 using Raw sockets (PF_INET/SOCK_RAW)"
+ def __init__(self, type = ETH_P_IP, filter=None, iface=None, promisc=None, nofilter=0):
+ self.outs = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
+ self.outs.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
+ self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type))
+ if iface is not None:
+ self.ins.bind((iface, type))
+ def recv(self, x=MTU):
+ pkt, sa_ll = self.ins.recvfrom(x)
+ if sa_ll[2] == socket.PACKET_OUTGOING:
+ return None
+ if sa_ll[3] in conf.l2types:
+ cls = conf.l2types[sa_ll[3]]
+ lvl = 2
+ elif sa_ll[1] in conf.l3types:
+ cls = conf.l3types[sa_ll[1]]
+ lvl = 3
+ else:
+ cls = conf.default_l2
+ warning("Unable to guess type (interface=%s protocol=%#x family=%i). Using %s" % (sa_ll[0],sa_ll[1],sa_ll[3],cls.name))
+ lvl = 3
+
+ try:
+ pkt = cls(pkt)
+ except KeyboardInterrupt:
+ raise
+ except:
+ if conf.debug_dissector:
+ raise
+ pkt = conf.raw_layer(pkt)
+ if lvl == 2:
+ pkt = pkt.payload
+
+ if pkt is not None:
+ from arch import get_last_packet_timestamp
+ pkt.time = get_last_packet_timestamp(self.ins)
+ return pkt
+ def send(self, x):
+ try:
+ #sx = str(x)
+ sx = x
+ x.sent_time = time.time()
+ self.outs.sendto(sx,(x.dst,0))
+ except socket.error as msg:
+ log_runtime.error(msg)
+
+class SimpleSocket(SuperSocket):
+ desc = "wrapper arround a classic socket"
+ def __init__(self, sock):
+ self.ins = sock
+ self.outs = sock
+
+
+class StreamSocket(SimpleSocket):
+ desc = "transforms a stream socket into a layer 2"
+ def __init__(self, sock, basecls=None):
+ if basecls is None:
+ basecls = conf.raw_layer
+ SimpleSocket.__init__(self, sock)
+ self.basecls = basecls
+
+ def recv(self, x=MTU):
+ pkt = self.ins.recv(x, socket.MSG_PEEK)
+ x = len(pkt)
+ if x == 0:
+ raise socket.error((100,"Underlying stream socket tore down"))
+ pkt = self.basecls(pkt)
+ pad = pkt.getlayer(conf.padding_layer)
+ if pad is not None and pad.underlayer is not None:
+ del(pad.underlayer.payload)
+ while pad is not None and not isinstance(pad, NoPayload):
+ x -= len(pad.load)
+ pad = pad.payload
+ self.ins.recv(x)
+ return pkt
+
+
+
+if conf.L3socket is None:
+ conf.L3socket = L3RawSocket
+
+import scapy.sendrecv
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/themes.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/themes.py
new file mode 100644
index 00000000..f519ad7e
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/themes.py
@@ -0,0 +1,277 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more information
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Color themes for the interactive console.
+"""
+
+##################
+## Color themes ##
+##################
+
+class Color:
+ normal = "\033[0m"
+ black = "\033[30m"
+ red = "\033[31m"
+ green = "\033[32m"
+ yellow = "\033[33m"
+ blue = "\033[34m"
+ purple = "\033[35m"
+ cyan = "\033[36m"
+ grey = "\033[37m"
+
+ bold = "\033[1m"
+ uline = "\033[4m"
+ blink = "\033[5m"
+ invert = "\033[7m"
+
+
+def create_styler(fmt=None, before="", after="", fmt2="%s"):
+ def do_style(val, fmt=fmt, before=before, after=after, fmt2=fmt2):
+ if fmt is None:
+ if type(val) is not str:
+ val = str(val)
+ else:
+ val = fmt % val
+ return fmt2 % (before+val+after)
+ return do_style
+
+class ColorTheme:
+ def __repr__(self):
+ return "<%s>" % self.__class__.__name__
+ def __getattr__(self, attr):
+ return create_styler()
+
+
+class NoTheme(ColorTheme):
+ pass
+
+
+class AnsiColorTheme(ColorTheme):
+ def __getattr__(self, attr):
+ if attr.startswith("__"):
+ raise AttributeError(attr)
+ s = "style_%s" % attr
+ if s in self.__class__.__dict__:
+ before = getattr(self, s)
+ after = self.style_normal
+ else:
+ before = after = ""
+
+ return create_styler(before=before, after=after)
+
+
+ style_normal = ""
+ style_prompt = ""
+ style_punct = ""
+ style_id = ""
+ style_not_printable = ""
+ style_layer_name = ""
+ style_field_name = ""
+ style_field_value = ""
+ style_emph_field_name = ""
+ style_emph_field_value = ""
+ style_packetlist_name = ""
+ style_packetlist_proto = ""
+ style_packetlist_value = ""
+ style_fail = ""
+ style_success = ""
+ style_odd = ""
+ style_even = ""
+ style_opening = ""
+ style_active = ""
+ style_closed = ""
+ style_left = ""
+ style_right = ""
+
+class BlackAndWhite(AnsiColorTheme):
+ pass
+
+class DefaultTheme(AnsiColorTheme):
+ style_normal = Color.normal
+ style_prompt = Color.blue+Color.bold
+ style_punct = Color.normal
+ style_id = Color.blue+Color.bold
+ style_not_printable = Color.grey
+ style_layer_name = Color.red+Color.bold
+ style_field_name = Color.blue
+ style_field_value = Color.purple
+ style_emph_field_name = Color.blue+Color.uline+Color.bold
+ style_emph_field_value = Color.purple+Color.uline+Color.bold
+ style_packetlist_name = Color.red+Color.bold
+ style_packetlist_proto = Color.blue
+ style_packetlist_value = Color.purple
+ style_fail = Color.red+Color.bold
+ style_success = Color.blue+Color.bold
+ style_even = Color.black+Color.bold
+ style_odd = Color.black
+ style_opening = Color.yellow
+ style_active = Color.black
+ style_closed = Color.grey
+ style_left = Color.blue+Color.invert
+ style_right = Color.red+Color.invert
+
+class BrightTheme(AnsiColorTheme):
+ style_normal = Color.normal
+ style_punct = Color.normal
+ style_id = Color.yellow+Color.bold
+ style_layer_name = Color.red+Color.bold
+ style_field_name = Color.yellow+Color.bold
+ style_field_value = Color.purple+Color.bold
+ style_emph_field_name = Color.yellow+Color.bold
+ style_emph_field_value = Color.green+Color.bold
+ style_packetlist_name = Color.red+Color.bold
+ style_packetlist_proto = Color.yellow+Color.bold
+ style_packetlist_value = Color.purple+Color.bold
+ style_fail = Color.red+Color.bold
+ style_success = Color.blue+Color.bold
+ style_even = Color.black+Color.bold
+ style_odd = Color.black
+ style_left = Color.cyan+Color.invert
+ style_right = Color.purple+Color.invert
+
+
+class RastaTheme(AnsiColorTheme):
+ style_normal = Color.normal+Color.green+Color.bold
+ style_prompt = Color.yellow+Color.bold
+ style_punct = Color.red
+ style_id = Color.green+Color.bold
+ style_not_printable = Color.green
+ style_layer_name = Color.red+Color.bold
+ style_field_name = Color.yellow+Color.bold
+ style_field_value = Color.green+Color.bold
+ style_emph_field_name = Color.green
+ style_emph_field_value = Color.green
+ style_packetlist_name = Color.red+Color.bold
+ style_packetlist_proto = Color.yellow+Color.bold
+ style_packetlist_value = Color.green+Color.bold
+ style_fail = Color.red
+ style_success = Color.red+Color.bold
+ style_even = Color.yellow
+ style_odd = Color.green
+ style_left = Color.yellow+Color.invert
+ style_right = Color.red+Color.invert
+
+class ColorOnBlackTheme(AnsiColorTheme):
+ """Color theme for black backgrounds"""
+ style_normal = Color.normal
+ style_prompt = Color.green+Color.bold
+ style_punct = Color.normal
+ style_id = Color.green
+ style_not_printable = Color.black+Color.bold
+ style_layer_name = Color.yellow+Color.bold
+ style_field_name = Color.cyan
+ style_field_value = Color.purple+Color.bold
+ style_emph_field_name = Color.cyan+Color.bold
+ style_emph_field_value = Color.red+Color.bold
+ style_packetlist_name = Color.black+Color.bold
+ style_packetlist_proto = Color.yellow+Color.bold
+ style_packetlist_value = Color.purple+Color.bold
+ style_fail = Color.red+Color.bold
+ style_success = Color.green
+ style_even = Color.black+Color.bold
+ style_odd = Color.grey
+ style_opening = Color.yellow
+ style_active = Color.grey+Color.bold
+ style_closed = Color.black+Color.bold
+ style_left = Color.cyan+Color.bold
+ style_right = Color.red+Color.bold
+
+
+class FormatTheme(ColorTheme):
+ def __getattr__(self, attr):
+ if attr.startswith("__"):
+ raise AttributeError(attr)
+ colfmt = self.__class__.__dict__.get("style_%s" % attr, "%s")
+ return create_styler(fmt2 = colfmt)
+
+class LatexTheme(FormatTheme):
+ style_prompt = r"\textcolor{blue}{%s}"
+ style_not_printable = r"\textcolor{gray}{%s}"
+ style_layer_name = r"\textcolor{red}{\bf %s}"
+ style_field_name = r"\textcolor{blue}{%s}"
+ style_field_value = r"\textcolor{purple}{%s}"
+ style_emph_field_name = r"\textcolor{blue}{\underline{%s}}" #ul
+ style_emph_field_value = r"\textcolor{purple}{\underline{%s}}" #ul
+ style_packetlist_name = r"\textcolor{red}{\bf %s}"
+ style_packetlist_proto = r"\textcolor{blue}{%s}"
+ style_packetlist_value = r"\textcolor{purple}{%s}"
+ style_fail = r"\textcolor{red}{\bf %s}"
+ style_success = r"\textcolor{blue}{\bf %s}"
+ style_left = r"\textcolor{blue}{%s}"
+ style_right = r"\textcolor{red}{%s}"
+# style_even = r"}{\bf "
+# style_odd = ""
+
+class LatexTheme2(FormatTheme):
+ style_prompt = r"@`@textcolor@[@blue@]@@[@%s@]@"
+ style_not_printable = r"@`@textcolor@[@gray@]@@[@%s@]@"
+ style_layer_name = r"@`@textcolor@[@red@]@@[@@`@bfseries@[@@]@%s@]@"
+ style_field_name = r"@`@textcolor@[@blue@]@@[@%s@]@"
+ style_field_value = r"@`@textcolor@[@purple@]@@[@%s@]@"
+ style_emph_field_name = r"@`@textcolor@[@blue@]@@[@@`@underline@[@%s@]@@]@"
+ style_emph_field_value = r"@`@textcolor@[@purple@]@@[@@`@underline@[@%s@]@@]@"
+ style_packetlist_name = r"@`@textcolor@[@red@]@@[@@`@bfseries@[@@]@%s@]@"
+ style_packetlist_proto = r"@`@textcolor@[@blue@]@@[@%s@]@"
+ style_packetlist_value = r"@`@textcolor@[@purple@]@@[@%s@]@"
+ style_fail = r"@`@textcolor@[@red@]@@[@@`@bfseries@[@@]@%s@]@"
+ style_success = r"@`@textcolor@[@blue@]@@[@@`@bfserices@[@@]@%s@]@"
+ style_even = r"@`@textcolor@[@gray@]@@[@@`@bfseries@[@@]@%s@]@"
+# style_odd = r"@`@textcolor@[@black@]@@[@@`@bfseries@[@@]@%s@]@"
+ style_left = r"@`@textcolor@[@blue@]@@[@%s@]@"
+ style_right = r"@`@textcolor@[@red@]@@[@%s@]@"
+
+class HTMLTheme(FormatTheme):
+ style_prompt = "<span class=prompt>%s</span>"
+ style_not_printable = "<span class=not_printable>%s</span>"
+ style_layer_name = "<span class=layer_name>%s</span>"
+ style_field_name = "<span class=field_name>%s</span>"
+ style_field_value = "<span class=field_value>%s</span>"
+ style_emph_field_name = "<span class=emph_field_name>%s</span>"
+ style_emph_field_value = "<span class=emph_field_value>%s</span>"
+ style_packetlist_name = "<span class=packetlist_name>%s</span>"
+ style_packetlist_proto = "<span class=packetlist_proto>%s</span>"
+ style_packetlist_value = "<span class=packetlist_value>%s</span>"
+ style_fail = "<span class=fail>%s</span>"
+ style_success = "<span class=success>%s</span>"
+ style_even = "<span class=even>%s</span>"
+ style_odd = "<span class=odd>%s</span>"
+ style_left = "<span class=left>%s</span>"
+ style_right = "<span class=right>%s</span>"
+
+class HTMLTheme2(HTMLTheme):
+ style_prompt = "#[#span class=prompt#]#%s#[#/span#]#"
+ style_not_printable = "#[#span class=not_printable#]#%s#[#/span#]#"
+ style_layer_name = "#[#span class=layer_name#]#%s#[#/span#]#"
+ style_field_name = "#[#span class=field_name#]#%s#[#/span#]#"
+ style_field_value = "#[#span class=field_value#]#%s#[#/span#]#"
+ style_emph_field_name = "#[#span class=emph_field_name#]#%s#[#/span#]#"
+ style_emph_field_value = "#[#span class=emph_field_value#]#%s#[#/span#]#"
+ style_packetlist_name = "#[#span class=packetlist_name#]#%s#[#/span#]#"
+ style_packetlist_proto = "#[#span class=packetlist_proto#]#%s#[#/span#]#"
+ style_packetlist_value = "#[#span class=packetlist_value#]#%s#[#/span#]#"
+ style_fail = "#[#span class=fail#]#%s#[#/span#]#"
+ style_success = "#[#span class=success#]#%s#[#/span#]#"
+ style_even = "#[#span class=even#]#%s#[#/span#]#"
+ style_odd = "#[#span class=odd#]#%s#[#/span#]#"
+ style_left = "#[#span class=left#]#%s#[#/span#]#"
+ style_right = "#[#span class=right#]#%s#[#/span#]#"
+
+
+class ColorPrompt:
+ __prompt = ">>> "
+ def __str__(self):
+ try:
+ ct = scapy.config.conf.color_theme
+ if isinstance(ct, AnsiColorTheme):
+            ## ^A and ^B delimit invisible characters for readline to count right
+ return "\001%s\002" % ct.prompt("\002"+scapy.config.conf.prompt+"\001")
+ else:
+ return ct.prompt(scapy.config.conf.prompt)
+ except:
+ return self.__prompt
+
+
+import scapy.config
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/tools/UTscapy.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/tools/UTscapy.py
new file mode 100644
index 00000000..212aa123
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/tools/UTscapy.py
@@ -0,0 +1,677 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more information
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Unit testing infrastructure for Scapy
+"""
+
+import sys,getopt,imp
+import bz2, base64, os.path, time, traceback, zlib, hashlib
+
+
+#### Import tool ####
+
+def import_module(name):
+ name = os.path.realpath(name)
+ thepath = os.path.dirname(name)
+ name = os.path.basename(name)
+ if name.endswith(".py"):
+ name = name[:-3]
+ f,path,desc = imp.find_module(name,[thepath])
+
+ try:
+ return imp.load_module(name, f, path, desc)
+ finally:
+ if f:
+ f.close()
+
+
+#### INTERNAL/EXTERNAL FILE EMBEDDING ####
+
+class File:
+ def __init__(self, name, URL, local):
+ self.name = name
+ self.local = local
+ self.URL = URL
+ def get_local(self):
+ return bz2.decompress(base64.decodestring(self.local))
+ def get_URL(self):
+ return URL
+ def write(self, dir):
+ if dir:
+ dir += "/"
+ open(dir+self.name,"w").write(self.get_local())
+
+
+# Embed a base64 encoded bziped version of js and css files
+# to work if you can't reach Internet.
+class External_Files:
+ UTscapy_js = File("UTscapy.js", "http://www.secdev.org/projects/UTscapy/UTscapy.js",
+"""QlpoOTFBWSZTWWVijKQAAXxfgERUYOvAChIhBAC/79+qQAH8AFA0poANAMjQAAAG
+ABo0NGEZNBo00BhgAaNDRhGTQaNNAYFURJinplGaKbRkJiekzSenqmpA0Gm1LFMp
+RUklVQlK9WUTZYpNFI1IiEWEFT09Sfj5uO+qO6S5DQwKIxM92+Zku94wL6V/1KTK
+an2c66Ug6SmVKy1ZIrgauxMVLF5xLH0lJRQuKlqLF10iatlTzqvw7S9eS3+h4lu3
+GZyMgoOude3NJ1pQy8eo+X96IYZw+ynehsiPj73m0rnvQ3QXZ9BJQiZQYQ5/uNcl
+2WOlC5vyQqV/BWsnr2NZYLYXQLDs/Bffk4ZfR4/SH6GfA5Xlek4xHNHqbSsRbREO
+gueXo3kcYi94K6hSO3ldD2O/qJXOFqJ8o3TE2aQahxtQpCVUKQMvODHwu2YkaORY
+ZC6gihEallcHDIAtRPScBACAJnUggYhLDX6DEko7nC9GvAw5OcEkiyDUbLdiGCzD
+aXWMC2DuQ2Y6sGf6NcRuON7QSbhHsPc4KKmZ/xdyRThQkGVijKQ=""")
+ UTscapy_css = File("UTscapy.css","http://www.secdev.org/projects/UTscapy/UTscapy.css",
+"""QlpoOTFBWSZTWTbBCNEAAE7fgHxwSB//+Cpj2QC//9/6UAR+63dxbNzO3ccmtGEk
+pM0m1I9E/Qp6g9Q09TNQ9QDR6gMgAkiBFG9U9TEGRkGgABoABoBmpJkRAaAxD1AN
+Gh6gNADQBzAATJgATCYJhDAEYAEiQkwIyJk0n6qenpqeoaMUeo9RgIxp6pX78kfx
+Jx4MUhDHKEb2pJAYAelG1cybiZBBDipH8ocxNyHDAqTUxiQmIAEDE3ApIBUUECAT
+7Lvlf4xA/sVK0QHkSlYtT0JmErdOjx1v5NONPYSjrIhQnbl1MbG5m+InMYmVAWJp
+uklD9cNdmQv2YigxbEtgUrsY2pDDV/qMT2SHnHsViu2rrp2LA01YJIHZqjYCGIQN
+sGNobFxAYHLqqMOj9TI2Y4GRpRCUGu82PnMnXUBgDSkTY4EfmygaqvUwbGMbPwyE
+220Q4G+sDvw7+6in3CAOS634pcOEAdREUW+QqMjvWvECrGISo1piv3vqubTGOL1c
+ssrFnnSfU4T6KSCbPs98HJ2yjWN4i8Bk5WrM/JmELLNeZ4vgMkA4JVQInNnWTUTe
+gmMSlJd/b7JuRwiM5RUzXOBTa0e3spO/rsNJiylu0rCxygdRo2koXdSJzmUVjJUm
+BOFIkUKq8LrE+oT9h2qUqqUQ25fGV7e7OFkpmZopqUi0WeIBzlXdYY0Zz+WUJUTC
+RC+CIPFIYh1RkopswMAop6ZjuZKRqR0WNuV+rfuF5aCXPpxAm0F14tPyhf42zFMT
+GJUMxxowJnoauRq4xGQk+2lYFxbQ0FiC43WZSyYLHMuo5NTJ92QLAgs4FgOyZQqQ
+xpsGKMA0cIisNeiootpnlWQvkPzNGUTPg8jqkwTvqQLguZLKJudha1hqfBib1IfO
+LNChcU6OqF+3wyPKg5Y5oSbSJPAMcRDANwmS2i9oZm6vsD1pLkWtFGbAkEjjCuEU
+W1ev1IsF2UVmWYFtJkqLT708ApUBK/ig3rbJWSq7RGQd3sSrOKu3lyKzTBdkXK2a
+BGLV5dS1XURdKxaRkMplLLQxsimBYZEAa8KQkYyI+4EagMqycRR7RgwtZFxJSu0T
+1q5wS2JG82iETHplbNj8DYo9IkmKzNAiw4FxK8bRfIYvwrbshbEagL11AQJFsqeZ
+WeXDoWEx2FMyyZRAB5QyCFnwYtwtWAQmmITY8aIM2SZyRnHH9Wi8+Sr2qyCscFYo
+vzM985aHXOHAxQN2UQZbQkUv3D4Vc+lyvalAffv3Tyg4ks3a22kPXiyeCGweviNX
+0K8TKasyOhGsVamTUAZBXfQVw1zmdS4rHDnbHgtIjX3DcCt6UIr0BHTYjdV0JbPj
+r1APYgXihjQwM2M83AKIhwQQJv/F3JFOFCQNsEI0QA==""")
+ def get_local_dict(cls):
+ #return dict(map(lambda (x,y): (x, y.name), filter(lambda (x,y): isinstance(y, File), cls.__dict__.items())))
+ return dict(map(lambda a: (a[0], a[1].name), filter(lambda a: isinstance(a[1], File), cls.__dict__.items())))
+ get_local_dict = classmethod(get_local_dict)
+ def get_URL_dict(cls):
+ #return dict(map(lambda (x,y): (x, y.URL), filter(lambda (x,y): isinstance(y, File), cls.__dict__.items())))
+ return dict(map(lambda a: (a[0], a[1].URL), filter(lambda a: isinstance(a[1], File), cls.__dict__.items())))
+ get_URL_dict = classmethod(get_URL_dict)
+
+
+#### HELPER CLASSES FOR PARAMETERIZING OUTPUT FORMAT ####
+
+class EnumClass:
+ def from_string(cls,x):
+ return cls.__dict__[x.upper()]
+ from_string = classmethod(from_string)
+
+class Format(EnumClass):
+ TEXT = 1
+ ANSI = 2
+ HTML = 3
+ LATEX = 4
+ XUNIT = 5
+
+
+#### TEST CLASSES ####
+
+class TestClass:
+ def __getitem__(self, item):
+ return getattr(self, item)
+ def add_keywords(self, kw):
+ if kw is str:
+ self.keywords.append(kw)
+ else:
+ self.keywords += kw
+
+class TestCampaign(TestClass):
+ def __init__(self, title):
+ self.title = title
+ self.filename = None
+ self.headcomments = ""
+ self.campaign = []
+ self.keywords = []
+ self.crc = None
+ self.sha = None
+ self.preexec = None
+ self.preexec_output = None
+ def add_testset(self, testset):
+ self.campaign.append(testset)
+ def __iter__(self):
+ return self.campaign.__iter__()
+ def all_tests(self):
+ for ts in self:
+ for t in ts:
+ yield t
+
+class TestSet(TestClass):
+ def __init__(self, name):
+ self.name = name
+ self.set = []
+ self.comments = ""
+ self.keywords = []
+ self.crc = None
+ self.expand = 1
+ def add_test(self, test):
+ self.set.append(test)
+ def __iter__(self):
+ return self.set.__iter__()
+
+class UnitTest(TestClass):
+ def __init__(self, name):
+ self.name = name
+ self.test = ""
+ self.comments = ""
+ self.result = ""
+ self.res = True # must be True at init to have a different truth value than None
+ self.output = ""
+ self.num = -1
+ self.keywords = []
+ self.crc = None
+ self.expand = 1
+ def __nonzero__(self):
+ return self.res
+
+
+#### PARSE CAMPAIGN ####
+
+def parse_campaign_file(campaign_file):
+ test_campaign = TestCampaign("Test campaign")
+ test_campaign.filename= campaign_file.name
+ testset = None
+ test = None
+ testnb = 0
+
+ for l in campaign_file.readlines():
+ if l[0] == '#':
+ continue
+ if l[0] == "~":
+ (test or testset or campaign_file).add_keywords(l[1:].split())
+ elif l[0] == "%":
+ test_campaign.title = l[1:].strip()
+ elif l[0] == "+":
+ testset = TestSet(l[1:].strip())
+ test_campaign.add_testset(testset)
+ test = None
+ elif l[0] == "=":
+ test = UnitTest(l[1:].strip())
+ test.num = testnb
+ testnb += 1
+ testset.add_test(test)
+ elif l[0] == "*":
+ if test is not None:
+
+ test.comments += l[1:]
+ elif testset is not None:
+ testset.comments += l[1:]
+ else:
+ test_campaign.headcomments += l[1:]
+ else:
+ if test is None:
+ if l.strip():
+ print("Unknown content [%s]" % l.strip(), file = sys.stderr)
+ else:
+ test.test += l
+ return test_campaign
+
+def dump_campaign(test_campaign):
+ print("#"*(len(test_campaign.title)+6))
+ print("## %(title)s ##" % test_campaign)
+ print("#"*(len(test_campaign.title)+6))
+ if test_campaign.sha and test_campaign.crc:
+ print("CRC=[%(crc)s] SHA=[%(sha)s]" % test_campaign)
+ print("from file %(filename)s" % test_campaign)
+ print()
+ for ts in test_campaign:
+ if ts.crc:
+ print("+--[%s]%s(%s)--" % (ts.name,"-"*max(2,80-len(ts.name)-18),ts.crc))
+ else:
+ print("+--[%s]%s" % (ts.name,"-"*max(2,80-len(ts.name)-6)))
+ if ts.keywords:
+ print(" kw=%s" % ",".join(ts.keywords))
+ for t in ts:
+ print("%(num)03i %(name)s" % t)
+ c = k = ""
+ if t.keywords:
+ k = "kw=%s" % ",".join(t.keywords)
+ if t.crc:
+ c = "[%(crc)s] " % t
+ if c or k:
+ print(" %s%s" % (c,k) )
+
+#### COMPUTE CAMPAIGN DIGESTS ####
+
+def crc32(x):
+ return "%08X" % (0xffffffff & zlib.crc32(x))
+
+def sha1(x):
+ return hashlib.sha1(x).hexdigest().upper()
+
+def compute_campaign_digests(test_campaign):
+ dc = b""
+ for ts in test_campaign:
+ dts = b""
+ for t in ts:
+ dt = t.test.strip().encode('ascii')
+ t.crc = crc32(dt)
+ dts += b"\0"+dt
+ ts.crc = crc32(dts)
+ dc += b"\0\x01"+dts
+ test_campaign.crc = crc32(dc)
+ if type(test_campaign.filename) is str and test_campaign.filename != '<stdin>':
+ test = open(test_campaign.filename, 'rb').read()
+ elif test_campaign.filename == '<stdin>':
+ test = sys.stdin.read().encode('ascii')
+ else:
+ raise Exception("Unknown test source %s" % test_campaign.filename)
+ test_campaign.sha = sha1(test)
+
+
+#### FILTER CAMPAIGN #####
+
+def filter_tests_on_numbers(test_campaign, num):
+ if num:
+ for ts in test_campaign:
+ #ts.set = filter(lambda t: t.num in num, ts.set)
+ ts.set = [ t for t in ts.set if t.num in num ]
+ #test_campaign.campaign = filter(lambda ts: len(ts.set) > 0, test_campaign.campaign)
+ test_campaign.campaign = [ ts for ts in test_campaign.campaign if len(ts.set) > 0 ]
+
+def filter_tests_keep_on_keywords(test_campaign, kw):
+ def kw_match(lst, kw):
+ for k in lst:
+ if k in kw:
+ return True
+ return False
+
+ if kw:
+ for ts in test_campaign:
+ #ts.set = filter(lambda t: kw_match(t.keywords, kw), ts.set)
+ ts.set = [ t for t in ts.set if kw_match(t.keywords, kw) ]
+
+def filter_tests_remove_on_keywords(test_campaign, kw):
+ def kw_match(lst, kw):
+ for k in kw:
+ if k not in lst:
+ return False
+ return True
+
+ if kw:
+ for ts in test_campaign:
+ #ts.set = filter(lambda t: not kw_match(t.keywords, kw), ts.set)
+ ts.set = [ t for t in ts.set if not kw_match(t.keywords, kw) ]
+
+
+def remove_empty_testsets(test_campaign):
+ #test_campaign.campaign = filter(lambda ts: len(ts.set) > 0, test_campaign.campaign)
+ test_campaign.campaign = [ ts for ts in test_campaign.campaign if len(ts.set) > 0 ]
+
+
+#### RUN CAMPAIGN #####
+
+def run_campaign(test_campaign, get_interactive_session, verb=2):
+ passed=failed=0
+ if test_campaign.preexec:
+ test_campaign.preexec_output = get_interactive_session(test_campaign.preexec.strip())[0]
+ for testset in test_campaign:
+ for t in testset:
+ t.output,res = get_interactive_session(t.test.strip())
+ the_res = False
+ try:
+ if res is None or res:
+ the_res= True
+ except Exception as msg:
+ t.output+="UTscapy: Error during result interpretation:\n"
+ t.output+="".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback,))
+ if the_res:
+ t.res = True
+ res = "passed"
+ passed += 1
+ else:
+ t.res = False
+ res = "failed"
+ failed += 1
+ t.result = res
+ if verb > 1:
+ print("%(result)6s %(crc)s %(name)s" % t, file = sys.stderr)
+ test_campaign.passed = passed
+ test_campaign.failed = failed
+ if verb:
+ print("Campaign CRC=%(crc)s SHA=%(sha)s" % test_campaign, file = sys.stderr)
+ print("PASSED=%i FAILED=%i" % (passed, failed), file = sys.stderr)
+
+
+#### INFO LINES ####
+
+def info_line(test_campaign):
+ filename = test_campaign.filename
+ if filename is None:
+ return "Run %s by UTscapy" % time.ctime()
+ else:
+ return "Run %s from [%s] by UTscapy" % (time.ctime(), filename)
+
+def html_info_line(test_campaign):
+ filename = test_campaign.filename
+ if filename is None:
+ return """Run %s by <a href="http://www.secdev.org/projects/UTscapy/">UTscapy</a><br>""" % time.ctime()
+ else:
+ return """Run %s from [%s] by <a href="http://www.secdev.org/projects/UTscapy/">UTscapy</a><br>""" % (time.ctime(), filename)
+
+
+#### CAMPAIGN TO something ####
+
+def campaign_to_TEXT(test_campaign):
+ output="%(title)s\n" % test_campaign
+ output += "-- "+info_line(test_campaign)+"\n\n"
+ output += "Passed=%(passed)i\nFailed=%(failed)i\n\n%(headcomments)s\n" % test_campaign
+
+ for testset in test_campaign:
+ output += "######\n## %(name)s\n######\n%(comments)s\n\n" % testset
+ for t in testset:
+ if t.expand:
+ output += "###(%(num)03i)=[%(result)s] %(name)s\n%(comments)s\n%(output)s\n\n" % t
+
+ return output
+
+def campaign_to_ANSI(test_campaign):
+ output="%(title)s\n" % test_campaign
+ output += "-- "+info_line(test_campaign)+"\n\n"
+ output += "Passed=%(passed)i\nFailed=%(failed)i\n\n%(headcomments)s\n" % test_campaign
+
+ for testset in test_campaign:
+ output += "######\n## %(name)s\n######\n%(comments)s\n\n" % testset
+ for t in testset:
+ if t.expand:
+ output += "###(%(num)03i)=[%(result)s] %(name)s\n%(comments)s\n%(output)s\n\n" % t
+
+ return output
+
+def campaign_to_xUNIT(test_campaign):
+ output='<?xml version="1.0" encoding="UTF-8" ?>\n<testsuite>\n'
+ for testset in test_campaign:
+ for t in testset:
+ output += ' <testcase classname="%s"\n' % testset.name.encode("string_escape").replace('"',' ')
+ output += ' name="%s"\n' % t.name.encode("string_escape").replace('"',' ')
+ output += ' duration="0">\n' % t
+ if not t.res:
+ output += '<error><![CDATA[%(output)s]]></error>\n' % t
+ output += "</testcase>\n"
+ output += '</testsuite>'
+ return output
+
+
+def campaign_to_HTML(test_campaign, local=0):
+ output = """<html>
+<head>
+<title>%(title)s</title>
+<link rel="stylesheet" href="%%(UTscapy_css)s" type="text/css">
+<script language="JavaScript" src="%%(UTscapy_js)s" type="text/javascript"></script>
+</head>
+<body>
+
+<h1>%(title)s</h1>
+
+<span class=button onClick="hide_all('tst')">Shrink All</span>
+<span class=button onClick="show_all('tst')">Expand All</span>
+<span class=button onClick="show_passed('tst')">Expand Passed</span>
+<span class=button onClick="show_failed('tst')">Expand Failed</span>
+<p>
+""" % test_campaign
+
+ if local:
+ External_Files.UTscapy_js.write(os.path.dirname(test_campaign.output_file.name))
+ External_Files.UTscapy_css.write(os.path.dirname(test_campaign.output_file.name))
+ output %= External_Files.get_local_dict()
+ else:
+ output %= External_Files.get_URL_dict()
+
+ if test_campaign.crc is not None and test_campaign.sha is not None:
+ output += "CRC=<span class=crc>%(crc)s</span> SHA=<span class=crc>%(sha)s</span><br>" % test_campaign
+ output += "<small><em>"+html_info_line(test_campaign)+"</em></small>"
+ output += test_campaign.headcomments + "\n<p>PASSED=%(passed)i FAILED=%(failed)i<p>\n\n" % test_campaign
+ for ts in test_campaign:
+ for t in ts:
+ output += """<span class=button%(result)s onClick="goto_id('tst%(num)il')">%(num)03i</span>\n""" % t
+ output += "\n\n"
+
+ for testset in test_campaign:
+ output += "<h2>" % testset
+ if testset.crc is not None:
+ output += "<span class=crc>%(crc)s</span> " % testset
+ output += "%(name)s</h2>\n%(comments)s\n<ul>\n" % testset
+ for t in testset:
+ output += """<li class=%(result)s id="tst%(num)il">\n""" % t
+ if t.expand == 2:
+ output +="""
+<span id="tst%(num)i+" class="button%(result)s" onClick="show('tst%(num)i')" style="POSITION: absolute; VISIBILITY: hidden;">+%(num)03i+</span>
+<span id="tst%(num)i-" class="button%(result)s" onClick="hide('tst%(num)i')">-%(num)03i-</span>
+""" % t
+ else:
+ output += """
+<span id="tst%(num)i+" class="button%(result)s" onClick="show('tst%(num)i')">+%(num)03i+</span>
+<span id="tst%(num)i-" class="button%(result)s" onClick="hide('tst%(num)i')" style="POSITION: absolute; VISIBILITY: hidden;">-%(num)03i-</span>
+""" % t
+ if t.crc is not None:
+ output += "<span class=crc>%(crc)s</span>\n" % t
+ output += """%(name)s\n<span class="comment %(result)s" id="tst%(num)i" """ % t
+ if t.expand < 2:
+ output += """ style="POSITION: absolute; VISIBILITY: hidden;" """
+ output += """><br>%(comments)s
+<pre>
+%(output)s</pre></span>
+""" % t
+ output += "\n</ul>\n\n"
+
+ output += "</body></html>"
+ return output
+
+def campaign_to_LATEX(test_campaign):
+ output = r"""\documentclass{report}
+\usepackage{alltt}
+\usepackage{xcolor}
+\usepackage{a4wide}
+\usepackage{hyperref}
+
+\title{%(title)s}
+\date{%%s}
+
+\begin{document}
+\maketitle
+\tableofcontents
+
+\begin{description}
+\item[Passed:] %(passed)i
+\item[Failed:] %(failed)i
+\end{description}
+
+%(headcomments)s
+
+""" % test_campaign
+ output %= info_line(test_campaign)
+
+ for testset in test_campaign:
+ output += "\\chapter{%(name)s}\n\n%(comments)s\n\n" % testset
+ for t in testset:
+ if t.expand:
+ output += r"""\section{%(name)s}
+
+[%(num)03i] [%(result)s]
+
+%(comments)s
+\begin{alltt}
+%(output)s
+\end{alltt}
+
+""" % t
+
+ output += "\\end{document}\n"
+ return output
+
+
+
+#### USAGE ####
+
+def usage():
+ print("""Usage: UTscapy [-m module] [-f {text|ansi|HTML|LaTeX}] [-o output_file]
+ [-t testfile] [-k keywords [-k ...]] [-K keywords [-K ...]]
+ [-l] [-d|-D] [-F] [-q[q]] [-P preexecute_python_code]
+ [-s /path/to/scpay]
+-l\t\t: generate local files
+-F\t\t: expand only failed tests
+-d\t\t: dump campaign
+-D\t\t: dump campaign and stop
+-C\t\t: don't calculate CRC and SHA
+-s\t\t: path to scapy.py
+-q\t\t: quiet mode
+-qq\t\t: [silent mode]
+-n <testnum>\t: only tests whose numbers are given (eg. 1,3-7,12)
+-m <module>\t: additional module to put in the namespace
+-k <kw1>,<kw2>,...\t: include only tests with one of those keywords (can be used many times)
+-K <kw1>,<kw2>,...\t: remove tests with one of those keywords (can be used many times)
+-P <preexecute_python_code>
+""", file = sys.stderr)
+ raise SystemExit
+
+
+#### MAIN ####
+
+def main(argv):
+ import builtins
+
+ # Parse arguments
+
+ FORMAT = Format.ANSI
+ TESTFILE = sys.stdin
+ OUTPUTFILE = sys.stdout
+ LOCAL = 0
+ NUM=None
+ KW_OK = []
+ KW_KO = []
+ DUMP = 0
+ CRC = 1
+ ONLYFAILED = 0
+ VERB=2
+ PREEXEC=""
+ SCAPY="scapy"
+ MODULES = []
+ try:
+ opts = getopt.getopt(argv, "o:t:f:hln:m:k:K:DdCFqP:s:")
+ for opt,optarg in opts[0]:
+ if opt == "-h":
+ usage()
+ elif opt == "-F":
+ ONLYFAILED = 1
+ elif opt == "-q":
+ VERB -= 1
+ elif opt == "-D":
+ DUMP = 2
+ elif opt == "-d":
+ DUMP = 1
+ elif opt == "-C":
+ CRC = 0
+ elif opt == "-s":
+ SCAPY = optarg
+ elif opt == "-P":
+ PREEXEC += "\n"+optarg
+ elif opt == "-f":
+ try:
+ FORMAT = Format.from_string(optarg)
+ except KeyError as msg:
+ raise getopt.GetoptError("Unknown output format %s" % msg)
+ elif opt == "-t":
+ TESTFILE = open(optarg)
+ elif opt == "-o":
+ OUTPUTFILE = open(optarg, "w")
+ elif opt == "-l":
+ LOCAL = 1
+ elif opt == "-n":
+ NUM = []
+ for v in map( lambda x: x.strip(), optarg.split(",") ):
+ try:
+ NUM.append(int(v))
+ except ValueError:
+ v1,v2 = map(int, v.split("-"))
+ for vv in range(v1,v2+1):
+ NUM.append(vv)
+ elif opt == "-m":
+ MODULES.append(optarg)
+ elif opt == "-k":
+ KW_OK.append(optarg.split(","))
+ elif opt == "-K":
+ KW_KO.append(optarg.split(","))
+
+
+ try:
+ from scapy import all as scapy
+ except ImportError as e:
+ raise getopt.GetoptError("cannot import [%s]: %s" % (SCAPY,e))
+
+ for m in MODULES:
+ try:
+ mod = import_module(m)
+ builtins.__dict__.update(mod.__dict__)
+ except ImportError as e:
+ raise getopt.GetoptError("cannot import [%s]: %s" % (m,e))
+
+ except getopt.GetoptError as msg:
+ print("ERROR:",msg, file = sys.stderr)
+ raise SystemExit
+
+ autorun_func = {
+ Format.TEXT: scapy.autorun_get_text_interactive_session,
+ Format.ANSI: scapy.autorun_get_ansi_interactive_session,
+ Format.HTML: scapy.autorun_get_html_interactive_session,
+ Format.LATEX: scapy.autorun_get_latex_interactive_session,
+ Format.XUNIT: scapy.autorun_get_text_interactive_session,
+ }
+
+ # Parse test file
+ test_campaign = parse_campaign_file(TESTFILE)
+
+ # Report parameters
+ if PREEXEC:
+ test_campaign.preexec = PREEXEC
+
+
+ # Compute campaign CRC and SHA
+ if CRC:
+ compute_campaign_digests(test_campaign)
+
+ # Filter out unwanted tests
+ filter_tests_on_numbers(test_campaign, NUM)
+ for k in KW_OK:
+ filter_tests_keep_on_keywords(test_campaign, k)
+ for k in KW_KO:
+ filter_tests_remove_on_keywords(test_campaign, k)
+
+ remove_empty_testsets(test_campaign)
+
+
+ # Dump campaign
+ if DUMP:
+ dump_campaign(test_campaign)
+ if DUMP > 1:
+ sys.exit()
+
+ # Run tests
+ test_campaign.output_file = OUTPUTFILE
+ run_campaign(test_campaign, autorun_func[FORMAT], verb=VERB)
+
+ # Shrink passed
+ if ONLYFAILED:
+ for t in test_campaign.all_tests():
+ if t:
+ t.expand = 0
+ else:
+ t.expand = 2
+
+ # Generate report
+ if FORMAT == Format.TEXT:
+ output = campaign_to_TEXT(test_campaign)
+ elif FORMAT == Format.ANSI:
+ output = campaign_to_ANSI(test_campaign)
+ elif FORMAT == Format.HTML:
+ output = campaign_to_HTML(test_campaign, local=LOCAL)
+ elif FORMAT == Format.LATEX:
+ output = campaign_to_LATEX(test_campaign)
+ elif FORMAT == Format.XUNIT:
+ output = campaign_to_xUNIT(test_campaign)
+
+ OUTPUTFILE.write(output)
+ OUTPUTFILE.close()
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/tools/__init__.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/tools/__init__.py
new file mode 100644
index 00000000..af6eec74
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/tools/__init__.py
@@ -0,0 +1,8 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Additional tools to be run separately
+"""
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/tools/check_asdis.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/tools/check_asdis.py
new file mode 100644
index 00000000..3e45007f
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/tools/check_asdis.py
@@ -0,0 +1,103 @@
+#! /usr/bin/env python
+
+import getopt, sys
+
+def usage():
+ print("""Usage: check_asdis -i <pcap_file> [-o <wrong_packets.pcap>]
+ -v increase verbosity
+ -d hexdiff packets that differ
+ -z compress output pcap
+ -a open pcap file in append mode""", file = sys.stderr)
+
+def main(argv):
+ PCAP_IN = None
+ PCAP_OUT = None
+ COMPRESS=False
+ APPEND=False
+ DIFF=False
+ VERBOSE=0
+ try:
+ opts=getopt.getopt(argv, "hi:o:azdv")
+ for opt, parm in opts[0]:
+ if opt == "-h":
+ usage()
+ raise SystemExit
+ elif opt == "-i":
+ PCAP_IN = parm
+ elif opt == "-o":
+ PCAP_OUT = parm
+ elif opt == "-v":
+ VERBOSE += 1
+ elif opt == "-d":
+ DIFF = True
+ elif opt == "-a":
+ APPEND = True
+ elif opt == "-z":
+ COMPRESS = True
+
+
+ if PCAP_IN is None:
+ raise getopt.GetoptError("Missing pcap file (-i)")
+
+ except getopt.GetoptError as e:
+ print("ERROR: %s" % e, file = sys.stderr)
+ raise SystemExit
+
+
+
+ from scapy.config import conf
+ from scapy.utils import RawPcapReader,RawPcapWriter,hexdiff
+ from scapy.layers import all
+
+
+ pcap = RawPcapReader(PCAP_IN)
+ pcap_out = None
+ if PCAP_OUT:
+ pcap_out = RawPcapWriter(PCAP_OUT, append=APPEND, gz=COMPRESS, linktype=pcap.linktype)
+ pcap_out._write_header(None)
+
+ LLcls = conf.l2types.get(pcap.linktype)
+ if LLcls is None:
+ print(" Unknown link type [%i]. Can't test anything!" % pcap.linktype, file = sys.stderr)
+ raise SystemExit
+
+
+ i=-1
+ differ=0
+ failed=0
+ for p1,meta in pcap:
+ i += 1
+ try:
+ p2d = LLcls(p1)
+ p2 = str(p2d)
+ except KeyboardInterrupt:
+ raise
+ except Exception as e:
+ print("Dissection error on packet %i" % i)
+ failed += 1
+ else:
+ if p1 == p2:
+ if VERBOSE >= 2:
+ print("Packet %i ok" % i)
+ continue
+ else:
+ print("Packet %i differs" % i)
+ differ += 1
+ if VERBOSE >= 1:
+ print(repr(p2d))
+ if DIFF:
+ hexdiff(p1,p2)
+ if pcap_out is not None:
+ pcap_out.write(p1)
+ i+=1
+ correct = i-differ-failed
+ print("%i total packets. %i ok, %i differed, %i failed. %.2f%% correct." % (i, correct, differ,
+ failed, i and 100.0*(correct)/i))
+
+
+if __name__ == "__main__":
+ import sys
+ try:
+ main(sys.argv[1:])
+ except KeyboardInterrupt:
+ print("Interrupted by user.", file = sys.stderr)
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/utils.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/utils.py
new file mode 100644
index 00000000..252109bb
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/utils.py
@@ -0,0 +1,1054 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+General utility functions.
+"""
+
+import os,sys,socket,types
+import random,time
+import gzip,zlib
+import re,struct,array,stat
+import subprocess
+
+import warnings
+warnings.filterwarnings("ignore","tempnam",RuntimeWarning, __name__)
+
+from .config import conf
+from .data import MTU
+from .error import log_runtime,log_loading,log_interactive, Scapy_Exception
+from .base_classes import BasePacketList,BasePacket
+
+
+WINDOWS=sys.platform.startswith("win32")
+
+###########
+## Tools ##
+###########
+
+def get_temp_file(keep=False, autoext=""):
+ import tempfile
+ fd, fname = tempfile.mkstemp(suffix = ".scapy" + autoext)
+ os.close(fd)
+ if not keep:
+ conf.temp_files.append(fname)
+ return fname
+
+def str2bytes(x):
+ """Convert input argument to bytes"""
+ if type(x) is bytes:
+ return x
+ elif type(x) is str:
+ return bytes([ ord(i) for i in x ])
+ else:
+ return str2bytes(str(x))
+
+def chb(x):
+ if type(x) is str:
+ return x
+ else:
+ return chr(x)
+
+def orb(x):
+ if type(x) is str:
+ return ord(x)
+ else:
+ return x
+
+def any2b(x):
+ if type(x) is not str and type(x) is not bytes:
+ try:
+ x=bytes(x)
+ except:
+ x = str(x)
+ if type(x) is str:
+ x = bytes([ ord(i) for i in x ])
+ return x
+
+def sane_color(x):
+ r=""
+ for i in x:
+ j = orb(i)
+ if (j < 32) or (j >= 127):
+ r=r+conf.color_theme.not_printable(".")
+ else:
+ r=r+chb(i)
+ return r
+
+def sane(x):
+ r=""
+ for i in x:
+ if type(x) is str:
+ j = ord(i)
+ else:
+ j = i
+ if (j < 32) or (j >= 127):
+ r=r+"."
+ else:
+ r=r+chb(i)
+ return r
+
+def lhex(x):
+ if type(x) is int:
+ return hex(x)
+ elif type(x) is tuple:
+ return "(%s)" % ", ".join(map(lhex, x))
+ elif type(x) is list:
+ return "[%s]" % ", ".join(map(lhex, x))
+ else:
+ return x
+
+@conf.commands.register
+def hexdump(x):
+ if type(x) is not str and type(x) is not bytes:
+ try:
+ x=bytes(x)
+ except:
+ x = str(x)
+ l = len(x)
+ i = 0
+ while i < l:
+ print("%04x " % i,end = " ")
+ for j in range(16):
+ if i+j < l:
+ print("%02X" % orb(x[i+j]), end = " ")
+ else:
+ print(" ", end = " ")
+ if j%16 == 7:
+ print("", end = " ")
+ print(" ", end = " ")
+ print(sane_color(x[i:i+16]))
+ i += 16
+
+@conf.commands.register
+def linehexdump(x, onlyasc=0, onlyhex=0):
+ if type(x) is not str and type(x) is not bytes:
+ try:
+ x=bytes(x)
+ except:
+ x = str(x)
+ l = len(x)
+ if not onlyasc:
+ for i in range(l):
+ print("%02X" % orb(x[i]), end = " ")
+ print("", end = " ")
+ if not onlyhex:
+ print(sane_color(x))
+
+def chexdump(x):
+ if type(x) is not str and type(x) is not bytes:
+ try:
+ x=bytes(x)
+ except:
+ x = str(x)
+ print(", ".join(map(lambda x: "%#04x"%orb(x), x)))
+
+def hexstr(x, onlyasc=0, onlyhex=0):
+ s = []
+ if not onlyasc:
+ s.append(" ".join(map(lambda x:"%02x"%orb(x), x)))
+ if not onlyhex:
+ s.append(sane(x))
+ return " ".join(s)
+
+
+@conf.commands.register
+def hexdiff(x,y):
+ """Show differences between 2 binary strings"""
+ x=any2b(x)[::-1]
+ y=any2b(y)[::-1]
+ SUBST=1
+ INSERT=1
+ d={}
+ d[-1,-1] = 0,(-1,-1)
+ for j in range(len(y)):
+ d[-1,j] = d[-1,j-1][0]+INSERT, (-1,j-1)
+ for i in range(len(x)):
+ d[i,-1] = d[i-1,-1][0]+INSERT, (i-1,-1)
+
+ for j in range(len(y)):
+ for i in range(len(x)):
+ d[i,j] = min( ( d[i-1,j-1][0]+SUBST*(x[i] != y[j]), (i-1,j-1) ),
+ ( d[i-1,j][0]+INSERT, (i-1,j) ),
+ ( d[i,j-1][0]+INSERT, (i,j-1) ) )
+
+
+ backtrackx = []
+ backtracky = []
+ i=len(x)-1
+ j=len(y)-1
+ while not (i == j == -1):
+ i2,j2 = d[i,j][1]
+ backtrackx.append(x[i2+1:i+1])
+ backtracky.append(y[j2+1:j+1])
+ i,j = i2,j2
+
+
+
+ x = y = i = 0
+ colorize = { 0: lambda x:x,
+ -1: conf.color_theme.left,
+ 1: conf.color_theme.right }
+
+ dox=1
+ doy=0
+ l = len(backtrackx)
+ while i < l:
+ separate=0
+ linex = backtrackx[i:i+16]
+ liney = backtracky[i:i+16]
+ xx = sum(len(k) for k in linex)
+ yy = sum(len(k) for k in liney)
+ if dox and not xx:
+ dox = 0
+ doy = 1
+ if dox and linex == liney:
+ doy=1
+
+ if dox:
+ xd = y
+ j = 0
+ while not linex[j]:
+ j += 1
+ xd -= 1
+ print(colorize[doy-dox]("%04x" % xd), end = " ")
+ x += xx
+ line=linex
+ else:
+ print(" ", end = " ")
+ if doy:
+ yd = y
+ j = 0
+ while not liney[j]:
+ j += 1
+ yd -= 1
+ print(colorize[doy-dox]("%04x" % yd), end = " ")
+ y += yy
+ line=liney
+ else:
+ print(" ", end = " ")
+
+ print(" ", end = " ")
+
+ cl = ""
+ for j in range(16):
+ if i+j < l:
+ if line[j]:
+ col = colorize[(linex[j]!=liney[j])*(doy-dox)]
+ print(col("%02X" % line[j][0]), end = " ")
+ if linex[j]==liney[j]:
+ cl += sane_color(line[j])
+ else:
+ cl += col(sane(line[j]))
+ else:
+ print(" ", end = " ")
+ cl += " "
+ else:
+ print(" ", end = " ")
+ if j == 7:
+ print("", end = " ")
+
+
+ print(" ",cl)
+
+ if doy or not yy:
+ doy=0
+ dox=1
+ i += 16
+ else:
+ if yy:
+ dox=0
+ doy=1
+ else:
+ i += 16
+
+
+crc32 = zlib.crc32
+
+if struct.pack("H",1) == b"\x00\x01": # big endian
+ def checksum(pkt):
+ if len(pkt) % 2 == 1:
+ pkt += b"\0"
+ s = sum(array.array("H", pkt))
+ s = (s >> 16) + (s & 0xffff)
+ s += s >> 16
+ s = ~s
+ return s & 0xffff
+else:
+ def checksum(pkt):
+ if len(pkt) % 2 == 1:
+ pkt += b"\0"
+ s = sum(array.array("H", pkt))
+ s = (s >> 16) + (s & 0xffff)
+ s += s >> 16
+ s = ~s
+ return (((s>>8)&0xff)|s<<8) & 0xffff
+
+def warning(x):
+ log_runtime.warning(x)
+
+def mac2str(mac):
+ #return "".join(map(lambda x: chr(int(x,16)), mac.split(":")))
+ if type(mac) != str:
+ mac = mac.decode('ascii')
+ return b''.join([ bytes([int(i, 16)]) for i in mac.split(":") ])
+
+def str2mac(s):
+ return ("%02x:"*6)[:-1] % tuple(s)
+
+def strxor(x,y):
+ #return "".join(map(lambda i,j:chr(ord(i)^ord(j)),x,y))
+ return bytes([ i[0] ^ i[1] for i in zip(x,y) ] )
+
+# Workarround bug 643005 : https://sourceforge.net/tracker/?func=detail&atid=105470&aid=643005&group_id=5470
+try:
+ socket.inet_aton("255.255.255.255")
+except socket.error:
+ def inet_aton(x):
+ if x == "255.255.255.255":
+ return b"\xff"*4
+ else:
+ return socket.inet_aton(x)
+else:
+ inet_aton = socket.inet_aton
+
+inet_ntoa = socket.inet_ntoa
+try:
+ inet_ntop = socket.inet_ntop
+ inet_pton = socket.inet_pton
+except AttributeError:
+ from scapy.pton_ntop import *
+ log_loading.info("inet_ntop/pton functions not found. Python IPv6 support not present")
+
+
+def atol(x):
+ try:
+ ip = inet_aton(x)
+ except socket.error:
+ ip = inet_aton(socket.gethostbyname(x))
+ return struct.unpack("!I", ip)[0]
+def ltoa(x):
+ return inet_ntoa(struct.pack("!I", x&0xffffffff))
+
+def itom(x):
+ return (0xffffffff00000000>>x)&0xffffffff
+
+def do_graph(graph,prog=None,format='png',target=None,string=False,options=None, figsize = (12, 12), **kargs):
+ """do_graph(graph, prog=conf.prog.dot, format="png",
+ target=None, options=None, string=False):
+ if networkx library is available and graph is instance of Graph, use networkx.draw
+
+ string: if not False, simply return the graph string
+ graph: GraphViz graph description
+ format: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option. Ignored if target==None
+ target: filename. If None uses matplotlib to display
+ prog: which graphviz program to use
+ options: options to be passed to prog"""
+
+ from scapy.arch import NETWORKX
+ if NETWORKX:
+ import networkx as nx
+
+ if NETWORKX and isinstance(graph, nx.Graph):
+ nx.draw(graph, with_labels = True, edge_color = '0.75', **kargs)
+ else: # otherwise use dot as in scapy 2.x
+ if string:
+ return graph
+ if prog is None:
+ prog = conf.prog.dot
+
+ if not target or not format:
+ format = 'png'
+ format = "-T %s" % format
+
+ p = subprocess.Popen("%s %s %s" % (prog,options or "", format or ""), shell = True, stdin = subprocess.PIPE, stdout = subprocess.PIPE)
+ w, r = p.stdin, p.stdout
+ w.write(graph.encode('utf-8'))
+ w.close()
+ if target:
+ with open(target, 'wb') as f:
+ f.write(r.read())
+ else:
+ try:
+ import matplotlib.image as mpimg
+ import matplotlib.pyplot as plt
+ plt.figure(figsize = figsize)
+ plt.axis('off')
+ return plt.imshow(mpimg.imread(r, format = format), **kargs)
+
+ except ImportError:
+ warning('matplotlib.image required for interactive graph viewing. Use target option to write to a file')
+
+_TEX_TR = {
+ "{":"{\\tt\\char123}",
+ "}":"{\\tt\\char125}",
+ "\\":"{\\tt\\char92}",
+ "^":"\\^{}",
+ "$":"\\$",
+ "#":"\\#",
+ "~":"\\~",
+ "_":"\\_",
+ "&":"\\&",
+ "%":"\\%",
+ "|":"{\\tt\\char124}",
+ "~":"{\\tt\\char126}",
+ "<":"{\\tt\\char60}",
+ ">":"{\\tt\\char62}",
+ }
+
+def tex_escape(x):
+ s = ""
+ for c in x:
+ s += _TEX_TR.get(c,c)
+ return s
+
+def colgen(*lstcol,**kargs):
+ """Returns a generator that mixes provided quantities forever
+ trans: a function to convert the three arguments into a color. lambda x,y,z:(x,y,z) by default"""
+ if len(lstcol) < 2:
+ lstcol *= 2
+ trans = kargs.get("trans", lambda x,y,z: (x,y,z))
+ while 1:
+ for i in range(len(lstcol)):
+ for j in range(len(lstcol)):
+ for k in range(len(lstcol)):
+ if i != j or j != k or k != i:
+ yield trans(lstcol[(i+j)%len(lstcol)],lstcol[(j+k)%len(lstcol)],lstcol[(k+i)%len(lstcol)])
+
+def incremental_label(label="tag%05i", start=0):
+ while True:
+ yield label % start
+ start += 1
+
+#########################
+#### Enum management ####
+#########################
+
+class EnumElement:
+ _value=None
+ def __init__(self, key, value):
+ self._key = key
+ self._value = value
+ def __repr__(self):
+ return "<%s %s[%r]>" % (self.__dict__.get("_name", self.__class__.__name__), self._key, self._value)
+ def __getattr__(self, attr):
+ return getattr(self._value, attr)
+ def __str__(self):
+ return self._key
+ def __eq__(self, other):
+ #return self._value == int(other)
+ return self._value == hash(other)
+ def __hash__(self):
+ return self._value
+
+
+class Enum_metaclass(type):
+ element_class = EnumElement
+ def __new__(cls, name, bases, dct):
+ rdict={}
+ for k,v in dct.items():
+ if type(v) is int:
+ v = cls.element_class(k,v)
+ dct[k] = v
+ rdict[v] = k
+ dct["__rdict__"] = rdict
+ return super(Enum_metaclass, cls).__new__(cls, name, bases, dct)
+ def __getitem__(self, attr):
+ return self.__rdict__[attr]
+ def __contains__(self, val):
+ return val in self.__rdict__
+ def get(self, attr, val=None):
+ return self._rdict__.get(attr, val)
+ def __repr__(self):
+ return "<%s>" % self.__dict__.get("name", self.__name__)
+
+
+
+###################
+## Object saving ##
+###################
+
+
+def export_object(obj):
+ import dill as pickle
+ import base64
+ return base64.b64encode(gzip.zlib.compress(pickle.dumps(obj,4),9)).decode('utf-8')
+
+
+def import_object(obj):
+ import dill as pickle
+ import base64
+# if obj is None:
+# obj = sys.stdin.read().strip().encode('utf-8')
+ if obj is str:
+ obj = obj.strip().encode('utf-8')
+ return pickle.loads(gzip.zlib.decompress(base64.b64decode(obj)))
+
+
+def save_object(fname, obj):
+ import dill as pickle
+ pickle.dump(obj,gzip.open(fname,"wb"))
+
+def load_object(fname):
+ import dill as pickle
+ return pickle.load(gzip.open(fname,"rb"))
+
+@conf.commands.register
+def corrupt_bytes(s, p=0.01, n=None):
+ """Corrupt a given percentage or number of bytes from bytes"""
+ s = str2bytes(s)
+ s = array.array("B",s)
+ l = len(s)
+ if n is None:
+ n = max(1,int(l*p))
+ for i in random.sample(range(l), n):
+ s[i] = (s[i]+random.randint(1,255))%256
+ return s.tobytes()
+
+@conf.commands.register
+def corrupt_bits(s, p=0.01, n=None):
+ """Flip a given percentage or number of bits from bytes"""
+ s = str2bytes(s)
+ s = array.array("B",s)
+ l = len(s)*8
+ if n is None:
+ n = max(1, int(l*p))
+ for i in random.sample(range(l), n):
+ s[i//8] ^= 1 << (i%8)
+ return s.tobytes()
+
+
+#############################
+## pcap capture file stuff ##
+#############################
+
+@conf.commands.register
+def wrpcap(filename, pkt, *args, **kargs):
+ """Write a list of packets to a pcap file
+gz: set to 1 to save a gzipped capture
+linktype: force linktype value
+endianness: "<" or ">", force endianness"""
+ with PcapWriter(filename, *args, **kargs) as pcap:
+ pcap.write(pkt)
+
+@conf.commands.register
+def rdpcap(filename, count=-1):
+ """Read a pcap file and return a packet list
+count: read only <count> packets"""
+ with PcapReader(filename) as pcap:
+ return pcap.read_all(count=count)
+
+class RawPcapReader:
+ """A stateful pcap reader. Each packet is returned as bytes"""
+ def __init__(self, filename):
+ self.filename = filename
+ try:
+ if not stat.S_ISREG(os.stat(filename).st_mode):
+ raise IOError("GZIP detection works only for regular files")
+ self.f = gzip.open(filename,"rb")
+ magic = self.f.read(4)
+ except IOError:
+ self.f = open(filename,"rb")
+ magic = self.f.read(4)
+ if magic == b"\xa1\xb2\xc3\xd4": #big endian
+ self.endian = ">"
+ self.reader = _RawPcapOldReader(self.f, self.endian)
+ elif magic == b"\xd4\xc3\xb2\xa1": #little endian
+ self.endian = "<"
+ self.reader = _RawPcapOldReader(self.f, self.endian)
+ elif magic == b"\x0a\x0d\x0d\x0a": #PcapNG
+ self.reader = _RawPcapNGReader(self.f)
+ else:
+ raise Scapy_Exception("Not a pcap capture file (bad magic)")
+
+ def __enter__(self):
+ return self.reader
+
+ def __exit__(self, exc_type, exc_value, tracback):
+ self.close()
+
+ def __iter__(self):
+ return self.reader.__iter__()
+
+ def dispatch(self, callback):
+ """call the specified callback routine for each packet read
+
+ This is just a convienience function for the main loop
+ that allows for easy launching of packet processing in a
+ thread.
+ """
+ for p in self:
+ callback(p)
+
+ def read_all(self,count=-1):
+ """return a list of all packets in the pcap file
+ """
+ res=[]
+ while count != 0:
+ count -= 1
+ p = self.read_packet()
+ if p is None:
+ break
+ res.append(p)
+ return res
+
+ def recv(self, size=MTU):
+ """ Emulate a socket
+ """
+ return self.read_packet(size)[0]
+
+ def fileno(self):
+ return self.f.fileno()
+
+ def close(self):
+ return self.f.close()
+
+ def read_packet(self, size = MTU):
+ return self.reader.read_packet(size)
+
+def align32(n):
+ return n + (4 - n % 4) % 4
+
+class _RawPcapNGReader:
+ def __init__(self, filep):
+ self.filep = filep
+ self.filep.seek(0, 0)
+ self.endian = '<'
+ self.tsresol = 6
+ self.linktype = None
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ """implement the iterator protocol on a set of packets in a pcapng file"""
+ pkt = self.read_packet()
+ if pkt == None:
+ raise StopIteration
+ return pkt
+
+ def read_packet(self, size = MTU):
+ while True:
+ buf = self._read_bytes(4, check = False)
+ if len(buf) == 0:
+ return None
+ elif len(buf) != 4:
+ raise IOError("PacketNGReader: Premature end of file")
+ block_type, = struct.unpack(self.endian + 'i', buf)
+ if block_type == 168627466: #Section Header b'\x0a\x0d\x0d\x0a'
+ self.read_section_header()
+ elif block_type == 1:
+ self.read_interface_description()
+ elif block_type == 6:
+ return self.read_enhanced_packet(size)
+ else:
+ warning("PacketNGReader: Unparsed block type %d/#%x" % (block_type, block_type))
+ self.read_generic_block()
+
+ def _read_bytes(self, n, check = True):
+ buf = self.filep.read(n)
+ if check and len(buf) < n:
+ raise IOError("PacketNGReader: Premature end of file")
+ return buf
+
+ def read_generic_block(self):
+ block_length, = struct.unpack(self.endian + 'I', self._read_bytes(4))
+ self._read_bytes(block_length - 12)
+ self._check_length(block_length)
+
+ def read_section_header(self):
+ buf = self._read_bytes(16)
+ if buf[4:8] == b'\x1a\x2b\x3c\x4d':
+ self.endian = '>'
+ elif buf[4:8] == b'\x4d\x3c\x2b\x1a':
+ self.endian = '<'
+ else:
+ raise Scapy_Exception('Cannot read byte order value')
+ block_length, _, major_version, minor_version, section_length = struct.unpack(self.endian + 'IIHHi', buf)
+ options = self._read_bytes(block_length - 24)
+ if options:
+ opt = self.parse_options(options)
+ for i in opt.keys():
+ if not i & (0b1 << 15):
+ warning("PcapNGReader: Unparsed option %d/#%x in section header" % (i, i))
+ self._check_length(block_length)
+
+ def read_interface_description(self):
+ buf = self._read_bytes(12)
+ block_length, self.linktype, reserved, self.snaplen = struct.unpack(self.endian + 'IHHI', buf)
+ options = self._read_bytes(block_length - 20)
+ if options:
+ opt = self.parse_options(options)
+ for i in opt.keys():
+ if 9 in opt:
+ self.tsresol = opt[9][0]
+ elif not i & (0b1 << 15):
+ warning("PcapNGReader: Unparsed option %d/#%x in enhanced packet block" % (i, i))
+ try:
+ self.LLcls = conf.l2types[self.linktype]
+ except KeyError:
+ warning("RawPcapReader: unknown LL type [%i]/[%#x]. Using Raw packets" % (self.linktype,self.linktype))
+ self.LLcls = conf.raw_layer
+
+ self._check_length(block_length)
+
+ def read_enhanced_packet(self, size = MTU):
+ buf = self._read_bytes(24)
+ block_length, interface, ts_high, ts_low, caplen, wirelen = struct.unpack(self.endian + 'IIIIII', buf)
+ timestamp = (ts_high << 32) + ts_low
+
+ pkt = self._read_bytes(align32(caplen))[:caplen]
+ options = self._read_bytes(block_length - align32(caplen) - 32)
+ if options:
+ opt = self.parse_options(options)
+ for i in opt.keys():
+ if not i & (0b1 << 15):
+ warning("PcapNGReader: Unparsed option %d/#%x in enhanced packet block" % (i, i))
+ self._check_length(block_length)
+ return pkt[:MTU], (self.parse_sec(timestamp), self.parse_usec(timestamp), wirelen)
+
+ def parse_sec(self, t):
+ if self.tsresol & 0b10000000:
+ return t >> self.tsresol
+ else:
+ return t // pow(10, self.tsresol)
+
+ def parse_usec(self, t):
+ if self.tsresol & 0b10000000:
+ return t & (1 << self.tsresol) - 1
+ else:
+ return t % pow(10, self.tsresol)
+
+ def parse_options(self, opt):
+ buf = opt
+ options = {}
+ while buf:
+ opt_type, opt_len = struct.unpack(self.endian + 'HH', buf[:4])
+ if opt_type == 0:
+ return options
+ options[opt_type] = buf[4:4 + opt_len]
+ buf = buf[ 4 + align32(opt_len):]
+ return options
+
+ def _check_length(self, block_length):
+ check_length, = struct.unpack(self.endian + 'I', self._read_bytes(4))
+ if check_length != block_length:
+ raise Scapy_Exception('Block length values are not equal')
+
+class _RawPcapOldReader:
+ def __init__(self, filep, endianness):
+ self.endian = endianness
+ self.f = filep
+ hdr = self.f.read(20)
+ if len(hdr)<20:
+ raise Scapy_Exception("Invalid pcap file (too short)")
+ vermaj,vermin,tz,sig,snaplen,linktype = struct.unpack(self.endian+"HHIIII",hdr)
+
+ self.linktype = linktype
+ try:
+ self.LLcls = conf.l2types[self.linktype]
+ except KeyError:
+ warning("RawPcapReader: unknown LL type [%i]/[%#x]. Using Raw packets" % (self.linktype,self.linktype))
+ self.LLcls = conf.raw_layer
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ """implement the iterator protocol on a set of packets in a pcap file"""
+ pkt = self.read_packet()
+ if pkt == None:
+ raise StopIteration
+ return pkt
+
+ def read_packet(self, size=MTU):
+ """return a single packet read from the file
+ bytes, (sec, #timestamp seconds
+ usec, #timestamp microseconds
+ wirelen) #actual length of packet
+ returns None when no more packets are available
+ """
+ hdr = self.f.read(16)
+ if len(hdr) < 16:
+ return None
+ sec,usec,caplen,wirelen = struct.unpack(self.endian+"IIII", hdr)
+ s = self.f.read(caplen)[:MTU]
+ return s,(sec,usec,wirelen) # caplen = len(s)
+
+
class PcapReader(RawPcapReader):
    """Iterator that dissects packets from a pcap file into scapy objects.

    Wraps RawPcapReader: each raw record is fed to the link-layer class
    and stamped with .time and .wirelen.
    """

    def __init__(self, filename):
        RawPcapReader.__init__(self, filename)

    def __enter__(self):
        return self

    def __iter__(self):
        return self

    def __next__(self):
        """Implement the iterator protocol on the packets of a pcap file."""
        pkt = self.read_packet()
        if pkt is None:
            raise StopIteration
        return pkt

    def read_packet(self, size=MTU):
        """Read and dissect one packet; return None at EOF.

        Dissection errors fall back to conf.raw_layer unless
        conf.debug_dissector is set; KeyboardInterrupt always propagates.
        """
        rp = RawPcapReader.read_packet(self, size)
        if rp is None:
            return None
        s, (sec, usec, wirelen) = rp
        try:
            # BUG FIX: was `self.reader.LLcls(s)` -- this class never sets a
            # `reader` attribute, so every call raised AttributeError. The
            # link-layer class lives on self (set by RawPcapReader.__init__).
            p = self.LLcls(s)
        except KeyboardInterrupt:
            raise
        except:
            if conf.debug_dissector:
                raise
            p = conf.raw_layer(s)
        p.time = sec + 0.000001 * usec
        p.wirelen = wirelen
        return p

    def read_all(self, count=-1):
        """Read up to `count` packets (all when negative) as a PacketList."""
        res = RawPcapReader.read_all(self, count)
        import scapy.plist
        return scapy.plist.PacketList(res, name=os.path.basename(self.filename))

    def recv(self, size=MTU):
        return self.read_packet(size)
+
+
class RawPcapWriter:
    """A stream PCAP writer with more control than wrpcap()"""

    def __init__(self, filename, linktype=None, gz=False, endianness="", append=False, sync=False):
        """
        linktype: force linktype to a given value. If None, linktype is taken
                  from the first written packet
        gz: compress the capture on the fly
        endianness: force an endianness (little:"<", big:">"). Default is native
        append: append packets to the capture file instead of truncating it
        sync: do not buffer writes to the capture file
        """
        self.linktype = linktype
        self.header_present = 0
        self.append = append
        self.gz = gz
        self.endian = endianness
        self.filename = filename
        self.sync = sync
        bufsz = 0 if sync else 4096
        # third open() argument: compression level for gzip, buffer size otherwise
        self.f = [open, gzip.open][gz](filename, append and "ab" or "wb", gz and 9 or bufsz)

    def fileno(self):
        return self.f.fileno()

    def _write_header(self, pkt):
        self.header_present = 1

        if self.append:
            # Even if prone to race conditions, this seems to be the
            # safest way to tell whether the header is already present
            # because we have to handle compressed streams that
            # are not as flexible as basic files
            g = [open, gzip.open][self.gz](self.filename, "rb")
            try:
                has_data = bool(g.read(16))
            finally:
                # BUG FIX: the probe handle was never closed (leaked on
                # every append-mode header check)
                g.close()
            if has_data:
                return

        self.f.write(struct.pack(self.endian + "IHHIIII", 0xa1b2c3d4,
                                 2, 4, 0, 0, MTU, self.linktype))
        self.f.flush()

    def write(self, pkt):
        """Accepts either a single raw packet (bytes) or an iterable of
        packets to be written to the dumpfile."""
        if not self.header_present:
            self._write_header(pkt)
        if type(pkt) is bytes:
            self._write_packet(pkt)
        else:
            for p in pkt:
                self._write_packet(p)

    def _write_packet(self, packet, sec=None, usec=None, caplen=None, wirelen=None):
        """Write a single record; missing timestamps default to now."""
        if caplen is None:
            caplen = len(packet)
        if wirelen is None:
            wirelen = caplen
        if sec is None or usec is None:
            t = time.time()
            it = int(t)
            if sec is None:
                sec = it
            if usec is None:
                usec = int(round((t - it) * 1000000))
        self.f.write(struct.pack(self.endian + "IIII", sec, usec, caplen, wirelen))
        self.f.write(packet)
        if self.gz and self.sync:
            self.f.flush()

    def flush(self):
        return self.f.flush()

    def close(self):
        return self.f.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tracback):
        self.flush()
        self.close()
+
+
class PcapWriter(RawPcapWriter):
    """PCAP writer that accepts scapy packets instead of raw bytes."""

    def _write_header(self, pkt):
        if self.linktype is None:
            # derive the linktype from the first packet's class
            if type(pkt) is list or type(pkt) is tuple or isinstance(pkt, BasePacketList):
                pkt = pkt[0]
            try:
                self.linktype = conf.l2types[pkt.__class__]
            except KeyError:
                warning("PcapWriter: unknown LL type for %s. Using type 1 (Ethernet)" % pkt.__class__.__name__)
                self.linktype = 1
        RawPcapWriter._write_header(self, pkt)

    def _write_packet(self, packet):
        try:
            sec = int(packet.time)
            usec = int(round((packet.time - sec) * 1000000))
            raw = bytes(packet)
            RawPcapWriter._write_packet(self, raw, sec, usec, len(raw), len(raw))
        except Exception as e:
            log_interactive.error(e)

    def write(self, pkt):
        """Accepts either a single packet or an iterable of packets to be
        written to the dumpfile."""
        if not self.header_present:
            self._write_header(pkt)
        if isinstance(pkt, BasePacket):
            self._write_packet(pkt)
        else:
            for p in pkt:
                self._write_packet(p)
+
+
re_extract_hexcap = re.compile("^((0x)?[0-9a-fA-F]{2,}[ :\t]{,3}|) *(([0-9a-fA-F]{2} {,2}){,16})")

def import_hexcap():
    """Read a hex dump from stdin until EOF and return the raw bytes.

    Accepts typical hexdump layouts: an optional offset column followed by
    up to 16 space-separated hex byte pairs per line.
    """
    p = ""
    try:
        while 1:
            # BUG FIX: raw_input() does not exist in Python 3 -- use input()
            l = input().strip()
            try:
                p += re_extract_hexcap.match(l).groups()[2]
            except:
                warning("Parsing error during hexcap")
                continue
    except EOFError:
        pass

    p = p.replace(" ", "")
    # BUG FIX: str.decode("hex") was removed in Python 3 -- use bytes.fromhex()
    return bytes.fromhex(p)
+
+
+
@conf.commands.register
def wireshark(pktlist, *args):
    """Dump `pktlist` to a temporary pcap file and open it in wireshark.

    Extra positional arguments are forwarded to the wireshark command
    line; the viewer is launched asynchronously.
    """
    capture_file = get_temp_file()
    wrpcap(capture_file, pktlist)
    subprocess.Popen([conf.prog.wireshark, "-r", capture_file] + list(args))
+
@conf.commands.register
def tdecode(pkt, *args):
    """Run tshark to decode and display the packet. Uses -V when no
    extra arguments are given."""
    extra = list(args) if args else ["-V"]
    fname = get_temp_file()
    wrpcap(fname, [pkt])
    subprocess.call(["tshark", "-r", fname] + extra)
+
@conf.commands.register
def hexedit(x):
    """Run the external hex editor (conf.prog.hexedit) on a packet or
    bytes and return the edited content."""
    scratch = get_temp_file()
    with open(scratch, "wb") as f:
        f.write(bytes(x))
    subprocess.call([conf.prog.hexedit, scratch])
    with open(scratch, "rb") as f:
        return f.read()
+
def __make_table(yfmtfunc, fmtfunc, endline, items, fxyz, sortx=None, sorty=None, seplinefunc=None):
    """Render `items` as a 2-D table on stdout.

    fxyz maps each (a, b) item to an (x, y, z) triple: column label, row
    label and cell value. sortx/sorty are old-style cmp functions ordering
    the column/row labels; seplinefunc, when given, draws separator lines.
    """
    import functools

    vx = {}
    vy = {}
    vz = {}
    vxf = {}
    max_length = 0
    for record in items:
        xx, yy, zz = map(str, fxyz(record[0], record[1]))
        max_length = max(len(yy), max_length)
        vx[xx] = max(vx.get(xx, 0), len(xx), len(zz))
        vy[yy] = None
        vz[(xx, yy)] = zz

    vxk = list(vx.keys())
    vyk = list(vy.keys())
    # BUG FIX: Python 3's list.sort() no longer accepts a positional cmp
    # function -- wrap the caller-supplied comparators with cmp_to_key.
    if sortx:
        vxk.sort(key=functools.cmp_to_key(sortx))
    else:
        try:
            vxk.sort(key=lambda x: atol(x))
        except:
            vxk.sort()
    if sorty:
        vyk.sort(key=functools.cmp_to_key(sorty))
    else:
        try:
            vyk.sort(key=lambda x: atol(x))
        except:
            vyk.sort()

    if seplinefunc:
        sepline = seplinefunc(max_length, [vx[x] for x in vxk])
        print(sepline)

    fmt = yfmtfunc(max_length)
    print(fmt % "", end=" ")
    for x in vxk:
        vxf[x] = fmtfunc(vx[x])
        print(vxf[x] % x, end=" ")
    print(endline)
    if seplinefunc:
        print(sepline)
    for y in vyk:
        print(fmt % y, end=" ")
        for x in vxk:
            print(vxf[x] % vz.get((x, y), "-"), end=" ")
        print(endline)
    if seplinefunc:
        print(sepline)
+
def make_table(*args, **kargs):
    """Print a plain left-aligned table (see __make_table)."""
    __make_table(lambda l: "%%-%is" % l, lambda l: "%%-%is" % l, "", *args, **kargs)
+
def make_lined_table(*args, **kargs):
    """Print a table with '|' column separators and '-' rule lines."""
    __make_table(lambda l: "%%-%is |" % l,
                 lambda l: "%%-%is |" % l,
                 "",
                 seplinefunc=lambda max_length, x: "+".join("-" * (y + 2) for y in [max_length - 1] + x + [-2]),
                 *args, **kargs)
+
def make_tex_table(*args, **kargs):
    """Print a table as LaTeX tabular rows ('&' separators, '\\\\' line ends)."""
    __make_table(lambda l: "%s", lambda l: "& %s", "\\\\",
                 seplinefunc=lambda a, x: "\\hline", *args, **kargs)
+
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/utils6.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/utils6.py
new file mode 100644
index 00000000..d9112aa5
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/utils6.py
@@ -0,0 +1,823 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more informations
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+## Copyright (C) 2005 Guillaume Valadon <guedou@hongo.wide.ad.jp>
+## Arnaud Ebalard <arnaud.ebalard@eads.net>
+
+"""
+Utility functions for IPv6.
+"""
+
+import itertools
+from .config import conf
+from .data import *
+from .utils import *
+
def cmp_to_key(mycmp):
    """Convert an old-style cmp= function into a key= function (local
    backport of functools.cmp_to_key)."""
    class CmpKey(object):
        def __init__(self, obj, *args):
            self.obj = obj
        def __lt__(self, other):
            return mycmp(self.obj, other.obj) < 0
        def __gt__(self, other):
            return mycmp(self.obj, other.obj) > 0
        def __eq__(self, other):
            return mycmp(self.obj, other.obj) == 0
        def __le__(self, other):
            return mycmp(self.obj, other.obj) <= 0
        def __ge__(self, other):
            return mycmp(self.obj, other.obj) >= 0
        def __ne__(self, other):
            return mycmp(self.obj, other.obj) != 0
    return CmpKey
+
+
+
def construct_source_candidate_set(addr, plen, laddr, loname):
    """
    Return the source-address "candidate set" associated with addr/plen.

    laddr is the list of (address, scope, ...) tuples assigned to an
    interface; only the entries whose scope matches the scope of the
    provided prefix are kept. Source selection is then performed on this
    list to pick the best source for destinations under the prefix.
    """
    def cset_sort(x, y):
        # global addresses rank first
        x_global = 1 if in6_isgladdr(x) else 0
        y_global = 1 if in6_isgladdr(y) else 0
        res = y_global - x_global
        if res != 0 or y_global != 1:
            return res
        # two global addresses: a native one wins over 6to4
        if not in6_isaddr6to4(x):
            return -1
        return -res

    candidates = []
    if in6_isgladdr(addr) or in6_isuladdr(addr):
        candidates = [x for x in laddr if x[1] == IPV6_ADDR_GLOBAL]
    elif in6_islladdr(addr):
        candidates = [x for x in laddr if x[1] == IPV6_ADDR_LINKLOCAL]
    elif in6_issladdr(addr):
        candidates = [x for x in laddr if x[1] == IPV6_ADDR_SITELOCAL]
    elif in6_ismaddr(addr):
        if in6_ismnladdr(addr):
            candidates = [('::1', 16, loname)]
        elif in6_ismgladdr(addr):
            candidates = [x for x in laddr if x[1] == IPV6_ADDR_GLOBAL]
        elif in6_ismlladdr(addr):
            candidates = [x for x in laddr if x[1] == IPV6_ADDR_LINKLOCAL]
        elif in6_ismsladdr(addr):
            candidates = [x for x in laddr if x[1] == IPV6_ADDR_SITELOCAL]
    elif addr == '::' and plen == 0:
        candidates = [x for x in laddr if x[1] == IPV6_ADDR_GLOBAL]
    result = [x[0] for x in candidates]
    result.sort(key=cmp_to_key(cset_sort))  # global addresses first
    return result
+
def get_source_addr_from_candidate_set(dst, candidate_set):
    """
    Limited implementation of the source address selection algorithm of
    RFC 3484 section 5, operating on a candidate set of printable source
    addresses for a specific route. Returns the best candidate, or None
    when the set is empty.
    """

    def scope_cmp(a, b):
        """Compare the scopes of two addresses: -1, 0 or 1."""
        scope_mapper = {IPV6_ADDR_GLOBAL: 4,
                        IPV6_ADDR_SITELOCAL: 3,
                        IPV6_ADDR_LINKLOCAL: 2,
                        IPV6_ADDR_LOOPBACK: 1}
        sa = in6_getscope(a)
        if sa == -1:
            sa = IPV6_ADDR_LOOPBACK
        sb = in6_getscope(b)
        if sb == -1:
            sb = IPV6_ADDR_LOOPBACK

        sa = scope_mapper[sa]
        sb = scope_mapper[sb]

        if sa == sb:
            return 0
        if sa > sb:
            return 1
        return -1

    def rfc3484_cmp(source_a, source_b):
        """Limited version of the RFC 3484 source address selection rules;
        returns 1 when source_a is preferred, -1 when source_b is."""

        # Rule 1: Prefer same address
        if source_a == dst:
            return 1
        # BUG FIX: when source_b equals the destination, b must be
        # preferred, i.e. a compares lower; returning 1 here inverted
        # the rule.
        if source_b == dst:
            return -1

        # Rule 2: Prefer appropriate scope
        tmp = scope_cmp(source_a, source_b)
        if tmp == -1:
            if scope_cmp(source_a, dst) == -1:
                return 1
            else:
                return -1
        elif tmp == 1:
            if scope_cmp(source_b, dst) == -1:
                return 1
            else:
                return -1

        # Rules 3-7: cannot be implemented (or do not make sense) here

        # Rule 8: Longest prefix match
        tmp1 = in6_get_common_plen(source_a, dst)
        tmp2 = in6_get_common_plen(source_b, dst)
        if tmp1 > tmp2:
            return 1
        elif tmp2 > tmp1:
            return -1
        return 0

    if not candidate_set:
        # Should not happen
        return None

    candidate_set.sort(key=cmp_to_key(rfc3484_cmp), reverse=True)

    return candidate_set[0]
+
+
def find_ifaddr2(addr, plen, laddr):
    """Pick a suitable source address from `laddr` for destination `addr`.

    laddr is a list of (address, scope, iface) tuples. Returns a printable
    address, or None when destination is unspecified/loopback or no
    same-scope address exists.
    """
    dstAddrType = in6_getAddrType(addr)

    if dstAddrType == IPV6_ADDR_UNSPECIFIED:  # Shouldn't happen as dst addr
        return None

    if dstAddrType == IPV6_ADDR_LOOPBACK:
        return None

    # BUG FIX: the previous code concatenated a list with a map object and
    # called len() on an itertools.chain (both TypeErrors on Python 3), and
    # its [[]] sentinel element broke t[0] indexing. Plain list
    # comprehensions implement the original py2 reduce/filter semantics.
    tmp = [(in6_getAddrType(a[0]), a[0], a[1], a[2]) for a in laddr]
    # NOTE(review): keeps the original '== 0' same-scope test from the
    # py2 code -- confirm the intended mask semantics.
    sameScope = [t for t in tmp
                 if (t[0] & dstAddrType & IPV6_ADDR_SCOPE_MASK) == 0]

    l = len(sameScope)
    if l == 1:  # Only one address for our scope
        return sameScope[0][1]

    elif l > 1:  # Multiple addresses for our scope
        stfAddr = [x for x in sameScope if x[0] & IPV6_ADDR_6TO4]
        nativeAddr = [x for x in sameScope if not (x[0] & IPV6_ADDR_6TO4)]

        if not (dstAddrType & IPV6_ADDR_6TO4):  # destination is not 6to4
            if len(nativeAddr) != 0:
                return nativeAddr[0][1]
            return stfAddr[0][1]
        else:  # Destination is 6to4, try to use source 6to4 addr if any
            if len(stfAddr) != 0:
                return stfAddr[0][1]
            return nativeAddr[0][1]
    else:
        return None
+
# Think before modifying it: for instance, FE::1 does exist and is unicast;
# there are many others like that.
# TODO: integrate Unique Local Addresses
def in6_getAddrType(addr):
    """Classify a printable IPv6 address into IPV6_ADDR_* type flags."""
    naddr = inet_pton(socket.AF_INET6, addr)
    paddr = inet_ntop(socket.AF_INET6, naddr)  # normalize
    addrType = 0
    if (naddr[0] & 0xE0) == 0x20:
        # assignable global unicast space, 2000::/3 (RFC 3513)
        addrType = (IPV6_ADDR_UNICAST | IPV6_ADDR_GLOBAL)
        if naddr[:2] == b' \x02':  # 2002::/16 -> mark 6to4
            addrType |= IPV6_ADDR_6TO4
    elif naddr[0] == 0xff:
        # multicast: 4th printable character is the scope nibble
        if paddr[3] == '2':
            addrType = (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_MULTICAST)
        else:
            # 'e' (global) and every other scope are both reported as
            # global multicast, as in the original code
            addrType = (IPV6_ADDR_GLOBAL | IPV6_ADDR_MULTICAST)
    elif naddr[0] == 0xfe and (int(paddr[2], 16) & 0xC) == 0x8:
        # fe80::/10 link-local unicast
        addrType = (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL)
    elif paddr == "::1":
        addrType = IPV6_ADDR_LOOPBACK
    elif paddr == "::":
        addrType = IPV6_ADDR_UNSPECIFIED
    else:
        # everything else is global unicast (RFC 3513), including the
        # deprecated (RFC 3879) site-local range
        addrType = (IPV6_ADDR_GLOBAL | IPV6_ADDR_UNICAST)

    return addrType
+
def in6_mactoifaceid(mac, ulbit=None):
    """
    Compute the modified EUI-64 interface ID for a printable MAC address.

    The U/L bit of the result is by default the inverse of the MAC's U/L
    bit; pass ulbit=0 or ulbit=1 to force a value. Returns an upper-case
    printable ID, or None on malformed input.
    """
    if len(mac) != 17:
        return None
    hexdigits = "".join(mac.split(':'))
    if len(hexdigits) != 12:
        return None
    first = int(hexdigits[0:2], 16)
    if ulbit is None or ulbit not in (0, 1):
        # invert the MAC's U/L bit (bit 0x02 of the first octet)
        ulbit = [1, '-', 0][first & 0x02]
    ulbit *= 2
    first = "%.02x" % ((first & 0xFD) | ulbit)
    eui64 = (first + hexdigits[2:4] + ":" + hexdigits[4:6] +
             "FF:FE" + hexdigits[6:8] + ":" + hexdigits[8:12])
    return eui64.upper()
+
def in6_ifaceidtomac(ifaceid):
    """
    Rebuild the MAC address from a modified EUI-64 interface ID given in
    printable format ("XXXX:XXFF:FEXX:XXXX", possibly compressed).
    Returns None on error.
    """
    try:
        raw = inet_pton(socket.AF_INET6, "::" + ifaceid)[8:16]
    except:
        return None
    if raw[3:5] != b'\xff\xfe':
        return None
    first = raw[0]
    # undo the U/L bit inversion applied by in6_mactoifaceid
    ulbit = 2 * [1, '-', 0][first & 0x02]
    octets = bytes([(first & 0xFD) | ulbit]) + raw[1:3] + raw[5:]
    return ":".join("%.02x" % b for b in octets)
+
def in6_addrtomac(addr):
    """Extract the MAC embedded in a modified-EUI-64 IPv6 address
    (printable input); None on error."""
    low64_mask = inet_pton(socket.AF_INET6, "::ffff:ffff:ffff:ffff")
    low64 = in6_and(low64_mask, inet_pton(socket.AF_INET6, addr))
    ifaceid = inet_ntop(socket.AF_INET6, low64)[2:]
    return in6_ifaceidtomac(ifaceid)
+
def in6_addrtovendor(addr):
    """
    Look up the vendor of the MAC embedded in a modified EUI-64 IPv6
    address, using Scapy's manuf database (Wireshark-based). Returns None
    on error and "UNKNOWN" when the vendor cannot be resolved.
    """
    mac = in6_addrtomac(addr)
    if mac is None:
        return None

    res = conf.manufdb._get_manuf(mac)
    # NOTE(review): res appears to be a str, yet it is counted against the
    # bytes b':' -- on Python 3 str.count(bytes) raises TypeError; confirm
    # the return type of _get_manuf.
    if len(res) == 17 and res.count(b':') != 5:  # MAC address, i.e. unknown
        res = "UNKNOWN"

    return res
+
def in6_getLinkScopedMcastAddr(addr, grpid=None, scope=2):
    """
    Generate a Link-Scoped Multicast Address as described in RFC 4489;
    the returned value is printable.

    'addr' is the link-local address whose IID seeds the result. The last
    32 bits are null unless 'grpid' is given (accepted forms:
    b'\\x12\\x34\\x56\\x78', '12345678', 0x12345678 or 305419896). 'scope'
    defaults to link-local (2); RFC 4489 only authorizes scopes <= 2 and
    this is enforced (None returned otherwise). None is also returned on
    any other error.
    """
    if scope not in [0, 1, 2]:
        return None
    try:
        if not in6_islladdr(addr):
            return None
        addr = inet_pton(socket.AF_INET6, addr)
    except:
        warning("in6_getLinkScopedMcastPrefix(): Invalid address provided")
        return None

    iid = addr[8:]

    if grpid is None:
        grpid = b'\x00\x00\x00\x00'
    else:
        # normalize str/bytes group ids down to a 32-bit integer
        if type(grpid) is str:
            grpid = grpid.encode('ascii')
        if type(grpid) is bytes:
            if len(grpid) == 8:
                try:
                    grpid = int(grpid, 16) & 0xffffffff
                except:
                    warning("in6_getLinkScopedMcastPrefix(): Invalid group id provided")
                    return None
            elif len(grpid) == 4:
                try:
                    grpid = struct.unpack("!I", grpid)[0]
                except:
                    warning("in6_getLinkScopedMcastPrefix(): Invalid group id provided")
                    return None
        grpid = struct.pack("!I", grpid)

    # flags 0x3 (T|P set) in the high nibble, scope in the low nibble
    flgscope = struct.pack("B", 0xff & ((0x3 << 4) | scope))
    plen = b'\xff'
    res = b'\x00'
    a = b'\xff' + flgscope + res + plen + iid + grpid

    return inet_ntop(socket.AF_INET6, a)
+
def in6_get6to4Prefix(addr):
    """
    Return the printable /48 6to4 prefix associated with the provided
    IPv4 address, or None on error. No public/private status check is
    performed on the address.
    """
    try:
        v4 = inet_pton(socket.AF_INET, addr)
        return inet_ntop(socket.AF_INET6, b'\x20\x02' + v4 + b'\x00' * 10)
    except:
        return None
+
def in6_6to4ExtractAddr(addr):
    """
    Extract the IPv4 address embedded in a 6to4 address. The input must
    be a 6to4 address; None is returned on error.
    """
    try:
        packed = inet_pton(socket.AF_INET6, addr)
    except:
        return None
    if packed[:2] != b" \x02":  # 0x2002 prefix
        return None
    return inet_ntop(socket.AF_INET, packed[2:6])
+
+
def in6_getLocalUniquePrefix():
    """
    Returns a pseudo-randomly generated Unique Local prefix, following
    the recommendation of Section 3.2.2 of RFC 4193 (hash of an NTP-style
    timestamp and the system's modified EUI-64 identifier).
    """
    # NTP timestamps (RFC 1305): 64-bit fixed point -- 32-bit seconds and
    # 32-bit fraction. Epoch offset handling is intentionally skipped.
    tod = time.time()
    i = int(tod)
    j = int((tod - i) * (2 ** 32))
    tod = struct.pack("!II", i, j)
    # TODO: Add some check regarding system address gathering
    rawmac = get_if_raw_hwaddr(conf.iface6)
    # BUG FIX: was a bytes-join over b"%.02x" % ord(x) -- iterating bytes
    # yields ints on Python 3, so ord() raised TypeError, and
    # in6_mactoifaceid expects a printable str MAC anyway.
    # NOTE(review): assumes get_if_raw_hwaddr returns raw bytes -- confirm
    # (some platforms return a (family, bytes) tuple).
    mac = ":".join("%.02x" % b for b in rawmac)
    # construct modified EUI-64 ID
    eui64 = inet_pton(socket.AF_INET6, '::' + in6_mactoifaceid(mac))[8:]
    # BUG FIX: the `sha` module was removed in Python 3 -- use hashlib
    # (sha.new() is SHA-1).
    import hashlib
    globalid = hashlib.sha1(tod + eui64).digest()[:5]
    return inet_ntop(socket.AF_INET6, b'\xfd' + globalid + b'\x00' * 10)
+
def in6_getRandomizedIfaceId(ifaceid, previous=None):
    """
    Implements the interface ID generation algorithm described in RFC 3041.
    The function takes the Modified EUI-64 interface identifier generated
    as described in RFC 4291 and an optional previous history value (the
    first element of the output of this function). If no previous interface
    identifier is provided, a random one is generated. The function returns
    a tuple containing the randomized interface identifier and the history
    value (for possible future use). Input and output values are provided in
    a "printable" format as depicted below.

    ex:

    >>> in6_getRandomizedIfaceId('20b:93ff:feeb:2d3')
    ('4c61:76ff:f46a:a5f3', 'd006:d540:db11:b092')

    >>> in6_getRandomizedIfaceId('20b:93ff:feeb:2d3',
                                 previous='d006:d540:db11:b092')
    ('fe97:46fe:9871:bd38', 'eeed:d79c:2e3f:62e')
    """
    import hashlib
    if previous is None:
        # draw 8 random bytes as the initial history value
        d = list(range(256))
        s = []
        for i in range(8):
            s.append(random.choice(d))
        previous = bytes(s)
    elif isinstance(previous, str):
        # BUG FIX: a printable history value (as in the docstring example)
        # was concatenated with bytes -- TypeError on Python 3; hash its
        # ASCII form, matching the original py2 behavior.
        previous = previous.encode('ascii')
    s = hashlib.md5(inet_pton(socket.AF_INET6, "::" + ifaceid)[8:] + previous).digest()
    s1, s2 = s[:8], s[8:]
    s1 = bytes([s1[0] | 0x04]) + s1[1:]
    s1 = inet_ntop(socket.AF_INET6, b"\xff" * 8 + s1)[20:]
    s2 = inet_ntop(socket.AF_INET6, b"\xff" * 8 + s2)[20:]
    return (s1, s2)
+
+
# The 85-character digit alphabet of RFC 1924 base-85 notation.
_rfc1924map = [ '0','1','2','3','4','5','6','7','8','9','A','B','C','D','E',
                'F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T',
                'U','V','W','X','Y','Z','a','b','c','d','e','f','g','h','i',
                'j','k','l','m','n','o','p','q','r','s','t','u','v','w','x',
                'y','z','!','#','$','%','&','(',')','*','+','-',';','<','=',
                '>','?','@','^','_','`','{','|','}','~' ]

def in6_ctop(addr):
    """
    Convert an IPv6 address from RFC 1924 Compact Representation to
    printable representation. Returns None on error.
    """
    if len(addr) != 20 or not all(c in _rfc1924map for c in addr):
        return None
    value = 0
    for c in addr:
        value = 85 * value + _rfc1924map.index(c)
    words = []
    for _ in range(4):
        words.append(struct.pack("!I", value % 2 ** 32))
        value //= 2 ** 32
    words.reverse()
    return inet_ntop(socket.AF_INET6, b"".join(words))

def in6_ptoc(addr):
    """
    Convert a printable IPv6 address to RFC 1924 Compact Representation.
    Returns None on error.
    """
    try:
        d = struct.unpack("!IIII", inet_pton(socket.AF_INET6, addr))
    except:
        return None
    rem = (d[0] << 96) + (d[1] << 64) + (d[2] << 32) + d[3]
    digits = []
    while rem:
        digits.append(_rfc1924map[rem % 85])
        rem //= 85
    digits.reverse()
    return "".join(digits)
+
+
def in6_isaddr6to4(x):
    """
    Return True if provided address (in printable format) is a 6to4
    address (being in 2002::/16).
    """
    x = inet_pton(socket.AF_INET6, x)
    # BUG FIX: the prefix was compared against the str ' \x02', which can
    # never equal a bytes slice on Python 3 -- the function always
    # returned False.
    return x[:2] == b' \x02'
+
conf.teredoPrefix = "2001::"  # old one was 3ffe:831f (it is a /32)
conf.teredoServerPort = 3544

def in6_isaddrTeredo(x):
    """
    Return True when the printable address `x` falls under the /32 Teredo
    prefix (conf.teredoPrefix, 2001:: by default), False otherwise.
    """
    ours = inet_pton(socket.AF_INET6, x)[0:4]
    teredo = inet_pton(socket.AF_INET6, conf.teredoPrefix)[0:4]
    return teredo == ours
+
def teredoAddrExtractInfo(x):
    """
    Split a Teredo address into (server IPv4, flags, mapped IPv4,
    mapped port). The obfuscation of the mapped fields is removed.
    No validity checks are performed on the input.
    """
    packed = inet_pton(socket.AF_INET6, x)
    server = inet_ntop(socket.AF_INET, packed[4:8])
    flag = struct.unpack("!H", packed[8:10])[0]
    mappedport = struct.unpack("!H", strxor(packed[10:12], b'\xff' * 2))[0]
    mappedaddr = inet_ntop(socket.AF_INET, strxor(packed[12:16], b'\xff' * 4))
    return server, flag, mappedaddr, mappedport
+
def in6_iseui64(x):
    """
    Return True when the interface identifier of printable address `x`
    matches the modified EUI-64 pattern *::*:*ff:fe*:*.
    """
    eui64 = inet_pton(socket.AF_INET6, '::ff:fe00:0')
    return in6_and(inet_pton(socket.AF_INET6, x), eui64) == eui64
+
def in6_isanycast(x):  # RFC 2526
    """Best-effort test for the reserved subnet-anycast range; only
    implemented for modified EUI-64 interface identifiers (returns 0
    otherwise)."""
    if not in6_iseui64(x):
        # not EUI-64: the anycast ID occupies the last 7 bits of the
        # interface identifier field -- not implemented.
        warning('in6_isanycast(): TODO not EUI-64')
        return 0
    pattern = inet_pton(socket.AF_INET6, '::fdff:ffff:ffff:ff80')
    return in6_and(inet_pton(socket.AF_INET6, x), pattern) == pattern
+
def _in6_bitops(a1, a2, operator=0):
    """Word-wise bit operation (0=OR, 1=AND, 2=XOR) on two addresses in
    network format; returns network format."""
    w1 = struct.unpack('4I', a1)
    w2 = struct.unpack('4I', a2)
    ops = (lambda x, y: x | y,
           lambda x, y: x & y,
           lambda x, y: x ^ y)
    combined = map(ops[operator % len(ops)], w1, w2)
    return b''.join(struct.pack('I', w) for w in combined)

def in6_or(a1, a2):
    """Bit-to-bit OR of two addresses in network format (network-format
    result)."""
    return _in6_bitops(a1, a2, 0)

def in6_and(a1, a2):
    """Bit-to-bit AND of two addresses in network format (network-format
    result)."""
    return _in6_bitops(a1, a2, 1)

def in6_xor(a1, a2):
    """Bit-to-bit XOR of two addresses in network format (network-format
    result)."""
    return _in6_bitops(a1, a2, 2)
+
def in6_cidr2mask(m):
    """
    Return the 16-byte network mask (bitstring) for prefix length `m`,
    e.g. in6_cidr2mask(48) -> b'\\xff'*6 + b'\\x00'*10.
    Raises Scapy_Exception when m is outside [0, 128].
    """
    if m > 128 or m < 0:
        raise Scapy_Exception("value provided to in6_cidr2mask outside [0, 128] domain (%d)" % m)

    words = []
    remaining = m
    for _ in range(4):
        # each 32-bit word gets min(32, remaining) leading one-bits
        words.append(max(0, 2 ** 32 - 2 ** (32 - min(32, remaining))))
        remaining -= 32

    return b"".join(struct.pack('!I', w) for w in words)
+
def in6_getnsma(a):
    """
    Return the link-local solicited-node multicast address for the given
    address. Input and output are both in network format.
    """
    low24 = in6_and(a, inet_pton(socket.AF_INET6, '::ff:ffff'))
    return in6_or(inet_pton(socket.AF_INET6, 'ff02::1:ff00:0'), low24)
+
def in6_getnsmac(a):
    """
    Return the multicast MAC address ('33:33:xx:xx:xx:xx') associated
    with an IPv6 multicast address given in network format.
    """
    last4 = struct.unpack('16B', a)[-4:]
    return '33:33:' + ':'.join('%.2x' % b for b in last4)
+
def in6_getha(prefix):
    """
    Return the printable anycast address associated with all home agents
    on the /64 subnet of `prefix`.
    """
    subnet = in6_and(inet_pton(socket.AF_INET6, prefix), in6_cidr2mask(64))
    ha = in6_or(subnet, inet_pton(socket.AF_INET6, '::fdff:ffff:ffff:fffe'))
    return inet_ntop(socket.AF_INET6, ha)
+
def in6_ptop(s):
    """
    Normalize a printable IPv6 address, returning printable format
    (2001:0db8:0:0::1 -> 2001:db8::1).
    """
    return inet_ntop(socket.AF_INET6, inet_pton(socket.AF_INET6, s))
+
def in6_isincluded(addr, prefix, plen):
    """Return True when printable address `addr` belongs to prefix/plen."""
    packed = inet_pton(socket.AF_INET6, addr)
    mask = in6_cidr2mask(plen)
    net = inet_pton(socket.AF_INET6, prefix)
    return net == in6_and(packed, mask)

def in6_isdocaddr(s):
    """True for the 2001:db8::/32 documentation range (RFC 3849)."""
    return in6_isincluded(s, '2001:db8::', 32)

def in6_islladdr(s):
    """True for allocated link-local unicast space (fe80::/10)."""
    return in6_isincluded(s, 'fe80::', 10)

def in6_issladdr(s):
    """True for site-local space (fec0::/10); deprecated and reserved by
    IANA, kept for historic reasons."""
    return in6_isincluded(s, 'fec0::', 10)

def in6_isuladdr(s):
    """True for Unique Local address space (fc00::/7)."""
    return in6_isincluded(s, 'fc00::', 7)

# TODO: review against the status of Unique Local addresses vs the global
# address space (see RFC 3587).
def in6_isgladdr(s):
    """True for allocated global unicast space (2000::/3). Unique Local
    addresses (fc00::/7) are not part of this space and do not match."""
    return in6_isincluded(s, '2000::', 3)

def in6_ismaddr(s):
    """True for allocated multicast space (ff00::/8)."""
    return in6_isincluded(s, 'ff00::', 8)

def in6_ismnladdr(s):
    """True for node-local multicast space (ff01::/16)."""
    return in6_isincluded(s, 'ff01::', 16)

def in6_ismgladdr(s):
    """True for global multicast space (ff0e::/16)."""
    return in6_isincluded(s, 'ff0e::', 16)

def in6_ismlladdr(s):
    """True for link-local multicast space (ff02::/16)."""
    return in6_isincluded(s, 'ff02::', 16)

def in6_ismsladdr(s):
    """True for site-local multicast space (ff05::/16); deprecated, kept
    for historic reasons."""
    return in6_isincluded(s, 'ff05::', 16)

def in6_isaddrllallnodes(s):
    """True when `s` is the link-local all-nodes multicast address
    (ff02::1)."""
    return inet_pton(socket.AF_INET6, s) == inet_pton(socket.AF_INET6, "ff02::1")

def in6_isaddrllallservers(s):
    """True when `s` is the link-local all-servers multicast address
    (ff02::2)."""
    return inet_pton(socket.AF_INET6, s) == inet_pton(socket.AF_INET6, "ff02::2")
+
def in6_getscope(addr):
    """Return the IPV6_ADDR_* scope constant of a printable address, or
    -1 when the scope cannot be determined."""
    if in6_isgladdr(addr) or in6_isuladdr(addr):
        return IPV6_ADDR_GLOBAL
    if in6_islladdr(addr):
        return IPV6_ADDR_LINKLOCAL
    if in6_issladdr(addr):
        return IPV6_ADDR_SITELOCAL
    if in6_ismaddr(addr):
        # multicast scope from the most specific matching range
        if in6_ismgladdr(addr):
            return IPV6_ADDR_GLOBAL
        if in6_ismlladdr(addr):
            return IPV6_ADDR_LINKLOCAL
        if in6_ismsladdr(addr):
            return IPV6_ADDR_SITELOCAL
        if in6_ismnladdr(addr):
            return IPV6_ADDR_LOOPBACK
        return -1
    if addr == '::1':
        return IPV6_ADDR_LOOPBACK
    return -1
+
def in6_get_common_plen(a, b):
    """
    Return the length in bits of the common prefix of printable IPv6
    addresses `a` and `b` (0..128).
    """
    def first_diff_bit(x, y):
        # index of the first differing bit of two bytes, or 8 if equal
        for bit in range(8):
            mask = 0x80 >> bit
            if (x & mask) != (y & mask):
                return bit
        return 8

    pa = inet_pton(socket.AF_INET6, a)
    pb = inet_pton(socket.AF_INET6, b)
    for i in range(16):
        bits = first_diff_bit(pa[i], pb[i])
        if bits != 8:
            return 8 * i + bits
    return 128
diff --git a/scripts/external_libs/scapy-2.3.1/python3/scapy/volatile.py b/scripts/external_libs/scapy-2.3.1/python3/scapy/volatile.py
new file mode 100644
index 00000000..ed5c26e4
--- /dev/null
+++ b/scripts/external_libs/scapy-2.3.1/python3/scapy/volatile.py
@@ -0,0 +1,685 @@
+## This file is part of Scapy
+## See http://www.secdev.org/projects/scapy for more information
+## Copyright (C) Philippe Biondi <phil@secdev.org>
+## This program is published under a GPLv2 license
+
+"""
+Fields that hold random numbers.
+"""
+
+import random,time,math
+from .base_classes import Net
+from .utils import corrupt_bits,corrupt_bytes
+
+####################
+## Random numbers ##
+####################
+
+
+class RandomEnumeration:
+ """iterate through a sequence in random order.
+ When all the values have been drawn, if forever=1, the drawing is done again.
+    If renewkeys=0, the draw will be in the same order, guaranteeing that the same
+    number will not be drawn again sooner than the length of the sequence
+ def __init__(self, inf, sup, seed=None, forever=1, renewkeys=0):
+ self.forever = forever
+ self.renewkeys = renewkeys
+ self.inf = inf
+ self.rnd = random.Random(seed)
+ self.sbox_size = 256
+
+ self.top = sup-inf+1
+
+ n=0
+ while (1<<n) < self.top:
+ n += 1
+ self.n =n
+
+ self.fs = min(3,(n+1)//2)
+ self.fsmask = 2**self.fs-1
+ self.rounds = max(self.n,3)
+ self.turns = 0
+ self.i = 0
+
+ def __iter__(self):
+ return self
+ def __next__(self):
+ while True:
+ if self.turns == 0 or (self.i == 0 and self.renewkeys):
+ self.cnt_key = self.rnd.randint(0,2**self.n-1)
+ self.sbox = [self.rnd.randint(0,self.fsmask) for k in range(self.sbox_size)]
+ self.turns += 1
+ while self.i < 2**self.n:
+ ct = self.i^self.cnt_key
+ self.i += 1
+ for k in range(self.rounds): # Unbalanced Feistel Network
+ lsb = ct & self.fsmask
+ ct >>= self.fs
+ lsb ^= self.sbox[ct%self.sbox_size]
+ ct |= lsb << (self.n-self.fs)
+
+ if ct < self.top:
+ return self.inf+ct
+ self.i = 0
+ if not self.forever:
+ raise StopIteration
+
+class _MetaVolatile(type):
+ def __init__(cls, name, bases, dct):
+ def special_gen(special_method):
+ def special_wrapper(self):
+ return getattr(getattr(self, "_fix")(), special_method)
+ return special_wrapper
+
+ #This is from scapy2 code. Usage places should be identified and fixed as there is no more __cmp__ in python3
+ # if attr == "__cmp__":
+ # x = self._fix()
+ # def cmp2(y,x=x):
+ # if type(x) != type(y):
+ # return -1
+ # return x.__cmp__(y)
+ # return cmp2
+
+ type.__init__(cls, name, bases, dct)
+ for i in ["__int__", "__repr__", "__str__", "__index__", "__add__", "__radd__", "__bytes__","__mul__","__rmul__"]:
+ setattr(cls, i, property(special_gen(i)))
+
+
+class VolatileValue(metaclass = _MetaVolatile):
+ def __repr__(self):
+ return "<%s>" % self.__class__.__name__
+ def __getattr__(self, attr):
+ if attr == "__setstate__":
+ raise AttributeError("__setstate__")
+ return getattr(self._fix(),attr)
+ def _fix(self):
+ return None
+
+
+class RandField(VolatileValue):
+ pass
+
+class RandNum(RandField):
+ """Instances evaluate to random integers in selected range"""
+ min = 0
+ max = 0
+ def __init__(self, min, max):
+ self.min = min
+ self.max = max
+ def _fix(self):
+ return random.randrange(self.min, self.max+1)
+
+class RandNumGamma(RandField):
+ def __init__(self, alpha, beta):
+ self.alpha = alpha
+ self.beta = beta
+ def _fix(self):
+ return int(round(random.gammavariate(self.alpha, self.beta)))
+
+class RandNumGauss(RandField):
+ def __init__(self, mu, sigma):
+ self.mu = mu
+ self.sigma = sigma
+ def _fix(self):
+ return int(round(random.gauss(self.mu, self.sigma)))
+
+class RandNumExpo(RandField):
+ def __init__(self, lambd, base=0):
+ self.lambd = lambd
+ self.base = base
+ def _fix(self):
+ return self.base+int(round(random.expovariate(self.lambd)))
+
+class RandEnum(RandNum):
+ """Instances evaluate to integer sampling without replacement from the given interval"""
+ def __init__(self, min, max):
+ self.seq = RandomEnumeration(min,max)
+ def _fix(self):
+ return next(self.seq)
+
+class RandByte(RandNum):
+ def __init__(self):
+ RandNum.__init__(self, 0, 2**8-1)
+
+class RandSByte(RandNum):
+ def __init__(self):
+ RandNum.__init__(self, -2**7, 2**7-1)
+
+class RandShort(RandNum):
+ def __init__(self):
+ RandNum.__init__(self, 0, 2**16-1)
+
+class RandSShort(RandNum):
+ def __init__(self):
+ RandNum.__init__(self, -2**15, 2**15-1)
+
+class RandInt(RandNum):
+ def __init__(self):
+ RandNum.__init__(self, 0, 2**32-1)
+
+class RandSInt(RandNum):
+ def __init__(self):
+ RandNum.__init__(self, -2**31, 2**31-1)
+
+class RandLong(RandNum):
+ def __init__(self):
+ RandNum.__init__(self, 0, 2**64-1)
+
+class RandSLong(RandNum):
+ def __init__(self):
+ RandNum.__init__(self, -2**63, 2**63-1)
+
+class RandEnumByte(RandEnum):
+ def __init__(self):
+ RandEnum.__init__(self, 0, 2**8-1)
+
+class RandEnumSByte(RandEnum):
+ def __init__(self):
+ RandEnum.__init__(self, -2**7, 2**7-1)
+
+class RandEnumShort(RandEnum):
+ def __init__(self):
+ RandEnum.__init__(self, 0, 2**16-1)
+
+class RandEnumSShort(RandEnum):
+ def __init__(self):
+ RandEnum.__init__(self, -2**15, 2**15-1)
+
+class RandEnumInt(RandEnum):
+ def __init__(self):
+ RandEnum.__init__(self, 0, 2**32-1)
+
+class RandEnumSInt(RandEnum):
+ def __init__(self):
+ RandEnum.__init__(self, -2**31, 2**31-1)
+
+class RandEnumLong(RandEnum):
+ def __init__(self):
+ RandEnum.__init__(self, 0, 2**64-1)
+
+class RandEnumSLong(RandEnum):
+ def __init__(self):
+ RandEnum.__init__(self, -2**63, 2**63-1)
+
+class RandChoice(RandField):
+ def __init__(self, *args):
+ if not args:
+ raise TypeError("RandChoice needs at least one choice")
+ self._choice = args
+ def _fix(self):
+ return random.choice(self._choice)
+
+class RandString(RandField):
+ def __init__(self, size=None, chars=b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"):
+ if size is None:
+ size = RandNumExpo(0.01)
+ self.size = size
+ self.chars = chars
+ def _fix(self):
+ s = []
+ for i in range(self.size):
+ s.append(random.choice(self.chars))
+ return bytes(s)
+
+class RandBin(RandString):
+ def __init__(self, size=None):
+ #RandString.__init__(self, size, b"".join(map(chr,range(256))))
+ RandString.__init__(self, size, b"".join([bytes([i]) for i in range(256)]))
+
+
+class RandTermString(RandString):
+ def __init__(self, size, term):
+ #RandString.__init__(self, size, b"".join(map(chr,range(1,256))))
+ RandString.__init__(self, size, bytes([i for i in range(1,256)]))
+ self.term = term
+ def _fix(self):
+ return RandString._fix(self)+self.term
+
+
+class RandIP(RandString):
+ def __init__(self, iptemplate="0.0.0.0/0"):
+ self.ip = Net(iptemplate)
+ def _fix(self):
+ return self.ip.choice()
+
+class RandMAC(RandString):
+ def __init__(self, template="*"):
+ template += ":*:*:*:*:*"
+ template = template.split(":")
+ self.mac = ()
+ for i in range(6):
+ if template[i] == "*":
+ v = RandByte()
+ elif "-" in template[i]:
+ x,y = template[i].split("-")
+ v = RandNum(int(x,16), int(y,16))
+ else:
+ v = int(template[i],16)
+ self.mac += (v,)
+ def _fix(self):
+ return "%02x:%02x:%02x:%02x:%02x:%02x" % self.mac
+
+class RandIP6(RandString):
+ def __init__(self, ip6template="**"):
+ self.tmpl = ip6template
+ self.sp = self.tmpl.split(":")
+ for i,v in enumerate(self.sp):
+ if not v or v == "**":
+ continue
+ if "-" in v:
+ a,b = v.split("-")
+ elif v == "*":
+ a=b=""
+ else:
+ a=b=v
+
+ if not a:
+ a = "0"
+ if not b:
+ b = "ffff"
+ if a==b:
+ self.sp[i] = int(a,16)
+ else:
+ self.sp[i] = RandNum(int(a,16), int(b,16))
+ self.variable = "" in self.sp
+ self.multi = self.sp.count("**")
+ def _fix(self):
+ done = 0
+ nbm = self.multi
+ ip = []
+ for i,n in enumerate(self.sp):
+ if n == "**":
+ nbm -= 1
+ remain = 8-(len(self.sp)-i-1)-len(ip)+nbm
+ if "" in self.sp:
+ remain += 1
+ if nbm or self.variable:
+ remain = random.randint(0,remain)
+ for j in range(remain):
+ ip.append("%04x" % random.randint(0,65535))
+ elif n == 0:
+ ip.append("0")
+ elif not n:
+ ip.append("")
+ else:
+ ip.append("%04x" % n)
+ if len(ip) == 9:
+ ip.remove("")
+ if ip[-1] == "":
+ ip[-1] = 0
+ return ":".join(ip)
+
+class RandOID(RandString):
+ def __init__(self, fmt=None, depth=RandNumExpo(0.1), idnum=RandNumExpo(0.01)):
+ self.ori_fmt = fmt
+ if fmt is not None:
+ fmt = fmt.split(".")
+ for i in range(len(fmt)):
+ if "-" in fmt[i]:
+ fmt[i] = tuple(map(int, fmt[i].split("-")))
+ self.fmt = fmt
+ self.depth = depth
+ self.idnum = idnum
+ def __repr__(self):
+ if self.ori_fmt is None:
+ return "<%s>" % self.__class__.__name__
+ else:
+ return "<%s [%s]>" % (self.__class__.__name__, self.ori_fmt)
+ def _fix(self):
+ if self.fmt is None:
+ return ".".join(map(str, [self.idnum for i in range(1+self.depth)]))
+ else:
+ oid = []
+ for i in self.fmt:
+ if i == "*":
+ oid.append(str(self.idnum))
+ elif i == "**":
+ oid += map(str, [self.idnum for i in range(1+self.depth)])
+ elif type(i) is tuple:
+ oid.append(str(random.randrange(*i)))
+ else:
+ oid.append(i)
+ return ".".join(oid)
+
+
+class RandRegExp(RandField):
+ def __init__(self, regexp, lambda_=0.3,):
+ self._regexp = regexp
+ self._lambda = lambda_
+
+ @staticmethod
+ def choice_expand(s): #XXX does not support special sets like (ex ':alnum:')
+ m = ""
+ invert = s and s[0] == "^"
+ while True:
+ p = s.find("-")
+ if p < 0:
+ break
+ if p == 0 or p == len(s)-1:
+ m = "-"
+ if p:
+ s = s[:-1]
+ else:
+ s = s[1:]
+ else:
+ c1 = s[p-1]
+ c2 = s[p+1]
+ rng = "".join(map(chr, range(ord(c1),ord(c2)+1)))
+ s = s[:p-1]+rng+s[p+1:]
+ res = m+s
+ if invert:
+ res = "".join([chr(x) for x in range(256) if chr(x) not in res])
+ return res
+
+ @staticmethod
+ def stack_fix(lst, index):
+ r = ""
+ mul = 1
+ for e in lst:
+ if type(e) is list:
+ if mul != 1:
+ mul = mul-1
+ r += RandRegExp.stack_fix(e[1:]*mul, index)
+ # only the last iteration should be kept for back reference
+ f = RandRegExp.stack_fix(e[1:], index)
+ for i,idx in enumerate(index):
+ if e is idx:
+ index[i] = f
+ r += f
+ mul = 1
+ elif type(e) is tuple:
+ kind,val = e
+ if kind == "cite":
+ r += index[val-1]
+ elif kind == "repeat":
+ mul = val
+
+ elif kind == "choice":
+ if mul == 1:
+ c = random.choice(val)
+ r += RandRegExp.stack_fix(c[1:], index)
+ else:
+ r += RandRegExp.stack_fix([e]*mul, index)
+ mul = 1
+ else:
+ if mul != 1:
+ r += RandRegExp.stack_fix([e]*mul, index)
+ mul = 1
+ else:
+ r += str(e)
+ return r
+
+ def _fix(self):
+ stack = [None]
+ index = []
+ current = stack
+ i = 0
+ ln = len(self._regexp)
+ interp = True
+ while i < ln:
+ c = self._regexp[i]
+ i+=1
+
+ if c == '(':
+ current = [current]
+ current[0].append(current)
+ elif c == '|':
+ p = current[0]
+ ch = p[-1]
+ if type(ch) is not tuple:
+ ch = ("choice",[current])
+ p[-1] = ch
+ else:
+ ch[1].append(current)
+ current = [p]
+ elif c == ')':
+ ch = current[0][-1]
+ if type(ch) is tuple:
+ ch[1].append(current)
+ index.append(current)
+ current = current[0]
+ elif c == '[' or c == '{':
+ current = [current]
+ current[0].append(current)
+ interp = False
+ elif c == ']':
+ current = current[0]
+ choice = RandRegExp.choice_expand("".join(current.pop()[1:]))
+ current.append(RandChoice(*list(choice)))
+ interp = True
+ elif c == '}':
+ current = current[0]
+ num = "".join(current.pop()[1:])
+ e = current.pop()
+ if "," not in num:
+ n = int(num)
+ current.append([current]+[e]*n)
+ else:
+ num_min,num_max = num.split(",")
+ if not num_min:
+ num_min = "0"
+ if num_max:
+ n = RandNum(int(num_min),int(num_max))
+ else:
+ n = RandNumExpo(self._lambda,base=int(num_min))
+ current.append(("repeat",n))
+ current.append(e)
+ interp = True
+ elif c == '\\':
+ c = self._regexp[i]
+ if c == "s":
+ c = RandChoice(" ","\t")
+ elif c in "0123456789":
+ c = ("cite",ord(c)-0x30)
+ current.append(c)
+ i += 1
+ elif not interp:
+ current.append(c)
+ elif c == '+':
+ e = current.pop()
+ current.append([current]+[e]*(int(random.expovariate(self._lambda))+1))
+ elif c == '*':
+ e = current.pop()
+ current.append([current]+[e]*int(random.expovariate(self._lambda)))
+ elif c == '?':
+ if random.randint(0,1):
+ current.pop()
+ elif c == '.':
+ current.append(RandChoice(*[chr(x) for x in range(256)]))
+ elif c == '$' or c == '^':
+ pass
+ else:
+ current.append(c)
+
+ return RandRegExp.stack_fix(stack[1:], index)
+ def __repr__(self):
+ return "<%s [%r]>" % (self.__class__.__name__, self._regexp)
+
+class RandSingularity(RandChoice):
+ pass
+
+class RandSingNum(RandSingularity):
+ @staticmethod
+ def make_power_of_two(end):
+ sign = 1
+ if end == 0:
+ end = 1
+ if end < 0:
+ end = -end
+ sign = -1
+ end_n = int(math.log(end)/math.log(2))+1
+ return set([sign*2**i for i in range(end_n)])
+
+ def __init__(self, mn, mx):
+ sing = set([0, mn, mx, int((mn+mx)/2)])
+ sing |= self.make_power_of_two(mn)
+ sing |= self.make_power_of_two(mx)
+ for i in sing.copy():
+ sing.add(i+1)
+ sing.add(i-1)
+ for i in sing.copy():
+ if not mn <= i <= mx:
+ sing.remove(i)
+ self._choice = list(sing)
+
+
+class RandSingByte(RandSingNum):
+ def __init__(self):
+ RandSingNum.__init__(self, 0, 2**8-1)
+
+class RandSingSByte(RandSingNum):
+ def __init__(self):
+ RandSingNum.__init__(self, -2**7, 2**7-1)
+
+class RandSingShort(RandSingNum):
+ def __init__(self):
+ RandSingNum.__init__(self, 0, 2**16-1)
+
+class RandSingSShort(RandSingNum):
+ def __init__(self):
+ RandSingNum.__init__(self, -2**15, 2**15-1)
+
+class RandSingInt(RandSingNum):
+ def __init__(self):
+ RandSingNum.__init__(self, 0, 2**32-1)
+
+class RandSingSInt(RandSingNum):
+ def __init__(self):
+ RandSingNum.__init__(self, -2**31, 2**31-1)
+
+class RandSingLong(RandSingNum):
+ def __init__(self):
+ RandSingNum.__init__(self, 0, 2**64-1)
+
+class RandSingSLong(RandSingNum):
+ def __init__(self):
+ RandSingNum.__init__(self, -2**63, 2**63-1)
+
+class RandSingString(RandSingularity): #TODO3
+ def __init__(self):
+ self._choice = [ b"",
+ b"%x",
+ b"%%",
+ b"%s",
+ b"%i",
+ b"%n",
+ b"%x%x%x%x%x%x%x%x%x",
+ b"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ b"%",
+ b"%%%",
+ b"A"*4096,
+ b"\x00"*4096,
+ b"\xff"*4096,
+ b"\x7f"*4096,
+ b"\x80"*4096,
+ b" "*4096,
+ b"\\"*4096,
+ b"("*4096,
+ b"../"*1024,
+ b"/"*1024,
+ b"${HOME}"*512,
+ b" or 1=1 --",
+ b"' or 1=1 --",
+ b'" or 1=1 --',
+ b" or 1=1; #",
+ b"' or 1=1; #",
+ b'" or 1=1; #',
+ b";reboot;",
+ b"$(reboot)",
+ b"`reboot`",
+ b"index.php%00",
+ b"\x00",
+ b"%00",
+ b"\\",
+ b"../../../../../../../../../../../../../../../../../etc/passwd",
+ b"%2e%2e%2f" * 20 + b"etc/passwd",
+ b"%252e%252e%252f" * 20 + b"boot.ini",
+ b"..%c0%af" * 20 + b"etc/passwd",
+ b"..%c0%af" * 20 + b"boot.ini",
+ b"//etc/passwd",
+ br"..\..\..\..\..\..\..\..\..\..\..\..\..\..\..\..\..\boot.ini",
+ b"AUX:",
+ b"CLOCK$",
+ b"COM:",
+ b"CON:",
+ b"LPT:",
+ b"LST:",
+ b"NUL:",
+ b"CON:",
+ br"C:\CON\CON",
+ br"C:\boot.ini",
+ br"\\myserver\share",
+ b"foo.exe:",
+ b"foo.exe\\", ]
+
+
+class RandPool(RandField):
+ def __init__(self, *args):
+ """Each parameter is a volatile object or a couple (volatile object, weight)"""
+ pool = []
+ for p in args:
+ w = 1
+ if type(p) is tuple:
+ p,w = p
+ pool += [p]*w
+ self._pool = pool
+ def _fix(self):
+ r = random.choice(self._pool)
+ return r._fix()
+
+# Automatic timestamp
+
+class AutoTime(VolatileValue):
+ def __init__(self, base=None):
+ if base == None:
+ self.diff = 0
+ else:
+ self.diff = time.time()-base
+ def _fix(self):
+ return time.time()-self.diff
+
+class IntAutoTime(AutoTime):
+ def _fix(self):
+ return int(time.time()-self.diff)
+
+
+class ZuluTime(AutoTime):
+ def __init__(self, diff=0):
+ self.diff=diff
+ def _fix(self):
+ return time.strftime("%y%m%d%H%M%SZ",time.gmtime(time.time()+self.diff))
+
+
+class DelayedEval(VolatileValue):
+ """ Example of usage: DelayedEval("time.time()") """
+ def __init__(self, expr):
+ self.expr = expr
+ def _fix(self):
+ return eval(self.expr)
+
+
+class IncrementalValue(VolatileValue):
+ def __init__(self, start=0, step=1, restart=-1):
+ self.start = self.val = start
+ self.step = step
+ self.restart = restart
+ def _fix(self):
+ v = self.val
+ if self.val == self.restart :
+ self.val = self.start
+ else:
+ self.val += self.step
+ return v
+
+class CorruptedBytes(VolatileValue):
+ def __init__(self, s, p=0.01, n=None):
+ self.s = s
+ self.p = p
+ self.n = n
+ def _fix(self):
+ return corrupt_bytes(self.s, p = self.p, n = self.n)
+
+class CorruptedBits(CorruptedBytes):
+ def _fix(self):
+ return corrupt_bits(self.s, p = self.p, n = self.n)
+
diff --git a/scripts/external_libs/scapy3-0.18-origin.rar b/scripts/external_libs/scapy3-0.18-origin.rar
new file mode 100644
index 00000000..0fda86fa
--- /dev/null
+++ b/scripts/external_libs/scapy3-0.18-origin.rar
Binary files differ
diff --git a/scripts/external_libs/texttable-0.8.4/texttable.py b/scripts/external_libs/texttable-0.8.4/texttable.py
index a2b4df96..2224ad77 100644
--- a/scripts/external_libs/texttable-0.8.4/texttable.py
+++ b/scripts/external_libs/texttable-0.8.4/texttable.py
@@ -147,8 +147,8 @@ TEXT_CODES = {'bold': {'start': '\x1b[1m',
'end': '\x1b[24m'}}
class TextCodesStripper:
- keys = [re.escape(v['start']) for k,v in TEXT_CODES.iteritems()]
- keys += [re.escape(v['end']) for k,v in TEXT_CODES.iteritems()]
+ keys = [re.escape(v['start']) for k,v in TEXT_CODES.items()]
+ keys += [re.escape(v['end']) for k,v in TEXT_CODES.items()]
pattern = re.compile("|".join(keys))
@staticmethod
diff --git a/scripts/find_python.sh b/scripts/find_python.sh
index 9552260b..929e873d 100755
--- a/scripts/find_python.sh
+++ b/scripts/find_python.sh
@@ -25,6 +25,11 @@ function find_python {
}
if [ -z "$PYTHON" ]; then
- find_python
+ # for development here - move us to python 3 for now
+ if [ "$USER" == "imarom" ] || [ "$USER" == "hhaim" ] || [ "$USER" == "ybrustin" ] || [ "$USER" == "ibarnea" ]; then
+ PYTHON=/auto/proj-pcube-b/apps/PL-b/tools/python3.4/bin/python3
+ else
+ find_python
+ fi
fi
diff --git a/scripts/run_functional_tests b/scripts/run_functional_tests
index e3a5fa61..995b1b0d 100755
--- a/scripts/run_functional_tests
+++ b/scripts/run_functional_tests
@@ -1,6 +1,27 @@
#!/bin/bash
-source find_python.sh
+#source find_python.sh
cd automation/regression
+
+PYTHON=/usr/bin/python2
+PYTHON3=/auto/proj-pcube-b/apps/PL-b/tools/python3.4/bin/python3
+
+# Python 2
$PYTHON trex_unit_test.py --functional $@
+if [ $? -eq 0 ]; then
+ printf "\n$PYTHON test succeeded\n\n"
+else
+ printf "\n*** $PYTHON test failed\n\n"
+ exit -1
+fi
+
+# Python 3
+$PYTHON3 trex_unit_test.py --functional $@
+if [ $? -eq 0 ]; then
+ printf "\n$PYTHON3 test succeeded\n\n"
+else
+ printf "\n*** $PYTHON3 test failed\n\n"
+ exit -1
+fi
+
diff --git a/scripts/stl/udp_1pkt_simple_test.py b/scripts/stl/udp_1pkt_simple_test.py
index b2f80233..3915412d 100644
--- a/scripts/stl/udp_1pkt_simple_test.py
+++ b/scripts/stl/udp_1pkt_simple_test.py
@@ -2,9 +2,9 @@ from trex_stl_lib.api import *
def generate_payload(length):
word = ''
- alphabet_size = len(string.letters)
+ alphabet_size = len(string.ascii_letters)
for i in range(length):
- word += string.letters[(i % alphabet_size)]
+ word += string.ascii_letters[(i % alphabet_size)]
return word
@@ -12,7 +12,7 @@ class STLS1(object):
def create_stream (self):
fsize_no_fcs = 129
- base_pkt_a = Ether()/IP(dst="48.0.0.1",options=IPOption('\x01\x01\x01\x00'))/UDP(dport=12,sport=1025)
+ base_pkt_a = Ether()/IP(dst="48.0.0.1",options=IPOption(b'\x01\x01\x01\x00'))/UDP(dport=12,sport=1025)
vm1 = STLScVmRaw([
STLVmFlowVar(name="src",min_value="10.0.0.1",max_value="10.0.0.10",size=4,op="inc"),
diff --git a/scripts/stl/udp_1pkt_simple_test2.py b/scripts/stl/udp_1pkt_simple_test2.py
index c261d3f8..617d98b3 100644
--- a/scripts/stl/udp_1pkt_simple_test2.py
+++ b/scripts/stl/udp_1pkt_simple_test2.py
@@ -2,9 +2,9 @@ from trex_stl_lib.api import *
def generate_payload(length):
word = ''
- alphabet_size = len(string.letters)
+ alphabet_size = len(string.ascii_letters)
for i in range(length):
- word += string.letters[(i % alphabet_size)]
+ word += string.ascii_letters[(i % alphabet_size)]
return word
@@ -12,7 +12,7 @@ class STLS1(object):
def create_stream (self):
fsize_no_fcs = 129
- base_pkt_a = Ether()/IP()/IPv6()/IP(dst="48.0.0.1",options=IPOption('\x01\x01\x01\x00'))/UDP(dport=12,sport=1025)
+ base_pkt_a = Ether()/IP()/IPv6()/IP(dst="48.0.0.1",options=IPOption(b'\x01\x01\x01\x00'))/UDP(dport=12,sport=1025)
vm1 = STLScVmRaw([
STLVmFlowVar(name="src",min_value="10.0.0.1",max_value="10.0.0.10",size=4,op="inc"),
diff --git a/scripts/stl/udp_1pkt_tuple_gen_split.py b/scripts/stl/udp_1pkt_tuple_gen_split.py
index 304ed9c0..e7a33b22 100644
--- a/scripts/stl/udp_1pkt_tuple_gen_split.py
+++ b/scripts/stl/udp_1pkt_tuple_gen_split.py
@@ -31,7 +31,7 @@ class STLS1(object):
vm = vm)
stream = STLStream(packet = pkt,
mode = STLTXCont())
- print stream.to_code()
+ #print(stream.to_code())
return stream
diff --git a/scripts/trex-console b/scripts/trex-console
index 58944237..ea253fdd 100755
--- a/scripts/trex-console
+++ b/scripts/trex-console
@@ -3,4 +3,7 @@
source find_python.sh
export PYTHONPATH=automation/trex_control_plane/stl
+
+printf "\nUsing '$PYTHON' as Python interpeter\n\n"
+
$PYTHON -m console.trex_console $@
diff --git a/src/bp_sim.cpp b/src/bp_sim.cpp
index 6ea40be2..cc9af837 100755
--- a/src/bp_sim.cpp
+++ b/src/bp_sim.cpp
@@ -6039,8 +6039,13 @@ uint16_t CSimplePacketParser::getPktSize(){
return ( ip_len +m_vlan_offset+14);
}
+uint16_t CSimplePacketParser::getIpId() {
+ if (m_ipv4) {
+ return ( m_ipv4->getId() );
+ }
-
+ return (0);
+}
uint8_t CSimplePacketParser::getTTl(){
if (m_ipv4) {
diff --git a/src/bp_sim.h b/src/bp_sim.h
index 37ed7854..4b1a88e3 100755
--- a/src/bp_sim.h
+++ b/src/bp_sim.h
@@ -1246,9 +1246,10 @@ static inline int get_is_rx_check_mode(){
return (CGlobalInfo::m_options.preview.get_is_rx_check_enable() ?1:0);
}
-static inline bool get_is_rx_filter_enable(){//???
+static inline bool get_is_rx_filter_enable(){
uint32_t latency_rate=CGlobalInfo::m_options.m_latency_rate;
- return ( ( get_is_rx_check_mode() || CGlobalInfo::is_learn_mode() || latency_rate != 0) ?true:false );
+ return ( ( get_is_rx_check_mode() || CGlobalInfo::is_learn_mode() || latency_rate != 0
+ || get_is_stateless()) ?true:false );
}
static inline uint16_t get_rx_check_hops() {
return (CGlobalInfo::m_options.m_rx_check_hops);
diff --git a/src/debug.cpp b/src/debug.cpp
index 902766a1..656549dc 100644
--- a/src/debug.cpp
+++ b/src/debug.cpp
@@ -415,11 +415,17 @@ int CTrexDebug::test_send(uint pkt_type) {
lp->dump_stats_extended(stdout);
}
for (port_id = 0; port_id < m_max_ports; port_id++) {
- uint64_t fdir_stat[MAX_FLOW_STATS];
+ rx_per_flow_t fdir_stat[MAX_FLOW_STATS];
+ uint64_t fdir_stat_64[MAX_FLOW_STATS];
CPhyEthIF *lp = &m_ports[port_id];
- if (lp->get_flow_stats(fdir_stat, NULL, 0, MAX_FLOW_STATS, false) == 0)
- rte_stat_dump_array(fdir_stat, "FDIR stat", MAX_FLOW_STATS);
+ if (lp->get_flow_stats(fdir_stat, NULL, 0, MAX_FLOW_STATS, false) == 0) {
+ for (int i = 0; i < MAX_FLOW_STATS; i++) {
+ fdir_stat_64[i] = fdir_stat[i].get_pkts();
+ }
+ rte_stat_dump_array(fdir_stat_64, "FDIR stat", MAX_FLOW_STATS);
+ }
}
+
return (0);
}
diff --git a/src/flow_stat.cpp b/src/flow_stat.cpp
index f03065d2..43bde08b 100644
--- a/src/flow_stat.cpp
+++ b/src/flow_stat.cpp
@@ -25,6 +25,7 @@
#include <os_time.h>
#include "internal_api/trex_platform_api.h"
#include "trex_stateless.h"
+#include "trex_stateless_messaging.h"
#include "trex_stream.h"
#include "flow_stat_parser.h"
#include "flow_stat.h"
@@ -33,7 +34,6 @@
#define FLOW_STAT_ADD_ALL_PORTS 255
static const uint16_t FREE_HW_ID = UINT16_MAX;
-static bool no_stat_supported = true;
inline std::string methodName(const std::string& prettyFunction)
{
@@ -106,7 +106,7 @@ int CFlowStatUserIdInfo::add_stream(uint8_t proto) {
#endif
if (proto != m_proto)
- return -1;
+ throw TrexException("Can't use same pg_id for streams with different l4 protocol");
m_ref_count++;
@@ -121,7 +121,7 @@ void CFlowStatUserIdInfo::reset_hw_id() {
// Next session will start counting from 0.
for (int i = 0; i < TREX_MAX_PORTS; i++) {
m_rx_counter_base[i] += m_rx_counter[i];
- m_rx_counter[i] = 0;
+ memset(&m_rx_counter[i], 0, sizeof(m_rx_counter[0]));
m_tx_counter_base[i] += m_tx_counter[i];
memset(&m_tx_counter[i], 0, sizeof(m_tx_counter[0]));
}
@@ -197,7 +197,7 @@ int CFlowStatUserIdMap::add_stream(uint32_t user_id, uint8_t proto) {
if (! c_user_id) {
c_user_id = add_user_id(user_id, proto);
if (! c_user_id)
- return -1;
+ throw TrexException("Failed adding statistic counter - Failure in add_stream");
return 0;
} else {
return c_user_id->add_stream(proto);
@@ -213,7 +213,7 @@ int CFlowStatUserIdMap::del_stream(uint32_t user_id) {
c_user_id = find_user_id(user_id);
if (! c_user_id) {
- return -1;
+ throw TrexException("Trying to delete stream which does not exist");
}
if (c_user_id->del_stream() == 0) {
@@ -236,13 +236,13 @@ int CFlowStatUserIdMap::start_stream(uint32_t user_id, uint16_t hw_id) {
if (! c_user_id) {
fprintf(stderr, "%s Error: Trying to associate hw id %d to user_id %d but it does not exist\n"
, __func__, hw_id, user_id);
- return -1;
+ throw TrexException("Internal error: Trying to associate non exist group id");
}
if (c_user_id->is_hw_id()) {
- fprintf(stderr, "%s Error: Trying to associate hw id %d to user_id %d but it is already associate to %u\n"
+ fprintf(stderr, "%s Error: Trying to associate hw id %d to user_id %d but it is already associated to %u\n"
, __func__, hw_id, user_id, c_user_id->get_hw_id());
- return -1;
+ throw TrexException("Internal error: Trying to associate used packet group id to different hardware counter");
}
c_user_id->set_hw_id(hw_id);
c_user_id->add_started_stream();
@@ -259,9 +259,9 @@ int CFlowStatUserIdMap::start_stream(uint32_t user_id) {
c_user_id = find_user_id(user_id);
if (! c_user_id) {
- fprintf(stderr, "%s Error: Trying to start stream on user_id %d but it does not exist\n"
+ fprintf(stderr, "%s Error: Trying to start stream on pg_id %d but it does not exist\n"
, __func__, user_id);
- return -1;
+ throw TrexException("Trying to start stream with non exist packet group id");
}
c_user_id->add_started_stream();
@@ -280,9 +280,9 @@ int CFlowStatUserIdMap::stop_stream(uint32_t user_id) {
c_user_id = find_user_id(user_id);
if (! c_user_id) {
- fprintf(stderr, "%s Error: Trying to stop stream on user_id %d but it does not exist\n"
+ fprintf(stderr, "%s Error: Trying to stop stream on pg_id %d but it does not exist\n"
, __func__, user_id);
- return -1;
+ throw TrexException("Trying to stop stream with non exist packet group id");
}
return c_user_id->stop_started_stream();
@@ -385,6 +385,34 @@ void CFlowStatHwIdMap::unmap(uint16_t hw_id) {
CFlowStatRuleMgr::CFlowStatRuleMgr() {
m_api = NULL;
m_max_hw_id = -1;
+ m_num_started_streams = 0;
+ m_ring_to_rx = NULL;
+ m_capabilities = 0;
+ m_parser = NULL;
+}
+
+CFlowStatRuleMgr::~CFlowStatRuleMgr() {
+ if (m_parser)
+ delete m_parser;
+}
+
+void CFlowStatRuleMgr::create() {
+ uint16_t num_counters, capabilities;
+ TrexStateless *tstateless = get_stateless_obj();
+ assert(tstateless);
+
+ m_api = tstateless->get_platform_api();
+ assert(m_api);
+ m_api->get_interface_stat_info(0, num_counters, capabilities);
+ m_api->get_port_num(m_num_ports);
+ for (uint8_t port = 0; port < m_num_ports; port++) {
+ assert(m_api->reset_hw_flow_stats(port) == 0);
+ }
+ m_ring_to_rx = CMsgIns::Ins()->getCpRx()->getRingCpToDp(0);
+ assert(m_ring_to_rx);
+ m_parser = m_api->get_flow_stat_parser();
+ assert(m_parser);
+ m_capabilities = capabilities;
}
std::ostream& operator<<(std::ostream& os, const CFlowStatRuleMgr& cf) {
@@ -394,38 +422,30 @@ std::ostream& operator<<(std::ostream& os, const CFlowStatRuleMgr& cf) {
return os;
}
-int CFlowStatRuleMgr::compile_stream(const TrexStream * stream, Cxl710Parser &parser) {
+int CFlowStatRuleMgr::compile_stream(const TrexStream * stream, CFlowStatParser *parser) {
#ifdef __DEBUG_FUNC_ENTRY__
std::cout << __METHOD_NAME__ << " user id:" << stream->m_rx_check.m_pg_id << " en:";
std::cout << stream->m_rx_check.m_enabled << std::endl;
#endif
- // currently we support only IP ID rule types
- // all our ports are the same type, so testing port 0 is enough
- uint16_t num_counters, capabilities;
- m_api->get_interface_stat_info(0, num_counters, capabilities);
- if ((capabilities & TrexPlatformApi::IF_STAT_IPV4_ID) == 0) {
- return -2;
- }
-
- if (parser.parse(stream->m_pkt.binary, stream->m_pkt.len) != 0) {
+ if (parser->parse(stream->m_pkt.binary, stream->m_pkt.len) != 0) {
// if we could not parse the packet, but no stat count needed, it is probably OK.
if (stream->m_rx_check.m_enabled) {
fprintf(stderr, "Error: %s - Compilation failed\n", __func__);
- return -1;
+ throw TrexException("Failed parsing given packet for flow stat. Probably bad packet format.");
} else {
return 0;
}
}
- if (!parser.is_fdir_supported()) {
+ if (!parser->is_stat_supported()) {
if (stream->m_stream_id <= 0) {
- // rx stat not needed. Do nothing.
+ // flow stat not needed. Do nothing.
return 0;
} else {
- // rx stat needed, but packet format is not supported
- fprintf(stderr, "Error: %s - Unsupported packet format for rx stat\n", __func__);
- return -1;
+ // flow stat needed, but packet format is not supported
+ fprintf(stderr, "Error: %s - Unsupported packet format for flow stat\n", __func__);
+ throw TrexException("Unsupported packet format for flow stat on given interface type");
}
}
return 0;
@@ -436,44 +456,36 @@ int CFlowStatRuleMgr::add_stream(const TrexStream * stream) {
std::cout << __METHOD_NAME__ << " user id:" << stream->m_rx_check.m_pg_id << std::endl;
#endif
- if (! m_api ) {
- TrexStateless *tstateless = get_stateless_obj();
- m_api = tstateless->get_platform_api();
- uint16_t num_counters, capabilities;
- m_api->get_interface_stat_info(0, num_counters, capabilities);
- if ((capabilities & TrexPlatformApi::IF_STAT_IPV4_ID) == 0) {
- // All our interfaces are from the same type. If statistics not supported.
- // no operation will work
- return -1;
- } else {
- no_stat_supported = false;
- }
- m_api->get_port_num(m_num_ports);
- for (uint8_t port = 0; port < m_num_ports; port++) {
- assert(m_api->reset_hw_flow_stats(port) == 0);
- }
+ if (! stream->m_rx_check.m_enabled) {
+ return 0;
}
- if (no_stat_supported)
- return -ENOTSUP;
+    // Init everything here, and not in the constructor, since we rely on other objects
+ // By the time a stream is added everything else is initialized.
+ if (! m_api ) {
+ create();
+ }
- Cxl710Parser parser;
- int ret;
+ uint16_t rule_type = TrexPlatformApi::IF_STAT_IPV4_ID; // In the future need to get it from the stream;
- if (! stream->m_rx_check.m_enabled) {
- return 0;
+ if ((m_capabilities & rule_type) == 0) {
+ fprintf(stderr, "Error: %s - rule type not supported by interface\n", __func__);
+ throw TrexException("Interface does not support given rule type");
}
- if ((ret = compile_stream(stream, parser)) < 0)
- return ret;
+ // compile_stream throws exception if something goes wrong
+ compile_stream(stream, m_parser);
uint8_t l4_proto;
- if (parser.get_l4_proto(l4_proto) < 0) {
- printf("Error: %s failed finding l4 proto\n", __func__);
- return -1;
+ if (m_parser->get_l4_proto(l4_proto) < 0) {
+ fprintf(stderr, "Error: %s failed finding l4 proto\n", __func__);
+ throw TrexException("Failed determining l4 proto for packet");
}
- return m_user_id_map.add_stream(stream->m_rx_check.m_pg_id, l4_proto);
+ // throws exception if there is error
+ m_user_id_map.add_stream(stream->m_rx_check.m_pg_id, l4_proto);
+
+ return 0;
}
int CFlowStatRuleMgr::del_stream(const TrexStream * stream) {
@@ -481,14 +493,23 @@ int CFlowStatRuleMgr::del_stream(const TrexStream * stream) {
std::cout << __METHOD_NAME__ << " user id:" << stream->m_rx_check.m_pg_id << std::endl;
#endif
- if (no_stat_supported)
- return -ENOTSUP;
+ if (! m_api)
+ throw TrexException("Called del_stream, but no stream was added");
if (! stream->m_rx_check.m_enabled) {
return 0;
}
- return m_user_id_map.del_stream(stream->m_rx_check.m_pg_id);
+ if (m_user_id_map.is_started(stream->m_rx_check.m_pg_id)) {
+ std::cerr << "Error: Trying to delete flow statistics stream " << stream->m_rx_check.m_pg_id
+ << " which is not stopped." << std::endl;
+ throw TrexException("Trying to delete stream which was not stopped");
+ }
+
+ // Throws exception in case of error
+ m_user_id_map.del_stream(stream->m_rx_check.m_pg_id);
+
+ return 0;
}
// called on all streams, when stream start to transmit
@@ -502,33 +523,49 @@ int CFlowStatRuleMgr::start_stream(TrexStream * stream, uint16_t &ret_hw_id) {
std::cout << __METHOD_NAME__ << " user id:" << stream->m_rx_check.m_pg_id << std::endl;
#endif
- Cxl710Parser parser;
int ret;
-
- if (no_stat_supported)
- return -ENOTSUP;
-
- if ((ret = compile_stream(stream, parser)) < 0)
- return ret;
+ // Streams which do not need statistics might be started before any stream that does
+ // need statistics, so start_stream might be called before add_stream
+ if (! m_api ) {
+ create();
+ }
// first handle streams that do not need rx stat
if (! stream->m_rx_check.m_enabled) {
- // no need for stat count
+ try {
+ compile_stream(stream, m_parser);
+ } catch (TrexException) {
+ // If no statistics needed, and we can't parse the stream, that's OK.
+ return 0;
+ }
+
uint16_t ip_id;
- if (parser.get_ip_id(ip_id) < 0) {
- return 0; // if we could not find and ip id, no need to fix
+ if (m_parser->get_ip_id(ip_id) < 0) {
+ return 0; // if we could not find the ip id, no need to fix
}
// verify no reserved IP_ID used, and change if needed
if (ip_id >= IP_ID_RESERVE_BASE) {
- if (parser.set_ip_id(ip_id & 0xefff) < 0) {
- return -1;
+ if (m_parser->set_ip_id(ip_id & 0xefff) < 0) {
+ throw TrexException("Stream IP ID in reserved range. Failed changing it");
}
}
return 0;
}
- uint16_t hw_id;
// from here, we know the stream need rx stat
+
+ // compile_stream throws exception if something goes wrong
+ if ((ret = compile_stream(stream, m_parser)) < 0)
+ return ret;
+
+ uint16_t hw_id;
+ uint16_t rule_type = TrexPlatformApi::IF_STAT_IPV4_ID; // In the future, need to get it from the stream;
+
+ if ((m_capabilities & rule_type) == 0) {
+ fprintf(stderr, "Error: %s - rule type not supported by interface\n", __func__);
+ throw TrexException("Interface does not support given rule type");
+ }
+
if (m_user_id_map.is_started(stream->m_rx_check.m_pg_id)) {
m_user_id_map.start_stream(stream->m_rx_check.m_pg_id); // just increase ref count;
hw_id = m_user_id_map.get_hw_id(stream->m_rx_check.m_pg_id); // can't fail if we got here
@@ -536,19 +573,19 @@ int CFlowStatRuleMgr::start_stream(TrexStream * stream, uint16_t &ret_hw_id) {
hw_id = m_hw_id_map.find_free_hw_id();
if (hw_id == FREE_HW_ID) {
printf("Error: %s failed finding free hw_id\n", __func__);
- return -1;
+ throw TrexException("Failed allocating statistic counter. Probably all are used.");
} else {
if (hw_id > m_max_hw_id) {
m_max_hw_id = hw_id;
}
uint32_t user_id = stream->m_rx_check.m_pg_id;
- m_user_id_map.start_stream(user_id, hw_id);
+ m_user_id_map.start_stream(user_id, hw_id); // ??? can throw exception. return hw_id
m_hw_id_map.map(hw_id, user_id);
add_hw_rule(hw_id, m_user_id_map.l4_proto(user_id));
}
}
- parser.set_ip_id(IP_ID_RESERVE_BASE + hw_id);
+ m_parser->set_ip_id(IP_ID_RESERVE_BASE + hw_id);
ret_hw_id = hw_id;
@@ -556,6 +593,10 @@ int CFlowStatRuleMgr::start_stream(TrexStream * stream, uint16_t &ret_hw_id) {
std::cout << "exit:" << __METHOD_NAME__ << " hw_id:" << ret_hw_id << std::endl;
#endif
+ if (m_num_started_streams == 0) {
+ send_start_stop_msg_to_rx(true); // First transmitting stream. Rx core should start reading packets;
+ }
+ m_num_started_streams++;
return 0;
}
@@ -571,13 +612,13 @@ int CFlowStatRuleMgr::stop_stream(const TrexStream * stream) {
#ifdef __DEBUG_FUNC_ENTRY__
std::cout << __METHOD_NAME__ << " user id:" << stream->m_rx_check.m_pg_id << std::endl;
#endif
- if (no_stat_supported)
- return -ENOTSUP;
-
if (! stream->m_rx_check.m_enabled) {
return 0;
}
+ if (! m_api)
+ throw TrexException("Called stop_stream, but no stream was added");
+
if (m_user_id_map.stop_stream(stream->m_rx_check.m_pg_id) == 0) {
// last stream associated with the entry stopped transmitting.
// remove user_id <--> hw_id mapping
@@ -585,12 +626,12 @@ int CFlowStatRuleMgr::stop_stream(const TrexStream * stream) {
uint16_t hw_id = m_user_id_map.get_hw_id(stream->m_rx_check.m_pg_id);
if (hw_id >= MAX_FLOW_STATS) {
fprintf(stderr, "Error: %s got wrong hw_id %d from unmap\n", __func__, hw_id);
- return -1;
+ throw TrexException("Internal error in stop_stream. Got bad hw_id");
} else {
// update counters, and reset before unmapping
CFlowStatUserIdInfo *p_user_id = m_user_id_map.find_user_id(m_hw_id_map.get_user_id(hw_id));
assert(p_user_id != NULL);
- uint64_t rx_counter;
+ rx_per_flow_t rx_counter;
tx_per_flow_t tx_counter;
for (uint8_t port = 0; port < m_num_ports; port++) {
m_api->del_rx_flow_stat_rule(port, FLOW_STAT_RULE_TYPE_IPV4_ID, proto, hw_id);
@@ -605,6 +646,11 @@ int CFlowStatRuleMgr::stop_stream(const TrexStream * stream) {
m_hw_id_map.unmap(hw_id);
}
}
+ m_num_started_streams--;
+ assert (m_num_started_streams >= 0);
+ if (m_num_started_streams == 0) {
+ send_start_stop_msg_to_rx(false); // No more transmitting streams. Rx core should get into idle loop.
+ }
return 0;
}
@@ -618,16 +664,28 @@ int CFlowStatRuleMgr::get_active_pgids(flow_stat_active_t &result) {
return 0;
}
+extern bool rx_should_stop;
+void CFlowStatRuleMgr::send_start_stop_msg_to_rx(bool is_start) {
+ TrexStatelessCpToRxMsgBase *msg;
+
+ if (is_start) {
+ msg = new TrexStatelessRxStartMsg();
+ } else {
+ msg = new TrexStatelessRxStopMsg();
+ }
+ m_ring_to_rx->Enqueue((CGenNode *)msg);
+}
+
// return false if no counters changed since last run. true otherwise
bool CFlowStatRuleMgr::dump_json(std::string & json, bool baseline) {
- uint64_t rx_stats[MAX_FLOW_STATS];
+ rx_per_flow_t rx_stats[MAX_FLOW_STATS];
tx_per_flow_t tx_stats[MAX_FLOW_STATS];
Json::FastWriter writer;
Json::Value root;
root["name"] = "flow_stats";
root["type"] = 0;
-
+
if (baseline) {
root["baseline"] = true;
}
@@ -645,15 +703,16 @@ bool CFlowStatRuleMgr::dump_json(std::string & json, bool baseline) {
for (uint8_t port = 0; port < m_num_ports; port++) {
m_api->get_flow_stats(port, rx_stats, (void *)tx_stats, 0, m_max_hw_id, false);
for (int i = 0; i <= m_max_hw_id; i++) {
- if (rx_stats[i] != 0) {
+ if (rx_stats[i].get_pkts() != 0) {
+ rx_per_flow_t rx_pkts = rx_stats[i];
CFlowStatUserIdInfo *p_user_id = m_user_id_map.find_user_id(m_hw_id_map.get_user_id(i));
if (likely(p_user_id != NULL)) {
- if (p_user_id->get_rx_counter(port) != rx_stats[i]) {
- p_user_id->set_rx_counter(port, rx_stats[i]);
+ if (p_user_id->get_rx_counter(port) != rx_pkts) {
+ p_user_id->set_rx_counter(port, rx_pkts);
p_user_id->set_need_to_send_rx(port);
}
} else {
- std::cerr << __METHOD_NAME__ << i << ":Could not count " << rx_stats[i] << " rx packets, on port "
+ std::cerr << __METHOD_NAME__ << i << ":Could not count " << rx_pkts << " rx packets, on port "
<< (uint16_t)port << ", because no mapping was found." << std::endl;
}
}
@@ -690,7 +749,8 @@ bool CFlowStatRuleMgr::dump_json(std::string & json, bool baseline) {
std::string str_port = static_cast<std::ostringstream*>( &(std::ostringstream() << int(port) ) )->str();
if (user_id_info->need_to_send_rx(port) || baseline) {
user_id_info->set_no_need_to_send_rx(port);
- data_section[str_user_id]["rx_pkts"][str_port] = Json::Value::UInt64(user_id_info->get_rx_counter(port));
+ data_section[str_user_id]["rx_pkts"][str_port] = Json::Value::UInt64(user_id_info->get_rx_counter(port).get_pkts());
+ data_section[str_user_id]["rx_bytes"][str_port] = Json::Value::UInt64(user_id_info->get_rx_counter(port).get_bytes());
send_empty = false;
}
if (user_id_info->need_to_send_tx(port) || baseline) {
diff --git a/src/flow_stat.h b/src/flow_stat.h
index 3e00a180..ea33062d 100644
--- a/src/flow_stat.h
+++ b/src/flow_stat.h
@@ -26,6 +26,7 @@
#include <map>
#include "trex_defs.h"
#include "trex_stream.h"
+#include "msg_manager.h"
#include <internal_api/trex_platform_api.h>
// range reserved for rx stat measurement is from IP_ID_RESERVE_BASE to 0xffff
@@ -50,7 +51,7 @@ class tx_per_flow_t_ {
inline void set_bytes(uint64_t bytes) {
m_bytes = bytes;;
}
- inline void get_pkts(uint64_t pkts) {
+ inline void set_pkts(uint64_t pkts) {
m_pkts = pkts;
}
inline void add_bytes(uint64_t bytes) {
@@ -100,16 +101,17 @@ class tx_per_flow_t_ {
};
typedef class tx_per_flow_t_ tx_per_flow_t;
+typedef class tx_per_flow_t_ rx_per_flow_t;
class CPhyEthIF;
-class Cxl710Parser;
+class CFlowStatParser;
class CFlowStatUserIdInfo {
public:
CFlowStatUserIdInfo(uint8_t proto);
friend std::ostream& operator<<(std::ostream& os, const CFlowStatUserIdInfo& cf);
- void set_rx_counter(uint8_t port, uint64_t val) {m_rx_counter[port] = val;}
- uint64_t get_rx_counter(uint8_t port) {return m_rx_counter[port] + m_rx_counter_base[port];}
+ void set_rx_counter(uint8_t port, rx_per_flow_t val) {m_rx_counter[port] = val;}
+ rx_per_flow_t get_rx_counter(uint8_t port) {return m_rx_counter[port] + m_rx_counter_base[port];}
void set_tx_counter(uint8_t port, tx_per_flow_t val) {m_tx_counter[port] = val;}
tx_per_flow_t get_tx_counter(uint8_t port) {return m_tx_counter[port] + m_tx_counter_base[port];}
void set_hw_id(uint16_t hw_id) {m_hw_id = hw_id;}
@@ -135,16 +137,16 @@ class CFlowStatUserIdInfo {
private:
bool m_rx_changed[TREX_MAX_PORTS]; // Which RX counters changed since we last published
bool m_tx_changed[TREX_MAX_PORTS]; // Which TX counters changed since we last published
- uint64_t m_rx_counter[TREX_MAX_PORTS]; // How many packets received with this user id since stream start
+ rx_per_flow_t m_rx_counter[TREX_MAX_PORTS]; // How many packets received with this user id since stream start
// How many packets received with this user id, since stream creation, before stream start.
- uint64_t m_rx_counter_base[TREX_MAX_PORTS];
+ rx_per_flow_t m_rx_counter_base[TREX_MAX_PORTS];
tx_per_flow_t m_tx_counter[TREX_MAX_PORTS]; // How many packets transmitted with this user id since stream start
// How many packets transmitted with this user id, since stream creation, before stream start.
tx_per_flow_t m_tx_counter_base[TREX_MAX_PORTS];
uint16_t m_hw_id; // Associated hw id. UINT16_MAX if no associated hw id.
uint8_t m_proto; // protocol (UDP, TCP, other), associated with this user id.
- uint8_t m_ref_count; // How many streams with this ref count exists
- uint8_t m_trans_ref_count; // How many streams with this ref count currently transmit
+ uint8_t m_ref_count; // How many streams with this user id exists
+ uint8_t m_trans_ref_count; // How many streams with this user id currently transmit
bool m_was_sent; // Did we send this info to clients once?
};
@@ -196,6 +198,7 @@ class CFlowStatRuleMgr {
};
CFlowStatRuleMgr();
+ ~CFlowStatRuleMgr();
friend std::ostream& operator<<(std::ostream& os, const CFlowStatRuleMgr& cf);
int add_stream(const TrexStream * stream);
int del_stream(const TrexStream * stream);
@@ -205,8 +208,10 @@ class CFlowStatRuleMgr {
bool dump_json(std::string & json, bool baseline);
private:
- int compile_stream(const TrexStream * stream, Cxl710Parser &parser);
+ void create();
+ int compile_stream(const TrexStream * stream, CFlowStatParser *parser);
int add_hw_rule(uint16_t hw_id, uint8_t proto);
+ void send_start_stop_msg_to_rx(bool is_start);
private:
CFlowStatHwIdMap m_hw_id_map; // map hw ids to user ids
@@ -214,6 +219,10 @@ class CFlowStatRuleMgr {
uint8_t m_num_ports; // How many ports are being used
const TrexPlatformApi *m_api;
int m_max_hw_id; // max hw id we ever used
+ uint32_t m_num_started_streams; // How many started (transmitting) streams we have
+ CNodeRing *m_ring_to_rx; // handle for sending messages to Rx core
+ CFlowStatParser *m_parser;
+ uint16_t m_capabilities;
};
#endif
diff --git a/src/flow_stat_parser.cpp b/src/flow_stat_parser.cpp
index 52824f73..8a77c82d 100644
--- a/src/flow_stat_parser.cpp
+++ b/src/flow_stat_parser.cpp
@@ -25,38 +25,36 @@
#include <common/Network/Packet/EthernetHeader.h>
#include <flow_stat_parser.h>
-Cxl710Parser::Cxl710Parser() {
- reset();
-}
-
-void Cxl710Parser::reset() {
+void CFlowStatParser::reset() {
m_ipv4 = 0;
m_l4_proto = 0;
- m_fdir_supported = false;
+ m_stat_supported = false;
}
-int Cxl710Parser::parse(uint8_t *p, uint16_t len) {
+int CFlowStatParser::parse(uint8_t *p, uint16_t len) {
EthernetHeader *ether = (EthernetHeader *)p;
+ reset();
+
switch( ether->getNextProtocol() ) {
case EthernetHeader::Protocol::IP :
m_ipv4 = (IPHeader *)(p + 14);
- m_fdir_supported = true;
+ m_stat_supported = true;
break;
case EthernetHeader::Protocol::VLAN :
switch ( ether->getVlanProtocol() ){
case EthernetHeader::Protocol::IP:
m_ipv4 = (IPHeader *)(p + 18);
- m_fdir_supported = true;
+ m_stat_supported = true;
break;
default:
- m_fdir_supported = false;
+ m_stat_supported = false;
return -1;
}
break;
default:
- m_fdir_supported = false;
+ m_stat_supported = false;
return -1;
break;
}
@@ -64,7 +62,7 @@ int Cxl710Parser::parse(uint8_t *p, uint16_t len) {
return 0;
}
-int Cxl710Parser::get_ip_id(uint16_t &ip_id) {
+int CFlowStatParser::get_ip_id(uint16_t &ip_id) {
if (! m_ipv4)
return -1;
@@ -73,7 +71,7 @@ int Cxl710Parser::get_ip_id(uint16_t &ip_id) {
return 0;
}
-int Cxl710Parser::set_ip_id(uint16_t new_id) {
+int CFlowStatParser::set_ip_id(uint16_t new_id) {
if (! m_ipv4)
return -1;
@@ -84,7 +82,7 @@ int Cxl710Parser::set_ip_id(uint16_t new_id) {
return 0;
}
-int Cxl710Parser::get_l4_proto(uint8_t &proto) {
+int CFlowStatParser::get_l4_proto(uint8_t &proto) {
if (! m_ipv4)
return -1;
@@ -96,7 +94,7 @@ int Cxl710Parser::get_l4_proto(uint8_t &proto) {
static const uint16_t TEST_IP_ID = 0xabcd;
static const uint8_t TEST_L4_PROTO = 0x11;
-int Cxl710Parser::test() {
+int CFlowStatParser::test() {
uint16_t ip_id = 0;
uint8_t l4_proto;
uint8_t test_pkt[] = {
@@ -124,14 +122,34 @@ int Cxl710Parser::test() {
assert(m_ipv4->isChecksumOK() == true);
assert(get_l4_proto(l4_proto) == 0);
assert(l4_proto == TEST_L4_PROTO);
- assert(m_fdir_supported == true);
+ assert(m_stat_supported == true);
reset();
// bad packet
test_pkt[16] = 0xaa;
assert (parse(test_pkt, sizeof(test_pkt)) == -1);
- assert(m_fdir_supported == false);
+ assert(m_stat_supported == false);
+
+ return 0;
+}
+
+// In 82599 10G card we do not support VLANs
+int C82599Parser::parse(uint8_t *p, uint16_t len) {
+ EthernetHeader *ether = (EthernetHeader *)p;
+
+ reset();
+
+ switch( ether->getNextProtocol() ) {
+ case EthernetHeader::Protocol::IP :
+ m_ipv4 = (IPHeader *)(p + 14);
+ m_stat_supported = true;
+ break;
+ default:
+ m_stat_supported = false;
+ return -1;
+ break;
+ }
return 0;
}
diff --git a/src/flow_stat_parser.h b/src/flow_stat_parser.h
index 606a1bec..8c9e1418 100644
--- a/src/flow_stat_parser.h
+++ b/src/flow_stat_parser.h
@@ -19,19 +19,33 @@
limitations under the License.
*/
-class Cxl710Parser {
+#ifndef __FLOW_STAT_PARSER_H__
+#define __FLOW_STAT_PARSER_H__
+
+// Basic flow stat parser. Relevant for xl710/x710/x350 cards
+#include "common/Network/Packet/IPHeader.h"
+
+class CFlowStatParser {
public:
- Cxl710Parser();
- void reset();
- int parse(uint8_t *pkt, uint16_t len);
- bool is_fdir_supported() {return m_fdir_supported == true;};
- int get_ip_id(uint16_t &ip_id);
- int set_ip_id(uint16_t ip_id);
- int get_l4_proto(uint8_t &proto);
- int test();
+ virtual ~CFlowStatParser() {};
+ virtual void reset();
+ virtual int parse(uint8_t *pkt, uint16_t len);
+ virtual bool is_stat_supported() {return m_stat_supported == true;};
+ virtual int get_ip_id(uint16_t &ip_id);
+ virtual int set_ip_id(uint16_t ip_id);
+ virtual int get_l4_proto(uint8_t &proto);
+ virtual int test();
- private:
+ protected:
IPHeader *m_ipv4;
- bool m_fdir_supported;
+ bool m_stat_supported;
uint8_t m_l4_proto;
};
+
+class C82599Parser : public CFlowStatParser {
+ public:
+ ~C82599Parser() {};
+ int parse(uint8_t *pkt, uint16_t len);
+};
+
+#endif
diff --git a/src/gtest/trex_stateless_gtest.cpp b/src/gtest/trex_stateless_gtest.cpp
index c3dfcb95..a5cf3307 100644
--- a/src/gtest/trex_stateless_gtest.cpp
+++ b/src/gtest/trex_stateless_gtest.cpp
@@ -3581,7 +3581,7 @@ class rx_stat_pkt_parse : public testing::Test {
TEST_F(rx_stat_pkt_parse, x710_parser) {
- Cxl710Parser parser;
+ CFlowStatParser parser;
parser.test();
}
diff --git a/src/internal_api/trex_platform_api.h b/src/internal_api/trex_platform_api.h
index f8f76584..90eaa7c7 100644
--- a/src/internal_api/trex_platform_api.h
+++ b/src/internal_api/trex_platform_api.h
@@ -26,6 +26,7 @@ limitations under the License.
#include <vector>
#include <string>
#include <string.h>
+#include "flow_stat_parser.h"
#include "trex_defs.h"
/**
@@ -34,6 +35,7 @@ limitations under the License.
* @author imarom (06-Oct-15)
*/
+
class TrexPlatformGlobalStats {
public:
TrexPlatformGlobalStats() {
@@ -42,7 +44,7 @@ public:
struct {
double m_cpu_util;
-
+ double m_rx_cpu_util;
double m_tx_bps;
double m_rx_bps;
@@ -142,7 +144,7 @@ public:
virtual void publish_async_data_now(uint32_t key, bool baseline) const = 0;
virtual uint8_t get_dp_core_count() const = 0;
virtual void get_interface_stat_info(uint8_t interface_id, uint16_t &num_counters, uint16_t &capabilities) const =0;
- virtual int get_flow_stats(uint8_t port_id, uint64_t *stats, void *tx_stats, int min, int max, bool reset) const = 0;
+ virtual int get_flow_stats(uint8_t port_id, void *stats, void *tx_stats, int min, int max, bool reset) const = 0;
virtual int reset_hw_flow_stats(uint8_t port_id) const = 0;
virtual void get_port_num(uint8_t &port_num) const = 0;
virtual int add_rx_flow_stat_rule(uint8_t port_id, uint8_t type, uint16_t proto, uint16_t id) const = 0;
@@ -151,6 +153,7 @@ public:
virtual bool get_promiscuous(uint8_t port_id) const = 0;
virtual void flush_dp_messages() const = 0;
virtual int get_active_pgids(flow_stat_active_t &result) const = 0;
+ virtual CFlowStatParser *get_flow_stat_parser() const = 0;
virtual ~TrexPlatformApi() {}
};
@@ -171,7 +174,7 @@ public:
void publish_async_data_now(uint32_t key, bool baseline) const;
uint8_t get_dp_core_count() const;
void get_interface_stat_info(uint8_t interface_id, uint16_t &num_counters, uint16_t &capabilities) const;
- int get_flow_stats(uint8_t port_id, uint64_t *stats, void *tx_stats, int min, int max, bool reset) const;
+ int get_flow_stats(uint8_t port_id, void *stats, void *tx_stats, int min, int max, bool reset) const;
int reset_hw_flow_stats(uint8_t port_id) const;
void get_port_num(uint8_t &port_num) const;
int add_rx_flow_stat_rule(uint8_t port_id, uint8_t type, uint16_t proto, uint16_t id) const;
@@ -180,6 +183,7 @@ public:
bool get_promiscuous(uint8_t port_id) const;
void flush_dp_messages() const;
int get_active_pgids(flow_stat_active_t &result) const;
+ CFlowStatParser *get_flow_stat_parser() const;
};
@@ -225,7 +229,7 @@ public:
virtual void publish_async_data_now(uint32_t key, bool baseline) const {
}
- virtual int get_flow_stats(uint8_t port_id, uint64_t *stats, void *tx_stats, int min, int max, bool reset) const {return 0;};
+ virtual int get_flow_stats(uint8_t port_id, void *stats, void *tx_stats, int min, int max, bool reset) const {return 0;};
virtual int reset_hw_flow_stats(uint8_t port_id) const {return 0;};
virtual void get_port_num(uint8_t &port_num) const {port_num = 2;};
virtual int add_rx_flow_stat_rule(uint8_t port_id, uint8_t type, uint16_t proto, uint16_t id) const {return 0;}
@@ -241,6 +245,7 @@ public:
void flush_dp_messages() const {
}
int get_active_pgids(flow_stat_active_t &result) const {return 0;}
+ CFlowStatParser *get_flow_stat_parser() const {return new CFlowStatParser();}
private:
int m_dp_core_count;
diff --git a/src/latency.cpp b/src/latency.cpp
index d57e97c8..fff7935d 100644
--- a/src/latency.cpp
+++ b/src/latency.cpp
@@ -177,6 +177,9 @@ void CCPortLatency::reset(){
m_seq_error=0;
m_length_error=0;
m_no_ipv4_option=0;
+ for (int i = 0; i < MAX_FLOW_STATS; i++) {
+ m_rx_pg_stat[i].clear();
+ }
m_hist.Reset();
}
@@ -628,8 +631,8 @@ void CLatencyManager::handle_rx_pkt(CLatencyManagerPerPort * lp,
rte_pktmbuf_free(m);
}
-void CLatencyManager::handle_latency_pkt_msg(uint8_t thread_id,
- CGenNodeLatencyPktInfo * msg){
+// In VM, we receive the RX packets in DP core, and send message to RX core with the packet
+void CLatencyManager::handle_latency_pkt_msg(uint8_t thread_id, CGenNodeLatencyPktInfo * msg) {
assert(msg->m_latency_offset==0xdead);
@@ -666,6 +669,7 @@ void CLatencyManager::run_rx_queue_msgs(uint8_t thread_id,
}
}
+// VM mode function. Handle messages from DP
void CLatencyManager::try_rx_queues(){
CMessagingManager * rx_dp = CMsgIns::Ins()->getRxDp();
@@ -679,7 +683,6 @@ void CLatencyManager::try_rx_queues(){
}
}
-
void CLatencyManager::try_rx(){
rte_mbuf_t * rx_pkts[64];
int i;
@@ -712,7 +715,7 @@ void CLatencyManager::reset(){
}
-void CLatencyManager::start(int iter){
+void CLatencyManager::start(int iter) {
m_do_stop =false;
m_is_active =false;
int cnt=0;
diff --git a/src/latency.h b/src/latency.h
index 1f8ef5c0..3dd1cc36 100644
--- a/src/latency.h
+++ b/src/latency.h
@@ -86,6 +86,7 @@ public:
bool Parse();
uint8_t getTTl();
+ uint16_t getIpId();
uint16_t getPktSize();
// Check if packet contains latency data
@@ -243,9 +244,8 @@ public:
uint64_t m_seq_error;
uint64_t m_rx_check;
uint64_t m_no_ipv4_option;
-
-
uint64_t m_length_error;
+ rx_per_flow_t m_rx_pg_stat[MAX_FLOW_STATS];
CTimeHistogram m_hist; /* all window */
CJitter m_jitter;
};
diff --git a/src/main_dpdk.cpp b/src/main_dpdk.cpp
index 1b750bbd..46e9a95e 100644
--- a/src/main_dpdk.cpp
+++ b/src/main_dpdk.cpp
@@ -58,6 +58,7 @@
#include "stateless/cp/trex_stateless.h"
#include "stateless/dp/trex_stream_node.h"
#include "stateless/messaging/trex_stateless_messaging.h"
+#include "stateless/rx/trex_stateless_rx_core.h"
#include "publisher/trex_publisher.h"
#include "../linux_dpdk/version.h"
extern "C" {
@@ -106,7 +107,7 @@ static inline int get_vm_one_queue_enable(){
}
static inline int get_is_rx_thread_enabled() {
- return (CGlobalInfo::m_options.is_rx_enabled() ?1:0);
+ return ((CGlobalInfo::m_options.is_rx_enabled() || CGlobalInfo::m_options.is_stateless()) ?1:0);
}
struct port_cfg_t;
@@ -140,10 +141,14 @@ public:
virtual int wait_for_stable_link()=0;
virtual void wait_after_link_up(){};
virtual bool flow_control_disable_supported(){return true;}
- virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *stats, uint32_t *prev_stats, int min, int max) {return -1;}
+ virtual bool hw_rx_stat_supported(){return false;}
+ virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes
+ , int min, int max) {return -1;}
+ virtual int reset_rx_stats(CPhyEthIF * _if, uint32_t *stats) {return 0;}
virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd) { return -1;}
virtual int get_stat_counters_num() {return 0;}
virtual int get_rx_stat_capabilities() {return 0;}
+ virtual CFlowStatParser *get_flow_stat_parser();
};
@@ -174,8 +179,8 @@ public:
virtual int configure_drop_queue(CPhyEthIF * _if);
virtual int configure_rx_filter_rules(CPhyEthIF * _if);
- int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
- int configure_rx_filter_rules_stateless(CPhyEthIF * _if);
+ virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
+ virtual int configure_rx_filter_rules_stateless(CPhyEthIF * _if);
virtual bool is_hardware_support_drop_queue(){
return(true);
@@ -184,9 +189,11 @@ public:
virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
virtual void clear_extended_stats(CPhyEthIF * _if);
-
+ virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd) {return 0;}
+ virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
+ virtual int get_rx_stat_capabilities() {return TrexPlatformApi::IF_STAT_IPV4_ID;}
virtual int wait_for_stable_link();
- void wait_after_link_up();
+ virtual void wait_after_link_up();
};
class CTRexExtendedDriverBase1GVm : public CTRexExtendedDriverBase {
@@ -236,6 +243,8 @@ public:
virtual void clear_extended_stats(CPhyEthIF * _if);
virtual int wait_for_stable_link();
+ virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
+ virtual int get_rx_stat_capabilities() {return TrexPlatformApi::IF_STAT_IPV4_ID;}
};
@@ -262,15 +271,18 @@ public:
virtual bool is_hardware_filter_is_supported(){
return (true);
}
-
virtual int configure_rx_filter_rules(CPhyEthIF * _if);
-
+ virtual int configure_rx_filter_rules_stateless(CPhyEthIF * _if);
+ virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
virtual bool is_hardware_support_drop_queue(){
return(true);
}
virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
virtual void clear_extended_stats(CPhyEthIF * _if);
virtual int wait_for_stable_link();
+ virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
+ virtual int get_rx_stat_capabilities() {return TrexPlatformApi::IF_STAT_IPV4_ID;}
+ virtual CFlowStatParser *get_flow_stat_parser();
};
class CTRexExtendedDriverBase40G : public CTRexExtendedDriverBase10G {
@@ -313,16 +325,21 @@ public:
}
virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
virtual void clear_extended_stats(CPhyEthIF * _if);
- int get_rx_stats(CPhyEthIF * _if, uint32_t *stats, uint32_t *prev_stats, int min, int max);
- int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
- int get_stat_counters_num() {return MAX_FLOW_STATS;}
- int get_rx_stat_capabilities() {return TrexPlatformApi::IF_STAT_IPV4_ID;}
+ virtual int reset_rx_stats(CPhyEthIF * _if, uint32_t *stats);
+ virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
+ virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
+ virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
+ virtual int get_rx_stat_capabilities() {return TrexPlatformApi::IF_STAT_IPV4_ID;}
virtual int wait_for_stable_link();
// disabling flow control on 40G using DPDK API causes the interface to malfunction
- bool flow_control_disable_supported(){return false;}
+ virtual bool flow_control_disable_supported(){return false;}
+ virtual bool hw_rx_stat_supported(){return true;}
+ virtual CFlowStatParser *get_flow_stat_parser();
+
private:
- void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl, uint16_t ip_id, int queue, uint16_t stat_idx);
+ virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl, uint16_t ip_id, int queue, uint16_t stat_idx);
virtual int configure_rx_filter_rules_statfull(CPhyEthIF * _if);
+
private:
uint8_t m_if_per_card;
};
@@ -1019,11 +1036,11 @@ static int parse_options(int argc, char *argv[], CParserOption* po, bool first_t
}
if ( (po->is_latency_enabled()) || (po->preview.getOnlyLatency()) ){
- parse_err("Latecny check is not supported with interactive mode ");
+ parse_err("Latency check is not supported with interactive mode ");
}
if ( po->preview.getSingleCore() ){
- parse_err("single core is not supported with interactive mode ");
+ parse_err("Single core is not supported with interactive mode ");
}
}
@@ -1096,15 +1113,19 @@ public:
m_port_conf.fdir_conf.status=RTE_FDIR_NO_REPORT_STATUS;
/* Offset of flexbytes field in RX packets (in 16-bit word units). */
/* Note: divide by 2 to convert byte offset to word offset */
- if ( CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
- m_port_conf.fdir_conf.flexbytes_offset=(14+6)/2;
- }else{
- m_port_conf.fdir_conf.flexbytes_offset=(14+8)/2;
- }
+ if (get_is_stateless()) {
+ m_port_conf.fdir_conf.flexbytes_offset = (14+4)/2;
+ } else {
+ if ( CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ) {
+ m_port_conf.fdir_conf.flexbytes_offset = (14+6)/2;
+ } else {
+ m_port_conf.fdir_conf.flexbytes_offset = (14+8)/2;
+ }
- /* Increment offset 4 bytes for the case where we add VLAN */
- if ( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ){
- m_port_conf.fdir_conf.flexbytes_offset+=(4/2);
+ /* Increment offset 4 bytes for the case where we add VLAN */
+ if ( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) {
+ m_port_conf.fdir_conf.flexbytes_offset += (4/2);
+ }
}
m_port_conf.fdir_conf.drop_queue=1;
}
@@ -1180,7 +1201,8 @@ void CPhyEthIFStats::Clear(){
oerrors = 0;
imcasts = 0;
rx_nombuf = 0;
- memset(m_rx_per_flow, 0, sizeof(m_rx_per_flow));
+ memset(m_rx_per_flow_pkts, 0, sizeof(m_rx_per_flow_pkts));
+ memset(m_rx_per_flow_bytes, 0, sizeof(m_rx_per_flow_bytes));
}
@@ -1214,6 +1236,7 @@ void CPhyEthIFStats::Dump(FILE *fd){
DP_A(rx_nombuf);
}
+// Clear the RX queue of an interface, dropping all packets
void CPhyEthIF::flush_rx_queue(void){
rte_mbuf_t * rx_pkts[32];
@@ -1786,6 +1809,9 @@ bool CCoreEthIF::Create(uint8_t core_id,
return (true);
}
+// This function is only relevant if we are in VM. In this case, we only have one rx queue. Can't have
+// rules to drop queue 0, and pass queue 1 to RX core, like in other cases.
+// We receive all packets in the same core that transmitted, and handle them to RX core.
void CCoreEthIF::flush_rx_queue(void){
pkt_dir_t dir ;
bool is_rx = get_is_rx_thread_enabled();
@@ -2300,6 +2326,7 @@ public:
float m_active_flows;
float m_open_flows;
float m_cpu_util;
+ float m_rx_cpu_util;
uint8_t m_threads;
uint32_t m_num_of_ports;
@@ -2588,15 +2615,17 @@ public:
int queues_prob_init();
int ixgbe_start();
int ixgbe_rx_queue_flush();
- int ixgbe_configure_mg();
+ void ixgbe_configure_mg();
+ void rx_sl_configure();
bool is_all_links_are_up(bool dump=false);
int reset_counters();
private:
- /* try to stop all datapath cores */
- void try_stop_all_dp();
+ /* try to stop all datapath cores and RX core */
+ void try_stop_all_cores();
/* send message to all dp cores */
int send_message_all_dp(TrexStatelessCpToDpMsgBase *msg);
+ int send_message_to_rx(TrexStatelessCpToRxMsgBase *msg);
void check_for_dp_message_from_core(int thread_id);
public:
@@ -2604,7 +2633,6 @@ public:
int start_master_statefull();
int start_master_stateless();
int run_in_core(virtual_thread_id_t virt_core_id);
- int stop_core(virtual_thread_id_t virt_core_id);
int core_for_rx(){
if ( (! get_is_rx_thread_enabled()) ) {
return -1;
@@ -2675,8 +2703,10 @@ public:
CParserOption m_po ;
CFlowGenList m_fl;
bool m_fl_was_init;
- volatile uint8_t m_signal[BP_MAX_CORES] __rte_cache_aligned ;
- CLatencyManager m_mg;
+ volatile uint8_t m_signal[BP_MAX_CORES] __rte_cache_aligned ; // Signal to main core when DP thread finished
+ volatile bool m_rx_running; // Signal main core when RX thread finished
+ CLatencyManager m_mg; // statefull RX core
+ CRxCoreStateless m_rx_sl; // stateless RX core
CTrexGlobalIoMode m_io_modes;
private:
@@ -2763,12 +2793,14 @@ bool CGlobalTRex::is_all_links_are_up(bool dump){
return (all_link_are);
}
+void CGlobalTRex::try_stop_all_cores(){
-void CGlobalTRex::try_stop_all_dp(){
-
- TrexStatelessDpQuit * msg= new TrexStatelessDpQuit();
- send_message_all_dp(msg);
- delete msg;
+ TrexStatelessDpQuit * dp_msg= new TrexStatelessDpQuit();
+ TrexStatelessRxQuit * rx_msg= new TrexStatelessRxQuit();
+ send_message_all_dp(dp_msg);
+ send_message_to_rx(rx_msg);
+ delete dp_msg;
+ // no need to delete rx_msg. Deleted by receiver
bool all_core_finished = false;
int i;
for (i=0; i<20; i++) {
@@ -2799,6 +2831,13 @@ int CGlobalTRex::send_message_all_dp(TrexStatelessCpToDpMsgBase *msg){
return (0);
}
+int CGlobalTRex::send_message_to_rx(TrexStatelessCpToRxMsgBase *msg) {
+ CNodeRing *ring = CMsgIns::Ins()->getCpRx()->getRingCpToDp(0);
+ ring->Enqueue((CGenNode *) msg);
+
+ return (0);
+}
+
int CGlobalTRex::ixgbe_rx_queue_flush(){
int i;
@@ -2810,7 +2849,7 @@ int CGlobalTRex::ixgbe_rx_queue_flush(){
}
-int CGlobalTRex::ixgbe_configure_mg(void){
+void CGlobalTRex::ixgbe_configure_mg(void) {
int i;
CLatencyManagerCfg mg_cfg;
mg_cfg.m_max_ports = m_max_ports;
@@ -2850,10 +2889,34 @@ int CGlobalTRex::ixgbe_configure_mg(void){
m_mg.Create(&mg_cfg);
m_mg.set_mask(CGlobalInfo::m_options.m_latency_mask);
-
- return (0);
}
+// init m_rx_sl object for stateless rx core
+void CGlobalTRex::rx_sl_configure(void) {
+ CRxSlCfg rx_sl_cfg;
+ int i;
+
+ rx_sl_cfg.m_max_ports = m_max_ports;
+
+ if ( get_vm_one_queue_enable() ) {
+ /* vm mode, indirect queues */
+ for (i=0; i < m_max_ports; i++) {
+ CMessagingManager * rx_dp = CMsgIns::Ins()->getRxDp();
+ uint8_t thread_id = (i >> 1);
+ CNodeRing * r = rx_dp->getRingCpToDp(thread_id);
+ m_latency_vm_vports[i].Create((uint8_t)i, r, &m_mg);
+ rx_sl_cfg.m_ports[i] = &m_latency_vm_vports[i];
+ }
+ } else {
+ for (i = 0; i < m_max_ports; i++) {
+ CPhyEthIF * _if = &m_ports[i];
+ m_latency_vports[i].Create(_if, m_latency_tx_queue_id, 1);
+ rx_sl_cfg.m_ports[i] = &m_latency_vports[i];
+ }
+ }
+
+ m_rx_sl.create(rx_sl_cfg);
+}
int CGlobalTRex::ixgbe_start(void){
int i;
@@ -2971,8 +3034,11 @@ int CGlobalTRex::ixgbe_start(void){
ixgbe_rx_queue_flush();
-
- ixgbe_configure_mg();
+ if (! get_is_stateless()) {
+ ixgbe_configure_mg();
+ } else {
+ rx_sl_configure();
+ }
/* core 0 - control
@@ -3361,6 +3427,9 @@ void CGlobalTRex::get_stats(CGlobalStats & stats){
stats.m_num_of_ports = m_max_ports;
stats.m_cpu_util = m_fl.GetCpuUtil();
+ if (get_is_stateless()) {
+ stats.m_rx_cpu_util = m_rx_sl.get_cpu_util();
+ }
stats.m_threads = m_fl.m_threads_info.size();
for (i=0; i<m_max_ports; i++) {
@@ -3724,7 +3793,7 @@ int CGlobalTRex::run_in_master() {
if (!is_all_cores_finished()) {
/* probably CLTR-C */
- try_stop_all_dp();
+ try_stop_all_cores();
}
m_mg.stop();
@@ -3739,16 +3808,17 @@ int CGlobalTRex::run_in_master() {
int CGlobalTRex::run_in_rx_core(void){
- if ( CGlobalInfo::m_options.is_rx_enabled() ){
- m_mg.start(0);
+ if (get_is_stateless()) {
+ m_rx_running = true;
+ m_rx_sl.start();
+ } else {
+ if ( CGlobalInfo::m_options.is_rx_enabled() ){
+ m_rx_running = true;
+ m_mg.start(0);
+ }
}
- // ??? start stateless rx
- return (0);
-}
-
-int CGlobalTRex::stop_core(virtual_thread_id_t virt_core_id){
- m_signal[virt_core_id]=1;
+ m_rx_running = false;
return (0);
}
@@ -3833,14 +3903,17 @@ int CGlobalTRex::stop_master(){
return (0);
}
-bool CGlobalTRex::is_all_cores_finished(){
+bool CGlobalTRex::is_all_cores_finished() {
int i;
for (i=0; i<get_cores_tx(); i++) {
if ( m_signal[i+1]==0){
- return (false);
+ return false;
}
}
- return (true);
+ if (m_rx_running)
+ return false;
+
+ return true;
}
@@ -3926,48 +3999,60 @@ int CGlobalTRex::start_master_statefull() {
////////////////////////////////////////////
-
static CGlobalTRex g_trex;
-// The HW counters start from some random values. The driver give us the diffs from previous,
-// each time we do get_rx_stats. We need to make one first call, at system startup,
-// and ignore the returned diffs
int CPhyEthIF::reset_hw_flow_stats() {
- uint32_t diff_stats[MAX_FLOW_STATS];
-
- if (get_ex_drv()->get_rx_stats(this, diff_stats, m_stats.m_fdir_prev_stats, 0, MAX_FLOW_STATS - 1) < 0) {
- return -1;
+ if (get_ex_drv()->hw_rx_stat_supported()) {
+ if (get_ex_drv()->reset_rx_stats(this, m_stats.m_fdir_prev_pkts) < 0) {
+ return -1;
+ }
+ } else {
+ g_trex.m_rx_sl.reset_rx_stats(get_port_id());
}
-
return 0;
}
// get/reset flow director counters
// return 0 if OK. -1 if operation not supported.
-// rx_stats, tx_stats - arrays of len max - min + 1. Returning rx, tx updated values.
+// rx_stats, tx_stats - arrays of len max - min + 1. Returning rx, tx updated absolute values.
// min, max - minimum, maximum counters range to get
// reset - If true, need to reset counter value after reading
-int CPhyEthIF::get_flow_stats(uint64_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset) {
- uint32_t diff_stats[MAX_FLOW_STATS];
-
- if (get_ex_drv()->get_rx_stats(this, diff_stats, m_stats.m_fdir_prev_stats, min, max) < 0) {
- return -1;
+int CPhyEthIF::get_flow_stats(rx_per_flow_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset) {
+ uint32_t diff_pkts[MAX_FLOW_STATS];
+ uint32_t diff_bytes[MAX_FLOW_STATS];
+ bool hw_rx_stat_supported = get_ex_drv()->hw_rx_stat_supported();
+
+ if (hw_rx_stat_supported) {
+ if (get_ex_drv()->get_rx_stats(this, diff_pkts, m_stats.m_fdir_prev_pkts
+ , diff_bytes, m_stats.m_fdir_prev_bytes, min, max) < 0) {
+ return -1;
+ }
+ } else {
+ g_trex.m_rx_sl.get_rx_stats(get_port_id(), rx_stats, min, max, reset);
}
for (int i = min; i <= max; i++) {
if ( reset ) {
// return value so far, and reset
- if (rx_stats != NULL) {
- rx_stats[i - min] = m_stats.m_rx_per_flow[i] + diff_stats[i];
+ if (hw_rx_stat_supported) {
+ if (rx_stats != NULL) {
+ rx_stats[i - min].set_pkts(m_stats.m_rx_per_flow_pkts[i] + diff_pkts[i]);
+ rx_stats[i - min].set_bytes(m_stats.m_rx_per_flow_bytes[i] + diff_bytes[i]);
+ }
+ m_stats.m_rx_per_flow_pkts[i] = 0;
+ m_stats.m_rx_per_flow_bytes[i] = 0;
}
if (tx_stats != NULL) {
tx_stats[i - min] = g_trex.clear_flow_tx_stats(m_port_id, i);
}
- m_stats.m_rx_per_flow[i] = 0;
} else {
- m_stats.m_rx_per_flow[i] += diff_stats[i];
- if (rx_stats != NULL) {
- rx_stats[i - min] = m_stats.m_rx_per_flow[i];
+ if (hw_rx_stat_supported) {
+ m_stats.m_rx_per_flow_pkts[i] += diff_pkts[i];
+ m_stats.m_rx_per_flow_bytes[i] += diff_bytes[i];
+ if (rx_stats != NULL) {
+ rx_stats[i - min].set_pkts(m_stats.m_rx_per_flow_pkts[i]);
+ rx_stats[i - min].set_bytes(m_stats.m_rx_per_flow_bytes[i]);
+ }
}
if (tx_stats != NULL) {
tx_stats[i - min] = g_trex.get_flow_tx_stats(m_port_id, i);
@@ -3978,6 +4063,8 @@ int CPhyEthIF::get_flow_stats(uint64_t *rx_stats, tx_per_flow_t *tx_stats, int m
return 0;
}
+// If needed, send packets to rx core for processing.
+// This is relevant only in VM case, where we receive packets to the working DP core (only 1 DP core in this case)
bool CCoreEthIF::process_rx_pkt(pkt_dir_t dir,
rte_mbuf_t * m){
@@ -3986,17 +4073,25 @@ bool CCoreEthIF::process_rx_pkt(pkt_dir_t dir,
return false;
}
bool send=false;
- CLatencyPktMode *c_l_pkt_mode = g_trex.m_mg.c_l_pkt_mode;
- bool is_lateancy_pkt = c_l_pkt_mode->IsLatencyPkt(parser.m_ipv4) & parser.IsLatencyPkt(parser.m_l4 + c_l_pkt_mode->l4_header_len());
- if (is_lateancy_pkt){
- send=true;
- }else{
- if ( get_is_rx_filter_enable() ){
- uint8_t max_ttl = 0xff - get_rx_check_hops();
- uint8_t pkt_ttl = parser.getTTl();
- if ( (pkt_ttl==max_ttl) || (pkt_ttl==(max_ttl-1) ) ) {
- send=true;
+ if ( get_is_stateless() ) {
+ // In stateless RX, we only care about flow stat packets
+ if ((parser.getIpId() & 0xff00) == IP_ID_RESERVE_BASE) {
+ send = true;
+ }
+ } else {
+ CLatencyPktMode *c_l_pkt_mode = g_trex.m_mg.c_l_pkt_mode;
+ bool is_lateancy_pkt = c_l_pkt_mode->IsLatencyPkt(parser.m_ipv4) & parser.IsLatencyPkt(parser.m_l4 + c_l_pkt_mode->l4_header_len());
+
+ if (is_lateancy_pkt) {
+ send = true;
+ } else {
+ if ( get_is_rx_filter_enable() ) {
+ uint8_t max_ttl = 0xff - get_rx_check_hops();
+ uint8_t pkt_ttl = parser.getTTl();
+ if ( (pkt_ttl==max_ttl) || (pkt_ttl==(max_ttl-1) ) ) {
+ send=true;
+ }
}
}
}
@@ -4036,7 +4131,6 @@ static int latency_one_lcore(__attribute__((unused)) void *dummy)
CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
physical_thread_id_t phy_id =rte_lcore_id();
-
if ( lpsock->thread_phy_is_rx(phy_id) ) {
g_trex.run_in_rx_core();
}else{
@@ -4060,7 +4154,6 @@ static int slave_one_lcore(__attribute__((unused)) void *dummy)
CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
physical_thread_id_t phy_id =rte_lcore_id();
-
if ( lpsock->thread_phy_is_rx(phy_id) ) {
g_trex.run_in_rx_core();
}else{
@@ -4387,7 +4480,7 @@ int main_test(int argc , char * argv[]){
&& (CGlobalInfo::m_options.m_latency_prev > 0)) {
uint32_t pkts = CGlobalInfo::m_options.m_latency_prev *
CGlobalInfo::m_options.m_latency_rate;
- printf("Start prev latency check- for %d sec \n",CGlobalInfo::m_options.m_latency_prev);
+ printf("Starting pre latency check for %d sec\n",CGlobalInfo::m_options.m_latency_prev);
g_trex.m_mg.start(pkts);
delay(CGlobalInfo::m_options.m_latency_prev* 1000);
printf("Finished \n");
@@ -4395,6 +4488,7 @@ int main_test(int argc , char * argv[]){
g_trex.reset_counters();
}
+ g_trex.m_rx_running = false;
if ( get_is_stateless() ) {
g_trex.start_master_stateless();
@@ -4448,6 +4542,12 @@ int CTRexExtendedDriverBase::configure_drop_queue(CPhyEthIF * _if) {
return (rte_eth_dev_rx_queue_stop(port_id, 0));
}
+CFlowStatParser *CTRexExtendedDriverBase::get_flow_stat_parser() {
+ CFlowStatParser *parser = new CFlowStatParser();
+ assert (parser);
+ return parser;
+}
+
void wait_x_sec(int sec) {
int i;
printf(" wait %d sec ", sec);
@@ -4610,7 +4710,7 @@ int CTRexExtendedDriverBase1G::configure_rx_filter_rules_stateless(CPhyEthIF * _
}
rule_id = 0;
- // filter for byte 18 of packet (lsb of IP ID) should equal ff
+ // filter for byte 18 of packet (msb of IP ID) should equal ff
_if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)) , 0x00ff0000);
_if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x04); /* MASK */
// + bytes 12 + 13 (ether type) should indicate IP.
@@ -4682,7 +4782,13 @@ void CTRexExtendedDriverBase1G::get_extended_stats(CPhyEthIF * _if,CPhyEthIFStat
void CTRexExtendedDriverBase1G::clear_extended_stats(CPhyEthIF * _if){
}
-
+#if 0
+int CTRexExtendedDriverBase1G::get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts
+ ,uint32_t *bytes, uint32_t *prev_bytes, int min, int max) {
+ uint32_t port_id = _if->get_port_id();
+ return g_trex.m_rx_sl.get_rx_stats(port_id, pkts, prev_pkts, bytes, prev_bytes, min, max);
+}
+#endif
void CTRexExtendedDriverBase10G::clear_extended_stats(CPhyEthIF * _if){
_if->pci_reg_read(IXGBE_RXNFGPC);
@@ -4698,7 +4804,43 @@ void CTRexExtendedDriverBase10G::update_configuration(port_cfg_t * cfg){
cfg->m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;
}
-int CTRexExtendedDriverBase10G::configure_rx_filter_rules(CPhyEthIF * _if){
+int CTRexExtendedDriverBase10G::configure_rx_filter_rules(CPhyEthIF * _if) {
+ if ( get_is_stateless() ) {
+ return configure_rx_filter_rules_stateless(_if);
+ } else {
+ return configure_rx_filter_rules_statefull(_if);
+ }
+
+ return 0;
+}
+
+int CTRexExtendedDriverBase10G::configure_rx_filter_rules_stateless(CPhyEthIF * _if) {
+ uint8_t port_id = _if->get_rte_port_id();
+ int ip_id_lsb;
+
+ for (ip_id_lsb = 0; ip_id_lsb < MAX_FLOW_STATS; ip_id_lsb++ ) {
+ struct rte_eth_fdir_filter fdir_filter;
+ int res = 0;
+
+ memset(&fdir_filter,0,sizeof(fdir_filter));
+ fdir_filter.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+ fdir_filter.soft_id = ip_id_lsb; // We can use the ip_id_lsb also as filter soft_id
+ fdir_filter.input.flow_ext.flexbytes[0] = 0xff;
+ fdir_filter.input.flow_ext.flexbytes[1] = ip_id_lsb;
+ fdir_filter.action.rx_queue = 1;
+ fdir_filter.action.behavior = RTE_ETH_FDIR_ACCEPT;
+ fdir_filter.action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
+ res = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, RTE_ETH_FILTER_ADD, &fdir_filter);
+
+ if (res != 0) {
+ rte_exit(EXIT_FAILURE, " ERROR rte_eth_dev_filter_ctrl : %d\n",res);
+ }
+ }
+
+ return 0;
+}
+
+int CTRexExtendedDriverBase10G::configure_rx_filter_rules_statefull(CPhyEthIF * _if) {
uint8_t port_id=_if->get_rte_port_id();
uint16_t hops = get_rx_check_hops();
uint16_t v4_hops = (hops << 8)&0xff00;
@@ -4809,6 +4951,12 @@ int CTRexExtendedDriverBase10G::wait_for_stable_link(){
return (0);
}
+CFlowStatParser *CTRexExtendedDriverBase10G::get_flow_stat_parser() {
+ CFlowStatParser *parser = new C82599Parser();
+ assert (parser);
+ return parser;
+}
+
////////////////////////////////////////////////////////////////////////////////
void CTRexExtendedDriverBase40G::clear_extended_stats(CPhyEthIF * _if){
rte_eth_stats_reset(_if->get_port_id());
@@ -4939,13 +5087,24 @@ int CTRexExtendedDriverBase40G::configure_rx_filter_rules(CPhyEthIF * _if) {
}
}
+int CTRexExtendedDriverBase40G::reset_rx_stats(CPhyEthIF * _if, uint32_t *stats) {
+ uint32_t diff_stats[MAX_FLOW_STATS];
+
+ // The HW counters start from some random values. The driver give us the diffs from previous,
+ // each time we do get_rx_stats. We need to make one first call, at system startup,
+ // and ignore the returned diffs
+ return get_rx_stats(_if, diff_stats, stats, NULL, NULL, 0, MAX_FLOW_STATS - 1);
+}
+
// instead of adding this to rte_ethdev.h
extern "C" int rte_eth_fdir_stats_get(uint8_t port_id, uint32_t *stats, uint32_t start, uint32_t len);
// get rx stats on _if, between min and max
-// prev_stats should be the previous values read from the hardware.
+// prev_pkts should be the previous values read from the hardware.
// Getting changed to be equal to current HW values.
-// stats return the diff between prev_stats and current hw values
-int CTRexExtendedDriverBase40G::get_rx_stats(CPhyEthIF * _if, uint32_t *stats, uint32_t *prev_stats, int min, int max) {
+// pkts return the diff between prev_pkts and current hw values
+// bytes and prev_bytes are not used. X710 fdir filters do not support byte count.
+int CTRexExtendedDriverBase40G::get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts
+ ,uint32_t *bytes, uint32_t *prev_bytes, int min, int max) {
uint32_t hw_stats[MAX_FLOW_STATS];
uint32_t port_id = _if->get_port_id();
uint32_t start = (port_id % m_if_per_card) * MAX_FLOW_STATS + min;
@@ -4954,13 +5113,13 @@ int CTRexExtendedDriverBase40G::get_rx_stats(CPhyEthIF * _if, uint32_t *stats, u
rte_eth_fdir_stats_get(port_id, hw_stats, start, len);
for (int i = loop_start; i < loop_start + len; i++) {
- if (hw_stats[i - min] >= prev_stats[i]) {
- stats[i] = (uint64_t)(hw_stats[i - min] - prev_stats[i]);
+ if (hw_stats[i - min] >= prev_pkts[i]) {
+ pkts[i] = (uint64_t)(hw_stats[i - min] - prev_pkts[i]);
} else {
// Wrap around
- stats[i] = (uint64_t)((hw_stats[i - min] + ((uint64_t)1 << 32)) - prev_stats[i]);
+ pkts[i] = (uint64_t)((hw_stats[i - min] + ((uint64_t)1 << 32)) - prev_pkts[i]);
}
- prev_stats[i] = hw_stats[i - min];
+ prev_pkts[i] = hw_stats[i - min];
}
return 0;
@@ -5025,6 +5184,12 @@ int CTRexExtendedDriverBase40G::wait_for_stable_link(){
return (0);
}
+CFlowStatParser *CTRexExtendedDriverBase40G::get_flow_stat_parser() {
+ CFlowStatParser *parser = new CFlowStatParser();
+ assert (parser);
+ return parser;
+}
+
/////////////////////////////////////////////////////////////////////
@@ -5144,6 +5309,9 @@ TrexDpdkPlatformApi::get_global_stats(TrexPlatformGlobalStats &stats) const {
g_trex.get_stats(trex_stats);
stats.m_stats.m_cpu_util = trex_stats.m_cpu_util;
+ if (get_is_stateless()) {
+ stats.m_stats.m_rx_cpu_util = trex_stats.m_rx_cpu_util;
+ }
stats.m_stats.m_tx_bps = trex_stats.m_tx_bps;
stats.m_stats.m_tx_pps = trex_stats.m_tx_pps;
@@ -5197,12 +5365,6 @@ TrexDpdkPlatformApi::get_interface_info(uint8_t interface_id, intf_info_st &info
/* hardware */
g_trex.m_ports[interface_id].macaddr_get(&rte_mac_addr);
assert(ETHER_ADDR_LEN == 6);
- printf("interface %d speed: %d mac:", interface_id, info.speed);
- for (int i = 0; i < 6; i++) {
- info.mac_info.hw_macaddr[i] = rte_mac_addr.addr_bytes[i];
- printf("%x:", rte_mac_addr.addr_bytes[i]);
- }
- printf("\n");
/* software */
uint8_t sw_macaddr[12];
@@ -5235,8 +5397,8 @@ TrexDpdkPlatformApi::get_interface_stat_info(uint8_t interface_id, uint16_t &num
capabilities = CTRexExtendedDriverDb::Ins()->get_drv()->get_rx_stat_capabilities();
}
-int TrexDpdkPlatformApi::get_flow_stats(uint8 port_id, uint64_t *rx_stats, void *tx_stats, int min, int max, bool reset) const {
- return g_trex.m_ports[port_id].get_flow_stats(rx_stats, (tx_per_flow_t *)tx_stats, min, max, reset);
+int TrexDpdkPlatformApi::get_flow_stats(uint8 port_id, void *rx_stats, void *tx_stats, int min, int max, bool reset) const {
+ return g_trex.m_ports[port_id].get_flow_stats((rx_per_flow_t *)rx_stats, (tx_per_flow_t *)tx_stats, min, max, reset);
}
int TrexDpdkPlatformApi::reset_hw_flow_stats(uint8_t port_id) const {
@@ -5268,3 +5430,8 @@ void TrexDpdkPlatformApi::flush_dp_messages() const {
int TrexDpdkPlatformApi::get_active_pgids(flow_stat_active_t &result) const {
return g_trex.m_trex_stateless->m_rx_flow_stat.get_active_pgids(result);
}
+
+CFlowStatParser *TrexDpdkPlatformApi::get_flow_stat_parser() const {
+ return CTRexExtendedDriverDb::Ins()->get_drv()
+ ->get_flow_stat_parser();
+}
diff --git a/src/main_dpdk.h b/src/main_dpdk.h
index a475d321..ff1ea784 100644
--- a/src/main_dpdk.h
+++ b/src/main_dpdk.h
@@ -38,9 +38,11 @@ class CPhyEthIFStats {
uint64_t oerrors; /**< Total number of failed transmitted packets. */
uint64_t imcasts; /**< Total number of multicast received packets. */
uint64_t rx_nombuf; /**< Total number of RX mbuf allocation failures. */
- uint64_t m_rx_per_flow [MAX_FLOW_STATS]; // Per flow RX statistics
- // Previous fdir stats values read from HW. Since on xl710 this is 32 bit, we save old value, to handle wrap around.
- uint32_t m_fdir_prev_stats [MAX_FLOW_STATS];
+ uint64_t m_rx_per_flow_pkts [MAX_FLOW_STATS]; // Per flow RX pkts
+ uint64_t m_rx_per_flow_bytes[MAX_FLOW_STATS]; // Per flow RX bytes
+ // Previous fdir stats values read from driver. Since on xl710 this is 32 bit, we save old value, to handle wrap around.
+ uint32_t m_fdir_prev_pkts [MAX_FLOW_STATS];
+ uint32_t m_fdir_prev_bytes [MAX_FLOW_STATS];
public:
void Clear();
void Dump(FILE *fd);
@@ -73,7 +75,7 @@ class CPhyEthIF {
void get_stats(CPhyEthIFStats *stats);
int dump_fdir_global_stats(FILE *fd);
int reset_hw_flow_stats();
- int get_flow_stats(uint64_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset);
+ int get_flow_stats(rx_per_flow_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset);
void get_stats_1g(CPhyEthIFStats *stats);
void rx_queue_setup(uint16_t rx_queue_id,
uint16_t nb_rx_desc,
diff --git a/src/msg_manager.cpp b/src/msg_manager.cpp
index 9ade1bfc..7e39391a 100755
--- a/src/msg_manager.cpp
+++ b/src/msg_manager.cpp
@@ -4,7 +4,7 @@
*/
/*
-Copyright (c) 2015-2015 Cisco Systems, Inc.
+Copyright (c) 2015-2016 Cisco Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -65,12 +65,12 @@ void CMessagingManager::Delete(){
delete [] m_dp_to_cp;
m_dp_to_cp = NULL;
}
-
+
if (m_cp_to_dp) {
delete [] m_cp_to_dp;
m_cp_to_dp = NULL;
}
-
+
}
CNodeRing * CMessagingManager::getRingCpToDp(uint8_t thread_id){
@@ -84,7 +84,6 @@ CNodeRing * CMessagingManager::getRingDpToCp(uint8_t thread_id){
}
-
void CMsgIns::Free(){
if (m_ins) {
m_ins->Delete();
@@ -107,6 +106,11 @@ bool CMsgIns::Create(uint8_t num_threads){
if (!res) {
return (res);
}
+ res = m_cp_rx.Create(1, "cp_rx");
+ if (!res) {
+ return (res);
+ }
+
return (m_rx_dp.Create(num_threads,"rx_dp"));
}
@@ -114,9 +118,8 @@ bool CMsgIns::Create(uint8_t num_threads){
void CMsgIns::Delete(){
m_cp_dp.Delete();
m_rx_dp.Delete();
+ m_cp_rx.Delete();
}
-CMsgIns * CMsgIns::m_ins=0;
-
-
+CMsgIns * CMsgIns::m_ins=0;
diff --git a/src/msg_manager.h b/src/msg_manager.h
index 0390ce10..de11edbd 100755
--- a/src/msg_manager.h
+++ b/src/msg_manager.h
@@ -6,7 +6,7 @@
*/
/*
-Copyright (c) 2015-2015 Cisco Systems, Inc.
+Copyright (c) 2015-2016 Cisco Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -40,37 +40,37 @@ public:
/*
-e.g DP with 4 threads
-will look like this
+e.g DP with 4 threads
+will look like this
- cp_to_dp
+ cp_to_dp
master :push
dpx : pop
-
+
- --> dp0
cp - --> dp1
- --> dp2
- --> dp3
- dp_to_cp
+ dp_to_cp
cp : pop
dpx : push
-
+
<- -- dp0
cp <- -- dp1
<- -- dp2
<- -- dp3
-*/
+*/
class CGenNode ;
typedef CTRingSp<CGenNode> CNodeRing;
-/* CP == latency thread
+/* CP == latency thread
DP == traffic pkt generator */
class CMessagingManager {
public:
@@ -83,6 +83,7 @@ public:
void Delete();
CNodeRing * getRingCpToDp(uint8_t thread_id);
CNodeRing * getRingDpToCp(uint8_t thread_id);
+ CNodeRing * getRingCpToRx();
uint8_t get_num_threads(){
return (m_num_dp_threads);
}
@@ -106,6 +107,9 @@ public:
CMessagingManager * getCpDp(){
return (&m_cp_dp);
}
+ CMessagingManager * getCpRx(){
+ return (&m_cp_rx);
+ }
uint8_t get_num_threads(){
return (m_rx_dp.get_num_threads());
@@ -114,11 +118,11 @@ public:
private:
CMessagingManager m_rx_dp;
CMessagingManager m_cp_dp;
-
+ CMessagingManager m_cp_rx;
private:
/* one instance */
- static CMsgIns * m_ins;
+ static CMsgIns * m_ins;
};
#endif
diff --git a/src/stateless/cp/trex_stateless.cpp b/src/stateless/cp/trex_stateless.cpp
index 9e24802b..9df57a50 100644
--- a/src/stateless/cp/trex_stateless.cpp
+++ b/src/stateless/cp/trex_stateless.cpp
@@ -132,6 +132,7 @@ TrexStateless::encode_stats(Json::Value &global) {
api->get_global_stats(stats);
global["cpu_util"] = stats.m_stats.m_cpu_util;
+ global["rx_cpu_util"] = stats.m_stats.m_rx_cpu_util;
global["tx_bps"] = stats.m_stats.m_tx_bps;
global["rx_bps"] = stats.m_stats.m_rx_bps;
diff --git a/src/stateless/cp/trex_stateless_port.cpp b/src/stateless/cp/trex_stateless_port.cpp
index 5947aaf7..90589d7a 100644
--- a/src/stateless/cp/trex_stateless_port.cpp
+++ b/src/stateless/cp/trex_stateless_port.cpp
@@ -473,6 +473,13 @@ TrexStatelessPort::send_message_to_dp(uint8_t core_id, TrexStatelessCpToDpMsgBas
ring->Enqueue((CGenNode *)msg);
}
+void
+TrexStatelessPort::send_message_to_rx(TrexStatelessCpToRxMsgBase *msg) {
+
+ /* send the message to the core */
+ CNodeRing *ring = CMsgIns::Ins()->getCpRx()->getRingCpToDp(0);
+ ring->Enqueue((CGenNode *)msg);
+}
uint64_t
TrexStatelessPort::get_port_speed_bps() const {
diff --git a/src/stateless/cp/trex_stateless_port.h b/src/stateless/cp/trex_stateless_port.h
index d3c4dcb9..7e1838d4 100644
--- a/src/stateless/cp/trex_stateless_port.h
+++ b/src/stateless/cp/trex_stateless_port.h
@@ -4,7 +4,7 @@
*/
/*
-Copyright (c) 2015-2015 Cisco Systems, Inc.
+Copyright (c) 2015-2016 Cisco Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,20 +21,21 @@ limitations under the License.
#ifndef __TREX_STATELESS_PORT_H__
#define __TREX_STATELESS_PORT_H__
-#include <trex_stream.h>
-#include <trex_dp_port_events.h>
-#include <internal_api/trex_platform_api.h>
+#include "internal_api/trex_platform_api.h"
+#include "trex_dp_port_events.h"
+#include "trex_stream.h"
class TrexStatelessCpToDpMsgBase;
+class TrexStatelessCpToRxMsgBase;
class TrexStreamsGraphObj;
class TrexPortMultiplier;
-/**
+/**
* TRex port owner can perform
* write commands
* while port is owned - others can
* do read only commands
- *
+ *
*/
class TrexPortOwner {
public:
@@ -92,7 +93,7 @@ private:
/* handler genereated internally */
std::string m_handler;
-
+
/* seed for generating random values */
unsigned int m_seed;
@@ -106,7 +107,7 @@ class AsyncStopEvent;
/**
* describes a stateless port
- *
+ *
* @author imarom (31-Aug-15)
*/
class TrexStatelessPort {
@@ -137,9 +138,9 @@ public:
RC_ERR_FAILED_TO_COMPILE_STREAMS
};
-
+
TrexStatelessPort(uint8_t port_id, const TrexPlatformApi *api);
-
+
~TrexStatelessPort();
/**
@@ -155,11 +156,11 @@ public:
void release(void);
/**
- * validate the state of the port before start
- * it will return a stream graph
- * containing information about the streams
- * configured on this port
- *
+ * validate the state of the port before start
+ * it will return a stream graph
+ * containing information about the streams
+ * configured on this port
+ *
* on error it throws TrexException
*/
const TrexStreamsGraphObj *validate(void);
@@ -190,13 +191,13 @@ public:
/**
* update current traffic on port
- *
+ *
*/
void update_traffic(const TrexPortMultiplier &mul, bool force);
/**
* get the port state
- *
+ *
*/
port_state_e get_state() const {
return m_port_state;
@@ -204,23 +205,23 @@ public:
/**
* port state as string
- *
+ *
*/
std::string get_state_as_string() const;
/**
* the the max stream id currently assigned
- *
+ *
*/
int get_max_stream_id() const;
/**
* fill up properties of the port
- *
+ *
* @author imarom (16-Sep-15)
- *
- * @param driver
- * @param speed
+ *
+ * @param driver
+ * @param speed
*/
void get_properties(std::string &driver, TrexPlatformApi::driver_speed_e &speed);
@@ -237,7 +238,7 @@ public:
/**
* delegators
- *
+ *
*/
void add_stream(TrexStream *stream);
@@ -267,7 +268,7 @@ public:
/**
* returns the number of DP cores linked to this port
- *
+ *
*/
uint8_t get_dp_core_count() {
return m_cores_id_list.size();
@@ -275,7 +276,7 @@ public:
/**
* returns the traffic multiplier currently being used by the DP
- *
+ *
*/
double get_multiplier() {
return (m_factor);
@@ -283,13 +284,13 @@ public:
/**
* get port speed in bits per second
- *
+ *
*/
uint64_t get_port_speed_bps() const;
/**
* return RX caps
- *
+ *
*/
int get_rx_caps() const {
return m_rx_caps;
@@ -300,12 +301,12 @@ public:
}
/**
- * return true if port adds CRC to a packet (not occurs for
- * VNICs)
- *
+ * return true if port adds CRC to a packet (not occurs for
+ * VNICs)
+ *
* @author imarom (24-Feb-16)
- *
- * @return bool
+ *
+ * @return bool
*/
bool has_crc_added() const {
return m_api_info.has_crc;
@@ -318,9 +319,9 @@ public:
/**
* get the port effective rate (on a started / paused port)
- *
+ *
* @author imarom (07-Jan-16)
- *
+ *
*/
void get_port_effective_rate(double &pps,
double &bps_L1,
@@ -330,8 +331,8 @@ public:
/**
* set port promiscuous on/off
- *
- * @param enabled
+ *
+ * @param enabled
*/
void set_promiscuous(bool enabled);
bool get_promiscuous();
@@ -357,40 +358,45 @@ private:
/**
* send message to all cores using duplicate
- *
+ *
*/
void send_message_to_all_dp(TrexStatelessCpToDpMsgBase *msg);
/**
* send message to specific DP core
- *
+ *
*/
void send_message_to_dp(uint8_t core_id, TrexStatelessCpToDpMsgBase *msg);
+ /**
+ * send message to specific RX core
+ *
+ */
+ void send_message_to_rx(TrexStatelessCpToRxMsgBase *msg);
/**
* when a port stops, perform various actions
- *
+ *
*/
void common_port_stop_actions(bool async);
/**
* calculate effective M per core
- *
+ *
*/
double calculate_effective_factor(const TrexPortMultiplier &mul, bool force = false);
double calculate_effective_factor_internal(const TrexPortMultiplier &mul);
-
+
/**
* generates a graph of streams graph
- *
+ *
*/
void generate_streams_graph();
/**
* dispose of it
- *
+ *
* @author imarom (26-Nov-15)
*/
void delete_streams_graph();
@@ -426,7 +432,7 @@ private:
/**
* port multiplier object
- *
+ *
*/
class TrexPortMultiplier {
public:
@@ -443,8 +449,8 @@ public:
};
/**
- * multiplier can be absolute value
- * increment value or subtract value
+ * multiplier can be absolute value
+ * increment value or subtract value
*/
enum mul_op_e {
OP_ABS,
diff --git a/src/stateless/cp/trex_streams_compiler.cpp b/src/stateless/cp/trex_streams_compiler.cpp
index be5002da..563236c2 100644
--- a/src/stateless/cp/trex_streams_compiler.cpp
+++ b/src/stateless/cp/trex_streams_compiler.cpp
@@ -477,7 +477,8 @@ TrexStreamsCompiler::compile_stream(TrexStream *stream,
TrexStream *fixed_rx_flow_stat_stream = stream->clone(true);
- get_stateless_obj()->m_rx_flow_stat.start_stream(fixed_rx_flow_stat_stream, fixed_rx_flow_stat_stream->m_rx_check.m_hw_id); //???? check for errors
+ // not checking for errors. We assume that if add_stream succeeded, start_stream will too.
+ get_stateless_obj()->m_rx_flow_stat.start_stream(fixed_rx_flow_stat_stream, fixed_rx_flow_stat_stream->m_rx_check.m_hw_id);
/* can this stream be split to many cores ? */
if (!stream->is_splitable(dp_core_count)) {
diff --git a/src/stateless/dp/trex_stateless_dp_core.cpp b/src/stateless/dp/trex_stateless_dp_core.cpp
index f8d6d828..ba25f61d 100644
--- a/src/stateless/dp/trex_stateless_dp_core.cpp
+++ b/src/stateless/dp/trex_stateless_dp_core.cpp
@@ -5,7 +5,7 @@
*/
/*
-Copyright (c) 2015-2015 Cisco Systems, Inc.
+Copyright (c) 2015-2016 Cisco Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -19,14 +19,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
-#include <trex_stateless_dp_core.h>
-#include <trex_stateless_messaging.h>
-#include <trex_streams_compiler.h>
-#include <trex_stream_node.h>
-#include <trex_stream.h>
-
-#include <bp_sim.h>
-
+#include "bp_sim.h"
+#include "trex_stateless_dp_core.h"
+#include "trex_stateless_messaging.h"
+#include "trex_stream.h"
+#include "trex_stream_node.h"
+#include "trex_streams_compiler.h"
void CDpOneStream::Delete(CFlowGenListPerThread * core){
assert(m_node->get_state() == CGenNodeStateless::ss_INACTIVE);
diff --git a/src/stateless/messaging/trex_stateless_messaging.cpp b/src/stateless/messaging/trex_stateless_messaging.cpp
index 333aec88..7edf0f13 100644
--- a/src/stateless/messaging/trex_stateless_messaging.cpp
+++ b/src/stateless/messaging/trex_stateless_messaging.cpp
@@ -5,7 +5,7 @@
*/
/*
-Copyright (c) 2015-2015 Cisco Systems, Inc.
+Copyright (c) 2015-2016 Cisco Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -19,17 +19,18 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
-#include <trex_stateless_messaging.h>
-#include <trex_stateless_dp_core.h>
-#include <trex_streams_compiler.h>
-#include <trex_stateless.h>
-#include <bp_sim.h>
-
#include <string.h>
+#include "trex_stateless_messaging.h"
+#include "trex_stateless_dp_core.h"
+#include "trex_stateless_rx_core.h"
+#include "trex_streams_compiler.h"
+#include "trex_stateless.h"
+#include "bp_sim.h"
+
/*************************
start traffic message
- ************************/
+ ************************/
TrexStatelessDpStart::TrexStatelessDpStart(uint8_t port_id, int event_id, TrexStreamsCompiledObj *obj, double duration) {
m_port_id = port_id;
m_event_id = event_id;
@@ -40,7 +41,7 @@ TrexStatelessDpStart::TrexStatelessDpStart(uint8_t port_id, int event_id, TrexSt
/**
* clone for DP start message
- *
+ *
*/
TrexStatelessCpToDpMsgBase *
TrexStatelessDpStart::clone() {
@@ -69,7 +70,7 @@ TrexStatelessDpStart::handle(TrexStatelessDpCore *dp_core) {
/*************************
stop traffic message
- ************************/
+ ************************/
bool
TrexStatelessDpStop::handle(TrexStatelessDpCore *dp_core) {
@@ -114,7 +115,7 @@ bool TrexStatelessDpResume::handle(TrexStatelessDpCore *dp_core){
/**
* clone for DP stop message
- *
+ *
*/
TrexStatelessCpToDpMsgBase *
TrexStatelessDpStop::clone() {
@@ -130,7 +131,7 @@ TrexStatelessDpStop::clone() {
-TrexStatelessCpToDpMsgBase *
+TrexStatelessCpToDpMsgBase *
TrexStatelessDpQuit::clone(){
TrexStatelessCpToDpMsgBase *new_msg = new TrexStatelessDpQuit();
@@ -140,7 +141,7 @@ TrexStatelessDpQuit::clone(){
bool TrexStatelessDpQuit::handle(TrexStatelessDpCore *dp_core){
-
+
/* quit */
dp_core->quit_main_loop();
return (true);
@@ -155,7 +156,7 @@ bool TrexStatelessDpCanQuit::handle(TrexStatelessDpCore *dp_core){
return (true);
}
-TrexStatelessCpToDpMsgBase *
+TrexStatelessCpToDpMsgBase *
TrexStatelessDpCanQuit::clone(){
TrexStatelessCpToDpMsgBase *new_msg = new TrexStatelessDpCanQuit();
@@ -165,7 +166,7 @@ TrexStatelessDpCanQuit::clone(){
/*************************
update traffic message
- ************************/
+ ************************/
bool
TrexStatelessDpUpdate::handle(TrexStatelessDpCore *dp_core) {
dp_core->update_traffic(m_port_id, m_factor);
@@ -207,3 +208,18 @@ TrexDpPortEventMsg::handle() {
return (true);
}
+/************************* messages from CP to RX **********************/
+bool TrexStatelessRxStartMsg::handle (CRxCoreStateless *rx_core) {
+ rx_core->work();
+ return true;
+}
+
+bool TrexStatelessRxStopMsg::handle (CRxCoreStateless *rx_core) {
+ rx_core->idle();
+ return true;
+}
+
+bool TrexStatelessRxQuit::handle (CRxCoreStateless *rx_core) {
+ rx_core->quit();
+ return true;
+}
diff --git a/src/stateless/messaging/trex_stateless_messaging.h b/src/stateless/messaging/trex_stateless_messaging.h
index dda086b7..0eed01bd 100644
--- a/src/stateless/messaging/trex_stateless_messaging.h
+++ b/src/stateless/messaging/trex_stateless_messaging.h
@@ -5,7 +5,7 @@
*/
/*
-Copyright (c) 2015-2015 Cisco Systems, Inc.
+Copyright (c) 2015-2016 Cisco Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -22,16 +22,17 @@ limitations under the License.
#ifndef __TREX_STATELESS_MESSAGING_H__
#define __TREX_STATELESS_MESSAGING_H__
-#include <msg_manager.h>
-#include <trex_dp_port_events.h>
+#include "msg_manager.h"
+#include "trex_dp_port_events.h"
class TrexStatelessDpCore;
+class CRxCoreStateless;
class TrexStreamsCompiledObj;
class CFlowGenListPerThread;
/**
* defines the base class for CP to DP messages
- *
+ *
* @author imarom (27-Oct-15)
*/
class TrexStatelessCpToDpMsgBase {
@@ -49,7 +50,7 @@ public:
/**
* clone the current message
- *
+ *
*/
virtual TrexStatelessCpToDpMsgBase * clone() = 0;
@@ -76,7 +77,7 @@ protected:
/**
* a message to start traffic
- *
+ *
* @author imarom (27-Oct-15)
*/
class TrexStatelessDpStart : public TrexStatelessCpToDpMsgBase {
@@ -137,7 +138,7 @@ private:
/**
* a message to stop traffic
- *
+ *
* @author imarom (27-Oct-15)
*/
class TrexStatelessDpStop : public TrexStatelessCpToDpMsgBase {
@@ -191,9 +192,9 @@ private:
};
/**
- * a message to Quit the datapath traffic. support only stateless for now
- *
- * @author hhaim
+ * a message to Quit the datapath traffic. support only stateless for now
+ *
+ * @author hhaim
*/
class TrexStatelessDpQuit : public TrexStatelessCpToDpMsgBase {
public:
@@ -209,9 +210,9 @@ public:
};
/**
- * a message to check if both port are idel and exit
- *
- * @author hhaim
+ * a message to check if both port are idel and exit
+ *
+ * @author hhaim
*/
class TrexStatelessDpCanQuit : public TrexStatelessCpToDpMsgBase {
public:
@@ -247,7 +248,7 @@ private:
/**
* barrier message for DP core
- *
+ *
*/
class TrexStatelessDpBarrier : public TrexStatelessCpToDpMsgBase {
public:
@@ -270,7 +271,7 @@ private:
/**
* defines the base class for CP to DP messages
- *
+ *
* @author imarom (27-Oct-15)
*/
class TrexStatelessDpToCpMsgBase {
@@ -284,7 +285,7 @@ public:
/**
* virtual function to handle a message
- *
+ *
*/
virtual bool handle() = 0;
@@ -295,9 +296,9 @@ public:
/**
- * a message indicating an event has happened on a port at the
- * DP
- *
+ * a message indicating an event has happened on a port at the
+ * DP
+ *
*/
class TrexDpPortEventMsg : public TrexStatelessDpToCpMsgBase {
public:
@@ -326,8 +327,45 @@ private:
int m_thread_id;
uint8_t m_port_id;
int m_event_id;
-
+
};
-#endif /* __TREX_STATELESS_MESSAGING_H__ */
+/************************* messages from CP to RX **********************/
+/**
+ * defines the base class for CP to RX messages
+ *
+ */
+class TrexStatelessCpToRxMsgBase {
+public:
+
+ TrexStatelessCpToRxMsgBase() {
+ }
+
+ virtual ~TrexStatelessCpToRxMsgBase() {
+ }
+
+ /**
+ * virtual function to handle a message
+ *
+ */
+ virtual bool handle (CRxCoreStateless *rx_core) = 0;
+
+ /* no copy constructor */
+ TrexStatelessCpToRxMsgBase(TrexStatelessCpToRxMsgBase &) = delete;
+
+};
+
+class TrexStatelessRxStartMsg : public TrexStatelessCpToRxMsgBase {
+ bool handle (CRxCoreStateless *rx_core);
+};
+
+class TrexStatelessRxStopMsg : public TrexStatelessCpToRxMsgBase {
+ bool handle (CRxCoreStateless *rx_core);
+};
+
+class TrexStatelessRxQuit : public TrexStatelessCpToRxMsgBase {
+ bool handle (CRxCoreStateless *rx_core);
+};
+
+#endif /* __TREX_STATELESS_MESSAGING_H__ */
diff --git a/src/stateless/rx/trex_stateless_rx_core.cpp b/src/stateless/rx/trex_stateless_rx_core.cpp
new file mode 100644
index 00000000..929ad7fa
--- /dev/null
+++ b/src/stateless/rx/trex_stateless_rx_core.cpp
@@ -0,0 +1,217 @@
+#include <stdio.h>
+#include "bp_sim.h"
+#include "flow_stat_parser.h"
+#include "latency.h"
+#include "trex_stateless_messaging.h"
+#include "trex_stateless_rx_core.h"
+
+void CRxCoreStateless::create(const CRxSlCfg &cfg) {
+ m_max_ports = cfg.m_max_ports;
+
+ CMessagingManager * cp_rx = CMsgIns::Ins()->getCpRx();
+
+ m_ring_from_cp = cp_rx->getRingCpToDp(0);
+ m_ring_to_cp = cp_rx->getRingDpToCp(0);
+ m_state = STATE_IDLE;
+
+ for (int i = 0; i < m_max_ports; i++) {
+ CLatencyManagerPerPort * lp = &m_ports[i];
+ lp->m_io = cfg.m_ports[i];
+ }
+ m_cpu_cp_u.Create(&m_cpu_dp_u);
+}
+
+void CRxCoreStateless::handle_cp_msg(TrexStatelessCpToRxMsgBase *msg) {
+ msg->handle(this);
+ delete msg;
+}
+
+bool CRxCoreStateless::periodic_check_for_cp_messages() {
+ /* fast path */
+ if ( likely ( m_ring_from_cp->isEmpty() ) ) {
+ return false;
+ }
+
+ while ( true ) {
+ CGenNode * node = NULL;
+
+ if (m_ring_from_cp->Dequeue(node) != 0) {
+ break;
+ }
+ assert(node);
+ TrexStatelessCpToRxMsgBase * msg = (TrexStatelessCpToRxMsgBase *)node;
+ handle_cp_msg(msg);
+ }
+
+ return true;
+
+}
+
+void CRxCoreStateless::idle_state_loop() {
+ const int SHORT_DELAY_MS = 2;
+ const int LONG_DELAY_MS = 50;
+ const int DEEP_SLEEP_LIMIT = 2000;
+
+ int counter = 0;
+
+ while (m_state == STATE_IDLE) {
+ bool had_msg = periodic_check_for_cp_messages();
+ if (had_msg) {
+ counter = 0;
+ continue;
+ }
+
+ /* enter deep sleep only if enough time had passed */
+ if (counter < DEEP_SLEEP_LIMIT) {
+ delay(SHORT_DELAY_MS);
+ counter++;
+ } else {
+ delay(LONG_DELAY_MS);
+ }
+ }
+}
+
+void CRxCoreStateless::start() {
+ static int count = 0;
+ static int i = 0;
+ bool do_try_rx_queue =CGlobalInfo::m_options.preview.get_vm_one_queue_enable() ? true : false;
+
+ while (true) {
+ if (m_state == STATE_WORKING) {
+ i++;
+ if (i == 100) {
+ i = 0;
+ // if no packets in 100 cycles, sleep for a while to spare the cpu
+ if (count == 0) {
+ delay(1);
+ }
+ count = 0;
+ periodic_check_for_cp_messages(); // m_state might change in here
+ }
+ } else {
+ if (m_state == STATE_QUIT)
+ break;
+ idle_state_loop();
+ }
+ if (do_try_rx_queue) {
+ try_rx_queues();
+ }
+ count += try_rx();
+ }
+}
+
+void CRxCoreStateless::handle_rx_pkt(CLatencyManagerPerPort *lp, rte_mbuf_t *m) {
+ CFlowStatParser parser;
+
+ if (parser.parse(rte_pktmbuf_mtod(m, uint8_t *), m->pkt_len) == 0) {
+ uint16_t ip_id;
+ if (parser.get_ip_id(ip_id) == 0) {
+ if (is_flow_stat_id(ip_id)) {
+ uint16_t hw_id = get_hw_id(ip_id);
+ lp->m_port.m_rx_pg_stat[hw_id].add_pkts(1);
+ lp->m_port.m_rx_pg_stat[hw_id].add_bytes(m->pkt_len);
+ }
+ }
+ }
+}
+
+// In VM setup, handle packets coming as messages from DP cores.
+void CRxCoreStateless::handle_rx_queue_msgs(uint8_t thread_id, CNodeRing * r) {
+ while ( true ) {
+ CGenNode * node;
+ if ( r->Dequeue(node) != 0 ) {
+ break;
+ }
+ assert(node);
+
+ CGenNodeMsgBase * msg = (CGenNodeMsgBase *)node;
+ CGenNodeLatencyPktInfo * l_msg;
+ uint8_t msg_type = msg->m_msg_type;
+ uint8_t rx_port_index;
+ CLatencyManagerPerPort * lp;
+
+ switch (msg_type) {
+ case CGenNodeMsgBase::LATENCY_PKT:
+ l_msg = (CGenNodeLatencyPktInfo *)msg;
+ assert(l_msg->m_latency_offset == 0xdead);
+ rx_port_index = (thread_id << 1) + (l_msg->m_dir & 1);
+ assert( rx_port_index < m_max_ports );
+ lp = &m_ports[rx_port_index];
+ handle_rx_pkt(lp, (rte_mbuf_t *)l_msg->m_pkt);
+ break;
+ default:
+ printf("ERROR latency-thread message type is not valid %d \n", msg_type);
+ assert(0);
+ }
+
+ CGlobalInfo::free_node(node);
+ }
+}
+
+// VM mode function. Handle messages from DP
+void CRxCoreStateless::try_rx_queues() {
+
+ CMessagingManager * rx_dp = CMsgIns::Ins()->getRxDp();
+ uint8_t threads=CMsgIns::Ins()->get_num_threads();
+ int ti;
+ for (ti = 0; ti < (int)threads; ti++) {
+ CNodeRing * r = rx_dp->getRingDpToCp(ti);
+ if ( ! r->isEmpty() ) {
+ handle_rx_queue_msgs((uint8_t)ti, r);
+ }
+ }
+}
+
+int CRxCoreStateless::try_rx() {
+ rte_mbuf_t * rx_pkts[64];
+ int i, total_pkts = 0;
+ for (i = 0; i < m_max_ports; i++) {
+ CLatencyManagerPerPort * lp = &m_ports[i];
+ rte_mbuf_t * m;
+ m_cpu_dp_u.start_work();
+ /* try to read 64 packets clean up the queue */
+ uint16_t cnt_p = lp->m_io->rx_burst(rx_pkts, 64);
+ total_pkts += cnt_p;
+ if (cnt_p) {
+ int j;
+ for (j = 0; j < cnt_p; j++) {
+ m = rx_pkts[j];
+ handle_rx_pkt(lp, m);
+ rte_pktmbuf_free(m);
+ }
+ /* commit only if there was work to do ! */
+ m_cpu_dp_u.commit();
+ }/* if work */
+ }// all ports
+ return total_pkts;
+}
+
+bool CRxCoreStateless::is_flow_stat_id(uint16_t id) {
+ if ((id & 0xff00) == IP_ID_RESERVE_BASE) return true;
+ return false;
+}
+
+uint16_t CRxCoreStateless::get_hw_id(uint16_t id) {
+ return (0x00ff & id);
+}
+
+void CRxCoreStateless::reset_rx_stats(uint8_t port_id) {
+ for (int hw_id = 0; hw_id < MAX_FLOW_STATS; hw_id++) {
+ m_ports[port_id].m_port.m_rx_pg_stat[hw_id].clear();
+ }
+}
+
+int CRxCoreStateless::get_rx_stats(uint8_t port_id, rx_per_flow_t *rx_stats, int min, int max, bool reset) {
+ for (int hw_id = min; hw_id <= max; hw_id++) {
+ rx_stats[hw_id - min] = m_ports[port_id].m_port.m_rx_pg_stat[hw_id];
+ if (reset) {
+ m_ports[port_id].m_port.m_rx_pg_stat[hw_id].clear();
+ }
+ }
+ return 0;
+}
+
+double CRxCoreStateless::get_cpu_util() {
+ m_cpu_cp_u.Update();
+ return m_cpu_cp_u.GetVal();
+}
diff --git a/src/stateless/rx/trex_stateless_rx_core.h b/src/stateless/rx/trex_stateless_rx_core.h
new file mode 100644
index 00000000..5ab12f4e
--- /dev/null
+++ b/src/stateless/rx/trex_stateless_rx_core.h
@@ -0,0 +1,80 @@
+/*
+ Ido Barnea
+ Cisco Systems, Inc.
+*/
+
+/*
+ Copyright (c) 2016-2016 Cisco Systems, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+#ifndef __TREX_STATELESS_RX_CORE_H__
+#define __TREX_STATELESS_RX_CORE_H__
+#include <stdint.h>
+#include "latency.h"
+#include "utl_cpuu.h"
+
+class TrexStatelessCpToRxMsgBase;
+
+class CRxSlCfg {
+ public:
+ CRxSlCfg (){
+ m_max_ports = 0;
+ m_cps = 0.0;
+ }
+
+ public:
+ uint32_t m_max_ports;
+ double m_cps;
+ CPortLatencyHWBase * m_ports[TREX_MAX_PORTS];
+};
+
+class CRxCoreStateless {
+ enum state_e {
+ STATE_IDLE,
+ STATE_WORKING,
+ STATE_QUIT
+ };
+
+ public:
+ void start();
+ void create(const CRxSlCfg &cfg);
+ void reset_rx_stats(uint8_t port_id);
+ int get_rx_stats(uint8_t port_id, rx_per_flow_t *rx_stats, int min, int max, bool reset);
+ void work() {m_state = STATE_WORKING;}
+ void idle() {m_state = STATE_IDLE;}
+ void quit() {m_state = STATE_QUIT;}
+ double get_cpu_util();
+
+ private:
+ void handle_cp_msg(TrexStatelessCpToRxMsgBase *msg);
+ bool periodic_check_for_cp_messages();
+ void idle_state_loop();
+ void handle_rx_pkt(CLatencyManagerPerPort * lp, rte_mbuf_t * m);
+ void handle_rx_queue_msgs(uint8_t thread_id, CNodeRing * r);
+ int try_rx();
+ void try_rx_queues();
+ bool is_flow_stat_id(uint16_t id);
+ uint16_t get_hw_id(uint16_t id);
+
+ private:
+ uint32_t m_max_ports;
+ bool m_has_streams;
+ CLatencyManagerPerPort m_ports[TREX_MAX_PORTS];
+ state_e m_state; /* state of all ports */
+ CNodeRing *m_ring_from_cp;
+ CNodeRing *m_ring_to_cp;
+ CCpuUtlDp m_cpu_dp_u;
+ CCpuUtlCp m_cpu_cp_u;
+};
+#endif