author    Hanoh Haim <hhaim@cisco.com>  2015-06-24 14:03:29 +0300
committer Hanoh Haim <hhaim@cisco.com>  2015-06-24 14:03:29 +0300
commit    8b52a31ed2c299b759f330c4f976b9c70f5765f4 (patch)
tree      9d6da5438b5b56b1d2d57e6c13494b4e65d000e7 /scripts
first version
Diffstat (limited to 'scripts')
-rwxr-xr-x  scripts/automation/config/trex-dan.cfg  33
-rwxr-xr-x  scripts/automation/config/trex-dev3.cfg  34
-rwxr-xr-x  scripts/automation/config/trex-esp80-hhaim.cfg  31
-rwxr-xr-x  scripts/automation/config/trex-hhaim.cfg  33
-rwxr-xr-x  scripts/automation/config/trex01-1g.cfg  35
-rwxr-xr-x  scripts/automation/graph_template.html  80
-rwxr-xr-x  scripts/automation/h_avc.py  195
-rwxr-xr-x  scripts/automation/phantom/phantomjs  bin 0 -> 38346752 bytes
-rwxr-xr-x  scripts/automation/phantom/rasterize.js  32
-rwxr-xr-x  scripts/automation/readme.txt  15
-rwxr-xr-x  scripts/automation/report_template.html  96
-rwxr-xr-x  scripts/automation/sshpass.exp  15
-rwxr-xr-x  scripts/automation/trex_control_plane/__init__.py  1
-rwxr-xr-x  scripts/automation/trex_control_plane/client/__init__.py  1
-rwxr-xr-x  scripts/automation/trex_control_plane/client/outer_packages.py  29
-rwxr-xr-x  scripts/automation/trex_control_plane/client/trex_adv_client.py  70
-rwxr-xr-x  scripts/automation/trex_control_plane/client/trex_client.py  1066
-rwxr-xr-x  scripts/automation/trex_control_plane/client_utils/__init__.py  1
-rwxr-xr-x  scripts/automation/trex_control_plane/client_utils/general_utils.py  57
-rwxr-xr-x  scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py  212
-rwxr-xr-x  scripts/automation/trex_control_plane/common/__init__.py  1
-rwxr-xr-x  scripts/automation/trex_control_plane/common/trex_exceptions.py  140
-rwxr-xr-x  scripts/automation/trex_control_plane/common/trex_status_e.py  8
-rwxr-xr-x  scripts/automation/trex_control_plane/dirtree_no_files.txt  11
-rwxr-xr-x  scripts/automation/trex_control_plane/dirtree_with_files.txt  31
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/Makefile  192
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/_static/no_scrollbars.css  10
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/about_trex.rst  16
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/api/client_code.rst  17
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/api/exceptions.rst  7
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/api/index.rst  19
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/api/json_fields.rst  233
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/authors.rst  12
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/client_utils.rst  14
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/conf.py  303
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/docs_utilities.py  37
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/index.rst  57
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/installation.rst  25
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/json_dictionary.yaml  252
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/license.rst  18
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/requirements.rst  0
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/usage_examples.rst  68
-rwxr-xr-x  scripts/automation/trex_control_plane/examples/__init__.py  1
-rwxr-xr-x  scripts/automation/trex_control_plane/examples/client_interactive_example.py  256
-rwxr-xr-x  scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py  105
-rwxr-xr-x  scripts/automation/trex_control_plane/examples/trex_root_path.py  15
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/__init__.py  1
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/PKG-INFO  746
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/LICENSE  32
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/README  2
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/__init__.py  790
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/doc/enum.rst  725
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/enum.py  790
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/test_enum.py  1690
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/setup.py  44
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/LICENSE.txt  11
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/PKG-INFO  10
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/README.txt  203
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/SimpleJSONRPCServer.py  229
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/__init__.py  6
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/config.py  38
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/history.py  40
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonclass.py  145
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonrpc.py  556
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/setup.py  28
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/LICENSE.txt  11
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/MANIFEST.in  2
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/PKG-INFO  460
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/README.rst  438
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/SimpleJSONRPCServer.py  602
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/__init__.py  34
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/config.py  141
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/history.py  95
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonclass.py  295
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonrpc.py  1192
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/threadpool.py  490
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/utils.py  122
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/PKG-INFO  460
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/SOURCES.txt  17
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/dependency_links.txt  1
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/top_level.txt  1
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.cfg  8
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.py  74
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ACKS  6
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/AUTHORS  5
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ChangeLog  165
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/LICENSE  21
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/PKG-INFO  51
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/README  27
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/RELEASE-NOTES  50
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/Makefile  73
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/conf.py  179
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/index.rst  275
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/PKG-INFO  51
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/SOURCES.txt  26
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/dependency_links.txt  1
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/not-zip-safe  1
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/top_level.txt  1
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/__init__.py  326
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/linklockfile.py  73
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/mkdirlockfile.py  83
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/pidlockfile.py  193
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/sqlitelockfile.py  155
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/symlinklockfile.py  69
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.cfg  39
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.py  30
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test-requirements.txt  2
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/compliancetest.py  261
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/test_lockfile.py  36
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/tox.ini  28
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/ChangeLog  380
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.ASF-2  202
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.GPL-3  674
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/MANIFEST.in  7
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/PKG-INFO  38
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/__init__.py  49
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/_metadata.py  152
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/daemon.py  926
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/pidfile.py  67
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/runner.py  324
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/CREDITS  53
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/FAQ  156
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/TODO  95
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/hacking.txt  180
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/PKG-INFO  38
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/SOURCES.txt  30
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/dependency_links.txt  1
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/not-zip-safe  1
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/requires.txt  3
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/top_level.txt  1
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/version_info.json  6
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.cfg  11
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.py  106
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/__init__.py  23
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/scaffold.py  322
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_daemon.py  1744
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_metadata.py  380
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_pidfile.py  472
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_runner.py  675
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test_version.py  1373
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/version.py  547
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/rednose.py  387
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/setup.py  29
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/termstyle/MANIFEST.in  1
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/termstyle/Makefile  9
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/termstyle/README.rst  82
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/termstyle/VERSION  1
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/termstyle/python-termstyle.xml  183
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/termstyle/setup.py  27
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/termstyle/termstyle.py  107
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/termstyle/test2.py  5
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/termstyle/test3.py  5
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/termstyle/test_all.sh  4
-rwxr-xr-x  scripts/automation/trex_control_plane/python_lib/zmq_fedora.tar.gz  bin 0 -> 1752871 bytes
-rwxr-xr-x  scripts/automation/trex_control_plane/server/CCustomLogger.py  100
-rwxr-xr-x  scripts/automation/trex_control_plane/server/extended_daemon_runner.py  144
-rwxr-xr-x  scripts/automation/trex_control_plane/server/outer_packages.py  66
-rwxr-xr-x  scripts/automation/trex_control_plane/server/trex_daemon_server  25
-rwxr-xr-x  scripts/automation/trex_control_plane/server/trex_daemon_server.py  87
-rwxr-xr-x  scripts/automation/trex_control_plane/server/trex_launch_thread.py  92
-rwxr-xr-x  scripts/automation/trex_control_plane/server/trex_server.py  465
-rwxr-xr-x  scripts/automation/trex_control_plane/server/zmq_monitor_thread.py  80
-rwxr-xr-x  scripts/automation/trex_control_plane/unit_tests/__init__.py  1
-rwxr-xr-x  scripts/automation/trex_control_plane/unit_tests/client_launching_test.py  31
-rwxr-xr-x  scripts/automation/trex_control_plane/unit_tests/control_plane_general_test.py  72
-rwxr-xr-x  scripts/automation/trex_control_plane/unit_tests/control_plane_unit_test.py  73
-rwxr-xr-x  scripts/automation/trex_control_plane/unit_tests/functional_test.py  160
-rwxr-xr-x  scripts/automation/trex_control_plane/unit_tests/nose_outer_packages.py  27
-rwxr-xr-x  scripts/automation/trex_control_plane/unit_tests/sock.py  552
-rwxr-xr-x  scripts/automation/trex_control_plane/unit_tests/test.py  36
-rwxr-xr-x  scripts/automation/trex_perf.py  1265
-rwxr-xr-x  scripts/automation/wkhtmltopdf-amd64  bin 0 -> 8301444 bytes
-rwxr-xr-x  scripts/avl/_tun_citrix_0_fixed.pcap  bin 0 -> 94477 bytes
-rwxr-xr-x  scripts/avl/_tun_exchange_0_fixed.pcap  bin 0 -> 11946 bytes
-rwxr-xr-x  scripts/avl/_tun_http_browsing_0_fixed.pcap  bin 0 -> 45175 bytes
-rwxr-xr-x  scripts/avl/_tun_http_get_0_fixed.pcap  bin 0 -> 49326 bytes
-rwxr-xr-x  scripts/avl/_tun_http_post_0_fixed.pcap  bin 0 -> 63342 bytes
-rwxr-xr-x  scripts/avl/_tun_https_0_fixed.pcap  bin 0 -> 111955 bytes
-rwxr-xr-x  scripts/avl/_tun_mail_pop_0_fixed.pcap  bin 0 -> 7227 bytes
-rwxr-xr-x  scripts/avl/_tun_mail_pop_1_fixed.pcap  bin 0 -> 123955 bytes
-rwxr-xr-x  scripts/avl/_tun_mail_pop_2_fixed.pcap  bin 0 -> 19484 bytes
-rwxr-xr-x  scripts/avl/_tun_oracle_0_fixed.pcap  bin 0 -> 64895 bytes
-rwxr-xr-x  scripts/avl/_tun_rtsp_0_fixed.pcap  bin 0 -> 5002 bytes
-rwxr-xr-x  scripts/avl/_tun_smtp_0_fixed.pcap  bin 0 -> 7298 bytes
-rwxr-xr-x  scripts/avl/_tun_smtp_1_fixed.pcap  bin 0 -> 22742 bytes
-rwxr-xr-x  scripts/avl/_tun_smtp_2_fixed.pcap  bin 0 -> 117980 bytes
-rwxr-xr-x  scripts/avl/avl.csv  26
-rwxr-xr-x  scripts/avl/avl_delay_10.csv  27
-rwxr-xr-x  scripts/avl/citrix_0.pcap  bin 0 -> 91105 bytes
-rwxr-xr-x  scripts/avl/delay_10_citrix_0.pcap  bin 0 -> 90017 bytes
-rwxr-xr-x  scripts/avl/delay_10_dns_0.pcap  bin 0 -> 226 bytes
-rwxr-xr-x  scripts/avl/delay_10_exchange_0.pcap  bin 0 -> 10732 bytes
-rwxr-xr-x  scripts/avl/delay_10_http_browsing_0.pcap  bin 0 -> 35189 bytes
-rwxr-xr-x  scripts/avl/delay_10_http_get_0.pcap  bin 0 -> 38734 bytes
-rwxr-xr-x  scripts/avl/delay_10_http_post_0.pcap  bin 0 -> 49572 bytes
-rwxr-xr-x  scripts/avl/delay_10_https_0.pcap  bin 0 -> 93563 bytes
-rwxr-xr-x  scripts/avl/delay_10_mail_pop_0.pcap  bin 0 -> 6027 bytes
-rwxr-xr-x  scripts/avl/delay_10_mail_pop_1.pcap  bin 0 -> 103821 bytes
-rwxr-xr-x  scripts/avl/delay_10_mail_pop_2.pcap  bin 0 -> 16254 bytes
-rwxr-xr-x  scripts/avl/delay_10_oracle_0.pcap  bin 0 -> 62195 bytes
-rwxr-xr-x  scripts/avl/delay_10_rtp_160k_0.pcap  bin 0 -> 99790 bytes
-rwxr-xr-x  scripts/avl/delay_10_rtp_160k_1.pcap  bin 0 -> 1155285 bytes
-rwxr-xr-x  scripts/avl/delay_10_rtp_160k_full.pcap  bin 0 -> 1259861 bytes
-rwxr-xr-x  scripts/avl/delay_10_rtp_250k_0_0.pcap  bin 0 -> 168536 bytes
-rwxr-xr-x  scripts/avl/delay_10_rtp_250k_1_0.pcap  bin 0 -> 1790082 bytes
-rwxr-xr-x  scripts/avl/delay_10_rtp_250k_full.pcap  bin 0 -> 1963404 bytes
-rwxr-xr-x  scripts/avl/delay_10_rtsp_0.pcap  bin 0 -> 4774 bytes
-rwxr-xr-x  scripts/avl/delay_10_sip_0.pcap  bin 0 -> 2831 bytes
-rwxr-xr-x  scripts/avl/delay_10_sip_video_call_full.pcap  bin 0 -> 153359 bytes
-rwxr-xr-x  scripts/avl/delay_10_sip_video_call_short.pcap  bin 0 -> 3515 bytes
-rwxr-xr-x  scripts/avl/delay_10_smtp_0.pcap  bin 0 -> 6082 bytes
-rwxr-xr-x  scripts/avl/delay_10_smtp_1.pcap  bin 0 -> 19068 bytes
-rwxr-xr-x  scripts/avl/delay_10_smtp_2.pcap  bin 0 -> 98768 bytes
-rwxr-xr-x  scripts/avl/delay_10_video_call_0.pcap  bin 0 -> 2579101 bytes
-rwxr-xr-x  scripts/avl/delay_10_video_call_rtp_0.pcap  bin 0 -> 128964 bytes
-rwxr-xr-x  scripts/avl/delay_citrix_0.pcap  bin 0 -> 94794 bytes
-rwxr-xr-x  scripts/avl/delay_dns_0.pcap  bin 0 -> 226 bytes
-rwxr-xr-x  scripts/avl/delay_email_pop_0.pcap  bin 0 -> 6027 bytes
-rwxr-xr-x  scripts/avl/delay_email_pop_1.pcap  bin 0 -> 110263 bytes
-rwxr-xr-x  scripts/avl/delay_email_pop_2.pcap  bin 0 -> 18508 bytes
-rwxr-xr-x  scripts/avl/delay_exchange_0.pcap  bin 0 -> 10732 bytes
-rwxr-xr-x  scripts/avl/delay_http_browsing_0.pcap  bin 0 -> 35349 bytes
-rwxr-xr-x  scripts/avl/delay_http_get_0.pcap  bin 0 -> 39134 bytes
-rwxr-xr-x  scripts/avl/delay_http_post_0.pcap  bin 0 -> 50052 bytes
-rwxr-xr-x  scripts/avl/delay_https_0.pcap  bin 0 -> 96297 bytes
-rwxr-xr-x  scripts/avl/delay_oracle_0.pcap  bin 0 -> 62195 bytes
-rwxr-xr-x  scripts/avl/delay_rtp_160k_1_1_0.pcap  bin 0 -> 1155285 bytes
-rwxr-xr-x  scripts/avl/delay_rtp_160k_1_1_1.pcap  bin 0 -> 99790 bytes
-rwxr-xr-x  scripts/avl/delay_rtp_250k_0_0.pcap  bin 0 -> 168536 bytes
-rwxr-xr-x  scripts/avl/delay_rtp_250k_2_0.pcap  bin 0 -> 1790082 bytes
-rwxr-xr-x  scripts/avl/delay_rtsp_0.pcap  bin 0 -> 4304 bytes
-rwxr-xr-x  scripts/avl/delay_sip_0.pcap  bin 0 -> 2831 bytes
-rwxr-xr-x  scripts/avl/delay_smtp_0.pcap  bin 0 -> 6242 bytes
-rwxr-xr-x  scripts/avl/delay_smtp_1.pcap  bin 0 -> 19628 bytes
-rwxr-xr-x  scripts/avl/delay_smtp_2.pcap  bin 0 -> 105770 bytes
-rwxr-xr-x  scripts/avl/delay_video_call_0.pcap  bin 0 -> 2579101 bytes
-rwxr-xr-x  scripts/avl/delay_video_call_rtp_0.pcap  bin 0 -> 150508 bytes
-rwxr-xr-x  scripts/avl/dns_0.pcap  bin 0 -> 234 bytes
-rwxr-xr-x  scripts/avl/email_pop1.pcap  bin 0 -> 6107 bytes
-rwxr-xr-x  scripts/avl/email_pop1_1.pcap  bin 0 -> 6107 bytes
-rwxr-xr-x  scripts/avl/email_pop2.pcap  bin 0 -> 104277 bytes
-rwxr-xr-x  scripts/avl/email_pop2_2.pcap  bin 0 -> 104277 bytes
-rwxr-xr-x  scripts/avl/email_pop4_29.pcap  bin 0 -> 16374 bytes
-rwxr-xr-x  scripts/avl/exchange.pcap  bin 0 -> 10904 bytes
-rwxr-xr-x  scripts/avl/http_browsing.pcap  bin 0 -> 35337 bytes
-rwxr-xr-x  scripts/avl/http_get.pcap  bin 0 -> 38910 bytes
-rwxr-xr-x  scripts/avl/http_post.pcap  bin 0 -> 49788 bytes
-rwxr-xr-x  scripts/avl/https.pcap  bin 0 -> 93947 bytes
-rwxr-xr-x  scripts/avl/mac_uit.yaml  5
-rwxr-xr-x  scripts/avl/oracle.pcap  bin 0 -> 63403 bytes
-rwxr-xr-x  scripts/avl/rtp_160_0.pcap  bin 0 -> 98852 bytes
-rwxr-xr-x  scripts/avl/rtp_160_1.pcap  bin 0 -> 1158408 bytes
-rwxr-xr-x  scripts/avl/rtp_250k_1_0.pcap  bin 0 -> 148880 bytes
-rwxr-xr-x  scripts/avl/rtp_250k_2_0.pcap  bin 0 -> 1797762 bytes
-rwxr-xr-x  scripts/avl/rtsp_0.pcap  bin 0 -> 4382 bytes
-rwxr-xr-x  scripts/avl/sfr_branch_profile_delay_10.yaml  114
-rwxr-xr-x  scripts/avl/sfr_delay_10.yaml  119
-rwxr-xr-x  scripts/avl/sfr_delay_10_1g.yaml  118
-rwxr-xr-x  scripts/avl/sfr_delay_10_1g_no_bundeling.yaml  130
-rwxr-xr-x  scripts/avl/sfr_delay_10_no_bundeling.yaml  131
-rwxr-xr-x  scripts/avl/sfr_delay_50_tunnel_no_bundeling.yaml  131
-rwxr-xr-x  scripts/avl/sip_0.pcap  bin 0 -> 2859 bytes
-rwxr-xr-x  scripts/avl/smtp_1.pcap  bin 0 -> 6170 bytes
-rwxr-xr-x  scripts/avl/smtp_2.pcap  bin 0 -> 19208 bytes
-rwxr-xr-x  scripts/avl/smtp_3.pcap  bin 0 -> 99208 bytes
-rwxr-xr-x  scripts/avl/test_mac.yaml  29
-rwxr-xr-x  scripts/avl/video_call_0.pcap  bin 0 -> 2588401 bytes
-rwxr-xr-x  scripts/avl/video_rtp_1588_0.pcap  bin 0 -> 148656 bytes
-rwxr-xr-x  scripts/cap2/Oracle.pcap  bin 0 -> 62195 bytes
-rwxr-xr-x  scripts/cap2/Video_Calls.pcap  bin 0 -> 2579101 bytes
-rwxr-xr-x  scripts/cap2/Voice_calls_rtp_only.pcap  bin 0 -> 124214 bytes
-rwxr-xr-x  scripts/cap2/citrix.pcap  bin 0 -> 89937 bytes
-rwxr-xr-x  scripts/cap2/delay_10_rtp_250k_short.pcap  bin 0 -> 19821 bytes
-rwxr-xr-x  scripts/cap2/dns.pcap  bin 0 -> 226 bytes
-rwxr-xr-x  scripts/cap2/dns.yaml  23
-rwxr-xr-x  scripts/cap2/dns_one_server.yaml  33
-rwxr-xr-x  scripts/cap2/dns_single_server.yaml  32
-rwxr-xr-x  scripts/cap2/dns_wlen.yaml  25
-rwxr-xr-x  scripts/cap2/dns_wlen1.yaml  25
-rwxr-xr-x  scripts/cap2/dns_wlen2.yaml  32
-rwxr-xr-x  scripts/cap2/dns_wlength.yaml  25
-rwxr-xr-x  scripts/cap2/dyn_pyld1.yaml  36
-rwxr-xr-x  scripts/cap2/exchange.pcap  bin 0 -> 10732 bytes
-rwxr-xr-x  scripts/cap2/http.yaml  22
-rwxr-xr-x  scripts/cap2/http_browsing.pcap  bin 0 -> 35429 bytes
-rwxr-xr-x  scripts/cap2/http_get.pcap  bin 0 -> 42781 bytes
-rwxr-xr-x  scripts/cap2/http_plugin.yaml  24
-rwxr-xr-x  scripts/cap2/http_post.pcap  bin 0 -> 41026 bytes
-rwxr-xr-x  scripts/cap2/http_simple.yaml  21
-rwxr-xr-x  scripts/cap2/https.pcap  bin 0 -> 174163 bytes
-rwxr-xr-x  scripts/cap2/imix.yaml  35
-rwxr-xr-x  scripts/cap2/imix_1518.yaml  70
-rwxr-xr-x  scripts/cap2/imix_64.yaml  70
-rwxr-xr-x  scripts/cap2/imix_fast_1g.yaml  53
-rwxr-xr-x  scripts/cap2/imix_fast_1g_100k_flows.yaml  53
-rwxr-xr-x  scripts/cap2/ipv4_vlan.yaml  21
-rwxr-xr-x  scripts/cap2/ipv6.pcap  bin 0 -> 130 bytes
-rwxr-xr-x  scripts/cap2/ipv6.yaml  22
-rwxr-xr-x  scripts/cap2/ipv6_vlan.yaml  23
-rwxr-xr-x  scripts/cap2/lb_ex1.yaml  26
-rwxr-xr-x  scripts/cap2/limit_multi_pkt.yaml  23
-rwxr-xr-x  scripts/cap2/limit_single_pkt.yaml  23
-rwxr-xr-x  scripts/cap2/mail_pop.pcap  bin 0 -> 16254 bytes
-rwxr-xr-x  scripts/cap2/nat_test.yaml  46
-rwxr-xr-x  scripts/cap2/rtp_160k.pcap  bin 0 -> 1150301 bytes
-rwxr-xr-x  scripts/cap2/rtp_250k_rtp_only.pcap  bin 0 -> 3900796 bytes
-rwxr-xr-x  scripts/cap2/rtp_250k_rtp_only_1.pcap  bin 0 -> 168032 bytes
-rwxr-xr-x  scripts/cap2/rtp_250k_rtp_only_2.pcap  bin 0 -> 1782402 bytes
-rwxr-xr-x  scripts/cap2/rtsp.yaml  24
-rwxr-xr-x  scripts/cap2/rtsp_full1.yaml  17
-rwxr-xr-x  scripts/cap2/rtsp_full2.yaml  24
-rwxr-xr-x  scripts/cap2/rtsp_short.pcap  bin 0 -> 11692 bytes
-rwxr-xr-x  scripts/cap2/rtsp_short1.yaml  24
-rwxr-xr-x  scripts/cap2/rtsp_short1_slow.yaml  24
-rwxr-xr-x  scripts/cap2/rtsp_short2.yaml  24
-rwxr-xr-x  scripts/cap2/rtsp_short3.yaml  24
-rwxr-xr-x  scripts/cap2/sfr.yaml  90
-rwxr-xr-x  scripts/cap2/sfr2.yaml  30
-rwxr-xr-x  scripts/cap2/sfr3.yaml  90
-rwxr-xr-x  scripts/cap2/sfr4.yaml  20
-rwxr-xr-x  scripts/cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile.yaml  83
-rwxr-xr-x  scripts/cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile_ipg_mix.yaml  240
-rwxr-xr-x  scripts/cap2/short_tcp.yaml  20
-rwxr-xr-x  scripts/cap2/sip_short1.yaml  24
-rwxr-xr-x  scripts/cap2/sip_short2.yaml  24
-rwxr-xr-x  scripts/cap2/smtp.pcap  bin 0 -> 98768 bytes
-rwxr-xr-x  scripts/cap2/test_mac.yaml  9
-rwxr-xr-x  scripts/cap2/test_pcap_mode1.yaml  24
-rwxr-xr-x  scripts/cap2/test_pcap_mode2.yaml  24
-rwxr-xr-x  scripts/cap2/tuple_gen.yaml  10
-rwxr-xr-x  scripts/cap2/udp_1518B.pcap  bin 0 -> 1558 bytes
-rwxr-xr-x  scripts/cap2/udp_594B.pcap  bin 0 -> 634 bytes
-rwxr-xr-x  scripts/cap2/udp_64B.pcap  bin 0 -> 104 bytes
-rwxr-xr-x  scripts/cfg/cfg_example1.yaml  27
-rwxr-xr-x  scripts/cfg/cfg_example2.yaml  26
-rwxr-xr-x  scripts/cfg/ins1.yaml  25
-rwxr-xr-x  scripts/cfg/ins2.yaml  25
-rwxr-xr-x  scripts/cfg/ins3.yaml  27
-rwxr-xr-x  scripts/cfg/ucs_h0.yaml  9
-rwxr-xr-x  scripts/cfg/ucs_h1.yaml  9
-rwxr-xr-x  scripts/cfg/xl710.yaml  29
-rwxr-xr-x  scripts/daemon_server  27
-rwxr-xr-x  scripts/doc_process.py  109
-rwxr-xr-x  scripts/dpdk_nic_bind.py  539
-rwxr-xr-x  scripts/dpdk_setup_ports.py  232
-rwxr-xr-x  scripts/exp/dns-0-ex.erf  bin 0 -> 2080 bytes
-rw-r--r--  scripts/exp/dns-0.erf  bin 0 -> 2080 bytes
-rwxr-xr-x  scripts/exp/dns_e-0-ex.erf  bin 0 -> 2080 bytes
-rw-r--r--  scripts/exp/dns_e-0.erf  bin 0 -> 2080 bytes
-rwxr-xr-x  scripts/exp/dns_flip-0-ex.erf  bin 0 -> 2080 bytes
-rw-r--r--  scripts/exp/dns_flip-0.erf  bin 0 -> 2080 bytes
-rwxr-xr-x  scripts/exp/dns_ipv6-0-ex.erf  bin 0 -> 2560 bytes
-rw-r--r--  scripts/exp/dns_ipv6-0.erf  bin 0 -> 2560 bytes
-rwxr-xr-x  scripts/exp/dns_ipv6_rxcheck-ex.erf  bin 0 -> 304 bytes
-rw-r--r--  scripts/exp/dns_ipv6_rxcheck.erf  bin 0 -> 304 bytes
-rwxr-xr-x  scripts/exp/dns_one_server-0-ex.erf  bin 0 -> 4160 bytes
-rw-r--r--  scripts/exp/dns_one_server-0.erf  bin 0 -> 4160 bytes
-rwxr-xr-x  scripts/exp/dns_p-0-ex.erf  bin 0 -> 2080 bytes
-rw-r--r--  scripts/exp/dns_p-0.erf  bin 0 -> 2080 bytes
-rwxr-xr-x  scripts/exp/dns_rxcheck-ex.erf  bin 0 -> 256 bytes
-rw-r--r--  scripts/exp/dns_rxcheck.erf  bin 0 -> 256 bytes
-rwxr-xr-x  scripts/exp/dns_single_server-0-ex.erf  bin 0 -> 3360 bytes
-rwxr-xr-x  scripts/exp/dns_wlen-0-ex.erf  bin 0 -> 2240 bytes
-rwxr-xr-x  scripts/exp/dns_wlen1-0-ex.erf  bin 0 -> 2240 bytes
-rwxr-xr-x  scripts/exp/dns_wlen2-0-ex.erf  bin 0 -> 4480 bytes
-rwxr-xr-x  scripts/exp/dyn_pyld1-0-ex.erf  bin 0 -> 2080 bytes
-rw-r--r--  scripts/exp/dyn_pyld1-0.erf  bin 0 -> 2080 bytes
-rwxr-xr-x  scripts/exp/http1_with_option-ex.pcap  bin 0 -> 35049 bytes
-rw-r--r--  scripts/exp/http1_with_option.pcap  bin 0 -> 35049 bytes
-rwxr-xr-x  scripts/exp/http1_with_option_ipv6-ex.pcap  bin 0 -> 35713 bytes
-rw-r--r--  scripts/exp/http1_with_option_ipv6.pcap  bin 0 -> 35713 bytes
-rwxr-xr-x  scripts/exp/http_plugin-0-ex.erf  bin 0 -> 35328 bytes
-rw-r--r--  scripts/exp/http_plugin-0.erf  bin 0 -> 35328 bytes
-rwxr-xr-x  scripts/exp/http_plugin_v6-0-ex.erf  bin 0 -> 36008 bytes
-rw-r--r--  scripts/exp/http_plugin_v6-0.erf  bin 0 -> 36008 bytes
-rwxr-xr-x  scripts/exp/imix-0-ex.erf  bin 0 -> 62872 bytes
-rw-r--r--  scripts/exp/imix-0.erf  bin 0 -> 62872 bytes
-rwxr-xr-x  scripts/exp/imix_v6-0-ex.erf  bin 0 -> 65480 bytes
-rw-r--r--  scripts/exp/imix_v6-0.erf  bin 0 -> 65480 bytes
-rwxr-xr-x  scripts/exp/ipv4_vlan-0-ex.erf  bin 0 -> 8800 bytes
-rw-r--r--  scripts/exp/ipv4_vlan-0.erf  bin 0 -> 8800 bytes
-rwxr-xr-x  scripts/exp/ipv6-0-ex.erf  bin 0 -> 11200 bytes
-rw-r--r--  scripts/exp/ipv6-0.erf  bin 0 -> 11200 bytes
-rwxr-xr-x  scripts/exp/ipv6_vlan-0-ex.erf  bin 0 -> 11200 bytes
-rw-r--r--  scripts/exp/ipv6_vlan-0.erf  bin 0 -> 11200 bytes
-rwxr-xr-x  scripts/exp/limit_multi_pkt-0-ex.erf  bin 0 -> 30368 bytes
-rw-r--r--  scripts/exp/limit_multi_pkt-0.erf  bin 0 -> 30368 bytes
-rwxr-xr-x  scripts/exp/limit_single_pkt-0-ex.erf  bin 0 -> 5368 bytes
-rw-r--r--  scripts/exp/limit_single_pkt-0.erf  bin 0 -> 5368 bytes
-rwxr-xr-x  scripts/exp/pcap_mode1-0-ex.erf  bin 0 -> 91456 bytes
-rw-r--r--  scripts/exp/pcap_mode1-0.erf  bin 0 -> 91456 bytes
-rwxr-xr-x  scripts/exp/pcap_mode2-0-ex.erf  bin 0 -> 914560 bytes
-rw-r--r--  scripts/exp/pcap_mode2-0.erf  bin 0 -> 914560 bytes
-rwxr-xr-x  scripts/exp/rtsp_short1-0-ex.erf  bin 0 -> 20024 bytes
-rw-r--r--  scripts/exp/rtsp_short1-0.erf  bin 0 -> 20024 bytes
-rwxr-xr-x  scripts/exp/rtsp_short1_ipv6_rxcheck-ex.erf  bin 0 -> 21560 bytes
-rw-r--r--  scripts/exp/rtsp_short1_ipv6_rxcheck.erf  bin 0 -> 21560 bytes
-rwxr-xr-x  scripts/exp/rtsp_short1_rxcheck-ex.erf  bin 0 -> 20912 bytes
-rw-r--r--  scripts/exp/rtsp_short1_rxcheck.erf  bin 0 -> 20912 bytes
-rwxr-xr-x  scripts/exp/rtsp_short1_v6-0-ex.erf  bin 0 -> 20672 bytes
-rw-r--r--  scripts/exp/rtsp_short1_v6-0.erf  bin 0 -> 20672 bytes
-rwxr-xr-x  scripts/exp/rtsp_short2-0-ex.erf  bin 0 -> 20024 bytes
-rw-r--r--  scripts/exp/rtsp_short2-0.erf  bin 0 -> 20024 bytes
-rwxr-xr-x  scripts/exp/rtsp_short2_v6-0-ex.erf  bin 0 -> 20672 bytes
-rw-r--r--  scripts/exp/rtsp_short2_v6-0.erf  bin 0 -> 20672 bytes
-rwxr-xr-x  scripts/exp/rtsp_short3-0-ex.erf  bin 0 -> 20032 bytes
-rw-r--r--  scripts/exp/rtsp_short3-0.erf  bin 0 -> 20032 bytes
-rwxr-xr-x  scripts/exp/rtsp_short3_v6-0-ex.erf  bin 0 -> 20696 bytes
-rw-r--r--  scripts/exp/rtsp_short3_v6-0.erf  bin 0 -> 20696 bytes
-rwxr-xr-x  scripts/exp/sctp-ex.erf  bin 0 -> 704 bytes
-rw-r--r--  scripts/exp/sctp.erf  bin 0 -> 704 bytes
-rwxr-xr-x  scripts/exp/sfr2-0-ex.erf  bin 0 -> 1830944 bytes
-rw-r--r--  scripts/exp/sfr2-0.erf  bin 0 -> 1830944 bytes
-rwxr-xr-x  scripts/exp/sfr3-0-ex.erf  bin 0 -> 10351656 bytes
-rw-r--r--  scripts/exp/sfr3-0.erf  bin 0 -> 10351656 bytes
-rwxr-xr-x  scripts/exp/sfr_4-0-ex.erf  bin 0 -> 42968 bytes
-rw-r--r--  scripts/exp/sfr_4-0.erf  bin 0 -> 42968 bytes
-rwxr-xr-x  scripts/exp/sip_short1-0-ex.erf  bin 0 -> 3576 bytes
-rw-r--r--  scripts/exp/sip_short1-0.erf  bin 0 -> 3576 bytes
-rwxr-xr-x  scripts/exp/sip_short1_v6-0-ex.erf  bin 0 -> 3880 bytes
-rw-r--r--  scripts/exp/sip_short1_v6-0.erf  bin 0 -> 3880 bytes
-rwxr-xr-x  scripts/exp/sip_short2-0-ex.erf  bin 0 -> 3576 bytes
-rw-r--r--  scripts/exp/sip_short2-0.erf  bin 0 -> 3576 bytes
-rwxr-xr-x  scripts/exp/sip_short2_v6-0-ex.erf  bin 0 -> 3880 bytes
-rw-r--r--  scripts/exp/sip_short2_v6-0.erf  bin 0 -> 3880 bytes
-rwxr-xr-x  scripts/exp/sip_short3-0-ex.erf  bin 0 -> 3584 bytes
-rw-r--r--  scripts/exp/sip_short3-0.erf  bin 0 -> 3584 bytes
-rwxr-xr-x  scripts/exp/sip_short3_v6-0-ex.erf  bin 0 -> 3888 bytes
-rw-r--r--  scripts/exp/sip_short3_v6-0.erf  bin 0 -> 3888 bytes
-rwxr-xr-x  scripts/ko/3.11.10-301.fc20.x86_64/igb_uio.ko  bin 0 -> 230302 bytes
-rwxr-xr-x  scripts/ko/3.13.0-32-generic/igb_uio.ko  bin 0 -> 16907 bytes
-rwxr-xr-x  scripts/ko/3.16.0-37-generic/igb_uio.ko  bin 0 -> 16531 bytes
-rwxr-xr-x  scripts/ko/3.17.4-301.fc21.x86_64/igb_uio.ko  bin 0 -> 241815 bytes
-rwxr-xr-x  scripts/ko/3.18.9-100.fc20.x86_64/igb_uio.ko  bin 0 -> 251127 bytes
-rwxr-xr-x  scripts/ko/3.19.1-201.fc21.x86_64/igb_uio.ko  bin 0 -> 252755 bytes
-rwxr-xr-x  scripts/ko/3.6.10-4.fc18.x86_64/igb_uio.ko  bin 0 -> 199501 bytes
-rwxr-xr-x  scripts/ko/src/Makefile  38
-rwxr-xr-x  scripts/ko/src/compat.h  116
-rwxr-xr-x  scripts/ko/src/igb_uio.c  643
-rwxr-xr-x  scripts/ko/src/readme.txt  16
-rwxr-xr-x  scripts/ko/src/rte_pci_dev_feature_defs.h  45
-rwxr-xr-x  scripts/ko/src/rte_pci_dev_features.h  44
-rwxr-xr-x  scripts/libzmq.so.3  bin 0 -> 3150071 bytes
-rwxr-xr-x  scripts/libzmq.so.3.1.0  bin 0 -> 3150071 bytes
-rwxr-xr-x  scripts/python-lib/yaml/__init__.py  315
-rw-r--r--  scripts/python-lib/yaml/__init__.pyc  bin 0 -> 13543 bytes
-rwxr-xr-x  scripts/python-lib/yaml/composer.py  139
-rw-r--r--  scripts/python-lib/yaml/composer.pyc  bin 0 -> 5476 bytes
-rwxr-xr-x  scripts/python-lib/yaml/constructor.py  675
-rw-r--r--  scripts/python-lib/yaml/constructor.pyc  bin 0 -> 25778 bytes
-rwxr-xr-x  scripts/python-lib/yaml/cyaml.py  85
-rw-r--r--  scripts/python-lib/yaml/cyaml.pyc  bin 0 -> 4841 bytes
-rwxr-xr-x  scripts/python-lib/yaml/dumper.py  62
-rw-r--r--  scripts/python-lib/yaml/dumper.pyc  bin 0 -> 3120 bytes
-rwxr-xr-x  scripts/python-lib/yaml/emitter.py  1140
-rw-r--r--  scripts/python-lib/yaml/emitter.pyc  bin 0 -> 37006 bytes
-rwxr-xr-x  scripts/python-lib/yaml/error.py  75
-rw-r--r--  scripts/python-lib/yaml/error.pyc  bin 0 -> 3674 bytes
-rwxr-xr-x  scripts/python-lib/yaml/events.py  86
-rw-r--r--  scripts/python-lib/yaml/events.pyc  bin 0 -> 6757 bytes
-rwxr-xr-x  scripts/python-lib/yaml/loader.py  40
-rw-r--r--  scripts/python-lib/yaml/loader.pyc  bin 0 -> 2443 bytes
-rwxr-xr-x  scripts/python-lib/yaml/nodes.py  49
-rw-r--r--  scripts/python-lib/yaml/nodes.pyc  bin 0 -> 2976 bytes
-rwxr-xr-x  scripts/python-lib/yaml/parser.py  589
-rw-r--r--  scripts/python-lib/yaml/parser.pyc  bin 0 -> 17397 bytes
-rwxr-xr-x  scripts/python-lib/yaml/reader.py  190
-rw-r--r--  scripts/python-lib/yaml/reader.pyc  bin 0 -> 6696 bytes
-rwxr-xr-x  scripts/python-lib/yaml/representer.py  484
-rw-r--r--  scripts/python-lib/yaml/representer.pyc  bin 0 -> 17745 bytes
-rwxr-xr-x  scripts/python-lib/yaml/resolver.py  224
-rw-r--r--  scripts/python-lib/yaml/resolver.pyc  bin 0 -> 7503 bytes
-rwxr-xr-x  scripts/python-lib/yaml/scanner.py  1457
-rw-r--r--  scripts/python-lib/yaml/scanner.pyc  bin 0 -> 39060 bytes
-rwxr-xr-x  scripts/python-lib/yaml/serializer.py  111
-rw-r--r--  scripts/python-lib/yaml/serializer.pyc  bin 0 -> 5121 bytes
-rwxr-xr-x  scripts/python-lib/yaml/tokens.py  104
-rw-r--r--  scripts/python-lib/yaml/tokens.pyc  bin 0 -> 8859 bytes
-rwxr-xr-x  scripts/stty_r  2
-rwxr-xr-x  scripts/t-rex-64  25
-rwxr-xr-x  scripts/t-rex-64-debug  9
-rwxr-xr-x  scripts/t-rex-64-debug-gdb  4
-rwxr-xr-x  scripts/t-rex-64-debug-o  9
-rwxr-xr-x  scripts/t-rex-64-debug-o-gdb  4
-rwxr-xr-x  scripts/t-rex-64-o  9
-rwxr-xr-x  scripts/trex-cfg  57
-rwxr-xr-x  scripts/trex_daemon_server  25
-rwxr-xr-x  scripts/version.txt  4
488 files changed, 40193 insertions, 0 deletions
diff --git a/scripts/automation/config/trex-dan.cfg b/scripts/automation/config/trex-dan.cfg
new file mode 100755
index 00000000..110f22e9
--- /dev/null
+++ b/scripts/automation/config/trex-dan.cfg
@@ -0,0 +1,33 @@
+# Configuration for the trex-dan TRex machine
+#
+# machine_name - DNS name or IP of the TRex machine, used to ssh into the box
+# machine_type - 1G or 10G TRex machine
+# config_file - [Optional] configuration file for TRex if needed
+# is_dual - should TRex inject with -p ?
+# cores - how many cores should be used
+# limit_ports - how many ports should be used
+# latency - rate of latency packets injected by TRex
+# misc_params - [Optional] misc parameters to be passed to TRex
+
+[trex]
+machine_name=trex-dan
+machine_port=8090
+history_size=100
+machine_type=1G
+config_file=
+is_dual=yes
+cores=2
+limit_ports=2
+latency=1000
+latency_condition=10000
+misc_params=
+
+# Configuration for the router connected to the TRex
+#
+# interface - interface that can be used to communicate with the router
+
+[router]
+type=ASR
+interface=10.56.199.247
+password=lab
+
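
Editor's note: these .cfg files are plain INI files with [trex] and [router] sections. As a minimal sketch of how an automation script might read one (assuming Python 2's stock ConfigParser; the actual loader used by trex_perf.py is not shown in this excerpt):

    import ConfigParser  # Python 2 module name; the scripts target /router/bin/python

    config = ConfigParser.ConfigParser()
    config.read('config/trex-dan.cfg')

    machine_name = config.get('trex', 'machine_name')    # 'trex-dan'
    is_dual      = config.getboolean('trex', 'is_dual')  # 'yes' -> True
    latency      = config.getint('trex', 'latency')      # 1000
    router_if    = config.get('router', 'interface')     # '10.56.199.247'
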
diff --git a/scripts/automation/config/trex-dev3.cfg b/scripts/automation/config/trex-dev3.cfg
new file mode 100755
index 00000000..0d0801e9
--- /dev/null
+++ b/scripts/automation/config/trex-dev3.cfg
@@ -0,0 +1,34 @@
+# Configuration for the trex-dev3 TRex machine
+#
+# machine_name - DNS name or IP of the TRex machine, used to ssh into the box
+# password - root password for the TRex machine
+# machine_type - 1G or 10G TRex machine
+# config_file - [Optional] configuration file for TRex if needed
+# is_dual - should TRex inject with -p ?
+# version_path - path to the t-rex version and executable
+# exec - executable name (which will be under the version_path)
+# cores - how many cores should be used
+# limit_ports - how many ports should be used
+# latency - rate of latency packets injected by TRex
+# misc_params - [Optional] misc parameters to be passed to TRex
+
+[trex]
+machine_name=trex-dev3
+machine_type=10G
+config_file=
+is_dual=yes
+version_path=/auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.52
+exec=t-rex-64
+cores=2
+limit_ports=2
+latency=1000
+# misc_params="--nc"
+
+# Configuration for the router connected to the TRex
+#
+# interface - interface that can be used to communicate with the router
+
+[router]
+type=ASR
+interface=10.56.128.198
+password=lab
diff --git a/scripts/automation/config/trex-esp80-hhaim.cfg b/scripts/automation/config/trex-esp80-hhaim.cfg
new file mode 100755
index 00000000..fa5414d4
--- /dev/null
+++ b/scripts/automation/config/trex-esp80-hhaim.cfg
@@ -0,0 +1,31 @@
+# Configuration for the trex-esp80-hhaim TRex machine
+#
+# machine_name - DNS name or IP of the TRex machine, used to ssh into the box
+# password - root password for the TRex machine
+# machine_type - 1G or 10G TRex machine
+# config_file - configuration file for TRex, can be "" if default
+# is_dual - should TRex inject with -p ?
+# version_path - path to the t-rex version and executable
+# exec - executable name (which will be under the version_path)
+# cores - how many cores should be used
+# limit_ports - how many ports should be used
+# latency - rate of latency packets injected by TRex
+
+[trex]
+machine_name=csi-kiwi-02
+machine_type=10G
+config_file=
+is_dual=yes
+version_path=/auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.32/
+exec=t-rex-64
+limit_ports=4
+latency=1000
+misc_params=
+
+# Configuration for the router connected to the TRex
+#
+# interface - interface that can be used to communicate with the router
+
+[router]
+interface=10.56.192.57
+password=cisco
diff --git a/scripts/automation/config/trex-hhaim.cfg b/scripts/automation/config/trex-hhaim.cfg
new file mode 100755
index 00000000..44eba6f2
--- /dev/null
+++ b/scripts/automation/config/trex-hhaim.cfg
@@ -0,0 +1,33 @@
+# Configuration for the trex-hhaim TRex machine
+#
+# machine_name - DNS name or IP of the TRex machine, used to ssh into the box
+# machine_type - 1G or 10G TRex machine
+# config_file - [Optional] configuration file for TRex if needed
+# is_dual - should TRex inject with -p ?
+# cores - how many cores should be used
+# limit_ports - how many ports should be used
+# latency - rate of latency packets injected by TRex
+# misc_params - [Optional] misc parameters to be passed to TRex
+
+[trex]
+machine_name=10.56.217.210
+machine_port=8090
+history_size=100
+machine_type=10G
+config_file=
+is_dual=yes
+cores=4
+limit_ports=4
+latency=1000
+latency_condition=1000
+misc_params=
+
+# Configuration for the router connected to the TRex
+#
+# interface - interface that can be used to communicate with the router
+
+[router]
+type=ASR
+interface=10.56.192.57
+password=cisco
+
diff --git a/scripts/automation/config/trex01-1g.cfg b/scripts/automation/config/trex01-1g.cfg
new file mode 100755
index 00000000..98953cae
--- /dev/null
+++ b/scripts/automation/config/trex01-1g.cfg
@@ -0,0 +1,35 @@
+# Configuration for the trex01-1g TRex machine
+#
+# machine_name - DNS name or IP of the TRex machine, used to ssh into the box
+# password - root password for the TRex machine
+# machine_type - 1G or 10G TRex machine
+# config_file - [Optional] configuration file for TRex if needed
+# is_dual - should TRex inject with -p ?
+# version_path - path to the t-rex version and executable
+# exec - executable name (which will be under the version_path)
+# cores - how many cores should be used
+# limit_ports - how many ports should be used
+# latency - rate of latency packets injected by TRex
+# misc_params - [Optional] misc parameters to be passed to TRex
+
+[trex]
+machine_name=trex01-1g
+password=password
+machine_type=1G
+config_file=
+is_dual=yes
+version_path=/auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.35
+exec=t-rex-64
+cores=2
+limit_ports=2
+latency=1000
+misc_params="--nc"
+
+# Configuration for the router connected to the TRex
+#
+# interface - interface that can be used to communicate with the router
+
+[router]
+type=ASR
+interface=10.56.30.49
+password=cisco
diff --git a/scripts/automation/graph_template.html b/scripts/automation/graph_template.html
new file mode 100755
index 00000000..984fbc49
--- /dev/null
+++ b/scripts/automation/graph_template.html
@@ -0,0 +1,80 @@
+
+<html>
+ <head>
+ <script type="text/javascript" src="https://www.google.com/jsapi"></script>
+ <script type="text/javascript">
+ google.load("visualization", "1", {packages:["corechart"]});
+ google.load("visualization", "1", {packages:["table"]});
+ google.setOnLoadCallback(drawChart);
+ function drawChart() {
+
+ var cpu_data = google.visualization.arrayToDataTable([
+ ['Bandwidth [Mbps]', 'CPU [%]', 'Max. Latency [usec]', 'Avg. Latency [usec]'],
+ !@#$template_fill_graph!@#$
+ ])
+
+ var cpu_options = {
+ title: '!@#$template_fill_head!@#$',
+ hAxis: { title: 'Bandwidth [Mbps]', format:'#.##'},
+ vAxes:[
+ {title: 'CPU Util [%]',format:'#%', minValue:0, maxValue: 1}, // Left axis
+ {title: 'Latency [usec]'}, // Right axis
+ ],
+ series: {0: {targetAxisIndex:0},
+ 1: {targetAxisIndex:1},
+ 2: {targetAxisIndex:1},
+ },
+ colors: ["green", "red", "blue"],
+ };
+
+ var chart = new google.visualization.LineChart(document.getElementById('chart_div'));
+
+ chart.draw(cpu_data, cpu_options);
+
+ var plot_data = new google.visualization.DataTable();
+ plot_data.addColumn('number', 'BW [Mbps]');
+ plot_data.addColumn('number', 'PPS [Kpps]');
+ plot_data.addColumn('number', 'CPU Util. [%]');
+ plot_data.addColumn('number', 'BW / CPU');
+ plot_data.addColumn('number', 'Max. Latency [usec]');
+ plot_data.addColumn('number', 'Avg. Latency [usec]');
+ plot_data.addColumn('number', 'Pkt Drop [pkts]');
+ plot_data.addRows([
+ !@#$template_fill_table!@#$
+ ]);
+
+ var formatter = new google.visualization.NumberFormat(
+ {fractionDigits:2});
+ formatter.format(plot_data, 0); // Apply formatter to Bandwidth util column
+
+ var formatter = new google.visualization.NumberFormat(
+ {fractionDigits: 0});
+ formatter.format(plot_data, 1); // Apply formatter to PPS column
+
+ formatter = new google.visualization.NumberFormat(
+ {pattern:'#,###%'});
+ formatter.format(plot_data, 2); // Apply formatter to CPU util column
+
+ formatter = new google.visualization.NumberFormat(
+ {fractionDigits: 2});
+ formatter.format(plot_data, 3); // Apply formatter to BW / CPU column
+
+ formatter = new google.visualization.NumberFormat(
+ {fractionDigits: 0});
+      formatter.format(plot_data, 4); // Apply formatter to Max. Latency column
+      formatter.format(plot_data, 5); // Apply formatter to Avg. Latency column
+ formatter.format(plot_data, 6); // Apply formatter to Pkt Drop
+
+ var table = new google.visualization.Table(document.getElementById('table_div'));
+
+ table.draw(plot_data, {showRowNumber: true});
+ }
+
+ </script>
+ </head>
+ <body>
+ <div id="chart_div" style="width: 900px; height: 500px; position: relative;"></div>
+ <div id="table_div" style="display: table"></div>
+ </body>
+</html>
+
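
Editor's note: the !@#$template_fill_*!@#$ markers above are placeholders for the reporting code to replace with literal JavaScript rows matching the column declarations. A hypothetical fill step, assuming plain string substitution on the Python side (file path and values are illustrative, not taken from the actual generator):

    # Hypothetical sketch of filling the template by string substitution.
    with open('graph_template.html') as f:
        template = f.read()

    # One row per sample: [BW Mbps, CPU fraction (axis max is 1), max latency, avg latency]
    rows = "[500.25, 0.35, 120, 40],\n[750.50, 0.52, 180, 55]"
    html = template.replace('!@#$template_fill_graph!@#$', rows)
    html = html.replace('!@#$template_fill_head!@#$', 'TRex performance: trex-dan')
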
diff --git a/scripts/automation/h_avc.py b/scripts/automation/h_avc.py
new file mode 100755
index 00000000..75548d92
--- /dev/null
+++ b/scripts/automation/h_avc.py
@@ -0,0 +1,195 @@
+#!/router/bin/python-2.4.3
+import time,os, sys, string
+from os.path import exists
+from os import system, remove, chdir
+import re
+import time
+import random
+import copy
+import telnetlib
+import datetime
+import collections
+from trex_perf import TrexRunException
+
+
+import random
+import time
+
+class RouterIOException(Exception):
+ def __init__ (self, reason):
+ # generate the error message
+ #self.message = "\nSummary of error:\n\n %s\n" % (reason)
+ self.message = reason
+
+ def __str__(self):
+ return self.message
+
+# basic router class
+class Router:
+ def __init__ (self, host, port, password, str_wait = "#"):
+ self.host = host
+ self.port = port;
+ self.pr = str_wait;
+ self.password = password
+ self.to = 60
+ self.cpu_util_histo = []
+
+ # private function - command send
+ def _command (self, command, match = None, timeout = None):
+ to = timeout if (timeout != None) else self.to
+ m = match if (match != None) else [self.pr]
+
+ if not isinstance(m, list):
+ m = [m]
+
+ total_to = 0
+ while True:
+ self.tn.write(command + "\n")
+ ret = self.tn.expect(m, timeout = 2)
+ total_to += 2
+
+ if ret[0] != -1:
+ result = {}
+ result['match_index'] = ret[0]
+ result['output'] = ret[2]
+ return (result)
+
+ if total_to >= self.to:
+ raise RouterIOException("Failed to process command to router %s" % command)
+
+ # connect to router by telnet
+ def connect (self):
+ # create telnet session
+ self.tn = telnetlib.Telnet ()
+
+ try:
+ self.tn.open(self.host, self.port)
+ except IOError:
+ raise RouterIOException("Failed To Connect To Router interface at '%s' : '%s'" % (self.host, self.port))
+
+        # get a ready console and decide whether a password is needed
+ ret = self._command("", ["Password", ">", "#"])
+ if ret['match_index'] == 0:
+ self._command(self.password, [">", "#"])
+
+ # can't hurt to call enable even if on enable
+ ret = self._command("enable 15", ["Password", "#"])
+ if (ret['match_index'] == 0):
+ self._command(self.password, "#")
+
+ self._command("terminal length 0")
+
+ def close (self):
+ self.tn.close ()
+ self.tn = None
+
+ # implemented through derived classes
+ def sample_cpu (self):
+ raise Exception("abstract method called")
+
+ def get_last_cpu_util (self):
+ if not self.cpu_util_histo:
+ return (0)
+ else:
+ return self.cpu_util_histo[len(self.cpu_util_histo) - 1]
+
+ def get_cpu_util_histo (self):
+ return self.cpu_util_histo
+
+ def get_filtered_cpu_util_histo (self):
+ trim_start = int(0.15 * len(self.cpu_util_histo))
+
+ filtered = self.cpu_util_histo[trim_start:]
+ if not filtered:
+ return [0]
+
+ m = collections.Counter(filtered).most_common(n = 1)[0][0]
+ #m = max(self.cpu_util_histo)
+ filtered = [x for x in filtered if (x > (0.9*m))]
+ return filtered
+
+ def clear_sampling_stats (self):
+ self.cpu_util_histo = []
+
+
+ # add a sample to the database
+ def sample_stats (self):
+ # sample CPU util
+ cpu_util = self.sample_cpu()
+ self.cpu_util_histo.append(cpu_util)
+
+ def get_stats (self):
+ stats = {}
+
+ filtered_cpu_util = self.get_filtered_cpu_util_histo()
+
+ if not filtered_cpu_util:
+ stats['cpu_util'] = 0
+ else:
+ stats['cpu_util'] = sum(filtered_cpu_util)/len(filtered_cpu_util)
+
+ stats['cpu_histo'] = self.get_cpu_util_histo()
+
+ return stats
+
+
+class ASR1k(Router):
+ def __init__ (self, host, password, port, str_wait = "#"):
+ Router.__init__(self, host, password, port, str_wait)
+
+ def sample_cpu (self):
+ cpu_show_cmd = "show platform hardware qfp active datapath utilization | inc Load"
+ output = self._command(cpu_show_cmd)['output']
+ lines = output.split('\n');
+
+ cpu_util = -1.0
+ # search for the line
+ for l in lines:
+ m = re.match("\W*Processing: Load\D*(\d+)\D*(\d+)\D*(\d+)\D*(\d+)\D*", l)
+ if m:
+ cpu_util = float(m.group(1))
+
+ if (cpu_util == -1.0):
+ raise Exception("cannot determine CPU util. for asr1k")
+
+ return cpu_util
+
+
+class ISR(Router):
+ def __init__ (self, host, password, port, str_wait = "#"):
+ Router.__init__(self, host, password, port, str_wait)
+
+ def sample_cpu (self):
+ cpu_show_cmd = "show processes cpu sorted | inc CPU utilization"
+ output = self._command(cpu_show_cmd)['output']
+ lines = output.split('\n');
+
+ cpu_util = -1.0
+
+ # search for the line
+ for l in lines:
+ m = re.match("\W*CPU utilization for five seconds: (\d+)%/(\d+)%", l)
+ if m:
+ max_cpu_util = float(m.group(1))
+ min_cpu_util = float(m.group(2))
+ cpu_util = (min_cpu_util + max_cpu_util)/2
+
+ if (cpu_util == -1.0):
+ raise Exception("cannot determine CPU util. for ISR")
+
+ return cpu_util
+
+
+
+if __name__ == "__main__":
+ #router = ASR1k("pqemb19ts", "cisco", port=2052)
+ router = ISR("10.56.198.7", "lab")
+ router.connect()
+ for i in range(1, 10):
+ router.sample_stats()
+ time.sleep(1)
+
+
+
+
+
diff --git a/scripts/automation/phantom/phantomjs b/scripts/automation/phantom/phantomjs
new file mode 100755
index 00000000..af9e4ab1
--- /dev/null
+++ b/scripts/automation/phantom/phantomjs
Binary files differ
diff --git a/scripts/automation/phantom/rasterize.js b/scripts/automation/phantom/rasterize.js
new file mode 100755
index 00000000..165bcfa7
--- /dev/null
+++ b/scripts/automation/phantom/rasterize.js
@@ -0,0 +1,32 @@
+var page = require('webpage').create(),
+ system = require('system'),
+ address, output, size;
+
+if (system.args.length < 3 || system.args.length > 5) {
+ console.log('Usage: rasterize.js URL filename [paperwidth*paperheight|paperformat] [zoom]');
+ console.log(' paper (pdf output) examples: "5in*7.5in", "10cm*20cm", "A4", "Letter"');
+ phantom.exit(1);
+} else {
+ address = system.args[1];
+ output = system.args[2];
+ page.viewportSize = { width: 600, height: 600 };
+ if (system.args.length > 3 && system.args[2].substr(-4) === ".pdf") {
+ size = system.args[3].split('*');
+ page.paperSize = size.length === 2 ? { width: size[0], height: size[1], margin: '0px' }
+ : { format: system.args[3], orientation: 'portrait', margin: '1cm' };
+ }
+ if (system.args.length > 4) {
+ page.zoomFactor = system.args[4];
+ }
+ page.open(address, function (status) {
+ if (status !== 'success') {
+ console.log('Unable to load the address!');
+ phantom.exit();
+ } else {
+ window.setTimeout(function () {
+ page.render(output);
+ phantom.exit();
+ }, 200);
+ }
+ });
+}
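
Editor's note: this script is driven by the PhantomJS binary bundled in the same directory; matching the usage string above, a plausible PDF render could be (file names are illustrative):

    ./phantomjs rasterize.js report.html report.pdf A4
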
diff --git a/scripts/automation/readme.txt b/scripts/automation/readme.txt
new file mode 100755
index 00000000..2541a1a3
--- /dev/null
+++ b/scripts/automation/readme.txt
@@ -0,0 +1,15 @@
+README - trex_perf.py
+=====================
+
+This script uses the T-Rex RESTful client-server control plane architecture and tries to find the maximum M (platform factor) for T-Rex before hitting one of two stopping conditions:
+(*) Packet drops
+(*) High latency.
+    Since acceptable latency can change from one platform to another, and might suffer from a kickoff peak (especially on a VM), it is the user's responsibility to provide the latency condition.
+    A common value used on non-VM machines is 1000, whereas on VM machines values around 5000 are more common.
+
+Please note that the '-f' and '-c' options are mandatory.
+
+Also, it is the user's responsibility to make sure a T-Rex server is running and listening for the relevant client requests coming from this script.
+
+Example for finding the max M (between 10 and 100) with the imix_fast_1g.yaml traffic profile:
+./trex_perf.py -m 10 100 -c config/trex-hhaim.cfg all drop -f cap2/imix_fast_1g.yaml
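
Editor's note: as the readme says, a T-Rex server must already be listening before this script is run. A minimal connectivity check, sketched with the client class added later in this commit (the sys.path line is an illustrative assumption about the working directory):

    import sys
    sys.path.append('trex_control_plane/client')  # assumed: run from scripts/automation
    from trex_client import CTRexClient

    # The constructor calls check_server_connectivity(), so simply creating the
    # object verifies that a daemon is listening on trex_daemon_port.
    trex = CTRexClient('trex-dan', trex_daemon_port=8090, verbose=True)
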
diff --git a/scripts/automation/report_template.html b/scripts/automation/report_template.html
new file mode 100755
index 00000000..779d5429
--- /dev/null
+++ b/scripts/automation/report_template.html
@@ -0,0 +1,96 @@
+<!DOCTYPE html>
+<html>
+
+<head>
+
+<style>
+ html{overflow-y:scroll;}
+body
+{
+font-size:12px;
+color:#000000;
+background-color:#ffffff;
+margin:0px;
+background-image:url('/images/gradientfromtop.gif');
+background-repeat:repeat-x;
+}
+body,p,h1,h2,h3,h4,table,td,th,ul,ol,textarea,input
+{
+font-family:verdana,helvetica,arial,sans-serif;
+}
+h1 {font-size:190%;margin-top:0px;font-weight:normal}
+h2 {font-size:160%;margin-top:10px;margin-bottom:10px;font-weight:normal}
+h3 {font-size:120%;font-weight:normal}
+h4 {font-size:100%;}
+h5 {font-size:90%;}
+h6 {font-size:80%;}
+h1,h2,h3,h4,h5,h6
+{
+background-color:transparent;
+color:#000000;
+}
+table.myWideTable
+{
+background-color:#ffffff;
+border:1px solid #c3c3c3;
+border-collapse:collapse;
+width:100%;
+}
+table.myWideTable th
+{
+background-color:#e5eecc;
+border:1px solid #c3c3c3;
+padding:3px;
+vertical-align:top;
+text-align:left;
+}table.myWideTable td
+{
+border:1px solid #c3c3c3;
+padding:3px;
+vertical-align:top;
+}table.myTable
+{
+background-color:#ffffff;
+border:1px solid #c3c3c3;
+border-collapse:collapse;
+width:50%;
+}
+table.myTable th
+{
+background-color:#e5eecc;
+border:1px solid #c3c3c3;
+padding:3px;
+vertical-align:top;
+text-align:left;
+}table.myTable td
+{
+border:1px solid #c3c3c3;
+padding:3px;
+vertical-align:top;
+}
+ </style>
+
+
+</head>
+
+<body>
+
+<H1>
+T-Rex Performance Report
+</H1>
+
+<H2>
+Job Setup
+</H2>
+
+!@#$template_fill_job_setup_table!@#$
+
+<H2>
+Job Summary
+</H2>
+
+!@#$template_fill_job_summary_table!@#$
+
+</body>
+</html>
+
diff --git a/scripts/automation/sshpass.exp b/scripts/automation/sshpass.exp
new file mode 100755
index 00000000..f27210c8
--- /dev/null
+++ b/scripts/automation/sshpass.exp
@@ -0,0 +1,15 @@
+#!/usr/cisco/bin/expect -f
+# usage: ./sshpass.exp password 192.168.1.11 id *
+set pass [lrange $argv 0 0]
+set server [lrange $argv 1 1]
+set name [lrange $argv 2 2]
+set cmd [lrange $argv 3 10]
+
+set cmd_str [join $cmd]
+
+spawn ssh $name@$server $cmd_str
+match_max 100000
+expect "*?assword:*"
+send -- "$pass\r"
+send -- "\r"
+interact
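
Editor's note: matching the usage comment at the top (password first, then server, user name, and up to eight words of remote command), an illustrative invocation might be:

    ./sshpass.exp lab 10.56.199.247 root show version
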
diff --git a/scripts/automation/trex_control_plane/__init__.py b/scripts/automation/trex_control_plane/__init__.py
new file mode 100755
index 00000000..d3f5a12f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/__init__.py
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/client/__init__.py b/scripts/automation/trex_control_plane/client/__init__.py
new file mode 100755
index 00000000..e1d24710
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client/__init__.py
@@ -0,0 +1 @@
+__all__ = ["trex_client"]
diff --git a/scripts/automation/trex_control_plane/client/outer_packages.py b/scripts/automation/trex_control_plane/client/outer_packages.py
new file mode 100755
index 00000000..a7c34e48
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client/outer_packages.py
@@ -0,0 +1,29 @@
+#!/router/bin/python
+
+import sys,site
+import platform,os
+
+CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory
+PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, 'python_lib'))
+
+
+CLIENT_MODULES = ['enum34-1.0.4',
+ # 'jsonrpclib-0.1.3',
+ 'jsonrpclib-pelix-0.2.5',
+ 'termstyle',
+ 'rpc_exceptions-0.1'
+ ]
+
+def import_client_modules ():
+ sys.path.append(ROOT_PATH)
+ import_module_list(CLIENT_MODULES)
+
+def import_module_list (modules_list):
+ assert(isinstance(modules_list, list))
+ for p in modules_list:
+ full_path = os.path.join(PATH_TO_PYTHON_LIB, p)
+        fix_path = os.path.normcase(full_path)  # normalized path (currently unused)
+ site.addsitedir(full_path)
+
+import_client_modules()
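
Editor's note: importing this module for its side effect is how the client code later in this commit resolves the bundled third-party packages; a minimal sketch, assuming it runs from the client/ directory:

    # Importing outer_packages registers python_lib/ subdirectories as site dirs,
    # so the bundled libraries become importable by their normal names.
    import outer_packages
    import jsonrpclib          # served from python_lib/jsonrpclib-pelix-0.2.5
    from enum import Enum      # served from python_lib/enum34-1.0.4
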
diff --git a/scripts/automation/trex_control_plane/client/trex_adv_client.py b/scripts/automation/trex_control_plane/client/trex_adv_client.py
new file mode 100755
index 00000000..b3fe3dad
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client/trex_adv_client.py
@@ -0,0 +1,70 @@
+#!/router/bin/python
+
+import trex_client
+from jsonrpclib import ProtocolError, AppError
+
+class CTRexAdvClient(trex_client.CTRexClient):
+ def __init__ (self, trex_host, max_history_size = 100, trex_daemon_port = 8090, trex_zmq_port = 4500, verbose = False):
+ super(CTRexAdvClient, self).__init__(trex_host, max_history_size, trex_daemon_port, trex_zmq_port, verbose)
+ pass
+
+ # T-REX KIWI advanced methods
+ def start_quick_trex(self, pcap_file, d, delay, dual, ipv6, times, interfaces):
+ try:
+ return self.server.start_quick_trex(pcap_file = pcap_file, duration = d, dual = dual, delay = delay, ipv6 = ipv6, times = times, interfaces = interfaces)
+ except AppError as err:
+            self._handle_AppError_exception(err.args[0])  # single underscore: defined on the parent CTRexClient (a double underscore would be name-mangled)
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def stop_quick_trex(self):
+ try:
+ return self.server.stop_quick_trex()
+ except AppError as err:
+            self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+# def is_running(self):
+# pass
+
+ def get_running_stats(self):
+ try:
+ return self.server.get_running_stats()
+ except AppError as err:
+            self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def clear_counters(self):
+ try:
+ return self.server.clear_counters()
+ except AppError as err:
+            self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+
+if __name__ == "__main__":
+ trex = CTRexAdvClient('trex-dan', trex_daemon_port = 8383, verbose = True)
+ print trex.start_quick_trex(delay = 10,
+ dual = True,
+ d = 20,
+ interfaces = ["gig0/0/1", "gig0/0/2"],
+ ipv6 = False,
+ pcap_file="avl/http_browsing.pcap",
+ times=3)
+ print trex.stop_quick_trex()
+ print trex.get_running_stats()
+ print trex.clear_counters()
+ pass
+
+
diff --git a/scripts/automation/trex_control_plane/client/trex_client.py b/scripts/automation/trex_control_plane/client/trex_client.py
new file mode 100755
index 00000000..1f297538
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client/trex_client.py
@@ -0,0 +1,1066 @@
+#!/router/bin/python
+
+import sys
+import os
+
+try:
+ # support import for Python 2
+ import outer_packages
+except ImportError:
+ # support import for Python 3
+ import client.outer_packages
+import jsonrpclib
+from jsonrpclib import ProtocolError, AppError
+from common.trex_status_e import TRexStatus
+from common.trex_exceptions import *
+from common.trex_exceptions import exception_handler
+from client_utils.general_utils import *
+from enum import Enum
+import socket
+import errno
+import time
+import re
+import copy
+import binascii
+from collections import deque
+from json import JSONDecoder
+from distutils.util import strtobool
+
+
+
+class CTRexClient(object):
+ """
+    This class defines the client side of the RESTful interaction with T-Rex
+ """
+
+ def __init__(self, trex_host, max_history_size = 100, trex_daemon_port = 8090, trex_zmq_port = 4500, verbose = False):
+ """
+ Instatiate a T-Rex client object, and connecting it to listening deamon-server
+
+ :parameters:
+ trex_host : str
+ a string of the t-rex ip address or hostname.
+ max_history_size : int
+ a number to set the maximum history size of a single T-Rex run. Each sampling adds a new item to history.
+
+ default value : **100**
+ trex_daemon_port : int
+ the port number on which the trex-daemon server can be reached
+
+ default value: **8090**
+ trex_zmq_port : int
+ the port number on which trex's zmq module will interact with the daemon server
+
+ default value: **4500**
+ verbose : bool
+ sets verbose output on supported class methods.
+
+ default value : **False**
+
+ :raises:
+ socket errors, in case server could not be reached.
+
+ """
+ self.trex_host = trex_host
+ self.trex_daemon_port = trex_daemon_port
+ self.trex_zmq_port = trex_zmq_port
+ self.seq = None
+ self.verbose = verbose
+ self.result_obj = CTRexResult(max_history_size)
+ self.decoder = JSONDecoder()
+ self.trex_server_path = "http://{hostname}:{port}/".format( hostname = trex_host, port = trex_daemon_port )
+ self.__verbose_print("Connecting to T-Rex @ {trex_path} ...".format( trex_path = self.trex_server_path ) )
+ self.history = jsonrpclib.history.History()
+ self.server = jsonrpclib.Server(self.trex_server_path, history = self.history)
+ self.check_server_connectivity()
+ self.__verbose_print("Connection established successfully!")
+ self._last_sample = time.time()
+ self.__default_user = get_current_user()
+
+
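+ # 'add' below is a simple echo-style sanity call: it asks the server to add
+ # two numbers, which is handy for verifying that RPC round-trips work.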
+ def add (self, x, y):
+ try:
+ return self.server.add(x,y)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def start_trex (self, f, d, block_to_success = True, timeout = 30, user = None, **trex_cmd_options):
+ """
+ Request to start a T-Rex run on server.
+
+ :parameters:
+ f : str
+ a path (on server) for the injected traffic data (.yaml file)
+ d : int
+ the desired duration of the test. must be at least 30 seconds long.
+ block_to_success : bool
+ determine if this method blocks until T-Rex changes state from 'Starting' to either 'Idle' or 'Running'
+
+ default value : **True**
+ timeout : int
+ maximum time (in seconds) to wait in blocking state until T-Rex changes state from 'Starting' to either 'Idle' or 'Running'
+
+ default value: **30**
+ user : str
+ the identity of the run issuer.
+ trex_cmd_options : key, val
+ sets desired T-Rex options using key=val syntax, separated by comma.
+ for keys with no value, state key=True
+
+ :return:
+ **True** on success
+
+ :raises:
+ + :exc:`ValueError`, in case the 'd' parameter was given an invalid value.
+ + :exc:`trex_exceptions.TRexError`, in case one of the trex_cmd_options raised an exception at server.
+ + :exc:`trex_exceptions.TRexInUseError`, in case T-Rex is already taken.
+ + :exc:`trex_exceptions.TRexRequestDenied`, in case T-Rex is reserved for a user other than the one trying to start T-Rex.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ user = user or self.__default_user
+ try:
+ d = int(d)
+ if d < 30: # test duration must be at least 30 seconds
+ raise ValueError
+ except ValueError:
+ raise ValueError('d parameter must be an integer specifying how long T-Rex should run; it must be at least 30 secs.')
+
+ trex_cmd_options.update( {'f' : f, 'd' : d} )
+
+ self.result_obj.clear_results()
+ try:
+ issue_time = time.time()
+ retval = self.server.start_trex(trex_cmd_options, user, block_to_success, timeout)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ if retval != 0:
+ self.seq = retval # update seq num only on successful submission
+ return True
+ else: # T-Rex has been started by another user
+ raise TRexInUseError('T-Rex is already being used by another user or process. Try again once T-Rex is back in IDLE state.')
+
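+ # A minimal usage sketch for start_trex (the hostname and the server-side
+ # yaml path below are hypothetical, for illustration only):
+ #
+ # client = CTRexClient('trex-dan')
+ # client.start_trex(f = 'cap2/dns.yaml', d = 60)
+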
+ def stop_trex (self):
+ """
+ Request to stop a T-Rex run on server.
+
+ The request is only valid if the stop initiator is the same client as the T-Rex run initiator.
+
+ :parameters:
+ None
+
+ :return:
+ + **True** on successful termination
+ + **False** if request issued but T-Rex wasn't running.
+
+ :raises:
+ + :exc:`trex_exceptions.TRexRequestDenied`, in case T-Rex is running but was started by another user.
+ + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed T-Rex run (unexpected termination).
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ return self.server.stop_trex(self.seq)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def force_kill (self, confirm = True):
+ """
+ Force killing of running T-Rex process (if exists) on the server.
+
+ .. tip:: This method is a safety method and **overrides any running or reserved resources**, and as such isn't designed to be used on a regular basis.
+ Always consider using :func:`trex_client.CTRexClient.stop_trex` instead.
+
+ At the end of this method, T-Rex will return to IDLE state with no reservation.
+
+ :parameters:
+ confirm : bool
+ Prompt a user confirmation before terminating the T-Rex session
+
+ :return:
+ + **True** on successful termination
+ + **False** otherwise.
+
+ :raises:
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ if confirm:
+ prompt = "WARNING: This will terminate active T-Rex session indiscriminately.\nAre you sure? "
+ sys.stdout.write('%s [y/n]\n' % prompt)
+ while True:
+ try:
+ if strtobool(user_input().lower()):
+ break
+ else:
+ return
+ except ValueError:
+ sys.stdout.write('Please respond with \'y\' or \'n\'.\n')
+ try:
+ return self.server.force_trex_kill()
+ except AppError as err:
+ # Silence any kind of application errors- by design
+ return False
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def wait_until_kickoff_finish(self, timeout = 40):
+ """
+ Block the client application until T-Rex changes state from 'Starting' to either 'Idle' or 'Running'
+
+ :parameters:
+ timeout : int
+ maximum time (in seconds) to wait in blocking state until T-Rex changes state from 'Starting' to either 'Idle' or 'Running'
+
+ :return:
+ + **True** on successful termination
+ + **False** if request issued but T-Rex wasn't running.
+
+ :raises:
+ + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed T-Rex run (unexpected termination).
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ .. note:: Exceptions are thrown only when start_trex did not block in the first place, i.e. the `block_to_success` parameter was set to `False`
+
+ """
+
+ try:
+ return self.server.wait_until_kickoff_finish(timeout)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def is_running (self, dump_out = False):
+ """
+ Poll for T-Rex running status.
+
+ If T-Rex is running, a history item will be added into result_obj and processed.
+
+ .. tip:: This method is especially useful for iterating until T-Rex run is finished.
+
+ :parameters:
+ dump_out : dict
+ if passed, the referenced dict is cleared and the latest dump is stored in it.
+
+ :return:
+ + **True** if T-Rex is running.
+ + **False** if T-Rex is not running.
+
+ :raises:
+ + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed T-Rex run (unexpected termination).
+ + :exc:`TypeError`, in case of a JSON stream decoding error.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ res = self.get_running_info()
+ if res == {}:
+ return False
+ if (dump_out != False) and (isinstance(dump_out, dict)): # save received dump to given 'dump_out' pointer
+ dump_out.clear()
+ dump_out.update(res)
+ return True
+ except TRexWarning as err:
+ if err.code == -12: # TRex is either still at 'Starting' state or in Idle state, however NO error occurred
+ return False
+ except TRexException:
+ raise
+ except ProtocolError as err:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def get_trex_files_path (self):
+ """
+ Fetches the local path in which files are stored when pushed to the T-Rex server from the client.
+
+ :parameters:
+ None
+
+ :return:
+ string representation of the desired path
+
+ .. note:: The returned path represents a path on the T-Rex server **local machine**
+
+ :raises:
+ ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ return (self.server.get_files_path() + '/')
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def get_running_status (self):
+ """
+ Fetches the current T-Rex status.
+
+ If available, verbose data will accompany the state itself.
+
+ :parameters:
+ None
+
+ :return:
+ dictionary with 'state' and 'verbose' keys.
+
+ :raises:
+ ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ res = self.server.get_running_status()
+ res['state'] = TRexStatus(res['state'])
+ return res
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def get_running_info (self):
+ """
+ Performs a single poll of T-Rex running data and processes it into the result object (named `result_obj`).
+
+ .. tip:: This method will throw an exception if T-Rex isn't running. Always consider using :func:`trex_client.CTRexClient.is_running` which handles a single poll operation in a safer manner.
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the most updated data dump from T-Rex.
+
+ :raises:
+ + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed T-Rex run (unexpected termination).
+ + :exc:`TypeError`, in case of a JSON stream decoding error.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ if not self.is_query_relevance():
+ # if requested in timeframe smaller than the original sample rate, return the last known data without interacting with server
+ return self.result_obj.get_latest_dump()
+ else:
+ try:
+ latest_dump = self.decoder.decode( self.server.get_running_info() ) # latest dump is not a dict, but json string. decode it.
+ self.result_obj.update_result_data(latest_dump)
+ return latest_dump
+ except TypeError as inst:
+ raise TypeError('JSON-RPC data decoding failed. Check out incoming JSON stream.')
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def sample_until_condition (self, condition_func, time_between_samples = 5):
+ """
+ Automatically samples T-Rex data at an ongoing rate described by time_between_samples.
+
+ On each fetched dump, condition_func is applied to the result object; if it returns True, sampling stops.
+
+ :parameters:
+ condition_func : function
+ function that operates on result_obj and checks if a condition has been met
+
+ .. note:: `condition_func` is applied on a `CTRexResult` object. Make sure to design a relevant method.
+ time_between_samples : int
+ determines the time between each sample of the server
+
+ default value : **5**
+
+ :return:
+ the first result object (see :class:`CTRexResult` for further details) of the T-Rex run on which the condition has been met.
+
+ :raises:
+ + :exc:`UserWarning`, in case the condition_func condition wasn't met during the entire run
+ + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed T-Rex run (unexpected termination).
+ + :exc:`TypeError`, in case of a JSON stream decoding error.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+ + :exc:`Exception`, in case the condition_func suffered from any kind of exception
+
+ """
+ # make sure T-Rex is running. raise exceptions here if any
+ self.wait_until_kickoff_finish()
+ try:
+ while self.is_running():
+ results = self.get_result_obj()
+ if condition_func(results):
+ # if condition satisfied, stop T-Rex and return result object
+ self.stop_trex()
+ return results
+ time.sleep(time_between_samples)
+ except TRexWarning:
+ # means we're back to Idle state, and didn't meet our condition
+ raise UserWarning("T-Rex results condition wasn't met during T-Rex run.")
+ except Exception:
+ # this could come from provided method 'condition_func'
+ raise
+
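+ # A sketch of a condition function suitable for sample_until_condition
+ # (the packet-count threshold is an arbitrary example value):
+ #
+ # def reached_target_tx(result_obj):
+ # tx = result_obj.get_last_value("trex-global.data.m_total_tx_pkts")
+ # return tx is not None and tx > 500000
+ #
+ # results = client.sample_until_condition(reached_target_tx, time_between_samples = 2)
+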
+ def sample_to_run_finish (self, time_between_samples = 5):
+ """
+ Automatically samples T-Rex data with a sampling rate described by time_between_samples, until the T-Rex run finishes.
+
+ :parameters:
+ time_between_samples : int
+ determines the time between each sample of the server
+
+ default value : **5**
+
+ :return:
+ the latest result object (see :class:`CTRexResult` for further details) with sampled data.
+
+ :raises:
+ + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed T-Rex run (unexpected termination).
+ + :exc:`TypeError`, in case of a JSON stream decoding error.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ self.wait_until_kickoff_finish()
+
+ try:
+ while self.is_running():
+ time.sleep(time_between_samples)
+ except TRexWarning:
+ pass
+ results = self.get_result_obj()
+ return results
+
+
+ def get_result_obj (self, copy_obj = True):
+ """
+ Returns the result object of the trex_client's instance.
+
+ By default, returns a **copy** of the object (so that later changes to the live object do not affect the returned copy).
+
+ :parameters:
+ copy_obj : bool
+ False means that a reference to the original (possibly changing) object is passed
+
+ default value : **True**
+
+ :return:
+ the latest result object (see :class:`CTRexResult` for further details) with sampled data.
+
+ """
+ if copy_obj:
+ return copy.deepcopy(self.result_obj)
+ else:
+ return self.result_obj
+
+ def is_reserved (self):
+ """
+ Checks if T-Rex is currently reserved to any user or not.
+
+ :parameters:
+ None
+
+ :return:
+ + **True** if T-Rex is reserved.
+ + **False** otherwise.
+
+ :raises:
+ ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ return self.server.is_reserved()
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def reserve_trex (self, user = None):
+ """
+ Reserves the usage of T-Rex to a certain user.
+
+ When T-Rex is reserved, no other user can start new T-Rex runs until the reservation is canceled.
+
+ :parameters:
+ user : str
+ a username of the desired owner of T-Rex
+
+ default: the currently logged-in user
+
+ :return:
+ **True** if reservation made successfully
+
+ :raises:
+ + :exc:`trex_exceptions.TRexRequestDenied`, in case T-Rex is reserved for another user than the one trying to make the reservation.
+ + :exc:`trex_exceptions.TRexInUseError`, in case T-Rex is currently running.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ username = user or self.__default_user
+ try:
+ return self.server.reserve_trex(user = username)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def cancel_reservation (self, user = None):
+ """
+ Cancels a current reservation of T-Rex to a certain user.
+
+ When T-Rex is reserved, no other user can start new T-Rex runs.
+
+
+ :parameters:
+ user : str
+ a username of the desired owner of T-Rex
+
+ default: the currently logged-in user
+
+ :return:
+ + **True** if reservation canceled successfully,
+ + **False** if there was no reservation at all.
+
+ :raises:
+ + :exc:`trex_exceptions.TRexRequestDenied`, in case T-Rex is reserved for another user than the one trying to cancel the reservation.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+
+ username = user or self.__default_user
+ try:
+ return self.server.cancel_reservation(user = username)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def push_files (self, filepaths):
+ """
+ Pushes a file (or a list of files) to be stored locally on the server.
+
+ :parameters:
+ filepaths : str or list
+ a path to a file to be pushed to server.
+ if a list of paths is passed, all of those will be pushed to server
+
+ :return:
+ + **True** if file(s) copied successfully.
+ + **False** otherwise.
+
+ :raises:
+ + :exc:`IOError`, in case specified file wasn't found or could not be accessed.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ paths_list = None
+ if isinstance(filepaths, str):
+ paths_list = [filepaths]
+ elif isinstance(filepaths, list):
+ paths_list = filepaths
+ else:
+ raise TypeError("filepaths argument must be of type str or list")
+
+ for filepath in paths_list:
+ try:
+ if not os.path.exists(filepath):
+ raise IOError(errno.ENOENT, "The requested `{fname}` file wasn't found. Operation aborted.".format(
+ fname = filepath) )
+ else:
+ filename = os.path.basename(filepath)
+ with open(filepath, 'rb') as f:
+ file_content = f.read()
+ self.server.push_file(filename, binascii.b2a_base64(file_content))
+ finally:
+ self.prompt_verbose_data()
+ return True
+
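+ # Usage sketch (file names are hypothetical); this pairs naturally with
+ # get_trex_files_path() for locating the pushed files on the server:
+ #
+ # client.push_files(['my_traffic.yaml', 'my_capture.pcap'])
+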
+ def is_query_relevance(self):
+ """
+ Checks whether the minimum time between two consecutive server queries (asking for live running data) has passed.
+
+ .. note:: The allowed minimum time between each two consecutive samples is 0.5 seconds.
+
+ :parameters:
+ None
+
+ :return:
+ + **True** if more than 0.5 seconds have passed since the last server query.
+ + **False** otherwise.
+
+ """
+ cur_time = time.time()
+ if cur_time-self._last_sample < 0.5:
+ return False
+ else:
+ self._last_sample = cur_time
+ return True
+
+ def call_server_method_safely (self, method_to_call):
+ try:
+ return method_to_call()
+ except socket.error as e:
+ if e.errno == errno.ECONNREFUSED:
+ raise socket.error(errno.ECONNREFUSED, "Connection to the T-Rex server was refused. Please make sure the server is up.")
+
+ def check_server_connectivity (self):
+ """
+ Checks for server valid connectivity.
+ """
+ try:
+ socket.gethostbyname(self.trex_host)
+ return self.server.connectivity_check()
+ except socket.gaierror as e:
+ raise socket.gaierror(e.errno, "Could not resolve the server hostname. Please make sure the hostname is entered correctly.")
+ except socket.error as e:
+ if e.errno == errno.ECONNREFUSED:
+ raise socket.error(errno.ECONNREFUSED, "Connection to the T-Rex server was refused. Please make sure the server is up.")
+ finally:
+ self.prompt_verbose_data()
+
+ def prompt_verbose_data(self):
+ """
+ This method prints any available verbose data, but only if the `verbose` option has been turned on.
+ """
+ if self.verbose:
+ print ('\n')
+ print ("(*) JSON-RPC request: "+ self.history.request)
+ print ("(*) JSON-RPC response: "+ self.history.response)
+
+ def __verbose_print(self, print_str):
+ """
+ This private method prints the `print_str` string only in case self.verbose flag is turned on.
+
+ :parameters:
+ print_str : str
+ a string to be printed
+
+ :returns:
+ None
+ """
+ if self.verbose:
+ print (print_str)
+
+
+
+ def _handle_AppError_exception(self, err):
+ """
+ This private method triggers the T-Rex dedicated exception generation in case a general AppError has been raised.
+ """
+ # handle known exceptions based on known error codes.
+ # if error code is not known, raise ProtocolError
+ raise exception_handler.gen_exception(err)
+
+
+class CTRexResult(object):
+ """
+ A class containing all results received from T-Rex.
+
+ On top of containing the results, this class offers easier data access and extended result-processing options
+ """
+ def __init__(self, max_history_size):
+ """
+ Instantiate a T-Rex result object
+
+ :parameters:
+ max_history_size : int
+ a number to set the maximum history size of a single T-Rex run. Each sampling adds a new item to history.
+
+ """
+ self._history = deque(maxlen = max_history_size)
+ self.clear_results()
+
+ def __repr__(self):
+ return ("Is valid history? {arg}\n".format( arg = self.is_valid_hist() ) +
+ "Done warmup? {arg}\n".format( arg = self.is_done_warmup() ) +
+ "Expected tx rate: {arg}\n".format( arg = self.get_expected_tx_rate() ) +
+ "Current tx rate: {arg}\n".format( arg = self.get_current_tx_rate() ) +
+ "Maximum latency: {arg}\n".format( arg = self.get_max_latency() ) +
+ "Average latency: {arg}\n".format( arg = self.get_avg_latency() ) +
+ "Average window latency: {arg}\n".format( arg = self.get_avg_window_latency() ) +
+ "Total drops: {arg}\n".format( arg = self.get_total_drops() ) +
+ "Drop rate: {arg}\n".format( arg = self.get_drop_rate() ) +
+ "History size so far: {arg}\n".format( arg = len(self._history) ) )
+
+ def get_expected_tx_rate (self):
+ """
+ Fetches the expected TX rate in various unit representations
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the expected TX rate, where the key is the measurement units, and the value is the measurement value.
+
+ """
+ return self._expected_tx_rate
+
+ def get_current_tx_rate (self):
+ """
+ Fetches the current TX rate in various unit representations
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the current TX rate, where the key is the measurement units, and the value is the measurement value.
+
+ """
+ return self._current_tx_rate
+
+ def get_max_latency (self):
+ """
+ Fetches the maximum latency measured on each of the interfaces
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the maximum latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value.
+
+ """
+ return self._max_latency
+
+ def get_avg_latency (self):
+ """
+ Fetches the average latency measured on each of the interfaces from the start of the T-Rex run
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the average latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value.
+
+ The `all` key represents the average of all interfaces' average
+
+ """
+ return self._avg_latency
+
+ def get_avg_window_latency (self):
+ """
+ Fetches the average latency measured on each of the interfaces from all the samples currently stored in the window.
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the average latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value.
+
+ The `all` key represents the average of all interfaces' average
+
+ """
+ return self._avg_window_latency
+
+ def get_total_drops (self):
+ """
+ Fetches the total number of drops identified since the T-Rex run began.
+
+ :parameters:
+ None
+
+ :return:
+ total drops count (as int)
+
+ """
+ return self._total_drops
+
+ def get_drop_rate (self):
+ """
+ Fetches the most recent drop rate in pkts/sec units.
+
+ :parameters:
+ None
+
+ :return:
+ current drop rate (as float)
+
+ """
+ return self._drop_rate
+
+ def is_valid_hist (self):
+ """
+ Checks if the result object contains valid data.
+
+ :parameters:
+ None
+
+ :return:
+ + **True** if history is valid.
+ + **False** otherwise.
+
+ """
+ return self.valid
+
+ def set_valid_hist (self, valid_stat = True):
+ """
+ Sets the result object validity status.
+
+ :parameters:
+ valid_stat : bool
+ defines the validity status
+
+ default value : **True**
+
+ :return:
+ None
+
+ """
+ self.valid = valid_stat
+
+ def is_done_warmup (self):
+ """
+ Checks if the latest T-Rex results TX-rate indicates that T-Rex has reached its expected TX-rate.
+
+ :parameters:
+ None
+
+ :return:
+ + **True** if expected TX-rate has been reached.
+ + **False** otherwise.
+
+ """
+ return self._done_warmup
+
+ def get_last_value (self, tree_path_to_key, regex = None):
+ """
+ A dynamic getter from the latest sampled data item stored in the result object.
+
+ :parameters:
+ tree_path_to_key : str
+ defines a path to desired data.
+
+ .. tip:: | Use '.' to enter one level deeper in dictionary hierarchy.
+ | Use '[i]' to access the i'th indexed object of an array.
+
+ regex : str
+ apply a regex to filter results out from a multiple results set.
+
+ Filter applies only on keys of dictionary type.
+
+ default value : **None**
+
+ :return:
+ + a list of values relevant to the specified path
+ + None if no results were fetched or the history isn't valid.
+
+ """
+ if not self.is_valid_hist():
+ return None
+ else:
+ return CTRexResult.__get_value_by_path(self._history[len(self._history)-1], tree_path_to_key, regex)
+
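+ # Path-syntax sketch (keys follow the T-Rex JSON template):
+ #
+ # res.get_last_value("trex-global.data.m_tx_bps")
+ # res.get_last_value("trex-latency.data", regex = "avg-")
+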
+ def get_value_list (self, tree_path_to_key, regex = None, filter_none = True):
+ """
+ A dynamic getter from all sampled data items stored in the result object.
+
+ :parameters:
+ tree_path_to_key : str
+ defines a path to desired data.
+
+ .. tip:: | Use '.' to enter one level deeper in dictionary hierarchy.
+ | Use '[i]' to access the i'th indexed object of an array.
+
+ regex : str
+ apply a regex to filter results out from a multiple results set.
+
+ Filter applies only on keys of dictionary type.
+
+ default value : **None**
+
+ filter_none : bool
+ specify if None results should be filtered out or not.
+
+ default value : **True**
+
+ :return:
+ + a list of values relevant to the specified path. Each item on the list refers to a single server sample.
+ + None if no results were fetched or the history isn't valid.
+ """
+
+ if not self.is_valid_hist():
+ return None
+ else:
+ raw_list = list( map(lambda x: CTRexResult.__get_value_by_path(x, tree_path_to_key, regex), self._history) )
+ if filter_none:
+ return list( filter(lambda x: x is not None, raw_list) )
+ else:
+ return raw_list
+
+ def get_latest_dump(self):
+ """
+ A getter to the latest sampled data item stored in the result object.
+
+ :parameters:
+ None
+
+ :return:
+ + a dictionary of the latest data item
+ + an empty dictionary if history is empty.
+
+ """
+ history_size = len(self._history)
+ if history_size != 0:
+ return self._history[len(self._history) - 1]
+ else:
+ return {}
+
+ def update_result_data (self, latest_dump):
+ """
+ Integrates a `latest_dump` dictionary into the CTRexResult object.
+
+ :parameters:
+ latest_dump : dict
+ a dictionary with the items desired to be integrated into the object history and stats
+
+ :return:
+ None
+
+ """
+ # add latest dump to history
+ if latest_dump != {}:
+ self._history.append(latest_dump)
+ if not self.valid:
+ self.valid = True
+
+ # parse important fields and calculate averages and others
+ if self._expected_tx_rate is None:
+ # get the expected data only once since it doesn't change
+ self._expected_tx_rate = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data", "m_tx_expected_\w+")
+
+ self._current_tx_rate = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data", "m_tx_(?!expected_)\w+")
+ if not self._done_warmup and self._expected_tx_rate is not None:
+ # check for up to 2% change between expected and actual
+ if (self._current_tx_rate['m_tx_bps']/self._expected_tx_rate['m_tx_expected_bps'] > 0.98):
+ self._done_warmup = True
+
+ # handle latency data
+ latency_pre = "trex-latency"
+ self._max_latency = self.get_last_value("{latency}.data".format(latency = latency_pre), ".*max-")
+ # support old typo
+ if self._max_latency is None:
+ latency_pre = "trex-latecny"
+ self._max_latency = self.get_last_value("{latency}.data".format(latency = latency_pre), ".*max-")
+
+ self._avg_latency = self.get_last_value("{latency}.data".format(latency = latency_pre), "avg-")
+ self._avg_latency = CTRexResult.__avg_all_and_rename_keys(self._avg_latency)
+
+ avg_win_latency_list = self.get_value_list("{latency}.data".format(latency = latency_pre), "avg-")
+ self._avg_window_latency = CTRexResult.__calc_latency_win_stats(avg_win_latency_list)
+
+ tx_pkts = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data.m_total_tx_pkts")
+ rx_pkts = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data.m_total_rx_pkts")
+ if tx_pkts is not None and rx_pkts is not None:
+ self._total_drops = tx_pkts - rx_pkts
+ self._drop_rate = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data.m_rx_drop_bps")
+
+ def clear_results (self):
+ """
+ Clears all results and sets the history's validity to `False`
+
+ :parameters:
+ None
+
+ :return:
+ None
+
+ """
+ self.valid = False
+ self._done_warmup = False
+ self._expected_tx_rate = None
+ self._current_tx_rate = None
+ self._max_latency = None
+ self._avg_latency = None
+ self._avg_window_latency = None
+ self._total_drops = None
+ self._drop_rate = None
+ self._history.clear()
+
+ @staticmethod
+ def __get_value_by_path (dct, tree_path, regex = None):
+ try:
+ for i, p in re.findall(r'(\d+)|([\w|-]+)', tree_path):
+ dct = dct[p or int(i)]
+ if regex is not None and isinstance(dct, dict):
+ res = {}
+ for key,val in dct.items():
+ match = re.match(regex, key)
+ if match:
+ res[key]=val
+ return res
+ else:
+ return dct
+ except (KeyError, TypeError):
+ return None
+
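+ # For illustration, with a made-up dict:
+ # __get_value_by_path({'a': {'b': [10, 20]}}, "a.b.1") -> 20
+ # __get_value_by_path({'a': {'bc': 1, 'd': 2}}, "a", regex = "b\w") -> {'bc': 1}
+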
+ @staticmethod
+ def __calc_latency_win_stats (latency_win_list):
+ res = {'all' : None }
+ port_dict = {'all' : []}
+ list( map(lambda x: CTRexResult.__update_port_dict(x, port_dict), latency_win_list) )
+
+ # finally, calculate averages for each list
+ res['all'] = float("%.3f" % (sum(port_dict['all'])/float(len(port_dict['all']))) )
+ port_dict.pop('all')
+ for port, avg_list in port_dict.items():
+ res[port] = float("%.3f" % (sum(avg_list)/float(len(avg_list))) )
+
+ return res
+
+ @staticmethod
+ def __update_port_dict (src_avg_dict, dest_port_dict):
+ all_list = src_avg_dict.values()
+ dest_port_dict['all'].extend(all_list)
+ for key, val in src_avg_dict.items():
+ reg_res = re.match("avg-(\d+)", key)
+ if reg_res:
+ tmp_key = "port"+reg_res.group(1)
+ if tmp_key in dest_port_dict:
+ dest_port_dict[tmp_key].append(val)
+ else:
+ dest_port_dict[tmp_key] = [val]
+
+ @staticmethod
+ def __avg_all_and_rename_keys (src_dict):
+ res = {}
+ all_list = src_dict.values()
+ res['all'] = float("%.3f" % (sum(all_list)/float(len(all_list))) )
+ for key, val in src_dict.items():
+ reg_res = re.match("avg-(\d+)", key)
+ if reg_res:
+ tmp_key = "port"+reg_res.group(1)
+ res[tmp_key] = val # don't touch original fields values
+ return res
+
+
+
+if __name__ == "__main__":
+ pass
+
+
+
diff --git a/scripts/automation/trex_control_plane/client_utils/__init__.py b/scripts/automation/trex_control_plane/client_utils/__init__.py
new file mode 100755
index 00000000..c38c2cca
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client_utils/__init__.py
@@ -0,0 +1 @@
+__all__ = ["general_utils", "trex_yaml_gen"]
diff --git a/scripts/automation/trex_control_plane/client_utils/general_utils.py b/scripts/automation/trex_control_plane/client_utils/general_utils.py
new file mode 100755
index 00000000..5544eabc
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client_utils/general_utils.py
@@ -0,0 +1,57 @@
+#!/router/bin/python
+
+import sys,site
+import os
+
+try:
+ import pwd
+except ImportError:
+ import getpass
+ pwd = None
+
+using_python_3 = (sys.version_info.major == 3)
+
+
+def user_input():
+ if using_python_3:
+ return input()
+ else:
+ # using python version 2
+ return raw_input()
+
+def get_current_user():
+ if pwd:
+ return pwd.getpwuid( os.geteuid() ).pw_name
+ else:
+ return getpass.getuser()
+
+def import_module_list_by_path (modules_list):
+ assert(isinstance(modules_list, list))
+ for full_path in modules_list:
+ site.addsitedir(full_path)
+
+def find_path_to_pardir (pardir, base_path = os.getcwd() ):
+ """
+ Finds the absolute path for some parent dir `pardir`, starting from base_path
+
+ :parameters:
+ pardir : str
+ name of an upper-level directory to which we want to find an absolute path for
+ base_path : str
+ a full (usually nested) path from which we want to find a parent folder.
+
+ default value : **current working dir**
+
+ :return:
+ string representation of the full path to the requested parent directory
+
+ """
+ components = base_path.split(os.sep)
+ return str.join(os.sep, components[:components.index(pardir)+1])
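+
+# For illustration, on a POSIX-style path:
+# find_path_to_pardir('automation', '/opt/automation/trex/client') -> '/opt/automation'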
+
+
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py b/scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py
new file mode 100755
index 00000000..755674ea
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py
@@ -0,0 +1,212 @@
+#!/router/bin/python
+
+import pprint
+import yaml
+import os
+# import bisect
+
+class CTRexYaml(object):
+ """
+ This class functions as a YAML generator according to the T-Rex YAML format.
+
+ CTRexYaml is compatible with both Python 2 and Python 3.
+ """
+ YAML_TEMPLATE = [{'cap_info': [],
+ 'duration': 10.0,
+ 'generator': {'clients_end': '16.0.1.255',
+ 'clients_per_gb': 201,
+ 'clients_start': '16.0.0.1',
+ 'distribution': 'seq',
+ 'dual_port_mask': '1.0.0.0',
+ 'min_clients': 101,
+ 'servers_end': '48.0.0.255',
+ 'servers_start': '48.0.0.1',
+ 'tcp_aging': 1,
+ 'udp_aging': 1},
+ 'mac' : [0x00,0x00,0x00,0x01,0x00,0x00]}]
+ PCAP_TEMPLATE = {'cps': 1.0,
+ 'ipg': 10000,
+ 'name': '',
+ 'rtt': 10000,
+ 'w': 1}
+
+ def __init__ (self, trex_files_path):
+ """
+ The initialization of this class creates a CTRexYaml object with **empty** 'cap-info', and with default client-server configuration.
+
+ Use class methods to add and assign pcap files and export the data to a YAML file.
+
+ :parameters:
+ trex_files_path : str
+ a path (on the T-Rex server side) from which T-Rex can access the pcap files.
+
+ """
+ self.yaml_obj = list(CTRexYaml.YAML_TEMPLATE)
+ self.empty_cap = True
+ self.file_list = []
+ self.yaml_dumped = False
+ self.trex_files_path = trex_files_path
+
+ def add_pcap_file (self, local_pcap_path):
+ """
+ Adds a .pcap file with recorded traffic to the yaml object by linking the file with 'cap-info' template key fields.
+
+ :parameters:
+ local_pcap_path : str
+ a path (on client side) for the pcap file to be added.
+
+ :return:
+ + The index of the inserted item (as int) if item added successfully
+ + -1 if pcap file already exists in 'cap_info'.
+
+ """
+ new_pcap = dict(CTRexYaml.PCAP_TEMPLATE)
+ new_pcap['name'] = self.trex_files_path + os.path.basename(local_pcap_path)
+ if self.get_pcap_idx(new_pcap['name']) != -1:
+ # pcap already exists in 'cap_info'
+ return -1
+ else:
+ self.yaml_obj[0]['cap_info'].append(new_pcap)
+ if self.empty_cap:
+ self.empty_cap = False
+ self.file_list.append(local_pcap_path)
+ return ( len(self.yaml_obj[0]['cap_info']) - 1)
+
+
+ def get_pcap_idx (self, pcap_name):
+ """
+ Checks if a certain .pcap file has been added into the yaml object.
+
+ :parameters:
+ pcap_name : str
+ the name of the pcap file to be searched
+
+ :return:
+ + The index of the pcap file (as int) if exists
+ + -1 if not exists.
+
+ """
+ comp_pcap = pcap_name if pcap_name.startswith(self.trex_files_path) else (self.trex_files_path + pcap_name)
+ for idx, pcap in enumerate(self.yaml_obj[0]['cap_info']):
+ if pcap['name'] == comp_pcap:
+ return idx
+ # pcap file wasn't found
+ return -1
+
+ def dump_as_python_obj (self):
+ """
+ dumps with nice indentation the pythonic format (dictionaries and lists) of the currently built yaml object.
+
+ :parameters:
+ None
+
+ :return:
+ None
+
+ """
+ pprint.pprint(self.yaml_obj)
+
+ def dump(self):
+ """
+ dumps with nice indentation the YAML format of the currently built yaml object.
+
+ :parameters:
+ None
+
+ :return:
+ None
+
+ """
+ print (yaml.safe_dump(self.yaml_obj, default_flow_style = False))
+
+ def to_yaml(self, filename):
+ """
+ Exports the built configuration into an actual YAML file.
+
+ :parameters:
+ filename : str
+ a path (on client side, including filename) to store the generated yaml file.
+
+ :return:
+ None
+
+ :raises:
+ + :exc:`ValueError`, in case no pcap files have been added to the object.
+ + :exc:`EnvironmentError`, in case of any IO error of writing to the files or OSError when trying to open it for writing.
+
+ """
+ if self.empty_cap:
+ raise ValueError("No .pcap file has been assigned to yaml object. Must add at least one")
+ else:
+ try:
+ with open(filename, 'w') as yaml_file:
+ yaml_file.write( yaml.safe_dump(self.yaml_obj, default_flow_style = False) )
+ self.yaml_dumped = True
+ self.file_list.append(filename)
+ except EnvironmentError as inst:
+ raise
+
+ def set_cap_info_param (self, param, value, seq):
+ """
+ Sets a cap-info parameter value of a specific pcap file.
+
+ :parameters:
+ param : str
+ the name of the parameters to be set.
+ value : int/float
+ the desired value to be set to `param` key.
+ seq : int
+ an index to the relevant caps array to be changed (index supplied when adding new pcap file, see :func:`add_pcap_file`).
+
+ :return:
+ + **True** on success
+ + **False** in case an out-of-range index was given.
+
+ """
+ try:
+ self.yaml_obj[0]['cap_info'][seq][param] = value
+
+ return True
+ except IndexError:
+ return False
+
+ def set_generator_param (self, param, value):
+ """
+ Sets a generator parameter value of the yaml object.
+
+ :parameters:
+ param : str
+ the name of the parameters to be set.
+ value : int/float/str
+ the desired value to be set to `param` key.
+
+ :return:
+ None
+
+ """
+ self.yaml_obj[0]['generator'][param] = value
+
+ def get_file_list(self):
+ """
+ Returns a list of all files related to the YAML object, including the YAML filename itself.
+
+ .. tip:: This method is especially useful for listing all the files that should be pushed to T-Rex server as part of the same yaml selection.
+
+ :parameters:
+ None
+
+ :return:
+ a list of filepaths, each is a local client-machine file path.
+
+ """
+ if not self.yaml_dumped:
+ print ("WARNING: .yaml file wasn't dumped yet. Files list contains only .pcap files")
+ return self.file_list
+
+
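+# A minimal end-to-end sketch (all paths are hypothetical):
+#
+# yaml_gen = CTRexYaml('/tmp/trex_files/')
+# idx = yaml_gen.add_pcap_file('avl/http_browsing.pcap')
+# yaml_gen.set_cap_info_param('cps', 2.0, idx)
+# yaml_gen.to_yaml('my_traffic.yaml')
+# yaml_gen.get_file_list() # -> ['avl/http_browsing.pcap', 'my_traffic.yaml']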
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/trex_control_plane/common/__init__.py b/scripts/automation/trex_control_plane/common/__init__.py
new file mode 100755
index 00000000..5a1da046
--- /dev/null
+++ b/scripts/automation/trex_control_plane/common/__init__.py
@@ -0,0 +1 @@
+__all__ = ["trex_status_e", "trex_exceptions"]
diff --git a/scripts/automation/trex_control_plane/common/trex_exceptions.py b/scripts/automation/trex_control_plane/common/trex_exceptions.py
new file mode 100755
index 00000000..1353fd00
--- /dev/null
+++ b/scripts/automation/trex_control_plane/common/trex_exceptions.py
@@ -0,0 +1,140 @@
+#!/router/bin/python
+
+#from rpc_exceptions import RPCExceptionHandler, WrappedRPCError
+
+from jsonrpclib import Fault, ProtocolError, AppError
+
+class RPCError(Exception):
+ """
+ This is the general RPC error exception class from which :exc:`trex_exceptions.TRexException` inherits.
+
+ Every exception in this class has an error format following the JSON-RPC convention: code, message and data.
+
+ """
+ def __init__(self, code, message, remote_data = None):
+ self.code = code
+ self.msg = message or self._default_message
+ self.data = remote_data
+ self.args = (code, self.msg, remote_data)
+
+ def __str__(self):
+ return self.__repr__()
+ def __repr__(self):
+ if self.args[2] is not None:
+ return u"[errcode:%r] %r. Extended data: %r" % (self.args[0], self.args[1], self.args[2])
+ else:
+ return u"[errcode:%r] %r" % (self.args[0], self.args[1])
+
+class TRexException(RPCError):
+ """
+ This is the most general T-Rex exception.
+
+ All exceptions that inherit from this class have an error code and a default message which describes the most common use case of the error.
+
+ This exception isn't used by default; it is raised only when an error unrelated to ProtocolError occurs and cannot be resolved to any of the derived exceptions.
+
+ """
+ code = -10
+ _default_message = 'T-Rex encountered an unexpected error. please contact T-Rex dev team.'
+ # api_name = 'TRex'
+
+class TRexError(TRexException):
+ """
+ Indicates a general T-Rex run error, e.g. wrong input parameters or reachability issues.
+ """
+ code = -11
+ _default_message = 'T-Rex run failed due to wrong input parameters, or due to reachability issues.'
+
+class TRexWarning(TRexException):
+ """ Indicates a warning from T-Rex server. When this exception raises it normally used to indicate required data isn't ready yet """
+ code = -12
+ _default_message = 'T-Rex is starting (data is not available yet).'
+
+class TRexRequestDenied(TRexException):
+ """ Indicates the desired reques was denied by the server """
+ code = -33
+ _default_message = 'T-Rex desired request denied because the requested resource is already taken. Try again once T-Rex is back in IDLE state.'
+
+class TRexInUseError(TRexException):
+ """
+ Indicates that T-Rex is currently in use
+
+ """
+ code = -13
+ _default_message = 'T-Rex is already being used by another user or process. Try again once T-Rex is back in IDLE state.'
+
+class TRexRunFailedError(TRexException):
+ """ Indicates that T-Rex has failed due to some reason. This Exception is used when T-Rex process itself terminates due to unknown reason """
+ code = -14
+ _default_message = ''
+
+class TRexIncompleteRunError(TRexException):
+ """
+ Indicates that T-Rex has failed due to some reason.
+ This exception is used when the T-Rex process itself terminates with an error fault or is terminated by an external intervention in the OS.
+
+ """
+ code = -15
+ _default_message = 'T-Rex run was terminated unexpectedly by outer process or by the hosting OS'
+
+EXCEPTIONS = [TRexException, TRexError, TRexWarning, TRexInUseError, TRexRequestDenied, TRexRunFailedError, TRexIncompleteRunError]
+
+class CExceptionHandler(object):
+ """
+ CExceptionHandler is responsible for generating T-Rex API related exceptions on the client side.
+ """
+ def __init__(self, exceptions):
+ """
+ Instantiate a CExceptionHandler object
+
+ :parameters:
+
+ exceptions : list
+ a list of all T-Rex acceptable exception objects.
+
+ default list:
+ - :exc:`trex_exceptions.TRexException`
+ - :exc:`trex_exceptions.TRexError`
+ - :exc:`trex_exceptions.TRexWarning`
+ - :exc:`trex_exceptions.TRexInUseError`
+ - :exc:`trex_exceptions.TRexRequestDenied`
+ - :exc:`trex_exceptions.TRexRunFailedError`
+ - :exc:`trex_exceptions.TRexIncompleteRunError`
+
+ """
+ if isinstance(exceptions, type):
+ exceptions = [ exceptions, ]
+ self.exceptions = exceptions
+ self.exceptions_dict = dict((e.code, e) for e in self.exceptions)
+
+ def gen_exception (self, err):
+ """
+ Generates an exception based on a general ProtocolError exception object `err`.
+
+ :parameters:
+
+ err : exception
+ a ProtocolError exception raised by :class:`trex_client.CTRexClient` class
+
+ :return:
+ A T-Rex exception from the exception list defined in class creation.
+
+ If such exception wasn't found, returns a TRexException exception
+
+ """
+ code, message, data = err
+ try:
+ exp = self.exceptions_dict[code]
+ return exp(exp.code, message, data)
+ except KeyError:
+ # revert to TRexException when an unknown application error is raised
+ return TRexException(err)
+
+
+exception_handler = CExceptionHandler( EXCEPTIONS )
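+
+# For illustration, a made-up (code, message, data) tuple resolves to its
+# mapped exception type:
+#
+# exception_handler.gen_exception((-13, 'T-Rex is in use', None)) # -> TRexInUseError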
+
diff --git a/scripts/automation/trex_control_plane/common/trex_status_e.py b/scripts/automation/trex_control_plane/common/trex_status_e.py
new file mode 100755
index 00000000..34db9b39
--- /dev/null
+++ b/scripts/automation/trex_control_plane/common/trex_status_e.py
@@ -0,0 +1,8 @@
+#!/router/bin/python
+
+# import outer_packages
+from enum import Enum
+
+
+# define the states that T-Rex can hold during its lifetime
+TRexStatus = Enum('TRexStatus', 'Idle Starting Running')
diff --git a/scripts/automation/trex_control_plane/dirtree_no_files.txt b/scripts/automation/trex_control_plane/dirtree_no_files.txt
new file mode 100755
index 00000000..b87c4167
--- /dev/null
+++ b/scripts/automation/trex_control_plane/dirtree_no_files.txt
@@ -0,0 +1,11 @@
+trex_control_plane/
+|-- Client
+|-- Server
+|-- common
+`-- python_lib
+ |-- enum34-1.0.4
+ |-- jsonrpclib-0.1.3
+ |-- lockfile-0.10.2
+ |-- python-daemon-2.0.5
+ `-- zmq
+
diff --git a/scripts/automation/trex_control_plane/dirtree_with_files.txt b/scripts/automation/trex_control_plane/dirtree_with_files.txt
new file mode 100755
index 00000000..5ce7cdfc
--- /dev/null
+++ b/scripts/automation/trex_control_plane/dirtree_with_files.txt
@@ -0,0 +1,31 @@
+trex_control_plane/
+|-- Client
+| |-- __init__.py
+| |-- outer_packages.py
+| |-- python_lib
+| `-- trex_client.py
+|-- Server
+| |-- CCustomLogger.py
+| |-- outer_packages.py
+| |-- trex_daemon_server
+| |-- trex_daemon_server.py
+| |-- trex_launch_thread.py
+| |-- trex_server.py
+| `-- zmq_monitor_thread.py
+|-- __init__.py
+|-- common
+| |-- __init__.py
+| |-- __init__.pyc
+| |-- trex_status_e.py
+| `-- trex_status_e.pyc
+|-- dirtree_no_files.txt
+|-- dirtree_with_files.txt
+`-- python_lib
+ |-- __init__.py
+ |-- enum34-1.0.4
+ |-- jsonrpclib-0.1.3
+ |-- lockfile-0.10.2
+ |-- python-daemon-2.0.5
+ |-- zmq
+ `-- zmq_fedora.tar.gz
+
diff --git a/scripts/automation/trex_control_plane/doc/Makefile b/scripts/automation/trex_control_plane/doc/Makefile
new file mode 100755
index 00000000..37a28d0f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/Makefile
@@ -0,0 +1,192 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " applehelp to make an Apple Help Book"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " xml to make Docutils-native XML files"
+ @echo " pseudoxml to make pseudoxml-XML files for display purposes"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+ @echo " coverage to run coverage check of the documentation (if enabled)"
+
+clean:
+ rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/T-RexProjectControlPlain.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/T-RexProjectControlPlain.qhc"
+
+applehelp:
+ $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
+ @echo
+ @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
+ @echo "N.B. You won't be able to view it unless you put it in" \
+ "~/Library/Documentation/Help or install it in your application" \
+ "bundle."
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/T-RexProjectControlPlain"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/T-RexProjectControlPlain"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through platex and dvipdfmx..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
+
+coverage:
+ $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
+ @echo "Testing of coverage in the sources finished, look at the " \
+ "results in $(BUILDDIR)/coverage/python.txt."
+
+xml:
+ $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+ @echo
+ @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+ $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+ @echo
+ @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/scripts/automation/trex_control_plane/doc/_static/no_scrollbars.css b/scripts/automation/trex_control_plane/doc/_static/no_scrollbars.css
new file mode 100755
index 00000000..f86e823a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/_static/no_scrollbars.css
@@ -0,0 +1,10 @@
+/* override table width restrictions */
+.wy-table-responsive table td, .wy-table-responsive table th {
+ /* !important prevents the common CSS stylesheets from
+ overriding this as on RTD they are loaded after this stylesheet */
+ white-space: normal !important;
+}
+
+.wy-table-responsive {
+ overflow: visible !important;
+} \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/about_trex.rst b/scripts/automation/trex_control_plane/doc/about_trex.rst
new file mode 100755
index 00000000..97cad97d
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/about_trex.rst
@@ -0,0 +1,16 @@
+===================
+About T-Rex project
+===================
+
+Full project's official site
+----------------------------
+
+To learn all about the T-Rex project, visit Cisco's internal `official site <http://csi-wiki-01:8080/display/bpsim/Home>`_.
+
+Even more
+---------
+
+.. toctree::
+ :maxdepth: 2
+
+ authors \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/api/client_code.rst b/scripts/automation/trex_control_plane/doc/api/client_code.rst
new file mode 100755
index 00000000..0cda3451
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/api/client_code.rst
@@ -0,0 +1,17 @@
+
+trex_client Module documentation
+================================
+
+
+CTRexClient class
+-----------------
+
+.. autoclass:: trex_client.CTRexClient
+ :members:
+ :member-order: alphabetical
+
+CTRexResult class
+-----------------
+
+.. autoclass:: trex_client.CTRexResult
+ :members:
diff --git a/scripts/automation/trex_control_plane/doc/api/exceptions.rst b/scripts/automation/trex_control_plane/doc/api/exceptions.rst
new file mode 100755
index 00000000..d9df6484
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/api/exceptions.rst
@@ -0,0 +1,7 @@
+
+
+trex_exceptions Exceptions module
+=================================
+
+.. automodule:: trex_exceptions
+ :members:
diff --git a/scripts/automation/trex_control_plane/doc/api/index.rst b/scripts/automation/trex_control_plane/doc/api/index.rst
new file mode 100755
index 00000000..8233a634
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/api/index.rst
@@ -0,0 +1,19 @@
+
+API Reference
+=============
+The T-Rex API reference section is currently a work in progress.
+
+**T-Rex Modules**
+
+.. toctree::
+ :maxdepth: 4
+
+ client_code
+ exceptions
+
+**T-Rex JSON Template**
+
+.. toctree::
+ :maxdepth: 4
+
+ json_fields
diff --git a/scripts/automation/trex_control_plane/doc/api/json_fields.rst b/scripts/automation/trex_control_plane/doc/api/json_fields.rst
new file mode 100755
index 00000000..b1a2af7c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/api/json_fields.rst
@@ -0,0 +1,233 @@
+
+T-Rex JSON Template
+===================
+
+Whenever T-Rex publishes live data, it uses JSON notation to describe the data object.
+
+Each client may parse it differently; this page describes the meaning of the values as published by the T-Rex server.
+
+
+Main Fields
+-----------
+
+Each T-Rex server-published JSON object contains data divided into main fields, under which the actual data lies.
+
+These main fields are:
+
++-----------------------------+----------------------------------------------------+---------------------------+
+| Main field | Contains | Comments |
++=============================+====================================================+===========================+
+| :ref:`trex-global-field` | Must-have data on T-Rex run, | |
+| | mainly regarding Tx/Rx and packet drops | |
++-----------------------------+----------------------------------------------------+---------------------------+
+| :ref:`tx-gen-field`         | Data indicates the quality of the transmit process.|                           |
+|                             | If the histogram is zero, it means that all packets|                           |
+|                             | were injected at the right time.                   |                           |
++-----------------------------+----------------------------------------------------+---------------------------+
+| :ref:`trex-latecny-field` | Latency reports, containing latency data on | - Generated when latency |
+| | generated data and on response traffic | test is enabled (``l`` |
+| | | param) |
+| | | - *typo* on field key: |
++-----------------------------+----------------------------------------------------+ will be fixed on next |
+| :ref:`trex-latecny-v2-field`| Extended latency information | release |
++-----------------------------+----------------------------------------------------+---------------------------+
+
+
+Each of these fields contains keys for general field data (such as its name) and for its actual data, which is always stored under the **"data"** key.
+
+For example, in order to access some trex-global data, the access path would look like::
+
+ AllData -> trex-global -> data -> desired_info
+
+
+
+
+Detailed explanation
+--------------------
+
+.. _trex-global-field:
+
+trex-global field
+~~~~~~~~~~~~~~~~~
+
+
++--------------------------------+-------+-----------------------------------------------------------+
+| Sub-key | Type | Meaning |
++================================+=======+===========================================================+
+| m_cpu_util | float | CPU utilization (0-100) |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_platform_factor | float | multiplier factor |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_bps | float | total tx bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_rx_bps | float | total rx bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_pps | float | total tx packet per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_cps | float | total tx connection per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_expected_cps | float | expected tx connection per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_expected_pps | float | expected tx packet per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_expected_bps | float | expected tx bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_rx_drop_bps | float | drop rate in bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_active_flows | float | active trex flows |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_open_flows | float | open trex flows from startup (monotonically incrementing) |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_tx_pkts | int | total tx in packets |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_rx_pkts | int | total rx in packets |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_tx_bytes | int | total tx in bytes |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_rx_bytes | int | total rx in bytes |
++--------------------------------+-------+-----------------------------------------------------------+
+| opackets-# | int | output packets (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| obytes-# | int | output bytes (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| ipackets-# | int | input packet (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| ibytes-# | int | input bytes (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| ierrors-# | int | input errors (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| oerrors-#                      | int   | output errors (per interface)                             |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_tx_bps-# | float | total transmitted data in bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| unknown | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_learn_error [#f1]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_active [#f2]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_no_fid [#f2]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_time_out [#f2]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_open [#f2]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+
+
+.. _tx-gen-field:
+
+tx-gen field
+~~~~~~~~~~~~~~
+
++-------------------+-------+-----------------------------------------------------------+
+| Sub-key | Type | Meaning |
++===================+=======+===========================================================+
+| realtime-hist     | dict  | histogram of transmission. See extended information about |
+|                   |       | the histogram object under :ref:`histogram-object-fields`.|
+|                   |       | The attribute analyzed is the time a packet was sent      |
+|                   |       | before/after its intended injection time.                 |
++-------------------+-------+-----------------------------------------------------------+
+| unknown | int | |
++-------------------+-------+-----------------------------------------------------------+
+
+.. _trex-latecny-field:
+
+trex-latecny field
+~~~~~~~~~~~~~~~~~~
+
++---------+-------+---------------------------------------------------------+
+| Sub-key | Type | Meaning |
++=========+=======+=========================================================+
+| avg-# | float | average latency in usec (per interface) |
++---------+-------+---------------------------------------------------------+
+| max-# | float | max latency in usec from the test start (per interface) |
++---------+-------+---------------------------------------------------------+
+| c-max-# | float | max in the last 1 sec window (per interface) |
++---------+-------+---------------------------------------------------------+
+| error-# | float | errors in latency packets (per interface) |
++---------+-------+---------------------------------------------------------+
+| unknown | int | |
++---------+-------+---------------------------------------------------------+
+
+.. _trex-latecny-v2-field:
+
+trex-latecny-v2 field
+~~~~~~~~~~~~~~~~~~~~~
+
++--------------------------------------+-------+--------------------------------------+
+| Sub-key | Type | Meaning |
++======================================+=======+======================================+
+| cpu_util                             | float | rx thread cpu % (this is not trex DP |
+|                                      |       | threads cpu%)                        |
++--------------------------------------+-------+--------------------------------------+
+| port-# | | Containing per interface |
+| | dict | information. See extended |
+| | | information under ``port-# -> |
+| | | key_name -> sub_key`` |
++--------------------------------------+-------+--------------------------------------+
+| port-#->hist | dict | histogram of latency. See extended |
+| | | information about histogram object |
+| | | under :ref:`histogram-object-fields`.|
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats | | Containing per interface |
+| | dict | information. See extended |
+| | | information under ``port-# -> |
+| | | key_name -> sub_key`` |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_tx_pkt_ok           | int   | total packets software tried to send |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_pkt_ok | int | total of packets sent from hardware |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_no_magic | int | rx error with no magic |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_no_id | int | rx errors with no id |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_seq_error | int | error in seq number |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_length_error | int | |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_rx_check | int | packets tested in rx |
++--------------------------------------+-------+--------------------------------------+
+| unknown | int | |
++--------------------------------------+-------+--------------------------------------+
+
+
+
+.. _histogram-object-fields:
+
+Histogram object fields
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The histogram object is used in a number of places throughout the JSON object.
+The following section describes its fields in detail.
+
+
++-----------+-------+-----------------------------------------------------------------------------------+
+| Sub-key | Type | Meaning |
++===========+=======+===================================================================================+
+| min_usec  | int   | min attribute value in usec; packets below this value are not counted            |
++-----------+-------+-----------------------------------------------------------------------------------+
+| max_usec | int | max attribute value in usec |
++-----------+-------+-----------------------------------------------------------------------------------+
+| high_cnt  | int   | number of packets whose attribute exceeds min_usec                                |
++-----------+-------+-----------------------------------------------------------------------------------+
+| cnt | int | total packets from test startup |
++-----------+-------+-----------------------------------------------------------------------------------+
+| s_avg | float | average value from test startup |
++-----------+-------+-----------------------------------------------------------------------------------+
+| histogram | | histogram of relevant object by the following keys: |
+| | array | - key: value in usec |
+| | | - val: number of packets |
++-----------+-------+-----------------------------------------------------------------------------------+
+
+
+Access Examples
+---------------
+
+
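+The following is a minimal sketch of pulling values out of a published
+data-object, assuming it has already been parsed into a Python dict (e.g.
+with ``json.loads``); the variable names below are illustrative::
+
+    import json
+
+    all_data = json.loads(raw_json_str)   # raw_json_str as published by T-Rex
+
+    # AllData -> trex-global -> data -> desired_info
+    tx_bps   = all_data['trex-global']['data']['m_tx_bps']
+    cpu_util = all_data['trex-global']['data']['m_cpu_util']
+
+    # per-interface counters carry the interface index as a suffix
+    opackets_0 = all_data['trex-global']['data']['opackets-0']
+
+    # histogram object nested under the tx-gen field
+    tx_hist = all_data['tx-gen']['data']['realtime-hist']
+    print tx_hist['s_avg']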
+
+.. rubric:: Footnotes
+
+.. [#f1] Available only in NAT and NAT learning operation (``learn`` and ``learn-verify`` flags)
+
+.. [#f2] Available only in NAT operation (``learn`` flag) \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/authors.rst b/scripts/automation/trex_control_plane/doc/authors.rst
new file mode 100755
index 00000000..3b85f020
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/authors.rst
@@ -0,0 +1,12 @@
+=======
+Authors
+=======
+
+T-Rex is developed at Cisco Systems Inc. as a next-generation traffic generator.
+
+T-Rex core-team developers are:
+
+ - Hanoch Haim
+ - Dave Johnson
+ - Wenxian Li
+ - Dan Klein \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/client_utils.rst b/scripts/automation/trex_control_plane/doc/client_utils.rst
new file mode 100755
index 00000000..224dfe19
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/client_utils.rst
@@ -0,0 +1,14 @@
+
+Client Utilities
+================
+
+T-Rex YAML generator
+--------------------
+
+.. automodule:: trex_yaml_gen
+ :members:
+
+General Utilities
+-----------------
+.. automodule:: general_utils
+ :members: \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/conf.py b/scripts/automation/trex_control_plane/doc/conf.py
new file mode 100755
index 00000000..fb9ea83c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/conf.py
@@ -0,0 +1,303 @@
+# -*- coding: utf-8 -*-
+#
+# T-Rex Control Plane documentation build configuration file, created by
+# sphinx-quickstart on Tue Jun 2 07:48:10 2015.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('../client'))
+sys.path.insert(0, os.path.abspath('../client_utils'))
+sys.path.insert(0, os.path.abspath('../examples'))
+sys.path.insert(0, os.path.abspath('../common'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.todo',
+ 'sphinx.ext.viewcode',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'T-Rex Control Plane'
+copyright = u'2015, Cisco Systems Inc.'
+author = u'Dan Klein for Cisco Systems Inc.'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '1.7'
+# The full version, including alpha/beta/rc tags.
+release = '1.7.1'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'sphinx_rtd_theme'
+html_theme_options = {
+# "rightsidebar": "true"
+ }
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+#html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+#html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'T-RexControlPlanedoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+
+# Latex figure (float) alignment
+#'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+    (master_doc, 'T-RexControlPlane.tex', u'T-Rex Control Plane Documentation',
+ u'Dan Klein for Cisco Systems Inc', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    (master_doc, 't-rexcontrolplane', u'T-Rex Control Plane Documentation',
+ [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+    (master_doc, 'T-RexControlPlane', u'T-Rex Control Plane Documentation',
+     author, 'T-RexControlPlane', 'T-Rex control plane API documentation.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
+
+
+# show documentation for both __init__ methods and class methods
+autoclass_content = "both"
+
+# A workaround for the responsive tables always having annoying scrollbars.
+def setup(app):
+ app.add_stylesheet("no_scrollbars.css") \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/docs_utilities.py b/scripts/automation/trex_control_plane/doc/docs_utilities.py
new file mode 100755
index 00000000..e80d765f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/docs_utilities.py
@@ -0,0 +1,37 @@
+#!/router/bin/python
+
+from texttable import Texttable
+import yaml
+
+
+def handle_data_items(field_yaml_dict):
+    """Flatten a main field's 'data' sub-dict into [json_key, type, explanation] rows."""
+    data_items = field_yaml_dict['data']
+    return [ [json_key, meaning['type'], meaning['exp'] ]
+             for json_key,meaning in data_items.items() ]
+
+
+def json_dict_to_txt_table(dict_yaml_file):
+    """Render each main field of the JSON dictionary YAML as a text table."""
+    with open(dict_yaml_file, 'r') as f:
+        yaml_stream = yaml.load(f)
+
+    for main_field, sub_key in yaml_stream.items():
+        # print the section title with an RST-style '~' underline
+        print main_field + ' field' '\n' + '~'*len(main_field+' field') + '\n'
+
+ field_data_rows = handle_data_items(sub_key)
+ table = Texttable(max_width=120)
+ table.set_cols_align(["l", "c", "l"])
+ table.set_cols_valign(["t", "m", "m"])
+ # create table's header
+ table.add_rows([["Sub-key", "Type", "Meaning"]])
+ table.add_rows(field_data_rows, header=False)
+
+
+ print table.draw() + "\n"
+
+
+
+if __name__ == "__main__":
+    json_dict_to_txt_table("json_dictionary.yaml") \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/index.rst b/scripts/automation/trex_control_plane/doc/index.rst
new file mode 100755
index 00000000..e7a619d8
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/index.rst
@@ -0,0 +1,57 @@
+.. T-Rex Control Plane documentation master file, created by
+ sphinx-quickstart on Tue Jun 2 07:48:10 2015.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to T-Rex Control Plane's documentation!
+===============================================
+
+T-Rex is a **realistic traffic generator** that enables you to learn more about your devices under development.
+
+This site covers the Python API of the T-Rex control plane and explains how to utilize it for your needs.
+However, since the entire API is JSON-RPC [#f1]_ based, you may want to check out other implementations that could suit you.
+
+
+To understand fully how the API works and how to set up the server side, check out the `API documentation <http://csi-wiki-01:8080/display/bpsim/Documentation>`_ under the documentation section of the T-Rex website.
+
+
+**Use the table of contents below or the menu to your left to navigate through the site**
+
+
+Getting Started
+===============
+.. toctree::
+ :maxdepth: 2
+
+ installation
+ client_utils
+ usage_examples
+
+API Reference
+=============
+.. toctree::
+ :maxdepth: 2
+
+ api/index
+
+About T-Rex
+===========
+.. toctree::
+ :maxdepth: 2
+
+ All about T-Rex <about_trex>
+ license
+
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
+
+.. rubric:: Footnotes
+
+.. [#f1] For more information on JSON-RPC, check out the `official site <http://www.jsonrpc.org/>`_ \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/installation.rst b/scripts/automation/trex_control_plane/doc/installation.rst
new file mode 100755
index 00000000..dda32f56
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/installation.rst
@@ -0,0 +1,25 @@
+============
+Installation
+============
+
+Prerequisites
+-------------
+The T-Rex control plane is based on a client-server model that interacts using JSON-RPC.
+
+In order to use the client-side API documented here, a T-Rex server daemon must be up and listening on the host and port that the client connects to; a minimal connection check is sketched below.
+
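+For example (the hostname ``trex-host`` is illustrative, and the daemon is
+assumed to be listening on its default port, 8090)::
+
+    import trex_root_path
+    from client.trex_client import CTRexClient
+
+    trex = CTRexClient('trex-host')   # default daemon port is 8090
+    print trex.get_running_status()   # verify that the server answers
+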
+Compatibility
+-------------
+Both the client and server side were developed for the Linux platform.
+The client-side module is also compatible with Python on Windows.
+
+The client side can be used with both Python 2 and Python 3.
+However, the server side was designed for, and best fits, Python 2.7.6 and on (all 2.x series, assuming > 2.6.9).
+
+
+Installation manual
+-------------------
+
+T-Rex Control Plane is a cross-platform, cross-operating-system API to control and run T-Rex.
+
+The full, most up-to-date manual (which covers all programming languages) can be found under the `Automation API documentation <http://csi-wiki-01:8080/display/bpsim/Documentation>`_. \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/json_dictionary.yaml b/scripts/automation/trex_control_plane/doc/json_dictionary.yaml
new file mode 100755
index 00000000..853ded65
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/json_dictionary.yaml
@@ -0,0 +1,252 @@
+################################################################
+#### T-Rex JSON Dictionary definitions ####
+################################################################
+
+
+trex-global :
+ name :
+ type : string
+ exp : "this is a name representation of the main field"
+ val : "trex-global"
+ type :
+ type : int
+ val : 0
+ data :
+ m_cpu_util :
+ type : float
+ exp : "CPU utilization (0-100)"
+ val : 0.0
+ m_platform_factor :
+ type : float
+ exp : "multiplier factor"
+ val : 1.0
+ m_tx_bps :
+ type : float
+ exp : "total tx bit per second"
+ val : 0.0
+ m_rx_bps :
+ type : float
+ exp : "total rx bit per second"
+ val : 0.0
+ m_tx_pps :
+ type : float
+ exp : "total tx packet per second"
+ val : 0.0
+ m_tx_cps :
+ type : float
+ exp : "total tx connection per second"
+ val : 0.0
+ m_tx_expected_cps :
+ type : float
+ exp : "expected tx connection per second"
+ val : 0.0
+ m_tx_expected_pps :
+ type : float
+ exp : "expected tx packet per second"
+ val : 0.0
+ m_tx_expected_bps :
+ type : float
+ exp : "expected tx bit per second"
+ val : 0.0
+ m_rx_drop_bps :
+ type : float
+ exp : "drop rate in bit per second"
+ val : 0.0
+ m_active_flows :
+ type : float
+ exp : "active trex flows"
+ val : 0.0
+ m_open_flows :
+ type : float
+ exp : "open trex flows from startup (monotonically incrementing)"
+ val : 0.0
+ m_total_tx_pkts :
+ type : int
+ exp : "total tx in packets"
+ val : 0
+ m_total_rx_pkts :
+ type : int
+ exp : "total rx in packets"
+ val : 0
+ m_total_tx_bytes :
+ type : int
+ exp : "total tx in bytes"
+ val : 0
+ m_total_rx_bytes :
+ type : int
+ exp : "total rx in bytes"
+ val : 0
+ opackets-# :
+ type : int
+ exp : "output packets (per interface)"
+ val : 0
+ obytes-# :
+ type : int
+ exp : "output bytes (per interface)"
+ val : 0
+ ipackets-# :
+ type : int
+ exp : "input packet (per interface)"
+ val : 0
+ ibytes-# :
+ type : int
+ exp : "input bytes (per interface)"
+ val : 0
+ ierrors-# :
+ type : int
+ exp : "input errors (per interface)"
+ val : 0
+ oerrors-# :
+ type : int
+ exp : "input errors (per interface)"
+ val : 0
+ m_total_tx_bps-# :
+ type : float
+ exp : "total transmitted data in bit per second"
+ val : 0.0
+ unknown :
+ type : int
+ exp : ""
+ val : 0
+
+tx-gen :
+ name :
+ type : string
+ exp : "this is a name representation of the main field"
+ val : "tx-gen"
+ type :
+ type : int
+ val : 0
+ data :
+ realtime-hist :
+ type : dict
+ #exp : "Containing TX history data, by the following keys:\n - min_usec (max_usec): min (max) time packet sent before (after) it was intended to be sent\n - cnt (high_cnt): how many packet were lower than min_usec (higher than max_usec) relative to the time these packets were intended to be injected"
+ exp : "histogram of transmission. See extended information about histogram object under :ref:`histogram-object-fields`. The attribute analyzed is time packet has been sent before/after it was intended to be"
+ val : '{ "min_usec":10, "max_usec":0, "high_cnt":0, "cnt":667866, "s_avg":0.0, "histogram":[] }'
+ unknown :
+ type : int
+ exp : ""
+ val : 0
+
+trex-latecny :
+ name :
+ type : string
+ exp : "this is a name representation of the main field"
+ val : "trex-latecny"
+ type :
+ type : int
+ val : 0
+ data :
+ avg-# :
+ type : float
+ exp : "average latency in usec (per interface)"
+ val : 75.0
+ max-# :
+ type : float
+ exp : "max latency in usec from the test start (per interface)"
+ val : 75.0
+ c-max-# :
+ type : float
+ exp : "max in the last 1 sec window (per interface)"
+ val : 75.0
+ error-# :
+ type : float
+ exp : "errors in latency packets (per interface)"
+ val : 75.0
+ unknown :
+ type : int
+ exp : ""
+ val : 0
+
+
+trex-latecny-v2 :
+ name :
+ type : string
+ exp : "this is a name representation of the main field"
+ val : "trex-latecny-v2"
+ type :
+ type : int
+ val : 0
+ data :
+ cpu_util :
+ type : float
+ exp : "rx thread cpu % (this is not trex DP threads cpu%%)"
+ val : 75.0
+ port-# :
+ type : dict
+ exp : "Containing per interface information. See extended information under ``port-# -> key_name -> sub_key``"
+ val : ''
+ port-#->hist :
+ type : dict
+ exp : "histogram of latency. See extended information about histogram object under :ref:`histogram-object-fields`"
+ val : ''
+ port-#->stats :
+ type : dict
+ exp : "Containing per interface information. See extended information under ``port-# -> key_name -> sub_key``"
+ val : ''
+ port-#->stats->m_tx_pkt_ok :
+ type : int
+ exp : "total of try sent packets"
+ val : 60110
+ port-#->stats->m_pkt_ok :
+ type : int
+ exp : "total of packets sent from hardware"
+ val : 60065
+ port-#->stats->m_no_magic :
+ type : int
+ exp : "rx error with no magic"
+ val : 0
+ port-#->stats->m_no_id :
+ type : int
+ exp : "rx errors with no id"
+ val : 0
+ port-#->stats->m_seq_error :
+ type : int
+ exp : "error in seq number"
+ val : 18
+ port-#->stats->m_length_error :
+ type : int
+ exp : ""
+ val : 0
+ port-#->stats->m_rx_check :
+ type : int
+ exp : "packets tested in rx"
+ val : 407495
+ unknown :
+ type : int
+ exp : ""
+ val : 0
+histogram-obj :
+ name :
+ type : string
+ exp : "this is description of a histogram object being used in number of place throughout the JSON object"
+ val : "histogram-obj"
+ data :
+ min_usec :
+ type : int
+ exp : "min attribute value in usec. pkt with latency less than this value is not counted"
+ val : 10
+ max_usec :
+ type : int
+ exp : "max attribute value in usec"
+ val : 83819
+ high_cnt :
+ type : int
+ exp : "how many packets on which its attribute > min_usec"
+ val : 83819
+ cnt :
+ type : int
+ exp : "total packets from test startup"
+ val : 83819
+ s_avg :
+ type : float
+ exp : "average value from test startup"
+ val : 39.3
+ histogram :
+ type : array
+ exp : "histogram of relevant object by the following keys:\n - key: value in usec \n - val: number of packets"
+ val : '[{"key": 20, "val": 5048}, {"key": 30, "val": 6092}, {"key": 40, "val": 2092}]'
+
+
+
+
diff --git a/scripts/automation/trex_control_plane/doc/license.rst b/scripts/automation/trex_control_plane/doc/license.rst
new file mode 100755
index 00000000..b83dd4b3
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/license.rst
@@ -0,0 +1,18 @@
+=======
+License
+=======
+
+
+Copyright 2015 Cisco Systems Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/scripts/automation/trex_control_plane/doc/requirements.rst b/scripts/automation/trex_control_plane/doc/requirements.rst
new file mode 100755
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/requirements.rst
diff --git a/scripts/automation/trex_control_plane/doc/usage_examples.rst b/scripts/automation/trex_control_plane/doc/usage_examples.rst
new file mode 100755
index 00000000..7116f28c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/usage_examples.rst
@@ -0,0 +1,68 @@
+
+Usage Examples
+==============
+
+
+Full-featured interactive shell
+-------------------------------
+
+The `client_interactive_example.py` script extends the built-in Python `Cmd <https://docs.python.org/2/library/cmd.html>`_ class to create a full-featured shell from which one can interact with the T-Rex server and get instant results.
+
+The help menu of this application is:
+
+.. code-block:: text
+
+ usage: client_interactive_example [options]
+
+ Run T-Rex client API demos and scenarios.
+
+ optional arguments:
+ -h, --help show this help message and exit
+ -v, --version show program's version number and exit
+ -t HOST, --trex-host HOST
+ Specify the hostname or ip to connect with T-Rex
+ server.
+ -p PORT, --trex-port PORT
+ Select port on which the T-Rex server listens. Default
+ port is 8090.
+ -m SIZE, --maxhist SIZE
+ Specify maximum history size saved at client side.
+ Default size is 100.
+ --verbose Switch ON verbose option at T-Rex client. Default is:
+ OFF.
+
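+For example, a typical invocation might look like (hostname illustrative)::
+
+    ./client_interactive_example.py -t trex-dan
+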
+**Code Excerpt**
+
+.. literalinclude:: ../examples/client_interactive_example.py
+ :language: python
+ :linenos:
+
+
+End-to-End cycle
+----------------
+
+This example (``pkt_generation_for_trex.py``) demonstrates a full cycle of using the API.
+
+.. note:: this module uses `Scapy <http://www.secdev.org/projects/scapy/doc/usage.html>`_ to generate the packets that serve as the basis of the traffic injection. Installing this module is recommended in order to get the most out of the example.
+
+The demo takes the user full circle:
+    1. Generate packets (using Scapy).
+    2. Export the generated packets into a .pcap file named `dns_traffic.pcap`.
+    3. Use the :class:`trex_yaml_gen.CTRexYaml` generator to include that .pcap file in a YAML object.
+    4. Export the YAML object into a YAML file named `dns_traffic.yaml`.
+    5. Push the generated files to the T-Rex server.
+    6. Run T-Rex based on the generated (and pushed) files.
+
+**Code Excerpt** [#f1]_
+
+.. literalinclude:: ../examples/pkt_generation_for_trex.py
+ :language: python
+ :lines: 10-
+ :emphasize-lines: 32,36,42,46,51,60,63-69,76-80
+ :linenos:
+
+
+.. rubric:: Footnotes
+
+.. [#f1] The emphasized code lines correspond to the steps mentioned above. \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/examples/__init__.py b/scripts/automation/trex_control_plane/examples/__init__.py
new file mode 100755
index 00000000..d3f5a12f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/__init__.py
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/examples/client_interactive_example.py b/scripts/automation/trex_control_plane/examples/client_interactive_example.py
new file mode 100755
index 00000000..e8d358a9
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/client_interactive_example.py
@@ -0,0 +1,256 @@
+#!/router/bin/python-2.7.4
+
+import trex_root_path
+from client.trex_client import *
+from common.trex_exceptions import *
+import cmd
+from python_lib.termstyle import termstyle
+import os
+from argparse import ArgumentParser
+from pprint import pprint
+import json
+import time
+import socket
+import errno
+
+
+class InteractiveTRexClient(cmd.Cmd):
+
+ intro = termstyle.green("\nInteractive shell to play with Cisco's T-Rex API.\nType help to view available pre-defined scenarios\n(c) All rights reserved.\n")
+ prompt = '> '
+
+ def __init__(self, trex_host, max_history_size = 100, trex_port = 8090, verbose_mode = False ):
+ cmd.Cmd.__init__(self)
+ self.verbose = verbose_mode
+ self.trex = CTRexClient(trex_host, max_history_size, trex_daemon_port = trex_port, verbose = verbose_mode)
+ self.DEFAULT_RUN_PARAMS = dict(c = 4,
+ m = 1.5,
+ nc = True,
+ p = True,
+ d = 100,
+ f = 'avl/sfr_delay_10_1g.yaml',
+ l = 1000)
+        self.run_params = dict(self.DEFAULT_RUN_PARAMS)   # copy, so later updates don't mutate the defaults
+ self.decoder = json.JSONDecoder()
+
+
+ def do_push_files (self, filepaths):
+ """Pushes a custom file to be stored locally on T-Rex server.\nPush multiple files by spefiying their path separated by ' ' (space)."""
+ try:
+ filepaths = filepaths.split(' ')
+ print termstyle.green("*** Starting pushing files ({trex_files}) to T-Rex. ***".format (trex_files = ', '.join(filepaths)) )
+ ret_val = self.trex.push_files(filepaths)
+ if ret_val:
+ print termstyle.green("*** End of T-Rex push_files method (success) ***")
+ else:
+ print termstyle.magenta("*** End of T-Rex push_files method (failed) ***")
+
+ except IOError as inst:
+ print termstyle.magenta(inst)
+
+ def do_show_default_run_params(self,line):
+ """Outputs the default T-Rex running parameters"""
+ pprint(self.DEFAULT_RUN_PARAMS)
+ print termstyle.green("*** End of default T-Rex running parameters ***")
+
+ def do_show_run_params(self,line):
+ """Outputs the currently configured T-Rex running parameters"""
+ pprint(self.run_params)
+ print termstyle.green("*** End of T-Rex running parameters ***")
+
+ def do_update_run_params(self, json_str):
+ """Updates provided parameters on T-Rex running configuration. Provide using JSON string"""
+ if json_str:
+ try:
+ upd_params = self.decoder.decode(json_str)
+ self.run_params.update(upd_params)
+ print termstyle.green("*** End of T-Rex parameters update ***")
+ except ValueError as inst:
+ print termstyle.magenta("Provided illegal JSON string. Please try again.\n[", inst,"]")
+ else:
+ print termstyle.magenta("JSON configuration string is missing. Please try again.")
+
+ def do_show_status (self, line):
+ """Prompts T-Rex current status"""
+ print self.trex.get_running_status()
+ print termstyle.green("*** End of T-Rex status prompt ***")
+
+ def do_show_trex_files_path (self, line):
+ """Prompts the local path in which files are stored when pushed to t-rex server from client"""
+ print self.trex.get_trex_files_path()
+ print termstyle.green("*** End of trex_files_path prompt ***")
+
+ def do_show_reservation_status (self, line):
+ """Prompts if T-Rex is currently reserved or not"""
+ if self.trex.is_reserved():
+ print "T-Rex is reserved"
+ else:
+ print "T-Rex is NOT reserved"
+ print termstyle.green("*** End of reservation status prompt ***")
+
+ def do_reserve_trex (self, user):
+ """Reserves the usage of T-Rex to a certain user"""
+ try:
+ if not user:
+ ret = self.trex.reserve_trex()
+ else:
+ ret = self.trex.reserve_trex(user.split(' ')[0])
+ print termstyle.green("*** T-Rex reserved successfully ***")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_cancel_reservation (self, user):
+ """Cancels a current reservation of T-Rex to a certain user"""
+ try:
+ if not user:
+ ret = self.trex.cancel_reservation()
+ else:
+ ret = self.trex.cancel_reservation(user.split(' ')[0])
+ print termstyle.green("*** T-Rex reservation canceled successfully ***")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_restore_run_default (self, line):
+ """Restores original T-Rex running configuration"""
+        self.run_params = dict(self.DEFAULT_RUN_PARAMS)   # fresh copy of the defaults
+ print termstyle.green("*** End of restoring default run parameters ***")
+
+ def do_run_until_finish (self, sample_rate):
+ """Starts T-Rex and sample server until run is done."""
+ print termstyle.green("*** Starting T-Rex run_until_finish scenario ***")
+
+ if not sample_rate: # use default sample rate if not passed
+ sample_rate = 5
+ try:
+ sample_rate = int(sample_rate)
+ ret = self.trex.start_trex(**self.run_params)
+ self.trex.sample_to_run_finish(sample_rate)
+ print termstyle.green("*** End of T-Rex run ***")
+ except ValueError as inst:
+ print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_run_and_poll (self, sample_rate):
+ """Starts T-Rex and sample server manually until run is done."""
+ print termstyle.green("*** Starting T-Rex run and manually poll scenario ***")
+ if not sample_rate: # use default sample rate if not passed
+ sample_rate = 5
+ try:
+ sample_rate = int(sample_rate)
+ ret = self.trex.start_trex(**self.run_params)
+ last_res = dict()
+ while self.trex.is_running(dump_out = last_res):
+ obj = self.trex.get_result_obj()
+ if (self.verbose):
+ print obj
+ # do WHATEVER here
+ time.sleep(sample_rate)
+
+ print termstyle.green("*** End of T-Rex run ***")
+ except ValueError as inst:
+ print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+
+ def do_run_until_condition (self, sample_rate):
+ """Starts T-Rex and sample server until condition is satisfied."""
+ print termstyle.green("*** Starting T-Rex run until condition is satisfied scenario ***")
+
+ def condition (result_obj):
+ return result_obj.get_current_tx_rate()['m_tx_pps'] > 200000
+
+ if not sample_rate: # use default sample rate if not passed
+ sample_rate = 5
+ try:
+ sample_rate = int(sample_rate)
+ ret = self.trex.start_trex(**self.run_params)
+ ret_val = self.trex.sample_until_condition(condition, sample_rate)
+ print ret_val
+ print termstyle.green("*** End of T-Rex run ***")
+ except ValueError as inst:
+ print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_start_and_return (self, line):
+ """Start T-Rex run and once in 'Running' mode, return to cmd prompt"""
+ print termstyle.green("*** Starting T-Rex run, wait until in 'Running' state ***")
+ try:
+ ret = self.trex.start_trex(**self.run_params)
+ print termstyle.green("*** End of scenario (T-Rex is probably still running!) ***")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_poll_once (self, line):
+ """Performs a single poll of T-Rex current data dump (if T-Rex is running) and prompts and short version of latest result_obj"""
+ print termstyle.green("*** Trying T-Rex single poll ***")
+ try:
+ last_res = dict()
+ if self.trex.is_running(dump_out = last_res):
+ obj = self.trex.get_result_obj()
+ print obj
+ else:
+ print termstyle.magenta("T-Rex isn't currently running.")
+ print termstyle.green("*** End of scenario (T-Rex is posssibly still running!) ***")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+
+ def do_stop_trex (self, line):
+ """Try to stop T-Rex run (if T-Rex is currently running)"""
+ print termstyle.green("*** Starting T-Rex termination ***")
+ try:
+ ret = self.trex.stop_trex()
+ print termstyle.green("*** End of scenario (T-Rex is not running now) ***")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_kill_indiscriminately (self, line):
+ """Force killing of running T-Rex process (if exists) on the server."""
+ print termstyle.green("*** Starting T-Rex termination ***")
+ ret = self.trex.force_kill()
+ if ret:
+ print termstyle.green("*** End of scenario (T-Rex is not running now) ***")
+ elif ret is None:
+ print termstyle.magenta("*** End of scenario (T-Rex termination aborted) ***")
+ else:
+ print termstyle.red("*** End of scenario (T-Rex termination failed) ***")
+
+ def do_exit(self, arg):
+ """Quits the application"""
+ print termstyle.cyan('Bye Bye!')
+ return True
+
+
+if __name__ == "__main__":
+ parser = ArgumentParser(description = termstyle.cyan('Run T-Rex client API demos and scenarios.'),
+ usage = """client_interactive_example [options]""" )
+
+ parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0 \t (C) Cisco Systems Inc.\n')
+
+ parser.add_argument("-t", "--trex-host", required = True, dest="trex_host",
+ action="store", help="Specify the hostname or ip to connect with T-Rex server.",
+ metavar="HOST" )
+ parser.add_argument("-p", "--trex-port", type=int, default = 8090, metavar="PORT", dest="trex_port",
+ help="Select port on which the T-Rex server listens. Default port is 8090.", action="store")
+ parser.add_argument("-m", "--maxhist", type=int, default = 100, metavar="SIZE", dest="hist_size",
+ help="Specify maximum history size saved at client side. Default size is 100.", action="store")
+ parser.add_argument("--verbose", dest="verbose",
+ action="store_true", help="Switch ON verbose option at T-Rex client. Default is: OFF.",
+ default = False )
+ args = parser.parse_args()
+
+ try:
+ InteractiveTRexClient(args.trex_host, args.hist_size, args.trex_port, args.verbose).cmdloop()
+
+ except KeyboardInterrupt:
+ print termstyle.cyan('Bye Bye!')
+ exit(-1)
+ except socket.error, e:
+ if e.errno == errno.ECONNREFUSED:
+ raise socket.error(errno.ECONNREFUSED, "Connection from T-Rex server was terminated. Please make sure the server is up.")
+
+
+
diff --git a/scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py b/scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py
new file mode 100755
index 00000000..7e7f6139
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py
@@ -0,0 +1,105 @@
+#!/router/bin/python
+
+######################################################################################
+### ###
+### T-Rex end-to-end demo script, written by T-Rex dev-team ###
+### THIS SCRIPT ASSUMES PyYaml and Scapy INSTALLED ON PYTHON'S RUNNING MACHINE ###
+### (for any question please contact trex-dev team @ trex-dev@cisco.com) ###
+### ###
+######################################################################################
+
+
+import logging
+import time
+import trex_root_path
+from client.trex_client import *
+from client_utils.general_utils import *
+from client_utils.trex_yaml_gen import *
+from pprint import pprint
+from argparse import ArgumentParser
+
+# import scapy package
+logging.getLogger("scapy.runtime").setLevel(logging.ERROR) # supress scapy import warnings from being displayed
+from scapy.all import *
+
+
+def generate_dns_packets (src_ip, dst_ip):
+ dns_rqst = Ether(src='00:15:17:a7:75:a3', dst='e0:5f:b9:69:e9:22')/IP(src=src_ip,dst=dst_ip,version=4L)/UDP(dport=53, sport=1030)/DNS(rd=1,qd=DNSQR(qname="www.cisco.com"))
+ dns_resp = Ether(src='e0:5f:b9:69:e9:22', dst='00:15:17:a7:75:a3')/IP(src=dst_ip,dst=src_ip,version=4L)/UDP(dport=1030, sport=53)/DNS(aa=1L, qr=1L, an=DNSRR(rclass=1, rrname='www.cisco.com.', rdata='100.100.100.100', type=1), ad=0L, qdcount=1, ns=None, tc=0L, rd=0L, ar=None, opcode=0L, ra=1L, cd=0L, z=0L, rcode=0L, qd=DNSQR(qclass=1, qtype=1, qname='www.cisco.com.'))
+ return [dns_rqst, dns_resp]
+
+def pkts_to_pcap (pcap_filename, packets):
+ wrpcap(pcap_filename, packets)
+
+
+def main (args):
+ # instantiate T-Rex client
+ trex = CTRexClient('trex-dan', verbose = args.verbose)
+
+ if args.steps:
+ print "\nNext step: .pcap generation."
+ raw_input("Press Enter to continue...")
+ # generate T-Rex traffic.
+ pkts = generate_dns_packets('21.0.0.2', '22.0.0.12') # In this case - DNS traffic (request-response)
+ print "\ngenerated traffic:"
+ print "=================="
+ map(lambda x: pprint(x.summary()) , pkts)
+ pkts_to_pcap("dns_traffic.pcap", pkts) # Export the generated to a .pcap file
+
+ if args.steps:
+ print "\nNext step: .yaml generation."
+ raw_input("Press Enter to continue...")
+ # Generate .yaml file that uses the generated .pcap file
+ trex_files_path = trex.get_trex_files_path() # fetch the path in which packets are saved on T-Rex server
+ yaml_obj = CTRexYaml(trex_files_path) # instantiate CTRexYaml obj
+
+ # set .yaml file parameters according to need and use
+ ret_idx = yaml_obj.add_pcap_file("dns_traffic.pcap")
+ yaml_obj.set_cap_info_param('cps', 1.1, ret_idx)
+
+ # export yaml_ob to .yaml file
+ yaml_file_path = trex_files_path + 'dns_traffic.yaml'
+ yaml_obj.to_yaml('dns_traffic.yaml')
+ print "\ngenerated .yaml file:"
+ print "===================="
+ yaml_obj.dump()
+
+ if args.steps:
+ print "\nNext step: run T-Rex with provided files."
+ raw_input("Press Enter to continue...")
+ # push all relevant files to server
+ trex.push_files( yaml_obj.get_file_list() )
+
+ print "\nStarting T-Rex..."
+ trex.start_trex(c = 2,
+ m = 1.5,
+ nc = True,
+ p = True,
+ d = 30,
+                    f = yaml_file_path, # <-- we use our generated .yaml file here
+ l = 1000)
+
+ if args.verbose:
+ print "T-Rex state changed to 'Running'."
+ print "Sampling T-Rex in 0.2 samples/sec (single sample every 5 secs)"
+
+ last_res = dict()
+ while trex.is_running(dump_out = last_res):
+ print "CURRENT RESULT OBJECT:"
+ obj = trex.get_result_obj()
+ print obj
+ time.sleep(5)
+
+
+if __name__ == "__main__":
+ parser = ArgumentParser(description = 'Run T-Rex client API end-to-end example.',
+ usage = """pkt_generation_for_trex [options]""" )
+
+ parser.add_argument("-s", "--step-by-step", dest="steps",
+ action="store_false", help="Switch OFF step-by-step script overview. Default is: ON.",
+ default = True )
+ parser.add_argument("--verbose", dest="verbose",
+ action="store_true", help="Switch ON verbose option at T-Rex client. Default is: OFF.",
+ default = False )
+ args = parser.parse_args()
+ main(args) \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/examples/trex_root_path.py b/scripts/automation/trex_control_plane/examples/trex_root_path.py
new file mode 100755
index 00000000..3aefd1d2
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/trex_root_path.py
@@ -0,0 +1,15 @@
+#!/router/bin/python
+
+import os
+import sys
+
+def add_root_to_path ():
+ """adds trex_control_plane root dir to script path, up to `depth` parent dirs"""
+ root_dirname = 'trex_control_plane'
+ file_path = os.path.dirname(os.path.realpath(__file__))
+
+ components = file_path.split(os.sep)
+ sys.path.append( str.join(os.sep, components[:components.index(root_dirname)+1]) )
+ return
+
+add_root_to_path()
diff --git a/scripts/automation/trex_control_plane/python_lib/__init__.py b/scripts/automation/trex_control_plane/python_lib/__init__.py
new file mode 100755
index 00000000..d3f5a12f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/__init__.py
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/PKG-INFO b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/PKG-INFO
new file mode 100755
index 00000000..428ce0e3
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/PKG-INFO
@@ -0,0 +1,746 @@
+Metadata-Version: 1.1
+Name: enum34
+Version: 1.0.4
+Summary: Python 3.4 Enum backported to 3.3, 3.2, 3.1, 2.7, 2.6, 2.5, and 2.4
+Home-page: https://pypi.python.org/pypi/enum34
+Author: Ethan Furman
+Author-email: ethan@stoneleaf.us
+License: BSD License
+Description: ``enum`` --- support for enumerations
+ ========================================
+
+ .. :synopsis: enumerations are sets of symbolic names bound to unique, constant
+ values.
+ .. :moduleauthor:: Ethan Furman <ethan@stoneleaf.us>
+ .. :sectionauthor:: Barry Warsaw <barry@python.org>,
+ .. :sectionauthor:: Eli Bendersky <eliben@gmail.com>,
+ .. :sectionauthor:: Ethan Furman <ethan@stoneleaf.us>
+
+ ----------------
+
+ An enumeration is a set of symbolic names (members) bound to unique, constant
+ values. Within an enumeration, the members can be compared by identity, and
+ the enumeration itself can be iterated over.
+
+
+ Module Contents
+ ---------------
+
+ This module defines two enumeration classes that can be used to define unique
+ sets of names and values: ``Enum`` and ``IntEnum``. It also defines
+ one decorator, ``unique``.
+
+ ``Enum``
+
+ Base class for creating enumerated constants. See section `Functional API`_
+ for an alternate construction syntax.
+
+ ``IntEnum``
+
+ Base class for creating enumerated constants that are also subclasses of ``int``.
+
+ ``unique``
+
+ Enum class decorator that ensures only one name is bound to any one value.
+
+
+ Creating an Enum
+ ----------------
+
+ Enumerations are created using the ``class`` syntax, which makes them
+ easy to read and write. An alternative creation method is described in
+ `Functional API`_. To define an enumeration, subclass ``Enum`` as
+ follows::
+
+ >>> from enum import Enum
+ >>> class Color(Enum):
+ ... red = 1
+ ... green = 2
+ ... blue = 3
+
+ Note: Nomenclature
+
+ - The class ``Color`` is an *enumeration* (or *enum*)
+ - The attributes ``Color.red``, ``Color.green``, etc., are
+ *enumeration members* (or *enum members*).
+ - The enum members have *names* and *values* (the name of
+ ``Color.red`` is ``red``, the value of ``Color.blue`` is
+ ``3``, etc.)
+
+ Note:
+
+ Even though we use the ``class`` syntax to create Enums, Enums
+ are not normal Python classes. See `How are Enums different?`_ for
+ more details.
+
+ Enumeration members have human readable string representations::
+
+ >>> print(Color.red)
+ Color.red
+
+ ...while their ``repr`` has more information::
+
+ >>> print(repr(Color.red))
+ <Color.red: 1>
+
+ The *type* of an enumeration member is the enumeration it belongs to::
+
+ >>> type(Color.red)
+ <enum 'Color'>
+ >>> isinstance(Color.green, Color)
+ True
+ >>>
+
+ Enum members also have a property that contains just their item name::
+
+ >>> print(Color.red.name)
+ red
+
+ Enumerations support iteration. In Python 3.x definition order is used; in
+ Python 2.x the definition order is not available, but class attribute
+ ``__order__`` is supported; otherwise, value order is used::
+
+ >>> class Shake(Enum):
+ ... __order__ = 'vanilla chocolate cookies mint' # only needed in 2.x
+ ... vanilla = 7
+ ... chocolate = 4
+ ... cookies = 9
+ ... mint = 3
+ ...
+ >>> for shake in Shake:
+ ... print(shake)
+ ...
+ Shake.vanilla
+ Shake.chocolate
+ Shake.cookies
+ Shake.mint
+
+ The ``__order__`` attribute is always removed, and in 3.x it is also ignored
+ (order is definition order); however, in the stdlib version it will be ignored
+ but not removed.
+
+ Enumeration members are hashable, so they can be used in dictionaries and sets::
+
+ >>> apples = {}
+ >>> apples[Color.red] = 'red delicious'
+ >>> apples[Color.green] = 'granny smith'
+ >>> apples == {Color.red: 'red delicious', Color.green: 'granny smith'}
+ True
+
+
+ Programmatic access to enumeration members and their attributes
+ ---------------------------------------------------------------
+
+ Sometimes it's useful to access members in enumerations programmatically (i.e.
+ situations where ``Color.red`` won't do because the exact color is not known
+ at program-writing time). ``Enum`` allows such access::
+
+ >>> Color(1)
+ <Color.red: 1>
+ >>> Color(3)
+ <Color.blue: 3>
+
+ If you want to access enum members by *name*, use item access::
+
+ >>> Color['red']
+ <Color.red: 1>
+ >>> Color['green']
+ <Color.green: 2>
+
+        If you have an enum member and need its ``name`` or ``value``::
+
+ >>> member = Color.red
+ >>> member.name
+ 'red'
+ >>> member.value
+ 1
+
+
+ Duplicating enum members and values
+ -----------------------------------
+
+ Having two enum members (or any other attribute) with the same name is invalid;
+ in Python 3.x this would raise an error, but in Python 2.x the second member
+ simply overwrites the first::
+
+ >>> # python 2.x
+ >>> class Shape(Enum):
+ ... square = 2
+ ... square = 3
+ ...
+ >>> Shape.square
+ <Shape.square: 3>
+
+ >>> # python 3.x
+ >>> class Shape(Enum):
+ ... square = 2
+ ... square = 3
+ Traceback (most recent call last):
+ ...
+ TypeError: Attempted to reuse key: 'square'
+
+ However, two enum members are allowed to have the same value. Given two members
+ A and B with the same value (and A defined first), B is an alias to A. By-value
+ lookup of the value of A and B will return A. By-name lookup of B will also
+ return A::
+
+ >>> class Shape(Enum):
+ ... __order__ = 'square diamond circle alias_for_square' # only needed in 2.x
+ ... square = 2
+ ... diamond = 1
+ ... circle = 3
+ ... alias_for_square = 2
+ ...
+ >>> Shape.square
+ <Shape.square: 2>
+ >>> Shape.alias_for_square
+ <Shape.square: 2>
+ >>> Shape(2)
+ <Shape.square: 2>
+
+
+ Allowing aliases is not always desirable. ``unique`` can be used to ensure
+ that none exist in a particular enumeration::
+
+ >>> from enum import unique
+ >>> @unique
+ ... class Mistake(Enum):
+ ... __order__ = 'one two three four' # only needed in 2.x
+ ... one = 1
+ ... two = 2
+ ... three = 3
+ ... four = 3
+ Traceback (most recent call last):
+ ...
+ ValueError: duplicate names found in <enum 'Mistake'>: four -> three
+
+ Iterating over the members of an enum does not provide the aliases::
+
+ >>> list(Shape)
+ [<Shape.square: 2>, <Shape.diamond: 1>, <Shape.circle: 3>]
+
+ The special attribute ``__members__`` is a dictionary mapping names to members.
+ It includes all names defined in the enumeration, including the aliases::
+
+ >>> for name, member in sorted(Shape.__members__.items()):
+ ... name, member
+ ...
+ ('alias_for_square', <Shape.square: 2>)
+ ('circle', <Shape.circle: 3>)
+ ('diamond', <Shape.diamond: 1>)
+ ('square', <Shape.square: 2>)
+
+ The ``__members__`` attribute can be used for detailed programmatic access to
+ the enumeration members. For example, finding all the aliases::
+
+ >>> [name for name, member in Shape.__members__.items() if member.name != name]
+ ['alias_for_square']
+
+ Comparisons
+ -----------
+
+ Enumeration members are compared by identity::
+
+ >>> Color.red is Color.red
+ True
+ >>> Color.red is Color.blue
+ False
+ >>> Color.red is not Color.blue
+ True
+
+ Ordered comparisons between enumeration values are *not* supported. Enum
+ members are not integers (but see `IntEnum`_ below)::
+
+ >>> Color.red < Color.blue
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ TypeError: unorderable types: Color() < Color()
+
+ .. warning::
+
+ In Python 2 *everything* is ordered, even though the ordering may not
+            make sense. If you want your enumerations to have a sensible ordering,
+ check out the `OrderedEnum`_ recipe below.
+
+
+ Equality comparisons are defined though::
+
+ >>> Color.blue == Color.red
+ False
+ >>> Color.blue != Color.red
+ True
+ >>> Color.blue == Color.blue
+ True
+
+ Comparisons against non-enumeration values will always compare not equal
+ (again, ``IntEnum`` was explicitly designed to behave differently, see
+ below)::
+
+ >>> Color.blue == 2
+ False
+
+
+ Allowed members and attributes of enumerations
+ ----------------------------------------------
+
+ The examples above use integers for enumeration values. Using integers is
+ short and handy (and provided by default by the `Functional API`_), but not
+ strictly enforced. In the vast majority of use-cases, one doesn't care what
+ the actual value of an enumeration is. But if the value *is* important,
+ enumerations can have arbitrary values.
+
+ Enumerations are Python classes, and can have methods and special methods as
+ usual. If we have this enumeration::
+
+ >>> class Mood(Enum):
+ ... funky = 1
+ ... happy = 3
+ ...
+ ... def describe(self):
+ ... # self is the member here
+ ... return self.name, self.value
+ ...
+ ... def __str__(self):
+ ... return 'my custom str! {0}'.format(self.value)
+ ...
+ ... @classmethod
+ ... def favorite_mood(cls):
+ ... # cls here is the enumeration
+ ... return cls.happy
+
+ Then::
+
+ >>> Mood.favorite_mood()
+ <Mood.happy: 3>
+ >>> Mood.happy.describe()
+ ('happy', 3)
+ >>> str(Mood.funky)
+ 'my custom str! 1'
+
+ The rules for what is allowed are as follows: _sunder_ names (starting and
+ ending with a single underscore) are reserved by enum and cannot be used;
+ all other attributes defined within an enumeration will become members of this
+ enumeration, with the exception of *__dunder__* names and descriptors (methods
+ are also descriptors).
+
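+        For example (an illustrative sketch; ``Team`` is a hypothetical name,
+        not part of this module), a method defined in the class body is a
+        descriptor and therefore does not become a member::
+
+        >>> class Team(Enum):
+        ...     dev = 1
+        ...     def describe(self):         # a descriptor, so not a member
+        ...         return self.name
+        ...
+        >>> list(Team)
+        [<Team.dev: 1>]
+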
+ Note:
+
+ If your enumeration defines ``__new__`` and/or ``__init__`` then
+ whatever value(s) were given to the enum member will be passed into
+ those methods. See `Planet`_ for an example.
+
+
+ Restricted subclassing of enumerations
+ --------------------------------------
+
+ Subclassing an enumeration is allowed only if the enumeration does not define
+ any members. So this is forbidden::
+
+ >>> class MoreColor(Color):
+ ... pink = 17
+ Traceback (most recent call last):
+ ...
+ TypeError: Cannot extend enumerations
+
+ But this is allowed::
+
+ >>> class Foo(Enum):
+ ... def some_behavior(self):
+ ... pass
+ ...
+ >>> class Bar(Foo):
+ ... happy = 1
+ ... sad = 2
+ ...
+
+ Allowing subclassing of enums that define members would lead to a violation of
+ some important invariants of types and instances. On the other hand, it makes
+ sense to allow sharing some common behavior between a group of enumerations.
+ (See `OrderedEnum`_ for an example.)
+
+
+ Pickling
+ --------
+
+ Enumerations can be pickled and unpickled::
+
+ >>> from enum.test_enum import Fruit
+ >>> from pickle import dumps, loads
+ >>> Fruit.tomato is loads(dumps(Fruit.tomato, 2))
+ True
+
+ The usual restrictions for pickling apply: picklable enums must be defined in
+ the top level of a module, since unpickling requires them to be importable
+ from that module.
+
+ Note:
+
+ With pickle protocol version 4 (introduced in Python 3.4) it is possible
+ to easily pickle enums nested in other classes.
+
+
+
+ Functional API
+ --------------
+
+ The ``Enum`` class is callable, providing the following functional API::
+
+ >>> Animal = Enum('Animal', 'ant bee cat dog')
+ >>> Animal
+ <enum 'Animal'>
+ >>> Animal.ant
+ <Animal.ant: 1>
+ >>> Animal.ant.value
+ 1
+ >>> list(Animal)
+ [<Animal.ant: 1>, <Animal.bee: 2>, <Animal.cat: 3>, <Animal.dog: 4>]
+
+ The semantics of this API resemble ``namedtuple``. The first argument
+ of the call to ``Enum`` is the name of the enumeration.
+
+ The second argument is the *source* of enumeration member names. It can be a
+ whitespace-separated string of names, a sequence of names, a sequence of
+ 2-tuples with key/value pairs, or a mapping (e.g. dictionary) of names to
+ values. The last two options enable assigning arbitrary values to
+ enumerations; the others auto-assign increasing integers starting with 1. A
+ new class derived from ``Enum`` is returned. In other words, the above
+ assignment to ``Animal`` is equivalent to::
+
+ >>> class Animals(Enum):
+ ... ant = 1
+ ... bee = 2
+ ... cat = 3
+ ... dog = 4
+
+ Pickling enums created with the functional API can be tricky as frame stack
+ implementation details are used to try and figure out which module the
+ enumeration is being created in (e.g. it will fail if you use a utility
+        function in a separate module, and also may not work on IronPython or Jython).
+ The solution is to specify the module name explicitly as follows::
+
+ >>> Animals = Enum('Animals', 'ant bee cat dog', module=__name__)
+
+ Derived Enumerations
+ --------------------
+
+ IntEnum
+ ^^^^^^^
+
+ A variation of ``Enum`` is provided which is also a subclass of
+ ``int``. Members of an ``IntEnum`` can be compared to integers;
+ by extension, integer enumerations of different types can also be compared
+ to each other::
+
+ >>> from enum import IntEnum
+ >>> class Shape(IntEnum):
+ ... circle = 1
+ ... square = 2
+ ...
+ >>> class Request(IntEnum):
+ ... post = 1
+ ... get = 2
+ ...
+ >>> Shape == 1
+ False
+ >>> Shape.circle == 1
+ True
+ >>> Shape.circle == Request.post
+ True
+
+ However, they still can't be compared to standard ``Enum`` enumerations::
+
+ >>> class Shape(IntEnum):
+ ... circle = 1
+ ... square = 2
+ ...
+ >>> class Color(Enum):
+ ... red = 1
+ ... green = 2
+ ...
+ >>> Shape.circle == Color.red
+ False
+
+ ``IntEnum`` values behave like integers in other ways you'd expect::
+
+ >>> int(Shape.circle)
+ 1
+ >>> ['a', 'b', 'c'][Shape.circle]
+ 'b'
+ >>> [i for i in range(Shape.square)]
+ [0, 1]
+
+ For the vast majority of code, ``Enum`` is strongly recommended,
+ since ``IntEnum`` breaks some semantic promises of an enumeration (by
+ being comparable to integers, and thus by transitivity to other
+ unrelated enumerations). It should be used only in special cases where
+ there's no other choice; for example, when integer constants are
+ replaced with enumerations and backwards compatibility is required with code
+ that still expects integers.
+
+
+ Others
+ ^^^^^^
+
+ While ``IntEnum`` is part of the ``enum`` module, it would be very
+ simple to implement independently::
+
+ class IntEnum(int, Enum):
+ pass
+
+ This demonstrates how similar derived enumerations can be defined; for example
+ a ``StrEnum`` that mixes in ``str`` instead of ``int``.
+
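+        A minimal sketch of such a ``StrEnum`` (``Tag`` and its members are
+        hypothetical names, not part of this module)::
+
+        >>> class StrEnum(str, Enum):
+        ...     pass
+        ...
+        >>> class Tag(StrEnum):
+        ...     head = 'head'
+        ...     body = 'body'
+        ...
+        >>> Tag.head == 'head'          # compares as a str, like IntEnum with int
+        True
+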
+ Some rules:
+
+ 1. When subclassing ``Enum``, mix-in types must appear before
+ ``Enum`` itself in the sequence of bases, as in the ``IntEnum``
+ example above.
+ 2. While ``Enum`` can have members of any type, once you mix in an
+ additional type, all the members must have values of that type, e.g.
+ ``int`` above. This restriction does not apply to mix-ins which only
+ add methods and don't specify another data type such as ``int`` or
+ ``str``.
+ 3. When another data type is mixed in, the ``value`` attribute is *not the
+           same* as the enum member itself, although it is equivalent and will compare
+ equal.
+ 4. %-style formatting: ``%s`` and ``%r`` call ``Enum``'s ``__str__`` and
+ ``__repr__`` respectively; other codes (such as ``%i`` or ``%h`` for
+ IntEnum) treat the enum member as its mixed-in type.
+
+ Note: Prior to Python 3.4 there is a bug in ``str``'s %-formatting: ``int``
+ subclasses are printed as strings and not numbers when the ``%d``, ``%i``,
+ or ``%u`` codes are used.
+ 5. ``str.__format__`` (or ``format``) will use the mixed-in
+ type's ``__format__``. If the ``Enum``'s ``str`` or
+ ``repr`` is desired use the ``!s`` or ``!r`` ``str`` format codes.
+
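+        To illustrate rules 4 and 5 with the ``Shape`` ``IntEnum`` from above
+        (a quick sketch)::
+
+        >>> '%s' % Shape.circle         # %s uses Enum's __str__
+        'Shape.circle'
+        >>> '{0}'.format(Shape.circle)  # format() uses int's __format__
+        '1'
+        >>> '{0!s}'.format(Shape.circle)
+        'Shape.circle'
+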
+
+ Decorators
+ ----------
+
+ unique
+ ^^^^^^
+
+ A ``class`` decorator specifically for enumerations. It searches an
+ enumeration's ``__members__`` gathering any aliases it finds; if any are
+ found ``ValueError`` is raised with the details::
+
+ >>> @unique
+ ... class NoDupes(Enum):
+ ... first = 'one'
+ ... second = 'two'
+ ... third = 'two'
+ Traceback (most recent call last):
+ ...
+ ValueError: duplicate names found in <enum 'NoDupes'>: third -> second
+
+
+ Interesting examples
+ --------------------
+
+ While ``Enum`` and ``IntEnum`` are expected to cover the majority of
+ use-cases, they cannot cover them all. Here are recipes for some different
+ types of enumerations that can be used directly, or as examples for creating
+ one's own.
+
+
+ AutoNumber
+ ^^^^^^^^^^
+
+ Avoids having to specify the value for each enumeration member::
+
+ >>> class AutoNumber(Enum):
+ ... def __new__(cls):
+ ... value = len(cls.__members__) + 1
+ ... obj = object.__new__(cls)
+ ... obj._value_ = value
+ ... return obj
+ ...
+ >>> class Color(AutoNumber):
+ ... __order__ = "red green blue" # only needed in 2.x
+ ... red = ()
+ ... green = ()
+ ... blue = ()
+ ...
+ >>> Color.green.value == 2
+ True
+
+ Note:
+
+ The `__new__` method, if defined, is used during creation of the Enum
+ members; it is then replaced by Enum's `__new__` which is used after
+ class creation for lookup of existing members. Due to the way Enums are
+ supposed to behave, there is no way to customize Enum's `__new__`.
+
+
+ UniqueEnum
+ ^^^^^^^^^^
+
+ Raises an error if a duplicate member name is found instead of creating an
+ alias::
+
+ >>> class UniqueEnum(Enum):
+ ... def __init__(self, *args):
+ ... cls = self.__class__
+ ... if any(self.value == e.value for e in cls):
+ ... a = self.name
+ ... e = cls(self.value).name
+ ... raise ValueError(
+ ... "aliases not allowed in UniqueEnum: %r --> %r"
+ ... % (a, e))
+ ...
+ >>> class Color(UniqueEnum):
+ ... red = 1
+ ... green = 2
+ ... blue = 3
+ ... grene = 2
+ Traceback (most recent call last):
+ ...
+ ValueError: aliases not allowed in UniqueEnum: 'grene' --> 'green'
+
+
+ OrderedEnum
+ ^^^^^^^^^^^
+
+ An ordered enumeration that is not based on ``IntEnum`` and so maintains
+ the normal ``Enum`` invariants (such as not being comparable to other
+ enumerations)::
+
+ >>> class OrderedEnum(Enum):
+ ... def __ge__(self, other):
+ ... if self.__class__ is other.__class__:
+ ... return self._value_ >= other._value_
+ ... return NotImplemented
+ ... def __gt__(self, other):
+ ... if self.__class__ is other.__class__:
+ ... return self._value_ > other._value_
+ ... return NotImplemented
+ ... def __le__(self, other):
+ ... if self.__class__ is other.__class__:
+ ... return self._value_ <= other._value_
+ ... return NotImplemented
+ ... def __lt__(self, other):
+ ... if self.__class__ is other.__class__:
+ ... return self._value_ < other._value_
+ ... return NotImplemented
+ ...
+ >>> class Grade(OrderedEnum):
+        ...     __order__ = 'A B C D F'   # only needed in 2.x
+ ... A = 5
+ ... B = 4
+ ... C = 3
+ ... D = 2
+ ... F = 1
+ ...
+ >>> Grade.C < Grade.A
+ True
+
+
+ Planet
+ ^^^^^^
+
+ If ``__new__`` or ``__init__`` is defined the value of the enum member
+ will be passed to those methods::
+
+ >>> class Planet(Enum):
+ ... MERCURY = (3.303e+23, 2.4397e6)
+ ... VENUS = (4.869e+24, 6.0518e6)
+ ... EARTH = (5.976e+24, 6.37814e6)
+ ... MARS = (6.421e+23, 3.3972e6)
+ ... JUPITER = (1.9e+27, 7.1492e7)
+ ... SATURN = (5.688e+26, 6.0268e7)
+ ... URANUS = (8.686e+25, 2.5559e7)
+ ... NEPTUNE = (1.024e+26, 2.4746e7)
+ ... def __init__(self, mass, radius):
+ ... self.mass = mass # in kilograms
+ ... self.radius = radius # in meters
+ ... @property
+ ... def surface_gravity(self):
+ ... # universal gravitational constant (m3 kg-1 s-2)
+ ... G = 6.67300E-11
+ ... return G * self.mass / (self.radius * self.radius)
+ ...
+ >>> Planet.EARTH.value
+ (5.976e+24, 6378140.0)
+ >>> Planet.EARTH.surface_gravity
+ 9.802652743337129
+
+
+ How are Enums different?
+ ------------------------
+
+ Enums have a custom metaclass that affects many aspects of both derived Enum
+ classes and their instances (members).
+
+
+ Enum Classes
+ ^^^^^^^^^^^^
+
+ The ``EnumMeta`` metaclass is responsible for providing the
+ ``__contains__``, ``__dir__``, ``__iter__`` and other methods that
+ allow one to do things with an ``Enum`` class that fail on a typical
+ class, such as ``list(Color)`` or ``some_var in Color``. ``EnumMeta`` is
+ responsible for ensuring that various other methods on the final ``Enum``
+ class are correct (such as ``__new__``, ``__getnewargs__``,
+ ``__str__`` and ``__repr__``)
+
+
+ Enum Members (aka instances)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The most interesting thing about Enum members is that they are singletons.
+ ``EnumMeta`` creates them all while it is creating the ``Enum``
+ class itself, and then puts a custom ``__new__`` in place to ensure
+ that no new ones are ever instantiated by returning only the existing
+ member instances.
+
+
+ Finer Points
+ ^^^^^^^^^^^^
+
+ Enum members are instances of an Enum class, and even though they are
+ accessible as ``EnumClass.member``, they are not accessible directly from
+ the member::
+
+ >>> Color.red
+ <Color.red: 1>
+ >>> Color.red.blue
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'Color' object has no attribute 'blue'
+
+ Likewise, ``__members__`` is only available on the class.
+
+ In Python 3.x ``__members__`` is always an ``OrderedDict``, with the order being
+ the definition order. In Python 2.7 ``__members__`` is an ``OrderedDict`` if
+ ``__order__`` was specified, and a plain ``dict`` otherwise. In all other Python
+ 2.x versions ``__members__`` is a plain ``dict`` even if ``__order__`` was specified
+ as the ``OrderedDict`` type didn't exist yet.
+
+ If you give your ``Enum`` subclass extra methods, like the `Planet`_
+ class above, those methods will show up in a `dir` of the member,
+ but not of the class::
+
+ >>> dir(Planet)
+ ['EARTH', 'JUPITER', 'MARS', 'MERCURY', 'NEPTUNE', 'SATURN', 'URANUS',
+ 'VENUS', '__class__', '__doc__', '__members__', '__module__']
+ >>> dir(Planet.EARTH)
+ ['__class__', '__doc__', '__module__', 'name', 'surface_gravity', 'value']
+
+ A ``__new__`` method will only be used for the creation of the
+        ``Enum`` members -- after that it is replaced. This means that if you
+        wish to change how ``Enum`` members are looked up, you either have to
+        write a helper function or a ``classmethod``.
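+
+        For example, a hypothetical ``classmethod`` helper for case-insensitive
+        lookup (a sketch; ``Color2`` and ``from_name`` are not part of this
+        module)::
+
+        >>> class Color2(Enum):
+        ...     red = 1
+        ...     green = 2
+        ...     @classmethod
+        ...     def from_name(cls, name):
+        ...         # normalize, then do the usual by-name lookup
+        ...         return cls[name.lower()]
+        ...
+        >>> Color2.from_name('RED')
+        <Color2.red: 1>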
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development
+Classifier: Programming Language :: Python :: 2.4
+Classifier: Programming Language :: Python :: 2.5
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Provides: enum
diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/LICENSE b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/LICENSE
new file mode 100755
index 00000000..9003b885
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/LICENSE
@@ -0,0 +1,32 @@
+Copyright (c) 2013, Ethan Furman.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ Redistributions of source code must retain the above
+ copyright notice, this list of conditions and the
+ following disclaimer.
+
+ Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials
+ provided with the distribution.
+
+ Neither the name Ethan Furman nor the names of any
+ contributors may be used to endorse or promote products
+ derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/README b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/README
new file mode 100755
index 00000000..511af984
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/README
@@ -0,0 +1,2 @@
+enum34 is the new Python stdlib enum module available in Python 3.4
+backported for previous versions of Python from 2.4 to 3.3.
diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/__init__.py b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/__init__.py
new file mode 100755
index 00000000..6a327a8a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/__init__.py
@@ -0,0 +1,790 @@
+"""Python Enumerations"""
+
+import sys as _sys
+
+__all__ = ['Enum', 'IntEnum', 'unique']
+
+version = 1, 0, 4
+
+pyver = float('%s.%s' % _sys.version_info[:2])
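+# e.g. 2.7 for CPython 2.7.x; used below to select version-dependent behavior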
+
+try:
+ any
+except NameError:
+ def any(iterable):
+ for element in iterable:
+ if element:
+ return True
+ return False
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ OrderedDict = None
+
+try:
+ basestring
+except NameError:
+ # In Python 2 basestring is the ancestor of both str and unicode
+ # in Python 3 it's just str, but was missing in 3.1
+ basestring = str
+
+try:
+ unicode
+except NameError:
+ # In Python 3 unicode no longer exists (it's just str)
+ unicode = str
+
+class _RouteClassAttributeToGetattr(object):
+ """Route attribute access on a class to __getattr__.
+
+ This is a descriptor, used to define attributes that act differently when
+ accessed through an instance and through a class. Instance access remains
+ normal, but access to an attribute through a class will be routed to the
+ class's __getattr__ method; this is done by raising AttributeError.
+
+ """
+ def __init__(self, fget=None):
+ self.fget = fget
+
+ def __get__(self, instance, ownerclass=None):
+ if instance is None:
+ raise AttributeError()
+ return self.fget(instance)
+
+ def __set__(self, instance, value):
+ raise AttributeError("can't set attribute")
+
+ def __delete__(self, instance):
+ raise AttributeError("can't delete attribute")
+
+
+def _is_descriptor(obj):
+ """Returns True if obj is a descriptor, False otherwise."""
+ return (
+ hasattr(obj, '__get__') or
+ hasattr(obj, '__set__') or
+ hasattr(obj, '__delete__'))
+
+
+def _is_dunder(name):
+ """Returns True if a __dunder__ name, False otherwise."""
+ return (name[:2] == name[-2:] == '__' and
+ name[2:3] != '_' and
+ name[-3:-2] != '_' and
+ len(name) > 4)
+
+
+def _is_sunder(name):
+ """Returns True if a _sunder_ name, False otherwise."""
+ return (name[0] == name[-1] == '_' and
+ name[1:2] != '_' and
+ name[-2:-1] != '_' and
+ len(name) > 2)
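+# e.g. _is_dunder('__repr__') and _is_sunder('_value_') are True, while plain
+# names such as 'red' match neither and so are eligible to become enum members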
+
+
+def _make_class_unpicklable(cls):
+ """Make the given class un-picklable."""
+ def _break_on_call_reduce(self, protocol=None):
+ raise TypeError('%r cannot be pickled' % self)
+ cls.__reduce_ex__ = _break_on_call_reduce
+ cls.__module__ = '<unknown>'
+
+
+class _EnumDict(dict):
+ """Track enum member order and ensure member names are not reused.
+
+ EnumMeta will use the names found in self._member_names as the
+ enumeration member names.
+
+ """
+ def __init__(self):
+ super(_EnumDict, self).__init__()
+ self._member_names = []
+
+ def __setitem__(self, key, value):
+ """Changes anything not dundered or not a descriptor.
+
+ If a descriptor is added with the same name as an enum member, the name
+ is removed from _member_names (this may leave a hole in the numerical
+ sequence of values).
+
+ If an enum member name is used twice, an error is raised; duplicate
+ values are not checked for.
+
+ Single underscore (sunder) names are reserved.
+
+        Note: in 3.x __order__ is simply discarded as an unnecessary leftover
+        from 2.x
+
+ """
+ if pyver >= 3.0 and key == '__order__':
+ return
+ if _is_sunder(key):
+ raise ValueError('_names_ are reserved for future Enum use')
+ elif _is_dunder(key):
+ pass
+ elif key in self._member_names:
+ # descriptor overwriting an enum?
+ raise TypeError('Attempted to reuse key: %r' % key)
+ elif not _is_descriptor(value):
+ if key in self:
+ # enum overwriting a descriptor?
+ raise TypeError('Key already defined as: %r' % self[key])
+ self._member_names.append(key)
+ super(_EnumDict, self).__setitem__(key, value)
+
+
+# Dummy value for Enum as EnumMeta explicitly checks for it, but of course until
+# EnumMeta finishes running the first time the Enum class doesn't exist. This
+# is also why there are checks in EnumMeta like `if Enum is not None`
+Enum = None
+
+
+class EnumMeta(type):
+ """Metaclass for Enum"""
+ @classmethod
+ def __prepare__(metacls, cls, bases):
+ return _EnumDict()
+
+ def __new__(metacls, cls, bases, classdict):
+ # an Enum class is final once enumeration items have been defined; it
+ # cannot be mixed with other types (int, float, etc.) if it has an
+ # inherited __new__ unless a new __new__ is defined (or the resulting
+ # class will fail).
+ if type(classdict) is dict:
+ original_dict = classdict
+ classdict = _EnumDict()
+ for k, v in original_dict.items():
+ classdict[k] = v
+
+ member_type, first_enum = metacls._get_mixins_(bases)
+ __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
+ first_enum)
+ # save enum items into separate mapping so they don't get baked into
+ # the new class
+ members = dict((k, classdict[k]) for k in classdict._member_names)
+ for name in classdict._member_names:
+ del classdict[name]
+
+ # py2 support for definition order
+ __order__ = classdict.get('__order__')
+ if __order__ is None:
+ if pyver < 3.0:
+ try:
+ __order__ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
+ except TypeError:
+ __order__ = [name for name in sorted(members.keys())]
+ else:
+ __order__ = classdict._member_names
+ else:
+ del classdict['__order__']
+ if pyver < 3.0:
+ __order__ = __order__.replace(',', ' ').split()
+ aliases = [name for name in members if name not in __order__]
+ __order__ += aliases
+
+ # check for illegal enum names (any others?)
+ invalid_names = set(members) & set(['mro'])
+ if invalid_names:
+ raise ValueError('Invalid enum member name(s): %s' % (
+ ', '.join(invalid_names), ))
+
+ # create our new Enum type
+ enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
+ enum_class._member_names_ = [] # names in random order
+ if OrderedDict is not None:
+ enum_class._member_map_ = OrderedDict()
+ else:
+ enum_class._member_map_ = {} # name->value map
+ enum_class._member_type_ = member_type
+
+ # Reverse value->name map for hashable values.
+ enum_class._value2member_map_ = {}
+
+ # instantiate them, checking for duplicates as we go
+ # we instantiate first instead of checking for duplicates first in case
+ # a custom __new__ is doing something funky with the values -- such as
+ # auto-numbering ;)
+ if __new__ is None:
+ __new__ = enum_class.__new__
+ for member_name in __order__:
+ value = members[member_name]
+ if not isinstance(value, tuple):
+ args = (value, )
+ else:
+ args = value
+ if member_type is tuple: # special case for tuple enums
+ args = (args, ) # wrap it one more time
+ if not use_args or not args:
+ enum_member = __new__(enum_class)
+ if not hasattr(enum_member, '_value_'):
+ enum_member._value_ = value
+ else:
+ enum_member = __new__(enum_class, *args)
+ if not hasattr(enum_member, '_value_'):
+ enum_member._value_ = member_type(*args)
+ value = enum_member._value_
+ enum_member._name_ = member_name
+ enum_member.__objclass__ = enum_class
+ enum_member.__init__(*args)
+ # If another member with the same value was already defined, the
+ # new member becomes an alias to the existing one.
+ for name, canonical_member in enum_class._member_map_.items():
+ if canonical_member.value == enum_member._value_:
+ enum_member = canonical_member
+ break
+ else:
+ # Aliases don't appear in member names (only in __members__).
+ enum_class._member_names_.append(member_name)
+ enum_class._member_map_[member_name] = enum_member
+ try:
+ # This may fail if value is not hashable. We can't add the value
+ # to the map, and by-value lookups for this value will be
+ # linear.
+ enum_class._value2member_map_[value] = enum_member
+ except TypeError:
+ pass
+
+
+ # If a custom type is mixed into the Enum, and it does not know how
+ # to pickle itself, pickle.dumps will succeed but pickle.loads will
+ # fail. Rather than have the error show up later and possibly far
+ # from the source, sabotage the pickle protocol for this class so
+ # that pickle.dumps also fails.
+ #
+ # However, if the new class implements its own __reduce_ex__, do not
+ # sabotage -- it's on them to make sure it works correctly. We use
+ # __reduce_ex__ instead of any of the others as it is preferred by
+ # pickle over __reduce__, and it handles all pickle protocols.
+ unpicklable = False
+ if '__reduce_ex__' not in classdict:
+ if member_type is not object:
+ methods = ('__getnewargs_ex__', '__getnewargs__',
+ '__reduce_ex__', '__reduce__')
+ if not any(m in member_type.__dict__ for m in methods):
+ _make_class_unpicklable(enum_class)
+ unpicklable = True
+
+
+ # double check that repr and friends are not the mixin's or various
+ # things break (such as pickle)
+ for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
+ class_method = getattr(enum_class, name)
+ obj_method = getattr(member_type, name, None)
+ enum_method = getattr(first_enum, name, None)
+ if name not in classdict and class_method is not enum_method:
+ if name == '__reduce_ex__' and unpicklable:
+ continue
+ setattr(enum_class, name, enum_method)
+
+        # method resolution and ints are not playing nice;
+        # Python versions before 2.6 use __cmp__
+
+ if pyver < 2.6:
+
+ if issubclass(enum_class, int):
+ setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
+
+ elif pyver < 3.0:
+
+ if issubclass(enum_class, int):
+ for method in (
+ '__le__',
+ '__lt__',
+ '__gt__',
+ '__ge__',
+ '__eq__',
+ '__ne__',
+ '__hash__',
+ ):
+ setattr(enum_class, method, getattr(int, method))
+
+ # replace any other __new__ with our own (as long as Enum is not None,
+ # anyway) -- again, this is to support pickle
+ if Enum is not None:
+ # if the user defined their own __new__, save it before it gets
+ # clobbered in case they subclass later
+ if save_new:
+ setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
+ setattr(enum_class, '__new__', Enum.__dict__['__new__'])
+ return enum_class
+
+ def __call__(cls, value, names=None, module=None, type=None):
+ """Either returns an existing member, or creates a new enum class.
+
+ This method is used both when an enum class is given a value to match
+ to an enumeration member (i.e. Color(3)) and for the functional API
+ (i.e. Color = Enum('Color', names='red green blue')).
+
+ When used for the functional API: `module`, if set, will be stored in
+ the new class' __module__ attribute; `type`, if set, will be mixed in
+ as the first base class.
+
+ Note: if `module` is not set this routine will attempt to discover the
+ calling module by walking the frame stack; if this is unsuccessful
+ the resulting class will not be pickleable.
+
+ """
+ if names is None: # simple value lookup
+ return cls.__new__(cls, value)
+ # otherwise, functional API: we're creating a new Enum type
+ return cls._create_(value, names, module=module, type=type)
+
+ def __contains__(cls, member):
+ return isinstance(member, cls) and member.name in cls._member_map_
+
+ def __delattr__(cls, attr):
+ # nicer error message when someone tries to delete an attribute
+ # (see issue19025).
+ if attr in cls._member_map_:
+ raise AttributeError(
+ "%s: cannot delete Enum member." % cls.__name__)
+ super(EnumMeta, cls).__delattr__(attr)
+
+ def __dir__(self):
+ return (['__class__', '__doc__', '__members__', '__module__'] +
+ self._member_names_)
+
+ @property
+ def __members__(cls):
+ """Returns a mapping of member name->value.
+
+ This mapping lists all enum members, including aliases. Note that this
+ is a copy of the internal mapping.
+
+ """
+ return cls._member_map_.copy()
+
+ def __getattr__(cls, name):
+ """Return the enum member matching `name`
+
+ We use __getattr__ instead of descriptors or inserting into the enum
+ class' __dict__ in order to support `name` and `value` being both
+ properties for enum members (which live in the class' __dict__) and
+ enum members themselves.
+
+ """
+ if _is_dunder(name):
+ raise AttributeError(name)
+ try:
+ return cls._member_map_[name]
+ except KeyError:
+ raise AttributeError(name)
+
+ def __getitem__(cls, name):
+ return cls._member_map_[name]
+
+ def __iter__(cls):
+ return (cls._member_map_[name] for name in cls._member_names_)
+
+ def __reversed__(cls):
+ return (cls._member_map_[name] for name in reversed(cls._member_names_))
+
+ def __len__(cls):
+ return len(cls._member_names_)
+
+ def __repr__(cls):
+ return "<enum %r>" % cls.__name__
+
+ def __setattr__(cls, name, value):
+ """Block attempts to reassign Enum members.
+
+ A simple assignment to the class namespace only changes one of the
+ several possible ways to get an Enum member from the Enum class,
+ resulting in an inconsistent Enumeration.
+
+ """
+ member_map = cls.__dict__.get('_member_map_', {})
+ if name in member_map:
+ raise AttributeError('Cannot reassign members.')
+ super(EnumMeta, cls).__setattr__(name, value)
+
+ def _create_(cls, class_name, names=None, module=None, type=None):
+ """Convenience method to create a new Enum class.
+
+ `names` can be:
+
+ * A string containing member names, separated either with spaces or
+ commas. Values are auto-numbered from 1.
+ * An iterable of member names. Values are auto-numbered from 1.
+ * An iterable of (member name, value) pairs.
+ * A mapping of member name -> value.
+
+ """
+ if pyver < 3.0:
+ # if class_name is unicode, attempt a conversion to ASCII
+ if isinstance(class_name, unicode):
+ try:
+ class_name = class_name.encode('ascii')
+ except UnicodeEncodeError:
+ raise TypeError('%r is not representable in ASCII' % class_name)
+ metacls = cls.__class__
+ if type is None:
+ bases = (cls, )
+ else:
+ bases = (type, cls)
+ classdict = metacls.__prepare__(class_name, bases)
+ __order__ = []
+
+ # special processing needed for names?
+ if isinstance(names, basestring):
+ names = names.replace(',', ' ').split()
+ if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
+ names = [(e, i+1) for (i, e) in enumerate(names)]
+
+ # Here, names is either an iterable of (name, value) or a mapping.
+ for item in names:
+ if isinstance(item, basestring):
+ member_name, member_value = item, names[item]
+ else:
+ member_name, member_value = item
+ classdict[member_name] = member_value
+ __order__.append(member_name)
+ # only set __order__ in classdict if name/value was not from a mapping
+ if not isinstance(item, basestring):
+ classdict['__order__'] = ' '.join(__order__)
+ enum_class = metacls.__new__(metacls, class_name, bases, classdict)
+
+ # TODO: replace the frame hack if a blessed way to know the calling
+ # module is ever developed
+ if module is None:
+ try:
+ module = _sys._getframe(2).f_globals['__name__']
+ except (AttributeError, ValueError):
+ pass
+ if module is None:
+ _make_class_unpicklable(enum_class)
+ else:
+ enum_class.__module__ = module
+
+ return enum_class
+
+ @staticmethod
+ def _get_mixins_(bases):
+ """Returns the type for creating enum members, and the first inherited
+ enum class.
+
+ bases: the tuple of bases that was given to __new__
+
+ """
+ if not bases or Enum is None:
+ return object, Enum
+
+
+ # double check that we are not subclassing a class with existing
+ # enumeration members; while we're at it, see if any other data
+ # type has been mixed in so we can use the correct __new__
+ member_type = first_enum = None
+ for base in bases:
+ if (base is not Enum and
+ issubclass(base, Enum) and
+ base._member_names_):
+ raise TypeError("Cannot extend enumerations")
+ # base is now the last base in bases
+ if not issubclass(base, Enum):
+ raise TypeError("new enumerations must be created as "
+ "`ClassName([mixin_type,] enum_type)`")
+
+ # get correct mix-in type (either mix-in type of Enum subclass, or
+ # first base if last base is Enum)
+ if not issubclass(bases[0], Enum):
+ member_type = bases[0] # first data type
+ first_enum = bases[-1] # enum type
+ else:
+ for base in bases[0].__mro__:
+ # most common: (IntEnum, int, Enum, object)
+ # possible: (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
+ # <class 'int'>, <Enum 'Enum'>,
+ # <class 'object'>)
+ if issubclass(base, Enum):
+ if first_enum is None:
+ first_enum = base
+ else:
+ if member_type is None:
+ member_type = base
+
+ return member_type, first_enum
+
+ if pyver < 3.0:
+ @staticmethod
+ def _find_new_(classdict, member_type, first_enum):
+ """Returns the __new__ to be used for creating the enum members.
+
+ classdict: the class dictionary given to __new__
+ member_type: the data type whose __new__ will be used by default
+ first_enum: enumeration to check for an overriding __new__
+
+ """
+            # now find the correct __new__, checking to see if one was defined
+ # by the user; also check earlier enum classes in case a __new__ was
+ # saved as __member_new__
+ __new__ = classdict.get('__new__', None)
+ if __new__:
+ return None, True, True # __new__, save_new, use_args
+
+ N__new__ = getattr(None, '__new__')
+ O__new__ = getattr(object, '__new__')
+ if Enum is None:
+ E__new__ = N__new__
+ else:
+ E__new__ = Enum.__dict__['__new__']
+ # check all possibles for __member_new__ before falling back to
+ # __new__
+ for method in ('__member_new__', '__new__'):
+ for possible in (member_type, first_enum):
+ try:
+ target = possible.__dict__[method]
+ except (AttributeError, KeyError):
+ target = getattr(possible, method, None)
+ if target not in [
+ None,
+ N__new__,
+ O__new__,
+ E__new__,
+ ]:
+ if method == '__member_new__':
+ classdict['__new__'] = target
+ return None, False, True
+ if isinstance(target, staticmethod):
+ target = target.__get__(member_type)
+ __new__ = target
+ break
+ if __new__ is not None:
+ break
+ else:
+ __new__ = object.__new__
+
+ # if a non-object.__new__ is used then whatever value/tuple was
+ # assigned to the enum member name will be passed to __new__ and to the
+ # new enum member's __init__
+ if __new__ is object.__new__:
+ use_args = False
+ else:
+ use_args = True
+
+ return __new__, False, use_args
+ else:
+ @staticmethod
+ def _find_new_(classdict, member_type, first_enum):
+ """Returns the __new__ to be used for creating the enum members.
+
+ classdict: the class dictionary given to __new__
+ member_type: the data type whose __new__ will be used by default
+ first_enum: enumeration to check for an overriding __new__
+
+ """
+            # now find the correct __new__, checking to see if one was defined
+ # by the user; also check earlier enum classes in case a __new__ was
+ # saved as __member_new__
+ __new__ = classdict.get('__new__', None)
+
+ # should __new__ be saved as __member_new__ later?
+ save_new = __new__ is not None
+
+ if __new__ is None:
+ # check all possibles for __member_new__ before falling back to
+ # __new__
+ for method in ('__member_new__', '__new__'):
+ for possible in (member_type, first_enum):
+ target = getattr(possible, method, None)
+ if target not in (
+ None,
+ None.__new__,
+ object.__new__,
+ Enum.__new__,
+ ):
+ __new__ = target
+ break
+ if __new__ is not None:
+ break
+ else:
+ __new__ = object.__new__
+
+ # if a non-object.__new__ is used then whatever value/tuple was
+ # assigned to the enum member name will be passed to __new__ and to the
+ # new enum member's __init__
+ if __new__ is object.__new__:
+ use_args = False
+ else:
+ use_args = True
+
+ return __new__, save_new, use_args
+
+
+########################################################
+# In order to support Python 2 and 3 with a single
+# codebase we have to create the Enum methods separately
+# and then use the `type(name, bases, dict)` method to
+# create the class.
+########################################################
+temp_enum_dict = {}
+temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n"
+
+def __new__(cls, value):
+ # all enum instances are actually created during class construction
+ # without calling this method; this method is called by the metaclass'
+ # __call__ (i.e. Color(3) ), and by pickle
+ if type(value) is cls:
+ # For lookups like Color(Color.red)
+ value = value.value
+ # by-value search for a matching enum member
+ # see if it's in the reverse mapping (for hashable values)
+ try:
+ if value in cls._value2member_map_:
+ return cls._value2member_map_[value]
+ except TypeError:
+ # not there, now do long search -- O(n) behavior
+ for member in cls._member_map_.values():
+ if member.value == value:
+ return member
+ raise ValueError("%s is not a valid %s" % (value, cls.__name__))
+temp_enum_dict['__new__'] = __new__
+del __new__
+
+def __repr__(self):
+ return "<%s.%s: %r>" % (
+ self.__class__.__name__, self._name_, self._value_)
+temp_enum_dict['__repr__'] = __repr__
+del __repr__
+
+def __str__(self):
+ return "%s.%s" % (self.__class__.__name__, self._name_)
+temp_enum_dict['__str__'] = __str__
+del __str__
+
+def __dir__(self):
+ added_behavior = [
+ m
+ for cls in self.__class__.mro()
+ for m in cls.__dict__
+ if m[0] != '_'
+ ]
+ return (['__class__', '__doc__', '__module__', ] + added_behavior)
+temp_enum_dict['__dir__'] = __dir__
+del __dir__
+
+def __format__(self, format_spec):
+ # mixed-in Enums should use the mixed-in type's __format__, otherwise
+ # we can get strange results with the Enum name showing up instead of
+ # the value
+
+ # pure Enum branch
+ if self._member_type_ is object:
+ cls = str
+ val = str(self)
+ # mix-in branch
+ else:
+ cls = self._member_type_
+ val = self.value
+ return cls.__format__(val, format_spec)
+temp_enum_dict['__format__'] = __format__
+del __format__
+
+
+####################################
+# Python versions before 2.6 use __cmp__
+
+if pyver < 2.6:
+
+ def __cmp__(self, other):
+ if type(other) is self.__class__:
+ if self is other:
+ return 0
+ return -1
+        # NotImplemented lets Python 2 fall back to its default ordering
+        return NotImplemented
+ temp_enum_dict['__cmp__'] = __cmp__
+ del __cmp__
+
+else:
+
+ def __le__(self, other):
+ raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__le__'] = __le__
+ del __le__
+
+ def __lt__(self, other):
+ raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__lt__'] = __lt__
+ del __lt__
+
+ def __ge__(self, other):
+ raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__ge__'] = __ge__
+ del __ge__
+
+ def __gt__(self, other):
+ raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__gt__'] = __gt__
+ del __gt__
+
+
+def __eq__(self, other):
+ if type(other) is self.__class__:
+ return self is other
+ return NotImplemented
+temp_enum_dict['__eq__'] = __eq__
+del __eq__
+
+def __ne__(self, other):
+ if type(other) is self.__class__:
+ return self is not other
+ return NotImplemented
+temp_enum_dict['__ne__'] = __ne__
+del __ne__
+
+def __hash__(self):
+ return hash(self._name_)
+temp_enum_dict['__hash__'] = __hash__
+del __hash__
+
+def __reduce_ex__(self, proto):
+ return self.__class__, (self._value_, )
+temp_enum_dict['__reduce_ex__'] = __reduce_ex__
+del __reduce_ex__
+
+# _RouteClassAttributeToGetattr is used to provide access to the `name`
+# and `value` properties of enum members while keeping some measure of
+# protection from modification, while still allowing for an enumeration
+# to have members named `name` and `value`. This works because enumeration
+# members are not set directly on the enum class -- __getattr__ is
+# used to look them up.
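+# For example, Color.red.name invokes the descriptor on the member, while
+# Color.name raises AttributeError inside the descriptor and falls through to
+# EnumMeta.__getattr__, which returns the member named 'name' if one exists.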
+
+@_RouteClassAttributeToGetattr
+def name(self):
+ return self._name_
+temp_enum_dict['name'] = name
+del name
+
+@_RouteClassAttributeToGetattr
+def value(self):
+ return self._value_
+temp_enum_dict['value'] = value
+del value
+
+Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
+del temp_enum_dict
+
+# Enum has now been created
+###########################
+
+class IntEnum(int, Enum):
+ """Enum where members are also (and must be) ints"""
+
+
+def unique(enumeration):
+ """Class decorator that ensures only unique members exist in an enumeration."""
+ duplicates = []
+ for name, member in enumeration.__members__.items():
+ if name != member.name:
+ duplicates.append((name, member.name))
+ if duplicates:
+ duplicate_names = ', '.join(
+ ["%s -> %s" % (alias, name) for (alias, name) in duplicates]
+ )
+ raise ValueError('duplicate names found in %r: %s' %
+ (enumeration, duplicate_names)
+ )
+ return enumeration
diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/doc/enum.rst b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/doc/enum.rst
new file mode 100755
index 00000000..0d429bfc
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/doc/enum.rst
@@ -0,0 +1,725 @@
+``enum`` --- support for enumerations
+========================================
+
+.. :synopsis: enumerations are sets of symbolic names bound to unique, constant
+ values.
+.. :moduleauthor:: Ethan Furman <ethan@stoneleaf.us>
+.. :sectionauthor:: Barry Warsaw <barry@python.org>,
+.. :sectionauthor:: Eli Bendersky <eliben@gmail.com>,
+.. :sectionauthor:: Ethan Furman <ethan@stoneleaf.us>
+
+----------------
+
+An enumeration is a set of symbolic names (members) bound to unique, constant
+values. Within an enumeration, the members can be compared by identity, and
+the enumeration itself can be iterated over.
+
+
+Module Contents
+---------------
+
+This module defines two enumeration classes that can be used to define unique
+sets of names and values: ``Enum`` and ``IntEnum``. It also defines
+one decorator, ``unique``.
+
+``Enum``
+
+Base class for creating enumerated constants. See section `Functional API`_
+for an alternate construction syntax.
+
+``IntEnum``
+
+Base class for creating enumerated constants that are also subclasses of ``int``.
+
+``unique``
+
+Enum class decorator that ensures only one name is bound to any one value.
+
+
+Creating an Enum
+----------------
+
+Enumerations are created using the ``class`` syntax, which makes them
+easy to read and write. An alternative creation method is described in
+`Functional API`_. To define an enumeration, subclass ``Enum`` as
+follows::
+
+ >>> from enum import Enum
+ >>> class Color(Enum):
+ ... red = 1
+ ... green = 2
+ ... blue = 3
+
+Note: Nomenclature
+
+ - The class ``Color`` is an *enumeration* (or *enum*)
+ - The attributes ``Color.red``, ``Color.green``, etc., are
+ *enumeration members* (or *enum members*).
+ - The enum members have *names* and *values* (the name of
+ ``Color.red`` is ``red``, the value of ``Color.blue`` is
+ ``3``, etc.)
+
+Note:
+
+ Even though we use the ``class`` syntax to create Enums, Enums
+ are not normal Python classes. See `How are Enums different?`_ for
+ more details.
+
+Enumeration members have human readable string representations::
+
+ >>> print(Color.red)
+ Color.red
+
+...while their ``repr`` has more information::
+
+ >>> print(repr(Color.red))
+ <Color.red: 1>
+
+The *type* of an enumeration member is the enumeration it belongs to::
+
+ >>> type(Color.red)
+ <enum 'Color'>
+ >>> isinstance(Color.green, Color)
+ True
+ >>>
+
+Enum members also have a property that contains just their item name::
+
+ >>> print(Color.red.name)
+ red
+
+Enumerations support iteration. In Python 3.x definition order is used; in
+Python 2.x the definition order is not available, but class attribute
+``__order__`` is supported; otherwise, value order is used::
+
+ >>> class Shake(Enum):
+ ... __order__ = 'vanilla chocolate cookies mint' # only needed in 2.x
+ ... vanilla = 7
+ ... chocolate = 4
+ ... cookies = 9
+ ... mint = 3
+ ...
+ >>> for shake in Shake:
+ ... print(shake)
+ ...
+ Shake.vanilla
+ Shake.chocolate
+ Shake.cookies
+ Shake.mint
+
+The ``__order__`` attribute is always removed, and in 3.x it is also ignored
+(order is definition order); however, in the stdlib version it will be ignored
+but not removed.
+
+Enumeration members are hashable, so they can be used in dictionaries and sets::
+
+ >>> apples = {}
+ >>> apples[Color.red] = 'red delicious'
+ >>> apples[Color.green] = 'granny smith'
+ >>> apples == {Color.red: 'red delicious', Color.green: 'granny smith'}
+ True
+
+
+Programmatic access to enumeration members and their attributes
+---------------------------------------------------------------
+
+Sometimes it's useful to access members in enumerations programmatically (i.e.
+situations where ``Color.red`` won't do because the exact color is not known
+at program-writing time). ``Enum`` allows such access::
+
+ >>> Color(1)
+ <Color.red: 1>
+ >>> Color(3)
+ <Color.blue: 3>
+
+If you want to access enum members by *name*, use item access::
+
+ >>> Color['red']
+ <Color.red: 1>
+ >>> Color['green']
+ <Color.green: 2>
+
+If you have an enum member and need its ``name`` or ``value``::
+
+ >>> member = Color.red
+ >>> member.name
+ 'red'
+ >>> member.value
+ 1
+
+
+Duplicating enum members and values
+-----------------------------------
+
+Having two enum members (or any other attribute) with the same name is invalid;
+in Python 3.x this would raise an error, but in Python 2.x the second member
+simply overwrites the first::
+
+ >>> # python 2.x
+ >>> class Shape(Enum):
+ ... square = 2
+ ... square = 3
+ ...
+ >>> Shape.square
+ <Shape.square: 3>
+
+ >>> # python 3.x
+ >>> class Shape(Enum):
+ ... square = 2
+ ... square = 3
+ Traceback (most recent call last):
+ ...
+ TypeError: Attempted to reuse key: 'square'
+
+However, two enum members are allowed to have the same value. Given two members
+A and B with the same value (and A defined first), B is an alias to A. By-value
+lookup of the value of A and B will return A. By-name lookup of B will also
+return A::
+
+ >>> class Shape(Enum):
+ ... __order__ = 'square diamond circle alias_for_square' # only needed in 2.x
+ ... square = 2
+ ... diamond = 1
+ ... circle = 3
+ ... alias_for_square = 2
+ ...
+ >>> Shape.square
+ <Shape.square: 2>
+ >>> Shape.alias_for_square
+ <Shape.square: 2>
+ >>> Shape(2)
+ <Shape.square: 2>
+
+
+Allowing aliases is not always desirable. ``unique`` can be used to ensure
+that none exist in a particular enumeration::
+
+ >>> from enum import unique
+ >>> @unique
+ ... class Mistake(Enum):
+ ... __order__ = 'one two three four' # only needed in 2.x
+ ... one = 1
+ ... two = 2
+ ... three = 3
+ ... four = 3
+ Traceback (most recent call last):
+ ...
+ ValueError: duplicate names found in <enum 'Mistake'>: four -> three
+
+Iterating over the members of an enum does not provide the aliases::
+
+ >>> list(Shape)
+ [<Shape.square: 2>, <Shape.diamond: 1>, <Shape.circle: 3>]
+
+The special attribute ``__members__`` is a dictionary mapping names to members.
+It includes all names defined in the enumeration, including the aliases::
+
+ >>> for name, member in sorted(Shape.__members__.items()):
+ ... name, member
+ ...
+ ('alias_for_square', <Shape.square: 2>)
+ ('circle', <Shape.circle: 3>)
+ ('diamond', <Shape.diamond: 1>)
+ ('square', <Shape.square: 2>)
+
+The ``__members__`` attribute can be used for detailed programmatic access to
+the enumeration members. For example, finding all the aliases::
+
+ >>> [name for name, member in Shape.__members__.items() if member.name != name]
+ ['alias_for_square']
+
+Comparisons
+-----------
+
+Enumeration members are compared by identity::
+
+ >>> Color.red is Color.red
+ True
+ >>> Color.red is Color.blue
+ False
+ >>> Color.red is not Color.blue
+ True
+
+Ordered comparisons between enumeration values are *not* supported. Enum
+members are not integers (but see `IntEnum`_ below)::
+
+ >>> Color.red < Color.blue
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ TypeError: unorderable types: Color() < Color()
+
+.. warning::
+
+ In Python 2 *everything* is ordered, even though the ordering may not
+   make sense. If you want your enumerations to have a sensible ordering,
+ check out the `OrderedEnum`_ recipe below.
+
+
+Equality comparisons are defined though::
+
+ >>> Color.blue == Color.red
+ False
+ >>> Color.blue != Color.red
+ True
+ >>> Color.blue == Color.blue
+ True
+
+Comparisons against non-enumeration values will always compare not equal
+(again, ``IntEnum`` was explicitly designed to behave differently, see
+below)::
+
+ >>> Color.blue == 2
+ False
+
+
+Allowed members and attributes of enumerations
+----------------------------------------------
+
+The examples above use integers for enumeration values. Using integers is
+short and handy (and provided by default by the `Functional API`_), but not
+strictly enforced. In the vast majority of use-cases, one doesn't care what
+the actual value of an enumeration is. But if the value *is* important,
+enumerations can have arbitrary values.
+
+Enumerations are Python classes, and can have methods and special methods as
+usual. If we have this enumeration::
+
+ >>> class Mood(Enum):
+ ... funky = 1
+ ... happy = 3
+ ...
+ ... def describe(self):
+ ... # self is the member here
+ ... return self.name, self.value
+ ...
+ ... def __str__(self):
+ ... return 'my custom str! {0}'.format(self.value)
+ ...
+ ... @classmethod
+ ... def favorite_mood(cls):
+ ... # cls here is the enumeration
+ ... return cls.happy
+
+Then::
+
+ >>> Mood.favorite_mood()
+ <Mood.happy: 3>
+ >>> Mood.happy.describe()
+ ('happy', 3)
+ >>> str(Mood.funky)
+ 'my custom str! 1'
+
+The rules for what is allowed are as follows: _sunder_ names (starting and
+ending with a single underscore) are reserved by enum and cannot be used;
+all other attributes defined within an enumeration will become members of this
+enumeration, with the exception of *__dunder__* names and descriptors (methods
+are also descriptors).
+
+Note:
+
+ If your enumeration defines ``__new__`` and/or ``__init__`` then
+ whatever value(s) were given to the enum member will be passed into
+ those methods. See `Planet`_ for an example.
+
+
+Restricted subclassing of enumerations
+--------------------------------------
+
+Subclassing an enumeration is allowed only if the enumeration does not define
+any members. So this is forbidden::
+
+ >>> class MoreColor(Color):
+ ... pink = 17
+ Traceback (most recent call last):
+ ...
+ TypeError: Cannot extend enumerations
+
+But this is allowed::
+
+ >>> class Foo(Enum):
+ ... def some_behavior(self):
+ ... pass
+ ...
+ >>> class Bar(Foo):
+ ... happy = 1
+ ... sad = 2
+ ...
+
+Allowing subclassing of enums that define members would lead to a violation of
+some important invariants of types and instances. On the other hand, it makes
+sense to allow sharing some common behavior between a group of enumerations.
+(See `OrderedEnum`_ for an example.)
+
+
+Pickling
+--------
+
+Enumerations can be pickled and unpickled::
+
+ >>> from enum.test_enum import Fruit
+ >>> from pickle import dumps, loads
+ >>> Fruit.tomato is loads(dumps(Fruit.tomato, 2))
+ True
+
+The usual restrictions for pickling apply: picklable enums must be defined in
+the top level of a module, since unpickling requires them to be importable
+from that module.
+
+Note:
+
+ With pickle protocol version 4 (introduced in Python 3.4) it is possible
+ to easily pickle enums nested in other classes.
+
+
+
+Functional API
+--------------
+
+The ``Enum`` class is callable, providing the following functional API::
+
+ >>> Animal = Enum('Animal', 'ant bee cat dog')
+ >>> Animal
+ <enum 'Animal'>
+ >>> Animal.ant
+ <Animal.ant: 1>
+ >>> Animal.ant.value
+ 1
+ >>> list(Animal)
+ [<Animal.ant: 1>, <Animal.bee: 2>, <Animal.cat: 3>, <Animal.dog: 4>]
+
+The semantics of this API resemble ``namedtuple``. The first argument
+of the call to ``Enum`` is the name of the enumeration.
+
+The second argument is the *source* of enumeration member names. It can be a
+whitespace-separated string of names, a sequence of names, a sequence of
+2-tuples with key/value pairs, or a mapping (e.g. dictionary) of names to
+values. The last two options enable assigning arbitrary values to
+enumerations; the others auto-assign increasing integers starting with 1. A
+new class derived from ``Enum`` is returned. In other words, the above
+assignment to ``Animal`` is equivalent to::
+
+ >>> class Animal(Enum):
+ ... ant = 1
+ ... bee = 2
+ ... cat = 3
+ ... dog = 4
+
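+The 2-tuple and mapping forms let you assign those arbitrary values; a small
+sketch (``Creature`` is a name used purely for illustration)::
+
+ >>> Creature = Enum('Creature', [('ant', 3), ('bee', 7)])
+ >>> Creature.ant
+ <Creature.ant: 3>
+ >>> Creature.bee.value
+ 7
+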
+Pickling enums created with the functional API can be tricky, as frame stack
+implementation details are used to try to figure out which module the
+enumeration is being created in (e.g. it will fail if you use a utility
+function in a separate module, and it also may not work on IronPython or
+Jython). The solution is to specify the module name explicitly as follows::
+
+ >>> Animal = Enum('Animal', 'ant bee cat dog', module=__name__)
+
+Derived Enumerations
+--------------------
+
+IntEnum
+^^^^^^^
+
+A variation of ``Enum`` is provided which is also a subclass of
+``int``. Members of an ``IntEnum`` can be compared to integers;
+by extension, integer enumerations of different types can also be compared
+to each other::
+
+ >>> from enum import IntEnum
+ >>> class Shape(IntEnum):
+ ... circle = 1
+ ... square = 2
+ ...
+ >>> class Request(IntEnum):
+ ... post = 1
+ ... get = 2
+ ...
+ >>> Shape == 1
+ False
+ >>> Shape.circle == 1
+ True
+ >>> Shape.circle == Request.post
+ True
+
+However, they still can't be compared to standard ``Enum`` enumerations::
+
+ >>> class Shape(IntEnum):
+ ... circle = 1
+ ... square = 2
+ ...
+ >>> class Color(Enum):
+ ... red = 1
+ ... green = 2
+ ...
+ >>> Shape.circle == Color.red
+ False
+
+``IntEnum`` values behave like integers in other ways you'd expect::
+
+ >>> int(Shape.circle)
+ 1
+ >>> ['a', 'b', 'c'][Shape.circle]
+ 'b'
+ >>> [i for i in range(Shape.square)]
+ [0, 1]
+
+For the vast majority of code, ``Enum`` is strongly recommended,
+since ``IntEnum`` breaks some semantic promises of an enumeration (by
+being comparable to integers, and thus by transitivity to other
+unrelated enumerations). It should be used only in special cases where
+there's no other choice; for example, when integer constants are
+replaced with enumerations and backwards compatibility is required with code
+that still expects integers.
+
+
+Others
+^^^^^^
+
+While ``IntEnum`` is part of the ``enum`` module, it would be very
+simple to implement independently::
+
+ class IntEnum(int, Enum):
+ pass
+
+This demonstrates how similar derived enumerations can be defined; for
+example, a ``StrEnum`` that mixes in ``str`` instead of ``int``, as sketched
+below.
+
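+A minimal sketch of such a ``StrEnum`` (``SkillLevel`` is an illustrative
+name only)::
+
+ class StrEnum(str, Enum):
+ """Enum where members are also (and must be) strings"""
+
+ class SkillLevel(StrEnum):
+ apprentice = 'knock, knock!'
+
+Because ``str`` is mixed in, ``SkillLevel.apprentice == 'knock, knock!'``
+holds, just as ``IntEnum`` members compare equal to integers.
+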
+Some rules:
+
+1. When subclassing ``Enum``, mix-in types must appear before
+ ``Enum`` itself in the sequence of bases, as in the ``IntEnum``
+ example above.
+2. While ``Enum`` can have members of any type, once you mix in an
+ additional type, all the members must have values of that type, e.g.
+ ``int`` above. This restriction does not apply to mix-ins which only
+ add methods and don't specify another data type such as ``int`` or
+ ``str``.
+3. When another data type is mixed in, the ``value`` attribute is *not the
+ same* as the enum member itself, although it is equivalent and will compare
+ equal.
+4. %-style formatting: ``%s`` and ``%r`` call ``Enum``'s ``__str__`` and
+ ``__repr__`` respectively; other codes (such as ``%i`` or ``%h`` for
+ IntEnum) treat the enum member as its mixed-in type.
+
+ Note: Prior to Python 3.4 there is a bug in ``str``'s %-formatting: ``int``
+ subclasses are printed as strings and not numbers when the ``%d``, ``%i``,
+ or ``%u`` codes are used.
+5. ``str.__format__`` (or ``format``) will use the mixed-in
+ type's ``__format__``. If the ``Enum``'s ``str`` or
+ ``repr`` is desired, use the ``!s`` or ``!r`` ``str`` format codes
+ (see the example below).
+
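+For example, rules 4 and 5 with the ``Shape`` ``IntEnum`` defined earlier::
+
+ >>> '%s' % Shape.circle
+ 'Shape.circle'
+ >>> '%d' % Shape.circle
+ '1'
+ >>> '{0}'.format(Shape.circle)
+ '1'
+ >>> '{0!s}'.format(Shape.circle)
+ 'Shape.circle'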
+
+Decorators
+----------
+
+unique
+^^^^^^
+
+A ``class`` decorator specifically for enumerations. It searches an
+enumeration's ``__members__``, gathering any aliases it finds; if any are
+found, ``ValueError`` is raised with the details::
+
+ >>> @unique
+ ... class NoDupes(Enum):
+ ... first = 'one'
+ ... second = 'two'
+ ... third = 'two'
+ Traceback (most recent call last):
+ ...
+ ValueError: duplicate names found in <enum 'NoDupes'>: third -> second
+
+
+Interesting examples
+--------------------
+
+While ``Enum`` and ``IntEnum`` are expected to cover the majority of
+use-cases, they cannot cover them all. Here are recipes for some different
+types of enumerations that can be used directly, or as examples for creating
+one's own.
+
+
+AutoNumber
+^^^^^^^^^^
+
+Avoids having to specify the value for each enumeration member::
+
+ >>> class AutoNumber(Enum):
+ ... def __new__(cls):
+ ... value = len(cls.__members__) + 1
+ ... obj = object.__new__(cls)
+ ... obj._value_ = value
+ ... return obj
+ ...
+ >>> class Color(AutoNumber):
+ ... __order__ = "red green blue" # only needed in 2.x
+ ... red = ()
+ ... green = ()
+ ... blue = ()
+ ...
+ >>> Color.green.value == 2
+ True
+
+Note:
+
+ The `__new__` method, if defined, is used during creation of the Enum
+ members; it is then replaced by Enum's `__new__` which is used after
+ class creation for lookup of existing members. Due to the way Enums are
+ supposed to behave, there is no way to customize Enum's `__new__`.
+
+
+UniqueEnum
+^^^^^^^^^^
+
+Raises an error when a duplicate member value is found, instead of silently
+creating an alias::
+
+ >>> class UniqueEnum(Enum):
+ ... def __init__(self, *args):
+ ... cls = self.__class__
+ ... if any(self.value == e.value for e in cls):
+ ... a = self.name
+ ... e = cls(self.value).name
+ ... raise ValueError(
+ ... "aliases not allowed in UniqueEnum: %r --> %r"
+ ... % (a, e))
+ ...
+ >>> class Color(UniqueEnum):
+ ... red = 1
+ ... green = 2
+ ... blue = 3
+ ... grene = 2
+ Traceback (most recent call last):
+ ...
+ ValueError: aliases not allowed in UniqueEnum: 'grene' --> 'green'
+
+
+OrderedEnum
+^^^^^^^^^^^
+
+An ordered enumeration that is not based on ``IntEnum`` and so maintains
+the normal ``Enum`` invariants (such as not being comparable to other
+enumerations)::
+
+ >>> class OrderedEnum(Enum):
+ ... def __ge__(self, other):
+ ... if self.__class__ is other.__class__:
+ ... return self._value_ >= other._value_
+ ... return NotImplemented
+ ... def __gt__(self, other):
+ ... if self.__class__ is other.__class__:
+ ... return self._value_ > other._value_
+ ... return NotImplemented
+ ... def __le__(self, other):
+ ... if self.__class__ is other.__class__:
+ ... return self._value_ <= other._value_
+ ... return NotImplemented
+ ... def __lt__(self, other):
+ ... if self.__class__ is other.__class__:
+ ... return self._value_ < other._value_
+ ... return NotImplemented
+ ...
+ >>> class Grade(OrderedEnum):
+ ... __order__ = 'A B C D F' # only needed in 2.x
+ ... A = 5
+ ... B = 4
+ ... C = 3
+ ... D = 2
+ ... F = 1
+ ...
+ >>> Grade.C < Grade.A
+ True
+
+
+Planet
+^^^^^^
+
+If ``__new__`` or ``__init__`` is defined, the value of the enum member
+will be passed to those methods::
+
+ >>> class Planet(Enum):
+ ... MERCURY = (3.303e+23, 2.4397e6)
+ ... VENUS = (4.869e+24, 6.0518e6)
+ ... EARTH = (5.976e+24, 6.37814e6)
+ ... MARS = (6.421e+23, 3.3972e6)
+ ... JUPITER = (1.9e+27, 7.1492e7)
+ ... SATURN = (5.688e+26, 6.0268e7)
+ ... URANUS = (8.686e+25, 2.5559e7)
+ ... NEPTUNE = (1.024e+26, 2.4746e7)
+ ... def __init__(self, mass, radius):
+ ... self.mass = mass # in kilograms
+ ... self.radius = radius # in meters
+ ... @property
+ ... def surface_gravity(self):
+ ... # universal gravitational constant (m3 kg-1 s-2)
+ ... G = 6.67300E-11
+ ... return G * self.mass / (self.radius * self.radius)
+ ...
+ >>> Planet.EARTH.value
+ (5.976e+24, 6378140.0)
+ >>> Planet.EARTH.surface_gravity
+ 9.802652743337129
+
+
+How are Enums different?
+------------------------
+
+Enums have a custom metaclass that affects many aspects of both derived Enum
+classes and their instances (members).
+
+
+Enum Classes
+^^^^^^^^^^^^
+
+The ``EnumMeta`` metaclass is responsible for providing the
+``__contains__``, ``__dir__``, ``__iter__`` and other methods that
+allow one to do things with an ``Enum`` class that fail on a typical
+class, such as ``list(Color)`` or ``some_var in Color``. ``EnumMeta`` is
+responsible for ensuring that various other methods on the final ``Enum``
+class are correct (such as ``__new__``, ``__getnewargs__``,
+``__str__`` and ``__repr__``).
+
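+For example, using the three-member ``Color`` enumeration from the start of
+this document::
+
+ >>> list(Color)
+ [<Color.red: 1>, <Color.green: 2>, <Color.blue: 3>]
+ >>> Color.green in Color
+ True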
+
+Enum Members (aka instances)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The most interesting thing about Enum members is that they are singletons.
+``EnumMeta`` creates them all while it is creating the ``Enum``
+class itself, and then puts a custom ``__new__`` in place to ensure
+that no new ones are ever instantiated by returning only the existing
+member instances.
+
+
+Finer Points
+^^^^^^^^^^^^
+
+Enum members are instances of an Enum class, and even though they are
+accessible as ``EnumClass.member``, they are not accessible directly from
+the member::
+
+ >>> Color.red
+ <Color.red: 1>
+ >>> Color.red.blue
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'Color' object has no attribute 'blue'
+
+Likewise, ``__members__`` is only available on the class.
+
+In Python 3.x ``__members__`` is always an ``OrderedDict``, with the order
+being the definition order. In Python 2.7 ``__members__`` is also an
+``OrderedDict``, but the order is the definition order only if ``__order__``
+was specified (otherwise members are sorted by value where possible). In all
+earlier Python 2.x versions ``__members__`` is a plain ``dict``, since the
+``OrderedDict`` type didn't exist yet.
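+
+For example, in Python 3.x::
+
+ >>> list(Color.__members__)
+ ['red', 'green', 'blue']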
+
+If you give your ``Enum`` subclass extra methods, like the `Planet`_
+class above, those methods will show up in a `dir` of the member,
+but not of the class::
+
+ >>> dir(Planet)
+ ['EARTH', 'JUPITER', 'MARS', 'MERCURY', 'NEPTUNE', 'SATURN', 'URANUS',
+ 'VENUS', '__class__', '__doc__', '__members__', '__module__']
+ >>> dir(Planet.EARTH)
+ ['__class__', '__doc__', '__module__', 'name', 'surface_gravity', 'value']
+
+A ``__new__`` method will only be used for the creation of the
+``Enum`` members -- after that it is replaced. This means if you wish to
+change how ``Enum`` members are looked up you either have to write a
+helper function or a ``classmethod``.
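+
+For example, a sketch of such a lookup helper (``from_name`` is an
+illustrative name, not part of the enum module)::
+
+ class Color(Enum):
+ red = 1
+ green = 2
+ blue = 3
+
+ @classmethod
+ def from_name(cls, name):
+ # EnumMeta.__getitem__ does the actual lookup
+ return cls[name]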
diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/enum.py b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/enum.py
new file mode 100755
index 00000000..6a327a8a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/enum.py
@@ -0,0 +1,790 @@
+"""Python Enumerations"""
+
+import sys as _sys
+
+__all__ = ['Enum', 'IntEnum', 'unique']
+
+version = 1, 0, 4
+
+pyver = float('%s.%s' % _sys.version_info[:2])
+
+try:
+ any
+except NameError:
+ def any(iterable):
+ for element in iterable:
+ if element:
+ return True
+ return False
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ OrderedDict = None
+
+try:
+ basestring
+except NameError:
+ # In Python 2 basestring is the ancestor of both str and unicode
+ # in Python 3 it's just str, but was missing in 3.1
+ basestring = str
+
+try:
+ unicode
+except NameError:
+ # In Python 3 unicode no longer exists (it's just str)
+ unicode = str
+
+class _RouteClassAttributeToGetattr(object):
+ """Route attribute access on a class to __getattr__.
+
+ This is a descriptor, used to define attributes that act differently when
+ accessed through an instance and through a class. Instance access remains
+ normal, but access to an attribute through a class will be routed to the
+ class's __getattr__ method; this is done by raising AttributeError.
+
+ """
+ def __init__(self, fget=None):
+ self.fget = fget
+
+ def __get__(self, instance, ownerclass=None):
+ if instance is None:
+ raise AttributeError()
+ return self.fget(instance)
+
+ def __set__(self, instance, value):
+ raise AttributeError("can't set attribute")
+
+ def __delete__(self, instance):
+ raise AttributeError("can't delete attribute")
+
+
+def _is_descriptor(obj):
+ """Returns True if obj is a descriptor, False otherwise."""
+ return (
+ hasattr(obj, '__get__') or
+ hasattr(obj, '__set__') or
+ hasattr(obj, '__delete__'))
+
+
+def _is_dunder(name):
+ """Returns True if a __dunder__ name, False otherwise."""
+ return (name[:2] == name[-2:] == '__' and
+ name[2:3] != '_' and
+ name[-3:-2] != '_' and
+ len(name) > 4)
+
+
+def _is_sunder(name):
+ """Returns True if a _sunder_ name, False otherwise."""
+ return (name[0] == name[-1] == '_' and
+ name[1:2] != '_' and
+ name[-2:-1] != '_' and
+ len(name) > 2)
+
+
+def _make_class_unpicklable(cls):
+ """Make the given class un-picklable."""
+ def _break_on_call_reduce(self, protocol=None):
+ raise TypeError('%r cannot be pickled' % self)
+ cls.__reduce_ex__ = _break_on_call_reduce
+ cls.__module__ = '<unknown>'
+
+
+class _EnumDict(dict):
+ """Track enum member order and ensure member names are not reused.
+
+ EnumMeta will use the names found in self._member_names as the
+ enumeration member names.
+
+ """
+ def __init__(self):
+ super(_EnumDict, self).__init__()
+ self._member_names = []
+
+ def __setitem__(self, key, value):
+ """Changes anything not dundered or not a descriptor.
+
+ If a descriptor is added with the same name as an enum member, the name
+ is removed from _member_names (this may leave a hole in the numerical
+ sequence of values).
+
+ If an enum member name is used twice, an error is raised; duplicate
+ values are not checked for.
+
+ Single underscore (sunder) names are reserved.
+
+ Note: in 3.x __order__ is simply discarded as an unnecessary
+ leftover from 2.x
+
+ """
+ if pyver >= 3.0 and key == '__order__':
+ return
+ if _is_sunder(key):
+ raise ValueError('_names_ are reserved for future Enum use')
+ elif _is_dunder(key):
+ pass
+ elif key in self._member_names:
+ # descriptor overwriting an enum?
+ raise TypeError('Attempted to reuse key: %r' % key)
+ elif not _is_descriptor(value):
+ if key in self:
+ # enum overwriting a descriptor?
+ raise TypeError('Key already defined as: %r' % self[key])
+ self._member_names.append(key)
+ super(_EnumDict, self).__setitem__(key, value)
+
+
+# Dummy value for Enum as EnumMeta explicitly checks for it, but of course until
+# EnumMeta finishes running the first time the Enum class doesn't exist. This
+# is also why there are checks in EnumMeta like `if Enum is not None`
+Enum = None
+
+
+class EnumMeta(type):
+ """Metaclass for Enum"""
+ @classmethod
+ def __prepare__(metacls, cls, bases):
+ return _EnumDict()
+
+ def __new__(metacls, cls, bases, classdict):
+ # an Enum class is final once enumeration items have been defined; it
+ # cannot be mixed with other types (int, float, etc.) if it has an
+ # inherited __new__ unless a new __new__ is defined (or the resulting
+ # class will fail).
+ if type(classdict) is dict:
+ original_dict = classdict
+ classdict = _EnumDict()
+ for k, v in original_dict.items():
+ classdict[k] = v
+
+ member_type, first_enum = metacls._get_mixins_(bases)
+ __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
+ first_enum)
+ # save enum items into separate mapping so they don't get baked into
+ # the new class
+ members = dict((k, classdict[k]) for k in classdict._member_names)
+ for name in classdict._member_names:
+ del classdict[name]
+
+ # py2 support for definition order
+ __order__ = classdict.get('__order__')
+ if __order__ is None:
+ if pyver < 3.0:
+ try:
+ __order__ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
+ except TypeError:
+ __order__ = [name for name in sorted(members.keys())]
+ else:
+ __order__ = classdict._member_names
+ else:
+ del classdict['__order__']
+ if pyver < 3.0:
+ __order__ = __order__.replace(',', ' ').split()
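+ # anything defined but not listed in __order__ (e.g. aliases)
+ # must still be created, so append it at the end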
+ aliases = [name for name in members if name not in __order__]
+ __order__ += aliases
+
+ # check for illegal enum names (any others?)
+ invalid_names = set(members) & set(['mro'])
+ if invalid_names:
+ raise ValueError('Invalid enum member name(s): %s' % (
+ ', '.join(invalid_names), ))
+
+ # create our new Enum type
+ enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
+ enum_class._member_names_ = [] # names in definition order
+ if OrderedDict is not None:
+ enum_class._member_map_ = OrderedDict()
+ else:
+ enum_class._member_map_ = {} # name->value map
+ enum_class._member_type_ = member_type
+
+ # Reverse value->name map for hashable values.
+ enum_class._value2member_map_ = {}
+
+ # instantiate them, checking for duplicates as we go
+ # we instantiate first instead of checking for duplicates first in case
+ # a custom __new__ is doing something funky with the values -- such as
+ # auto-numbering ;)
+ if __new__ is None:
+ __new__ = enum_class.__new__
+ for member_name in __order__:
+ value = members[member_name]
+ if not isinstance(value, tuple):
+ args = (value, )
+ else:
+ args = value
+ if member_type is tuple: # special case for tuple enums
+ args = (args, ) # wrap it one more time
+ if not use_args or not args:
+ enum_member = __new__(enum_class)
+ if not hasattr(enum_member, '_value_'):
+ enum_member._value_ = value
+ else:
+ enum_member = __new__(enum_class, *args)
+ if not hasattr(enum_member, '_value_'):
+ enum_member._value_ = member_type(*args)
+ value = enum_member._value_
+ enum_member._name_ = member_name
+ enum_member.__objclass__ = enum_class
+ enum_member.__init__(*args)
+ # If another member with the same value was already defined, the
+ # new member becomes an alias to the existing one.
+ for name, canonical_member in enum_class._member_map_.items():
+ if canonical_member.value == enum_member._value_:
+ enum_member = canonical_member
+ break
+ else:
+ # Aliases don't appear in member names (only in __members__).
+ enum_class._member_names_.append(member_name)
+ enum_class._member_map_[member_name] = enum_member
+ try:
+ # This may fail if value is not hashable. We can't add the value
+ # to the map, and by-value lookups for this value will be
+ # linear.
+ enum_class._value2member_map_[value] = enum_member
+ except TypeError:
+ pass
+
+
+ # If a custom type is mixed into the Enum, and it does not know how
+ # to pickle itself, pickle.dumps will succeed but pickle.loads will
+ # fail. Rather than have the error show up later and possibly far
+ # from the source, sabotage the pickle protocol for this class so
+ # that pickle.dumps also fails.
+ #
+ # However, if the new class implements its own __reduce_ex__, do not
+ # sabotage -- it's on them to make sure it works correctly. We use
+ # __reduce_ex__ instead of any of the others as it is preferred by
+ # pickle over __reduce__, and it handles all pickle protocols.
+ unpicklable = False
+ if '__reduce_ex__' not in classdict:
+ if member_type is not object:
+ methods = ('__getnewargs_ex__', '__getnewargs__',
+ '__reduce_ex__', '__reduce__')
+ if not any(m in member_type.__dict__ for m in methods):
+ _make_class_unpicklable(enum_class)
+ unpicklable = True
+
+
+ # double check that repr and friends are not the mixin's or various
+ # things break (such as pickle)
+ for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
+ class_method = getattr(enum_class, name)
+ obj_method = getattr(member_type, name, None)
+ enum_method = getattr(first_enum, name, None)
+ if name not in classdict and class_method is not enum_method:
+ if name == '__reduce_ex__' and unpicklable:
+ continue
+ setattr(enum_class, name, enum_method)
+
+ # method resolution and ints are not playing nice
+ # Pythons earlier than 2.6 use __cmp__
+
+ if pyver < 2.6:
+
+ if issubclass(enum_class, int):
+ setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
+
+ elif pyver < 3.0:
+
+ if issubclass(enum_class, int):
+ for method in (
+ '__le__',
+ '__lt__',
+ '__gt__',
+ '__ge__',
+ '__eq__',
+ '__ne__',
+ '__hash__',
+ ):
+ setattr(enum_class, method, getattr(int, method))
+
+ # replace any other __new__ with our own (as long as Enum is not None,
+ # anyway) -- again, this is to support pickle
+ if Enum is not None:
+ # if the user defined their own __new__, save it before it gets
+ # clobbered in case they subclass later
+ if save_new:
+ setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
+ setattr(enum_class, '__new__', Enum.__dict__['__new__'])
+ return enum_class
+
+ def __call__(cls, value, names=None, module=None, type=None):
+ """Either returns an existing member, or creates a new enum class.
+
+ This method is used both when an enum class is given a value to match
+ to an enumeration member (i.e. Color(3)) and for the functional API
+ (i.e. Color = Enum('Color', names='red green blue')).
+
+ When used for the functional API: `module`, if set, will be stored in
+ the new class' __module__ attribute; `type`, if set, will be mixed in
+ as the first base class.
+
+ Note: if `module` is not set this routine will attempt to discover the
+ calling module by walking the frame stack; if this is unsuccessful
+ the resulting class will not be pickleable.
+
+ """
+ if names is None: # simple value lookup
+ return cls.__new__(cls, value)
+ # otherwise, functional API: we're creating a new Enum type
+ return cls._create_(value, names, module=module, type=type)
+
+ def __contains__(cls, member):
+ return isinstance(member, cls) and member.name in cls._member_map_
+
+ def __delattr__(cls, attr):
+ # nicer error message when someone tries to delete an attribute
+ # (see issue19025).
+ if attr in cls._member_map_:
+ raise AttributeError(
+ "%s: cannot delete Enum member." % cls.__name__)
+ super(EnumMeta, cls).__delattr__(attr)
+
+ def __dir__(self):
+ return (['__class__', '__doc__', '__members__', '__module__'] +
+ self._member_names_)
+
+ @property
+ def __members__(cls):
+ """Returns a mapping of member name->value.
+
+ This mapping lists all enum members, including aliases. Note that this
+ is a copy of the internal mapping.
+
+ """
+ return cls._member_map_.copy()
+
+ def __getattr__(cls, name):
+ """Return the enum member matching `name`
+
+ We use __getattr__ instead of descriptors or inserting into the enum
+ class' __dict__ in order to support `name` and `value` being both
+ properties for enum members (which live in the class' __dict__) and
+ enum members themselves.
+
+ """
+ if _is_dunder(name):
+ raise AttributeError(name)
+ try:
+ return cls._member_map_[name]
+ except KeyError:
+ raise AttributeError(name)
+
+ def __getitem__(cls, name):
+ return cls._member_map_[name]
+
+ def __iter__(cls):
+ return (cls._member_map_[name] for name in cls._member_names_)
+
+ def __reversed__(cls):
+ return (cls._member_map_[name] for name in reversed(cls._member_names_))
+
+ def __len__(cls):
+ return len(cls._member_names_)
+
+ def __repr__(cls):
+ return "<enum %r>" % cls.__name__
+
+ def __setattr__(cls, name, value):
+ """Block attempts to reassign Enum members.
+
+ A simple assignment to the class namespace only changes one of the
+ several possible ways to get an Enum member from the Enum class,
+ resulting in an inconsistent Enumeration.
+
+ """
+ member_map = cls.__dict__.get('_member_map_', {})
+ if name in member_map:
+ raise AttributeError('Cannot reassign members.')
+ super(EnumMeta, cls).__setattr__(name, value)
+
+ def _create_(cls, class_name, names=None, module=None, type=None):
+ """Convenience method to create a new Enum class.
+
+ `names` can be:
+
+ * A string containing member names, separated either with spaces or
+ commas. Values are auto-numbered from 1.
+ * An iterable of member names. Values are auto-numbered from 1.
+ * An iterable of (member name, value) pairs.
+ * A mapping of member name -> value.
+
+ """
+ if pyver < 3.0:
+ # if class_name is unicode, attempt a conversion to ASCII
+ if isinstance(class_name, unicode):
+ try:
+ class_name = class_name.encode('ascii')
+ except UnicodeEncodeError:
+ raise TypeError('%r is not representable in ASCII' % class_name)
+ metacls = cls.__class__
+ if type is None:
+ bases = (cls, )
+ else:
+ bases = (type, cls)
+ classdict = metacls.__prepare__(class_name, bases)
+ __order__ = []
+
+ # special processing needed for names?
+ if isinstance(names, basestring):
+ names = names.replace(',', ' ').split()
+ if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
+ names = [(e, i+1) for (i, e) in enumerate(names)]
+
+ # Here, names is either an iterable of (name, value) or a mapping.
+ for item in names:
+ if isinstance(item, basestring):
+ member_name, member_value = item, names[item]
+ else:
+ member_name, member_value = item
+ classdict[member_name] = member_value
+ __order__.append(member_name)
+ # only set __order__ in classdict if name/value was not from a mapping
+ if not isinstance(item, basestring):
+ classdict['__order__'] = ' '.join(__order__)
+ enum_class = metacls.__new__(metacls, class_name, bases, classdict)
+
+ # TODO: replace the frame hack if a blessed way to know the calling
+ # module is ever developed
+ if module is None:
+ try:
+ module = _sys._getframe(2).f_globals['__name__']
+ except (AttributeError, ValueError):
+ pass
+ if module is None:
+ _make_class_unpicklable(enum_class)
+ else:
+ enum_class.__module__ = module
+
+ return enum_class
+
+ @staticmethod
+ def _get_mixins_(bases):
+ """Returns the type for creating enum members, and the first inherited
+ enum class.
+
+ bases: the tuple of bases that was given to __new__
+
+ """
+ if not bases or Enum is None:
+ return object, Enum
+
+
+ # double check that we are not subclassing a class with existing
+ # enumeration members; while we're at it, see if any other data
+ # type has been mixed in so we can use the correct __new__
+ member_type = first_enum = None
+ for base in bases:
+ if (base is not Enum and
+ issubclass(base, Enum) and
+ base._member_names_):
+ raise TypeError("Cannot extend enumerations")
+ # base is now the last base in bases
+ if not issubclass(base, Enum):
+ raise TypeError("new enumerations must be created as "
+ "`ClassName([mixin_type,] enum_type)`")
+
+ # get correct mix-in type (either mix-in type of Enum subclass, or
+ # first base if last base is Enum)
+ if not issubclass(bases[0], Enum):
+ member_type = bases[0] # first data type
+ first_enum = bases[-1] # enum type
+ else:
+ for base in bases[0].__mro__:
+ # most common: (IntEnum, int, Enum, object)
+ # possible: (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
+ # <class 'int'>, <Enum 'Enum'>,
+ # <class 'object'>)
+ if issubclass(base, Enum):
+ if first_enum is None:
+ first_enum = base
+ else:
+ if member_type is None:
+ member_type = base
+
+ return member_type, first_enum
+
+ if pyver < 3.0:
+ @staticmethod
+ def _find_new_(classdict, member_type, first_enum):
+ """Returns the __new__ to be used for creating the enum members.
+
+ classdict: the class dictionary given to __new__
+ member_type: the data type whose __new__ will be used by default
+ first_enum: enumeration to check for an overriding __new__
+
+ """
+ # now find the correct __new__, checking to see if one was defined
+ # by the user; also check earlier enum classes in case a __new__ was
+ # saved as __member_new__
+ __new__ = classdict.get('__new__', None)
+ if __new__:
+ return None, True, True # __new__, save_new, use_args
+
+ N__new__ = getattr(None, '__new__')
+ O__new__ = getattr(object, '__new__')
+ if Enum is None:
+ E__new__ = N__new__
+ else:
+ E__new__ = Enum.__dict__['__new__']
+ # check all possibles for __member_new__ before falling back to
+ # __new__
+ for method in ('__member_new__', '__new__'):
+ for possible in (member_type, first_enum):
+ try:
+ target = possible.__dict__[method]
+ except (AttributeError, KeyError):
+ target = getattr(possible, method, None)
+ if target not in [
+ None,
+ N__new__,
+ O__new__,
+ E__new__,
+ ]:
+ if method == '__member_new__':
+ classdict['__new__'] = target
+ return None, False, True
+ if isinstance(target, staticmethod):
+ target = target.__get__(member_type)
+ __new__ = target
+ break
+ if __new__ is not None:
+ break
+ else:
+ __new__ = object.__new__
+
+ # if a non-object.__new__ is used then whatever value/tuple was
+ # assigned to the enum member name will be passed to __new__ and to the
+ # new enum member's __init__
+ if __new__ is object.__new__:
+ use_args = False
+ else:
+ use_args = True
+
+ return __new__, False, use_args
+ else:
+ @staticmethod
+ def _find_new_(classdict, member_type, first_enum):
+ """Returns the __new__ to be used for creating the enum members.
+
+ classdict: the class dictionary given to __new__
+ member_type: the data type whose __new__ will be used by default
+ first_enum: enumeration to check for an overriding __new__
+
+ """
+ # now find the correct __new__, checking to see if one was defined
+ # by the user; also check earlier enum classes in case a __new__ was
+ # saved as __member_new__
+ __new__ = classdict.get('__new__', None)
+
+ # should __new__ be saved as __member_new__ later?
+ save_new = __new__ is not None
+
+ if __new__ is None:
+ # check all possibles for __member_new__ before falling back to
+ # __new__
+ for method in ('__member_new__', '__new__'):
+ for possible in (member_type, first_enum):
+ target = getattr(possible, method, None)
+ if target not in (
+ None,
+ None.__new__,
+ object.__new__,
+ Enum.__new__,
+ ):
+ __new__ = target
+ break
+ if __new__ is not None:
+ break
+ else:
+ __new__ = object.__new__
+
+ # if a non-object.__new__ is used then whatever value/tuple was
+ # assigned to the enum member name will be passed to __new__ and to the
+ # new enum member's __init__
+ if __new__ is object.__new__:
+ use_args = False
+ else:
+ use_args = True
+
+ return __new__, save_new, use_args
+
+
+########################################################
+# In order to support Python 2 and 3 with a single
+# codebase we have to create the Enum methods separately
+# and then use the `type(name, bases, dict)` method to
+# create the class.
+########################################################
+temp_enum_dict = {}
+temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n"
+
+def __new__(cls, value):
+ # all enum instances are actually created during class construction
+ # without calling this method; this method is called by the metaclass'
+ # __call__ (i.e. Color(3) ), and by pickle
+ if type(value) is cls:
+ # For lookups like Color(Color.red)
+ value = value.value
+ # by-value search for a matching enum member
+ # see if it's in the reverse mapping (for hashable values)
+ try:
+ if value in cls._value2member_map_:
+ return cls._value2member_map_[value]
+ except TypeError:
+ # not there, now do long search -- O(n) behavior
+ for member in cls._member_map_.values():
+ if member.value == value:
+ return member
+ raise ValueError("%s is not a valid %s" % (value, cls.__name__))
+temp_enum_dict['__new__'] = __new__
+del __new__
+
+def __repr__(self):
+ return "<%s.%s: %r>" % (
+ self.__class__.__name__, self._name_, self._value_)
+temp_enum_dict['__repr__'] = __repr__
+del __repr__
+
+def __str__(self):
+ return "%s.%s" % (self.__class__.__name__, self._name_)
+temp_enum_dict['__str__'] = __str__
+del __str__
+
+def __dir__(self):
+ added_behavior = [
+ m
+ for cls in self.__class__.mro()
+ for m in cls.__dict__
+ if m[0] != '_'
+ ]
+ return (['__class__', '__doc__', '__module__', ] + added_behavior)
+temp_enum_dict['__dir__'] = __dir__
+del __dir__
+
+def __format__(self, format_spec):
+ # mixed-in Enums should use the mixed-in type's __format__, otherwise
+ # we can get strange results with the Enum name showing up instead of
+ # the value
+
+ # pure Enum branch
+ if self._member_type_ is object:
+ cls = str
+ val = str(self)
+ # mix-in branch
+ else:
+ cls = self._member_type_
+ val = self.value
+ return cls.__format__(val, format_spec)
+temp_enum_dict['__format__'] = __format__
+del __format__
+
+
+####################################
+# Pythons earlier than 2.6 use __cmp__
+
+if pyver < 2.6:
+
+ def __cmp__(self, other):
+ if type(other) is self.__class__:
+ if self is other:
+ return 0
+ return -1
+ return NotImplemented
+ temp_enum_dict['__cmp__'] = __cmp__
+ del __cmp__
+
+else:
+
+ def __le__(self, other):
+ raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__le__'] = __le__
+ del __le__
+
+ def __lt__(self, other):
+ raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__lt__'] = __lt__
+ del __lt__
+
+ def __ge__(self, other):
+ raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__ge__'] = __ge__
+ del __ge__
+
+ def __gt__(self, other):
+ raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__gt__'] = __gt__
+ del __gt__
+
+
+def __eq__(self, other):
+ if type(other) is self.__class__:
+ return self is other
+ return NotImplemented
+temp_enum_dict['__eq__'] = __eq__
+del __eq__
+
+def __ne__(self, other):
+ if type(other) is self.__class__:
+ return self is not other
+ return NotImplemented
+temp_enum_dict['__ne__'] = __ne__
+del __ne__
+
+def __hash__(self):
+ return hash(self._name_)
+temp_enum_dict['__hash__'] = __hash__
+del __hash__
+
+def __reduce_ex__(self, proto):
+ return self.__class__, (self._value_, )
+temp_enum_dict['__reduce_ex__'] = __reduce_ex__
+del __reduce_ex__
+
+# _RouteClassAttributeToGetattr is used to provide access to the `name`
+# and `value` properties of enum members while keeping some measure of
+# protection from modification, and still allowing an enumeration
+# to have members named `name` and `value`. This works because enumeration
+# members are not set directly on the enum class -- __getattr__ is
+# used to look them up.
+
+@_RouteClassAttributeToGetattr
+def name(self):
+ return self._name_
+temp_enum_dict['name'] = name
+del name
+
+@_RouteClassAttributeToGetattr
+def value(self):
+ return self._value_
+temp_enum_dict['value'] = value
+del value
+
+Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
+del temp_enum_dict
+
+# Enum has now been created
+###########################
+
+class IntEnum(int, Enum):
+ """Enum where members are also (and must be) ints"""
+
+
+def unique(enumeration):
+ """Class decorator that ensures only unique members exist in an enumeration."""
+ duplicates = []
+ for name, member in enumeration.__members__.items():
+ if name != member.name:
+ duplicates.append((name, member.name))
+ if duplicates:
+ duplicate_names = ', '.join(
+ ["%s -> %s" % (alias, name) for (alias, name) in duplicates]
+ )
+ raise ValueError('duplicate names found in %r: %s' %
+ (enumeration, duplicate_names)
+ )
+ return enumeration
diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/test_enum.py b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/test_enum.py
new file mode 100755
index 00000000..d7a97942
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/test_enum.py
@@ -0,0 +1,1690 @@
+import enum
+import sys
+import unittest
+from enum import Enum, IntEnum, unique, EnumMeta
+from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
+
+pyver = float('%s.%s' % sys.version_info[:2])
+
+try:
+ any
+except NameError:
+ def any(iterable):
+ for element in iterable:
+ if element:
+ return True
+ return False
+
+try:
+ unicode
+except NameError:
+ unicode = str
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ OrderedDict = None
+
+# for pickle tests
+try:
+ class Stooges(Enum):
+ LARRY = 1
+ CURLY = 2
+ MOE = 3
+except Exception:
+ Stooges = sys.exc_info()[1]
+
+try:
+ class IntStooges(int, Enum):
+ LARRY = 1
+ CURLY = 2
+ MOE = 3
+except Exception:
+ IntStooges = sys.exc_info()[1]
+
+try:
+ class FloatStooges(float, Enum):
+ LARRY = 1.39
+ CURLY = 2.72
+ MOE = 3.142596
+except Exception:
+ FloatStooges = sys.exc_info()[1]
+
+# for pickle test and subclass tests
+try:
+ class StrEnum(str, Enum):
+ 'accepts only string values'
+ class Name(StrEnum):
+ BDFL = 'Guido van Rossum'
+ FLUFL = 'Barry Warsaw'
+except Exception:
+ Name = sys.exc_info()[1]
+
+try:
+ Question = Enum('Question', 'who what when where why', module=__name__)
+except Exception:
+ Question = sys.exc_info()[1]
+
+try:
+ Answer = Enum('Answer', 'him this then there because')
+except Exception:
+ Answer = sys.exc_info()[1]
+
+try:
+ Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
+except Exception:
+ Theory = sys.exc_info()[1]
+
+# for doctests
+try:
+ class Fruit(Enum):
+ tomato = 1
+ banana = 2
+ cherry = 3
+except Exception:
+ pass
+
+def test_pickle_dump_load(assertion, source, target=None,
+ protocol=(0, HIGHEST_PROTOCOL)):
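+ # round-trip `source` through every pickle protocol in the requested
+ # range, collecting failures so one error reports all broken protocols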
+ start, stop = protocol
+ failures = []
+ for protocol in range(start, stop+1):
+ try:
+ if target is None:
+ assertion(loads(dumps(source, protocol=protocol)) is source)
+ else:
+ assertion(loads(dumps(source, protocol=protocol)), target)
+ except Exception:
+ exc, tb = sys.exc_info()[1:]
+ failures.append('%2d: %s' %(protocol, exc))
+ if failures:
+ raise ValueError('Failed with protocols: %s' % ', '.join(failures))
+
+def test_pickle_exception(assertion, exception, obj,
+ protocol=(0, HIGHEST_PROTOCOL)):
+ start, stop = protocol
+ failures = []
+ for protocol in range(start, stop+1):
+ try:
+ assertion(exception, dumps, obj, protocol=protocol)
+ except Exception:
+ exc = sys.exc_info()[1]
+ failures.append('%d: %s %s' % (protocol, exc.__class__.__name__, exc))
+ if failures:
+ raise ValueError('Failed with protocols: %s' % ', '.join(failures))
+
+
+class TestHelpers(unittest.TestCase):
+ # _is_descriptor, _is_sunder, _is_dunder
+
+ def test_is_descriptor(self):
+ class foo:
+ pass
+ for attr in ('__get__','__set__','__delete__'):
+ obj = foo()
+ self.assertFalse(enum._is_descriptor(obj))
+ setattr(obj, attr, 1)
+ self.assertTrue(enum._is_descriptor(obj))
+
+ def test_is_sunder(self):
+ for s in ('_a_', '_aa_'):
+ self.assertTrue(enum._is_sunder(s))
+
+ for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
+ '__', '___', '____', '_____',):
+ self.assertFalse(enum._is_sunder(s))
+
+ def test_is_dunder(self):
+ for s in ('__a__', '__aa__'):
+ self.assertTrue(enum._is_dunder(s))
+ for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
+ '__', '___', '____', '_____',):
+ self.assertFalse(enum._is_dunder(s))
+
+
+class TestEnum(unittest.TestCase):
+ def setUp(self):
+ class Season(Enum):
+ SPRING = 1
+ SUMMER = 2
+ AUTUMN = 3
+ WINTER = 4
+ self.Season = Season
+
+ class Konstants(float, Enum):
+ E = 2.7182818
+ PI = 3.1415926
+ TAU = 2 * PI
+ self.Konstants = Konstants
+
+ class Grades(IntEnum):
+ A = 5
+ B = 4
+ C = 3
+ D = 2
+ F = 0
+ self.Grades = Grades
+
+ class Directional(str, Enum):
+ EAST = 'east'
+ WEST = 'west'
+ NORTH = 'north'
+ SOUTH = 'south'
+ self.Directional = Directional
+
+ from datetime import date
+ class Holiday(date, Enum):
+ NEW_YEAR = 2013, 1, 1
+ IDES_OF_MARCH = 2013, 3, 15
+ self.Holiday = Holiday
+
+ if pyver >= 2.6: # cannot specify custom `dir` on previous versions
+ def test_dir_on_class(self):
+ Season = self.Season
+ self.assertEqual(
+ set(dir(Season)),
+ set(['__class__', '__doc__', '__members__', '__module__',
+ 'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
+ )
+
+ def test_dir_on_item(self):
+ Season = self.Season
+ self.assertEqual(
+ set(dir(Season.WINTER)),
+ set(['__class__', '__doc__', '__module__', 'name', 'value']),
+ )
+
+ def test_dir_on_sub_with_behavior_on_super(self):
+ # see issue22506
+ class SuperEnum(Enum):
+ def invisible(self):
+ return "did you see me?"
+ class SubEnum(SuperEnum):
+ sample = 5
+ self.assertEqual(
+ set(dir(SubEnum.sample)),
+ set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
+ )
+
+ if pyver >= 2.7: # OrderedDict first available here
+ def test_members_is_ordereddict_if_ordered(self):
+ class Ordered(Enum):
+ __order__ = 'first second third'
+ first = 'bippity'
+ second = 'boppity'
+ third = 'boo'
+ self.assertTrue(type(Ordered.__members__) is OrderedDict)
+
+ def test_members_is_ordereddict_if_not_ordered(self):
+ class Unordered(Enum):
+ this = 'that'
+ these = 'those'
+ self.assertTrue(type(Unordered.__members__) is OrderedDict)
+
+ if pyver >= 3.0: # all objects are ordered in Python 2.x
+ def test_members_is_always_ordered(self):
+ class AlwaysOrdered(Enum):
+ first = 1
+ second = 2
+ third = 3
+ self.assertTrue(type(AlwaysOrdered.__members__) is OrderedDict)
+
+ def test_comparisons(self):
+ def bad_compare():
+ Season.SPRING > 4
+ Season = self.Season
+ self.assertNotEqual(Season.SPRING, 1)
+ self.assertRaises(TypeError, bad_compare)
+
+ class Part(Enum):
+ SPRING = 1
+ CLIP = 2
+ BARREL = 3
+
+ self.assertNotEqual(Season.SPRING, Part.SPRING)
+ def bad_compare():
+ Season.SPRING < Part.CLIP
+ self.assertRaises(TypeError, bad_compare)
+
+ def test_enum_in_enum_out(self):
+ Season = self.Season
+ self.assertTrue(Season(Season.WINTER) is Season.WINTER)
+
+ def test_enum_value(self):
+ Season = self.Season
+ self.assertEqual(Season.SPRING.value, 1)
+
+ def test_intenum_value(self):
+ self.assertEqual(IntStooges.CURLY.value, 2)
+
+ def test_enum(self):
+ Season = self.Season
+ lst = list(Season)
+ self.assertEqual(len(lst), len(Season))
+ self.assertEqual(len(Season), 4, Season)
+ self.assertEqual(
+ [Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
+
+ for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split()):
+ i += 1
+ e = Season(i)
+ self.assertEqual(e, getattr(Season, season))
+ self.assertEqual(e.value, i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, season)
+ self.assertTrue(e in Season)
+ self.assertTrue(type(e) is Season)
+ self.assertTrue(isinstance(e, Season))
+ self.assertEqual(str(e), 'Season.' + season)
+ self.assertEqual(
+ repr(e),
+ '<Season.%s: %s>' % (season, i),
+ )
+
+ def test_value_name(self):
+ Season = self.Season
+ self.assertEqual(Season.SPRING.name, 'SPRING')
+ self.assertEqual(Season.SPRING.value, 1)
+ def set_name(obj, new_value):
+ obj.name = new_value
+ def set_value(obj, new_value):
+ obj.value = new_value
+ self.assertRaises(AttributeError, set_name, Season.SPRING, 'invierno', )
+ self.assertRaises(AttributeError, set_value, Season.SPRING, 2)
+
+ def test_attribute_deletion(self):
+ class Season(Enum):
+ SPRING = 1
+ SUMMER = 2
+ AUTUMN = 3
+ WINTER = 4
+
+ def spam(cls):
+ pass
+
+ self.assertTrue(hasattr(Season, 'spam'))
+ del Season.spam
+ self.assertFalse(hasattr(Season, 'spam'))
+
+ self.assertRaises(AttributeError, delattr, Season, 'SPRING')
+ self.assertRaises(AttributeError, delattr, Season, 'DRY')
+ self.assertRaises(AttributeError, delattr, Season.SPRING, 'name')
+
+ def test_invalid_names(self):
+ def create_bad_class_1():
+ class Wrong(Enum):
+ mro = 9
+ def create_bad_class_2():
+ class Wrong(Enum):
+ _reserved_ = 3
+ self.assertRaises(ValueError, create_bad_class_1)
+ self.assertRaises(ValueError, create_bad_class_2)
+
+ def test_contains(self):
+ Season = self.Season
+ self.assertTrue(Season.AUTUMN in Season)
+ self.assertTrue(3 not in Season)
+
+ val = Season(3)
+ self.assertTrue(val in Season)
+
+ class OtherEnum(Enum):
+ one = 1; two = 2
+ self.assertTrue(OtherEnum.two not in Season)
+
+ if pyver >= 2.6: # when `format` came into being
+
+ def test_format_enum(self):
+ Season = self.Season
+ self.assertEqual('{0}'.format(Season.SPRING),
+ '{0}'.format(str(Season.SPRING)))
+ self.assertEqual( '{0:}'.format(Season.SPRING),
+ '{0:}'.format(str(Season.SPRING)))
+ self.assertEqual('{0:20}'.format(Season.SPRING),
+ '{0:20}'.format(str(Season.SPRING)))
+ self.assertEqual('{0:^20}'.format(Season.SPRING),
+ '{0:^20}'.format(str(Season.SPRING)))
+ self.assertEqual('{0:>20}'.format(Season.SPRING),
+ '{0:>20}'.format(str(Season.SPRING)))
+ self.assertEqual('{0:<20}'.format(Season.SPRING),
+ '{0:<20}'.format(str(Season.SPRING)))
+
+ def test_format_enum_custom(self):
+ class TestFloat(float, Enum):
+ one = 1.0
+ two = 2.0
+ def __format__(self, spec):
+ return 'TestFloat success!'
+ self.assertEqual('{0}'.format(TestFloat.one), 'TestFloat success!')
+
+ def assertFormatIsValue(self, spec, member):
+ self.assertEqual(spec.format(member), spec.format(member.value))
+
+ def test_format_enum_date(self):
+ Holiday = self.Holiday
+ self.assertFormatIsValue('{0}', Holiday.IDES_OF_MARCH)
+ self.assertFormatIsValue('{0:}', Holiday.IDES_OF_MARCH)
+ self.assertFormatIsValue('{0:20}', Holiday.IDES_OF_MARCH)
+ self.assertFormatIsValue('{0:^20}', Holiday.IDES_OF_MARCH)
+ self.assertFormatIsValue('{0:>20}', Holiday.IDES_OF_MARCH)
+ self.assertFormatIsValue('{0:<20}', Holiday.IDES_OF_MARCH)
+ self.assertFormatIsValue('{0:%Y %m}', Holiday.IDES_OF_MARCH)
+ self.assertFormatIsValue('{0:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
+
+ def test_format_enum_float(self):
+ Konstants = self.Konstants
+ self.assertFormatIsValue('{0}', Konstants.TAU)
+ self.assertFormatIsValue('{0:}', Konstants.TAU)
+ self.assertFormatIsValue('{0:20}', Konstants.TAU)
+ self.assertFormatIsValue('{0:^20}', Konstants.TAU)
+ self.assertFormatIsValue('{0:>20}', Konstants.TAU)
+ self.assertFormatIsValue('{0:<20}', Konstants.TAU)
+ self.assertFormatIsValue('{0:n}', Konstants.TAU)
+ self.assertFormatIsValue('{0:5.2}', Konstants.TAU)
+ self.assertFormatIsValue('{0:f}', Konstants.TAU)
+
+ def test_format_enum_int(self):
+ Grades = self.Grades
+ self.assertFormatIsValue('{0}', Grades.C)
+ self.assertFormatIsValue('{0:}', Grades.C)
+ self.assertFormatIsValue('{0:20}', Grades.C)
+ self.assertFormatIsValue('{0:^20}', Grades.C)
+ self.assertFormatIsValue('{0:>20}', Grades.C)
+ self.assertFormatIsValue('{0:<20}', Grades.C)
+ self.assertFormatIsValue('{0:+}', Grades.C)
+ self.assertFormatIsValue('{0:08X}', Grades.C)
+ self.assertFormatIsValue('{0:b}', Grades.C)
+
+ def test_format_enum_str(self):
+ Directional = self.Directional
+ self.assertFormatIsValue('{0}', Directional.WEST)
+ self.assertFormatIsValue('{0:}', Directional.WEST)
+ self.assertFormatIsValue('{0:20}', Directional.WEST)
+ self.assertFormatIsValue('{0:^20}', Directional.WEST)
+ self.assertFormatIsValue('{0:>20}', Directional.WEST)
+ self.assertFormatIsValue('{0:<20}', Directional.WEST)
+
+ def test_hash(self):
+ Season = self.Season
+ dates = {}
+ dates[Season.WINTER] = '1225'
+ dates[Season.SPRING] = '0315'
+ dates[Season.SUMMER] = '0704'
+ dates[Season.AUTUMN] = '1031'
+ self.assertEqual(dates[Season.AUTUMN], '1031')
+
+ def test_enum_duplicates(self):
+ __order__ = "SPRING SUMMER AUTUMN WINTER"
+ class Season(Enum):
+ SPRING = 1
+ SUMMER = 2
+ AUTUMN = FALL = 3
+ WINTER = 4
+ ANOTHER_SPRING = 1
+ lst = list(Season)
+ self.assertEqual(
+ lst,
+ [Season.SPRING, Season.SUMMER,
+ Season.AUTUMN, Season.WINTER,
+ ])
+ self.assertTrue(Season.FALL is Season.AUTUMN)
+ self.assertEqual(Season.FALL.value, 3)
+ self.assertEqual(Season.AUTUMN.value, 3)
+ self.assertTrue(Season(3) is Season.AUTUMN)
+ self.assertTrue(Season(1) is Season.SPRING)
+ self.assertEqual(Season.FALL.name, 'AUTUMN')
+ self.assertEqual(
+ set([k for k,v in Season.__members__.items() if v.name != k]),
+ set(['FALL', 'ANOTHER_SPRING']),
+ )
+
+ if pyver >= 3.0:
+ cls = vars()
+ result = {'Enum':Enum}
+ exec("""def test_duplicate_name(self):
+ with self.assertRaises(TypeError):
+ class Color(Enum):
+ red = 1
+ green = 2
+ blue = 3
+ red = 4
+
+ with self.assertRaises(TypeError):
+ class Color(Enum):
+ red = 1
+ green = 2
+ blue = 3
+ def red(self):
+ return 'red'
+
+ with self.assertRaises(TypeError):
+ class Color(Enum):
+ @property
+
+ def red(self):
+ return 'redder'
+ red = 1
+ green = 2
+ blue = 3""",
+ result)
+ cls['test_duplicate_name'] = result['test_duplicate_name']
+
+ def test_enum_with_value_name(self):
+ class Huh(Enum):
+ name = 1
+ value = 2
+ self.assertEqual(
+ list(Huh),
+ [Huh.name, Huh.value],
+ )
+ self.assertTrue(type(Huh.name) is Huh)
+ self.assertEqual(Huh.name.name, 'name')
+ self.assertEqual(Huh.name.value, 1)
+
+ def test_intenum_from_scratch(self):
+ class phy(int, Enum):
+ pi = 3
+ tau = 2 * pi
+ self.assertTrue(phy.pi < phy.tau)
+
+ def test_intenum_inherited(self):
+ class IntEnum(int, Enum):
+ pass
+ class phy(IntEnum):
+ pi = 3
+ tau = 2 * pi
+ self.assertTrue(phy.pi < phy.tau)
+
+ def test_floatenum_from_scratch(self):
+ class phy(float, Enum):
+ pi = 3.1415926
+ tau = 2 * pi
+ self.assertTrue(phy.pi < phy.tau)
+
+ def test_floatenum_inherited(self):
+ class FloatEnum(float, Enum):
+ pass
+ class phy(FloatEnum):
+ pi = 3.1415926
+ tau = 2 * pi
+ self.assertTrue(phy.pi < phy.tau)
+
+ def test_strenum_from_scratch(self):
+ class phy(str, Enum):
+ pi = 'Pi'
+ tau = 'Tau'
+ self.assertTrue(phy.pi < phy.tau)
+
+ def test_strenum_inherited(self):
+ class StrEnum(str, Enum):
+ pass
+ class phy(StrEnum):
+ pi = 'Pi'
+ tau = 'Tau'
+ self.assertTrue(phy.pi < phy.tau)
+
+ def test_intenum(self):
+ class WeekDay(IntEnum):
+ SUNDAY = 1
+ MONDAY = 2
+ TUESDAY = 3
+ WEDNESDAY = 4
+ THURSDAY = 5
+ FRIDAY = 6
+ SATURDAY = 7
+
+ self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
+ self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
+
+ lst = list(WeekDay)
+ self.assertEqual(len(lst), len(WeekDay))
+ self.assertEqual(len(WeekDay), 7)
+ target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
+ target = target.split()
+ for i, weekday in enumerate(target):
+ i += 1
+ e = WeekDay(i)
+ self.assertEqual(e, i)
+ self.assertEqual(int(e), i)
+ self.assertEqual(e.name, weekday)
+ self.assertTrue(e in WeekDay)
+ self.assertEqual(lst.index(e)+1, i)
+ self.assertTrue(0 < e < 8)
+ self.assertTrue(type(e) is WeekDay)
+ self.assertTrue(isinstance(e, int))
+ self.assertTrue(isinstance(e, Enum))
+
+ def test_intenum_duplicates(self):
+ class WeekDay(IntEnum):
+ __order__ = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
+ SUNDAY = 1
+ MONDAY = 2
+ TUESDAY = TEUSDAY = 3
+ WEDNESDAY = 4
+ THURSDAY = 5
+ FRIDAY = 6
+ SATURDAY = 7
+ self.assertTrue(WeekDay.TEUSDAY is WeekDay.TUESDAY)
+ self.assertEqual(WeekDay(3).name, 'TUESDAY')
+ self.assertEqual([k for k,v in WeekDay.__members__.items()
+ if v.name != k], ['TEUSDAY', ])
+
+ def test_pickle_enum(self):
+ if isinstance(Stooges, Exception):
+ raise Stooges
+ test_pickle_dump_load(self.assertTrue, Stooges.CURLY)
+ test_pickle_dump_load(self.assertTrue, Stooges)
+
+ def test_pickle_int(self):
+ if isinstance(IntStooges, Exception):
+ raise IntStooges
+ test_pickle_dump_load(self.assertTrue, IntStooges.CURLY)
+ test_pickle_dump_load(self.assertTrue, IntStooges)
+
+ def test_pickle_float(self):
+ if isinstance(FloatStooges, Exception):
+ raise FloatStooges
+ test_pickle_dump_load(self.assertTrue, FloatStooges.CURLY)
+ test_pickle_dump_load(self.assertTrue, FloatStooges)
+
+ def test_pickle_enum_function(self):
+ if isinstance(Answer, Exception):
+ raise Answer
+ test_pickle_dump_load(self.assertTrue, Answer.him)
+ test_pickle_dump_load(self.assertTrue, Answer)
+
+ def test_pickle_enum_function_with_module(self):
+ if isinstance(Question, Exception):
+ raise Question
+ test_pickle_dump_load(self.assertTrue, Question.who)
+ test_pickle_dump_load(self.assertTrue, Question)
+
+ if pyver >= 3.4:
+ def test_class_nested_enum_and_pickle_protocol_four(self):
+ # would normally just have this directly in the class namespace
+ class NestedEnum(Enum):
+ twigs = 'common'
+ shiny = 'rare'
+
+ self.__class__.NestedEnum = NestedEnum
+ self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
+ test_pickle_exception(
+ self.assertRaises, PicklingError, self.NestedEnum.twigs,
+ protocol=(0, 3))
+ test_pickle_dump_load(self.assertTrue, self.NestedEnum.twigs,
+ protocol=(4, HIGHEST_PROTOCOL))
+
+ def test_exploding_pickle(self):
+ BadPickle = Enum('BadPickle', 'dill sweet bread-n-butter')
+ enum._make_class_unpicklable(BadPickle)
+ globals()['BadPickle'] = BadPickle
+ test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
+ test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
+
+ def test_string_enum(self):
+ class SkillLevel(str, Enum):
+ master = 'what is the sound of one hand clapping?'
+ journeyman = 'why did the chicken cross the road?'
+ apprentice = 'knock, knock!'
+ self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
+
+ def test_getattr_getitem(self):
+ class Period(Enum):
+ morning = 1
+ noon = 2
+ evening = 3
+ night = 4
+ self.assertTrue(Period(2) is Period.noon)
+ self.assertTrue(getattr(Period, 'night') is Period.night)
+ self.assertTrue(Period['morning'] is Period.morning)
+
+ def test_getattr_dunder(self):
+ Season = self.Season
+ self.assertTrue(getattr(Season, '__hash__'))
+
+ def test_iteration_order(self):
+ class Season(Enum):
+ __order__ = 'SUMMER WINTER AUTUMN SPRING'
+ SUMMER = 2
+ WINTER = 4
+ AUTUMN = 3
+ SPRING = 1
+ self.assertEqual(
+ list(Season),
+ [Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
+ )
+
+ def test_iteration_order_with_unorderable_values(self):
+ class Complex(Enum):
+ a = complex(7, 9)
+ b = complex(3.14, 2)
+ c = complex(1, -1)
+ d = complex(-77, 32)
+ self.assertEqual(
+ list(Complex),
+ [Complex.a, Complex.b, Complex.c, Complex.d],
+ )
+
+ def test_programatic_function_string(self):
+ SummerMonth = Enum('SummerMonth', 'june july august')
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate('june july august'.split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(int(e.value), i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programatic_function_string_list(self):
+ SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate('june july august'.split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(int(e.value), i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programatic_function_iterable(self):
+ SummerMonth = Enum(
+ 'SummerMonth',
+ (('june', 1), ('july', 2), ('august', 3))
+ )
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate('june july august'.split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(int(e.value), i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programatic_function_from_dict(self):
+ SummerMonth = Enum(
+ 'SummerMonth',
+ dict((('june', 1), ('july', 2), ('august', 3)))
+ )
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ if pyver < 3.0:
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate('june july august'.split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(int(e.value), i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programatic_function_type(self):
+ SummerMonth = Enum('SummerMonth', 'june july august', type=int)
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate('june july august'.split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programatic_function_type_from_subclass(self):
+ SummerMonth = IntEnum('SummerMonth', 'june july august')
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate('june july august'.split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programmatic_function_unicode(self):
+ SummerMonth = Enum('SummerMonth', unicode('june july august'))
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate(unicode('june july august').split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(int(e.value), i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programmatic_function_unicode_list(self):
+ SummerMonth = Enum('SummerMonth', [unicode('june'), unicode('july'), unicode('august')])
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate(unicode('june july august').split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(int(e.value), i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programmatic_function_unicode_iterable(self):
+ SummerMonth = Enum(
+ 'SummerMonth',
+ ((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3))
+ )
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate(unicode('june july august').split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(int(e.value), i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programmatic_function_from_unicode_dict(self):
+ SummerMonth = Enum(
+ 'SummerMonth',
+ dict(((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3)))
+ )
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ if pyver < 3.0:
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate(unicode('june july august').split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(int(e.value), i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programmatic_function_unicode_type(self):
+ SummerMonth = Enum('SummerMonth', unicode('june july august'), type=int)
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate(unicode('june july august').split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programmatic_function_unicode_type_from_subclass(self):
+ SummerMonth = IntEnum('SummerMonth', unicode('june july august'))
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate(unicode('june july august').split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programmatic_function_unicode_class(self):
+ if pyver < 3.0:
+ class_names = unicode('SummerMonth'), 'S\xfcmm\xe9rM\xf6nth'.decode('latin1')
+ else:
+ class_names = 'SummerMonth', 'S\xfcmm\xe9rM\xf6nth'
+ for i, class_name in enumerate(class_names):
+ if pyver < 3.0 and i == 1:
+ self.assertRaises(TypeError, Enum, class_name, unicode('june july august'))
+ else:
+ SummerMonth = Enum(class_name, unicode('june july august'))
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate(unicode('june july august').split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(e.value, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_subclassing(self):
+ if isinstance(Name, Exception):
+ raise Name
+ self.assertEqual(Name.BDFL, 'Guido van Rossum')
+ self.assertEqual(Name.BDFL, Name('Guido van Rossum'))
+ self.assertTrue(Name.BDFL is getattr(Name, 'BDFL'))
+ test_pickle_dump_load(self.assertTrue, Name.BDFL)
+
+ def test_extending(self):
+ def bad_extension():
+ class Color(Enum):
+ red = 1
+ green = 2
+ blue = 3
+ class MoreColor(Color):
+ cyan = 4
+ magenta = 5
+ yellow = 6
+ self.assertRaises(TypeError, bad_extension)
+
+ def test_exclude_methods(self):
+ class whatever(Enum):
+ this = 'that'
+ these = 'those'
+ def really(self):
+ return 'no, not %s' % self.value
+ self.assertFalse(type(whatever.really) is whatever)
+ self.assertEqual(whatever.this.really(), 'no, not that')
+
+ def test_wrong_inheritance_order(self):
+ def wrong_inherit():
+ class Wrong(Enum, str):
+ NotHere = 'error before this point'
+ self.assertRaises(TypeError, wrong_inherit)
+
+ def test_intenum_transitivity(self):
+ class number(IntEnum):
+ one = 1
+ two = 2
+ three = 3
+ class numero(IntEnum):
+ uno = 1
+ dos = 2
+ tres = 3
+ self.assertEqual(number.one, numero.uno)
+ self.assertEqual(number.two, numero.dos)
+ self.assertEqual(number.three, numero.tres)
+
+ def test_introspection(self):
+ class Number(IntEnum):
+ one = 100
+ two = 200
+ self.assertTrue(Number.one._member_type_ is int)
+ self.assertTrue(Number._member_type_ is int)
+ class String(str, Enum):
+ yarn = 'soft'
+ rope = 'rough'
+ wire = 'hard'
+ self.assertTrue(String.yarn._member_type_ is str)
+ self.assertTrue(String._member_type_ is str)
+ class Plain(Enum):
+ vanilla = 'white'
+ one = 1
+ self.assertTrue(Plain.vanilla._member_type_ is object)
+ self.assertTrue(Plain._member_type_ is object)
+
+ def test_wrong_enum_in_call(self):
+ class Monochrome(Enum):
+ black = 0
+ white = 1
+ class Gender(Enum):
+ male = 0
+ female = 1
+ self.assertRaises(ValueError, Monochrome, Gender.male)
+
+ def test_wrong_enum_in_mixed_call(self):
+ class Monochrome(IntEnum):
+ black = 0
+ white = 1
+ class Gender(Enum):
+ male = 0
+ female = 1
+ self.assertRaises(ValueError, Monochrome, Gender.male)
+
+ def test_mixed_enum_in_call_1(self):
+ class Monochrome(IntEnum):
+ black = 0
+ white = 1
+ class Gender(IntEnum):
+ male = 0
+ female = 1
+ self.assertTrue(Monochrome(Gender.female) is Monochrome.white)
+
+ def test_mixed_enum_in_call_2(self):
+ class Monochrome(Enum):
+ black = 0
+ white = 1
+ class Gender(IntEnum):
+ male = 0
+ female = 1
+ self.assertTrue(Monochrome(Gender.male) is Monochrome.black)
+
+ def test_flufl_enum(self):
+ class Fluflnum(Enum):
+ def __int__(self):
+ return int(self.value)
+ class MailManOptions(Fluflnum):
+ option1 = 1
+ option2 = 2
+ option3 = 3
+ self.assertEqual(int(MailManOptions.option1), 1)
+
+ def test_no_such_enum_member(self):
+ class Color(Enum):
+ red = 1
+ green = 2
+ blue = 3
+ self.assertRaises(ValueError, Color, 4)
+ self.assertRaises(KeyError, Color.__getitem__, 'chartreuse')
+
+ def test_new_repr(self):
+ class Color(Enum):
+ red = 1
+ green = 2
+ blue = 3
+ def __repr__(self):
+ return "don't you just love shades of %s?" % self.name
+ self.assertEqual(
+ repr(Color.blue),
+ "don't you just love shades of blue?",
+ )
+
+ def test_inherited_repr(self):
+ class MyEnum(Enum):
+ def __repr__(self):
+ return "My name is %s." % self.name
+ class MyIntEnum(int, MyEnum):
+ this = 1
+ that = 2
+ theother = 3
+ self.assertEqual(repr(MyIntEnum.that), "My name is that.")
+
+ def test_multiple_mixin_mro(self):
+ class auto_enum(EnumMeta):
+ def __new__(metacls, cls, bases, classdict):
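+ # rebuild the member dict so that members defined as the empty
+ # tuple () are auto-numbered, resuming after any explicitly
+ # assigned value, before handing the result to EnumMeta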
+ original_dict = classdict
+ classdict = enum._EnumDict()
+ for k, v in original_dict.items():
+ classdict[k] = v
+ temp = type(classdict)()
+ names = set(classdict._member_names)
+ i = 0
+ for k in classdict._member_names:
+ v = classdict[k]
+ if v == ():
+ v = i
+ else:
+ i = v
+ i += 1
+ temp[k] = v
+ for k, v in classdict.items():
+ if k not in names:
+ temp[k] = v
+ return super(auto_enum, metacls).__new__(
+ metacls, cls, bases, temp)
+
+ AutoNumberedEnum = auto_enum('AutoNumberedEnum', (Enum,), {})
+
+ AutoIntEnum = auto_enum('AutoIntEnum', (IntEnum,), {})
+
+ class TestAutoNumber(AutoNumberedEnum):
+ a = ()
+ b = 3
+ c = ()
+
+ class TestAutoInt(AutoIntEnum):
+ a = ()
+ b = 3
+ c = ()
+
+ def test_subclasses_with_getnewargs(self):
+ class NamedInt(int):
+ __qualname__ = 'NamedInt' # needed for pickle protocol 4
+ def __new__(cls, *args):
+ _args = args
+ if len(args) < 1:
+ raise TypeError("name and value must be specified")
+ name, args = args[0], args[1:]
+ self = int.__new__(cls, *args)
+ self._intname = name
+ self._args = _args
+ return self
+ def __getnewargs__(self):
+ return self._args
+ @property
+ def __name__(self):
+ return self._intname
+ def __repr__(self):
+ # repr() is updated to include the name and type info
+ return "%s(%r, %s)" % (type(self).__name__,
+ self.__name__,
+ int.__repr__(self))
+ def __str__(self):
+ # str() is unchanged, even if it relies on the repr() fallback
+ base = int
+ base_str = base.__str__
+ if base_str.__objclass__ is object:
+ return base.__repr__(self)
+ return base_str(self)
+ # for simplicity, we only define one operator that
+ # propagates expressions
+ def __add__(self, other):
+ temp = int(self) + int(other)
+ if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+ return NamedInt(
+ '(%s + %s)' % (self.__name__, other.__name__),
+ temp)
+ else:
+ return temp
+
+ class NEI(NamedInt, Enum):
+ __qualname__ = 'NEI' # needed for pickle protocol 4
+ x = ('the-x', 1)
+ y = ('the-y', 2)
+
+ self.assertTrue(NEI.__new__ is Enum.__new__)
+ self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
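+ # pickling by reference requires both classes to be reachable at
+ # module level, hence the globals() assignments below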
+ globals()['NamedInt'] = NamedInt
+ globals()['NEI'] = NEI
+ NI5 = NamedInt('test', 5)
+ self.assertEqual(NI5, 5)
+ test_pickle_dump_load(self.assertTrue, NI5, 5)
+ self.assertEqual(NEI.y.value, 2)
+ test_pickle_dump_load(self.assertTrue, NEI.y)
+
+ if pyver >= 3.4:
+ def test_subclasses_with_getnewargs_ex(self):
+ class NamedInt(int):
+ __qualname__ = 'NamedInt' # needed for pickle protocol 4
+ def __new__(cls, *args):
+ _args = args
+ if len(args) < 2:
+ raise TypeError("name and value must be specified")
+ name, args = args[0], args[1:]
+ self = int.__new__(cls, *args)
+ self._intname = name
+ self._args = _args
+ return self
+ def __getnewargs_ex__(self):
+ return self._args, {}
+ @property
+ def __name__(self):
+ return self._intname
+ def __repr__(self):
+ # repr() is updated to include the name and type info
+ return "{}({!r}, {})".format(type(self).__name__,
+ self.__name__,
+ int.__repr__(self))
+ def __str__(self):
+ # str() is unchanged, even if it relies on the repr() fallback
+ base = int
+ base_str = base.__str__
+ if base_str.__objclass__ is object:
+ return base.__repr__(self)
+ return base_str(self)
+ # for simplicity, we only define one operator that
+ # propagates expressions
+ def __add__(self, other):
+ temp = int(self) + int(other)
+ if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+ return NamedInt(
+ '({0} + {1})'.format(self.__name__, other.__name__),
+ temp)
+ else:
+ return temp
+
+ class NEI(NamedInt, Enum):
+ __qualname__ = 'NEI' # needed for pickle protocol 4
+ x = ('the-x', 1)
+ y = ('the-y', 2)
+
+
+ self.assertIs(NEI.__new__, Enum.__new__)
+ self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
+ globals()['NamedInt'] = NamedInt
+ globals()['NEI'] = NEI
+ NI5 = NamedInt('test', 5)
+ self.assertEqual(NI5, 5)
+ test_pickle_dump_load(self.assertEqual, NI5, 5, protocol=(4, HIGHEST_PROTOCOL))
+ self.assertEqual(NEI.y.value, 2)
+ test_pickle_dump_load(self.assertTrue, NEI.y, protocol=(4, HIGHEST_PROTOCOL))
+
+ def test_subclasses_with_reduce(self):
+ class NamedInt(int):
+ __qualname__ = 'NamedInt' # needed for pickle protocol 4
+ def __new__(cls, *args):
+ _args = args
+ if len(args) < 1:
+ raise TypeError("name and value must be specified")
+ name, args = args[0], args[1:]
+ self = int.__new__(cls, *args)
+ self._intname = name
+ self._args = _args
+ return self
+ def __reduce__(self):
+ return self.__class__, self._args
+ @property
+ def __name__(self):
+ return self._intname
+ def __repr__(self):
+ # repr() is updated to include the name and type info
+ return "%s(%r, %s)" % (type(self).__name__,
+ self.__name__,
+ int.__repr__(self))
+ def __str__(self):
+ # str() is unchanged, even if it relies on the repr() fallback
+ base = int
+ base_str = base.__str__
+ if base_str.__objclass__ is object:
+ return base.__repr__(self)
+ return base_str(self)
+ # for simplicity, we only define one operator that
+ # propagates expressions
+ def __add__(self, other):
+ temp = int(self) + int(other)
+ if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+ return NamedInt(
+ '(%s + %s)' % (self.__name__, other.__name__),
+ temp)
+ else:
+ return temp
+
+ class NEI(NamedInt, Enum):
+ __qualname__ = 'NEI' # needed for pickle protocol 4
+ x = ('the-x', 1)
+ y = ('the-y', 2)
+
+
+ self.assertTrue(NEI.__new__ is Enum.__new__)
+ self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
+ globals()['NamedInt'] = NamedInt
+ globals()['NEI'] = NEI
+ NI5 = NamedInt('test', 5)
+ self.assertEqual(NI5, 5)
+ test_pickle_dump_load(self.assertEqual, NI5, 5)
+ self.assertEqual(NEI.y.value, 2)
+ test_pickle_dump_load(self.assertTrue, NEI.y)
+
+ def test_subclasses_with_reduce_ex(self):
+ class NamedInt(int):
+ __qualname__ = 'NamedInt' # needed for pickle protocol 4
+ def __new__(cls, *args):
+ _args = args
+ if len(args) < 1:
+ raise TypeError("name and value must be specified")
+ name, args = args[0], args[1:]
+ self = int.__new__(cls, *args)
+ self._intname = name
+ self._args = _args
+ return self
+ def __reduce_ex__(self, proto):
+ return self.__class__, self._args
+ @property
+ def __name__(self):
+ return self._intname
+ def __repr__(self):
+ # repr() is updated to include the name and type info
+ return "%s(%r, %s)" % (type(self).__name__,
+ self.__name__,
+ int.__repr__(self))
+ def __str__(self):
+ # str() is unchanged, even if it relies on the repr() fallback
+ base = int
+ base_str = base.__str__
+ if base_str.__objclass__ is object:
+ return base.__repr__(self)
+ return base_str(self)
+ # for simplicity, we only define one operator that
+ # propagates expressions
+ def __add__(self, other):
+ temp = int(self) + int(other)
+ if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+ return NamedInt(
+ '(%s + %s)' % (self.__name__, other.__name__),
+ temp)
+ else:
+ return temp
+
+ class NEI(NamedInt, Enum):
+ __qualname__ = 'NEI' # needed for pickle protocol 4
+ x = ('the-x', 1)
+ y = ('the-y', 2)
+
+
+ self.assertTrue(NEI.__new__ is Enum.__new__)
+ self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
+ globals()['NamedInt'] = NamedInt
+ globals()['NEI'] = NEI
+ NI5 = NamedInt('test', 5)
+ self.assertEqual(NI5, 5)
+ test_pickle_dump_load(self.assertEqual, NI5, 5)
+ self.assertEqual(NEI.y.value, 2)
+ test_pickle_dump_load(self.assertTrue, NEI.y)
+
+ def test_subclasses_without_direct_pickle_support(self):
+ class NamedInt(int):
+ __qualname__ = 'NamedInt'
+ def __new__(cls, *args):
+ _args = args
+ name, args = args[0], args[1:]
+ if len(args) == 0:
+ raise TypeError("name and value must be specified")
+ self = int.__new__(cls, *args)
+ self._intname = name
+ self._args = _args
+ return self
+ @property
+ def __name__(self):
+ return self._intname
+ def __repr__(self):
+ # repr() is updated to include the name and type info
+ return "%s(%r, %s)" % (type(self).__name__,
+ self.__name__,
+ int.__repr__(self))
+ def __str__(self):
+ # str() is unchanged, even if it relies on the repr() fallback
+ base = int
+ base_str = base.__str__
+ if base_str.__objclass__ is object:
+ return base.__repr__(self)
+ return base_str(self)
+ # for simplicity, we only define one operator that
+ # propagates expressions
+ def __add__(self, other):
+ temp = int(self) + int(other)
+ if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+ return NamedInt(
+ '(%s + %s)' % (self.__name__, other.__name__),
+ temp)
+ else:
+ return temp
+
+ class NEI(NamedInt, Enum):
+ __qualname__ = 'NEI'
+ x = ('the-x', 1)
+ y = ('the-y', 2)
+
+ self.assertTrue(NEI.__new__ is Enum.__new__)
+ self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
+ globals()['NamedInt'] = NamedInt
+ globals()['NEI'] = NEI
+ NI5 = NamedInt('test', 5)
+ self.assertEqual(NI5, 5)
+ self.assertEqual(NEI.y.value, 2)
+ test_pickle_exception(self.assertRaises, TypeError, NEI.x)
+ test_pickle_exception(self.assertRaises, PicklingError, NEI)
+
+ def test_subclasses_without_direct_pickle_support_using_name(self):
+ class NamedInt(int):
+ __qualname__ = 'NamedInt'
+ def __new__(cls, *args):
+ _args = args
+ name, args = args[0], args[1:]
+ if len(args) == 0:
+ raise TypeError("name and value must be specified")
+ self = int.__new__(cls, *args)
+ self._intname = name
+ self._args = _args
+ return self
+ @property
+ def __name__(self):
+ return self._intname
+ def __repr__(self):
+ # repr() is updated to include the name and type info
+ return "%s(%r, %s)" % (type(self).__name__,
+ self.__name__,
+ int.__repr__(self))
+ def __str__(self):
+ # str() is unchanged, even if it relies on the repr() fallback
+ base = int
+ base_str = base.__str__
+ if base_str.__objclass__ is object:
+ return base.__repr__(self)
+ return base_str(self)
+ # for simplicity, we only define one operator that
+ # propagates expressions
+ def __add__(self, other):
+ temp = int(self) + int(other)
+ if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+ return NamedInt(
+ '(%s + %s)' % (self.__name__, other.__name__),
+ temp)
+ else:
+ return temp
+
+ class NEI(NamedInt, Enum):
+ __qualname__ = 'NEI'
+ x = ('the-x', 1)
+ y = ('the-y', 2)
+ def __reduce_ex__(self, proto):
+ return getattr, (self.__class__, self._name_)
+
+ self.assertTrue(NEI.__new__ is Enum.__new__)
+ self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
+ globals()['NamedInt'] = NamedInt
+ globals()['NEI'] = NEI
+ NI5 = NamedInt('test', 5)
+ self.assertEqual(NI5, 5)
+ self.assertEqual(NEI.y.value, 2)
+ test_pickle_dump_load(self.assertTrue, NEI.y)
+ test_pickle_dump_load(self.assertTrue, NEI)
+
+ def test_tuple_subclass(self):
+ class SomeTuple(tuple, Enum):
+ __qualname__ = 'SomeTuple'
+ first = (1, 'for the money')
+ second = (2, 'for the show')
+ third = (3, 'for the music')
+ self.assertTrue(type(SomeTuple.first) is SomeTuple)
+ self.assertTrue(isinstance(SomeTuple.second, tuple))
+ self.assertEqual(SomeTuple.third, (3, 'for the music'))
+ globals()['SomeTuple'] = SomeTuple
+ test_pickle_dump_load(self.assertTrue, SomeTuple.first)
+
+ def test_duplicate_values_give_unique_enum_items(self):
+ class AutoNumber(Enum):
+ __order__ = 'enum_m enum_d enum_y'
+ enum_m = ()
+ enum_d = ()
+ enum_y = ()
+ def __new__(cls):
+ value = len(cls.__members__) + 1
+ obj = object.__new__(cls)
+ obj._value_ = value
+ return obj
+ def __int__(self):
+ return int(self._value_)
+ self.assertEqual(int(AutoNumber.enum_d), 2)
+ self.assertEqual(AutoNumber.enum_y.value, 3)
+ self.assertTrue(AutoNumber(1) is AutoNumber.enum_m)
+ self.assertEqual(
+ list(AutoNumber),
+ [AutoNumber.enum_m, AutoNumber.enum_d, AutoNumber.enum_y],
+ )
+
+ def test_inherited_new_from_enhanced_enum(self):
+ class AutoNumber2(Enum):
+ def __new__(cls):
+ value = len(cls.__members__) + 1
+ obj = object.__new__(cls)
+ obj._value_ = value
+ return obj
+ def __int__(self):
+ return int(self._value_)
+ class Color(AutoNumber2):
+ __order__ = 'red green blue'
+ red = ()
+ green = ()
+ blue = ()
+ self.assertEqual(len(Color), 3, "wrong number of elements: %d (should be %d)" % (len(Color), 3))
+ self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
+ if pyver >= 3.0:
+ self.assertEqual(list(map(int, Color)), [1, 2, 3])
+
+ def test_inherited_new_from_mixed_enum(self):
+ class AutoNumber3(IntEnum):
+ def __new__(cls):
+ value = len(cls.__members__) + 1
+ obj = int.__new__(cls, value)
+ obj._value_ = value
+ return obj
+ class Color(AutoNumber3):
+ red = ()
+ green = ()
+ blue = ()
+ self.assertEqual(len(Color), 3, "wrong number of elements: %d (should be %d)" % (len(Color), 3))
+ Color.red
+ Color.green
+ Color.blue
+
+ def test_ordered_mixin(self):
+ class OrderedEnum(Enum):
+ def __ge__(self, other):
+ if self.__class__ is other.__class__:
+ return self._value_ >= other._value_
+ return NotImplemented
+ def __gt__(self, other):
+ if self.__class__ is other.__class__:
+ return self._value_ > other._value_
+ return NotImplemented
+ def __le__(self, other):
+ if self.__class__ is other.__class__:
+ return self._value_ <= other._value_
+ return NotImplemented
+ def __lt__(self, other):
+ if self.__class__ is other.__class__:
+ return self._value_ < other._value_
+ return NotImplemented
+ class Grade(OrderedEnum):
+ __order__ = 'A B C D F'
+ A = 5
+ B = 4
+ C = 3
+ D = 2
+ F = 1
+ self.assertEqual(list(Grade), [Grade.A, Grade.B, Grade.C, Grade.D, Grade.F])
+ self.assertTrue(Grade.A > Grade.B)
+ self.assertTrue(Grade.F <= Grade.C)
+ self.assertTrue(Grade.D < Grade.A)
+ self.assertTrue(Grade.B >= Grade.B)
+
+ def test_extending2(self):
+ def bad_extension():
+ class Shade(Enum):
+ def shade(self):
+ print(self.name)
+ class Color(Shade):
+ red = 1
+ green = 2
+ blue = 3
+ class MoreColor(Color):
+ cyan = 4
+ magenta = 5
+ yellow = 6
+ self.assertRaises(TypeError, bad_extension)
+
+ def test_extending3(self):
+ class Shade(Enum):
+ def shade(self):
+ return self.name
+ class Color(Shade):
+ def hex(self):
+ return '%s hexlified!' % self.value
+ class MoreColor(Color):
+ cyan = 4
+ magenta = 5
+ yellow = 6
+ self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
+
+ def test_no_duplicates(self):
+ def bad_duplicates():
+ class UniqueEnum(Enum):
+ def __init__(self, *args):
+ cls = self.__class__
+ if any(self.value == e.value for e in cls):
+ a = self.name
+ e = cls(self.value).name
+ raise ValueError(
+ "aliases not allowed in UniqueEnum: %r --> %r"
+ % (a, e)
+ )
+ class Color(UniqueEnum):
+ red = 1
+ green = 2
+ blue = 3
+ class Color(UniqueEnum):
+ red = 1
+ green = 2
+ blue = 3
+ grene = 2
+ self.assertRaises(ValueError, bad_duplicates)
+
+ def test_reversed(self):
+ self.assertEqual(
+ list(reversed(self.Season)),
+ [self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
+ self.Season.SPRING]
+ )
+
+ def test_init(self):
+ class Planet(Enum):
+ MERCURY = (3.303e+23, 2.4397e6)
+ VENUS = (4.869e+24, 6.0518e6)
+ EARTH = (5.976e+24, 6.37814e6)
+ MARS = (6.421e+23, 3.3972e6)
+ JUPITER = (1.9e+27, 7.1492e7)
+ SATURN = (5.688e+26, 6.0268e7)
+ URANUS = (8.686e+25, 2.5559e7)
+ NEPTUNE = (1.024e+26, 2.4746e7)
+ def __init__(self, mass, radius):
+ self.mass = mass # in kilograms
+ self.radius = radius # in meters
+ @property
+ def surface_gravity(self):
+ # universal gravitational constant (m3 kg-1 s-2)
+ G = 6.67300E-11
+ return G * self.mass / (self.radius * self.radius)
+ self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
+ self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
+
+ def test_nonhash_value(self):
+ class AutoNumberInAList(Enum):
+ def __new__(cls):
+ value = [len(cls.__members__) + 1]
+ obj = object.__new__(cls)
+ obj._value_ = value
+ return obj
+ class ColorInAList(AutoNumberInAList):
+ __order__ = 'red green blue'
+ red = ()
+ green = ()
+ blue = ()
+ self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
+ self.assertEqual(ColorInAList.red.value, [1])
+ self.assertEqual(ColorInAList([1]), ColorInAList.red)
+
+ def test_conflicting_types_resolved_in_new(self):
+ class LabelledIntEnum(int, Enum):
+ def __new__(cls, *args):
+ value, label = args
+ obj = int.__new__(cls, value)
+ obj.label = label
+ obj._value_ = value
+ return obj
+
+ class LabelledList(LabelledIntEnum):
+ unprocessed = (1, "Unprocessed")
+ payment_complete = (2, "Payment Complete")
+
+ self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
+ self.assertEqual(LabelledList.unprocessed, 1)
+ self.assertEqual(LabelledList(1), LabelledList.unprocessed)
+
+class TestUnique(unittest.TestCase):
+ """2.4 doesn't allow class decorators, use function syntax."""
+
+ def test_unique_clean(self):
+ class Clean(Enum):
+ one = 1
+ two = 'dos'
+ tres = 4.0
+ unique(Clean)
+ class Cleaner(IntEnum):
+ single = 1
+ double = 2
+ triple = 3
+ unique(Cleaner)
+
+ def test_unique_dirty(self):
+ try:
+ class Dirty(Enum):
+ __order__ = 'one two tres'
+ one = 1
+ two = 'dos'
+ tres = 1
+ unique(Dirty)
+ except ValueError:
+ exc = sys.exc_info()[1]
+ message = exc.args[0]
+ self.assertTrue('tres -> one' in message)
+
+ try:
+ class Dirtier(IntEnum):
+ __order__ = 'single double triple turkey'
+ single = 1
+ double = 1
+ triple = 3
+ turkey = 3
+ unique(Dirtier)
+ except ValueError:
+ exc = sys.exc_info()[1]
+ message = exc.args[0]
+ self.assertTrue('double -> single' in message)
+ self.assertTrue('turkey -> triple' in message)
+
+
+class TestMe(unittest.TestCase):
+
+ pass
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/setup.py b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/setup.py
new file mode 100755
index 00000000..ecb4944f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/setup.py
@@ -0,0 +1,44 @@
+import os
+import sys
+from distutils.core import setup
+
+if sys.version_info[:2] < (2, 7):
+ required = ['ordereddict']
+else:
+ required = []
+
+long_desc = open('enum/doc/enum.rst').read()
+
+setup( name='enum34',
+ version='1.0.4',
+ url='https://pypi.python.org/pypi/enum34',
+ packages=['enum'],
+ package_data={
+ 'enum' : [
+ 'LICENSE',
+ 'README',
+ 'doc/enum.rst',
+ 'doc/enum.pdf',
+ 'test_enum.py',
+ ]
+ },
+ license='BSD License',
+ description='Python 3.4 Enum backported to 3.3, 3.2, 3.1, 2.7, 2.6, 2.5, and 2.4',
+ long_description=long_desc,
+ provides=['enum'],
+ install_requires=required,
+ author='Ethan Furman',
+ author_email='ethan@stoneleaf.us',
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: BSD License',
+ 'Programming Language :: Python',
+ 'Topic :: Software Development',
+ 'Programming Language :: Python :: 2.4',
+ 'Programming Language :: Python :: 2.5',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ ],
+ )
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/LICENSE.txt b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/LICENSE.txt
new file mode 100755
index 00000000..51fca54c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/LICENSE.txt
@@ -0,0 +1,11 @@
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/PKG-INFO b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/PKG-INFO
new file mode 100755
index 00000000..7082747b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: jsonrpclib
+Version: 0.1.3
+Summary: This project is an implementation of the JSON-RPC v2.0 specification (backwards-compatible) as a client library.
+Home-page: http://github.com/joshmarshall/jsonrpclib/
+Author: Josh Marshall
+Author-email: catchjosh@gmail.com
+License: http://www.apache.org/licenses/LICENSE-2.0
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/README.txt b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/README.txt
new file mode 100755
index 00000000..9d431a48
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/README.txt
@@ -0,0 +1,203 @@
+JSONRPClib
+==========
+This library is an implementation of the JSON-RPC specification.
+It supports both the original 1.0 specification, as well as the
+new (proposed) 2.0 spec, which includes batch submission, keyword
+arguments, etc.
+
+It is licensed under the Apache License, Version 2.0
+(http://www.apache.org/licenses/LICENSE-2.0.html).
+
+Communication
+-------------
+Feel free to send any questions, comments, or patches to our Google Group
+mailing list (you'll need to join to send a message):
+http://groups.google.com/group/jsonrpclib
+
+Summary
+-------
+This library implements the JSON-RPC 2.0 proposed specification in pure Python.
+It is designed to be as compatible with the syntax of xmlrpclib as possible
+(it extends where possible), so that projects using xmlrpclib could easily be
+modified to use JSON and experiment with the differences.
+
+It is backwards-compatible with the 1.0 specification, and supports all of the
+new proposed features of 2.0, including:
+
+* Batch submission (via MultiCall)
+* Keyword arguments
+* Notifications (both in a batch and 'normal')
+* Class translation using the 'jsonclass' key.
+
+I've added a "SimpleJSONRPCServer", which is intended to emulate the
+"SimpleXMLRPCServer" from the default Python distribution.
+
+Requirements
+------------
+It supports cjson, the json module built in to Python 2.6+, and
+simplejson, and looks for the parsers in that order (searching first
+for cjson, then for the "built-in" json, and then for the external
+simplejson library). One of these must be installed to use this
+library, although if you have a standard distribution of 2.6+, you
+should already have one. Keep in mind that cjson is supposed to be the
+quickest, so if you are going for full-on optimization you may want to
+pick it up.
+
+Client Usage
+------------
+
+This is (obviously) taken from a console session.
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.Server('http://localhost:8080')
+ >>> server.add(5,6)
+ 11
+ >>> print jsonrpclib.history.request
+ {"jsonrpc": "2.0", "params": [5, 6], "id": "gb3c9g37", "method": "add"}
+ >>> print jsonrpclib.history.response
+ {'jsonrpc': '2.0', 'result': 11, 'id': 'gb3c9g37'}
+ >>> server.add(x=5, y=10)
+ 15
+ >>> server._notify.add(5,6)
+ # No result returned...
+ >>> batch = jsonrpclib.MultiCall(server)
+ >>> batch.add(5, 6)
+ >>> batch.ping({'key':'value'})
+ >>> batch._notify.add(4, 30)
+ >>> results = batch()
+ >>> for result in results:
+ ...     print result
+ 11
+ {'key': 'value'}
+ # Note that there are only two responses -- this is according to spec.
+
+If you need 1.0 functionality, there are a bunch of places you can pass that
+in, although the best is just to change the value on
+jsonrpclib.config.version:
+
+ >>> import jsonrpclib
+ >>> jsonrpclib.config.version
+ 2.0
+ >>> jsonrpclib.config.version = 1.0
+ >>> server = jsonrpclib.Server('http://localhost:8080')
+ >>> server.add(7, 10)
+ 17
+ >>> print jsonrpclib.history.request
+ {"params": [7, 10], "id": "thes7tl2", "method": "add"}
+ >>> print jsonrpclib.history.response
+ {'id': 'thes7tl2', 'result': 17, 'error': None}
+ >>>
+
+The equivalent loads and dumps functions also exist, although with minor
+modifications. The dumps arguments are almost identical, but it adds three
+arguments: rpcid for the 'id' key, version to specify the JSON-RPC
+compatibility, and notify if it's a request that you want to be a
+notification.
+
+Additionally, the loads method does not return the params and method like
+xmlrpclib, but instead a.) parses for errors, raising ProtocolErrors, and
+b.) returns the entire structure of the request / response for manual parsing.
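+
+For illustration, a rough sketch of both (the rpcid value here is
+arbitrary, and the exact key ordering may differ):
+
+ >>> import jsonrpclib
+ >>> jsonrpclib.dumps([5, 6], 'add', rpcid='abc123', version=2.0)
+ '{"jsonrpc": "2.0", "params": [5, 6], "id": "abc123", "method": "add"}'
+ >>> jsonrpclib.loads('{"jsonrpc": "2.0", "result": 11, "id": "abc123"}')
+ {'jsonrpc': '2.0', 'result': 11, 'id': 'abc123'}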
+
+SimpleJSONRPCServer
+-------------------
+This is identical in usage (or should be) to the SimpleXMLRPCServer in
+the default Python install. Among the differences: it supports
+notifications, batch calls, and class translation (if left on). Note:
+the import line is slightly different from the regular
+SimpleXMLRPCServer, since the SimpleJSONRPCServer is distributed within
+the jsonrpclib library.
+
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+
+ server = SimpleJSONRPCServer(('localhost', 8080))
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+ server.serve_forever()
+
+Class Translation
+-----------------
+I've recently added "automatic" class translation support, although it is
+turned off by default. This can be devastatingly slow if improperly used, so
+the following is just a short list of things to keep in mind when using it.
+
+* Keep It (the object) Simple Stupid. (for exceptions, keep reading.)
+* Do not require init params (for exceptions, keep reading)
+* Getter properties without setters could be dangerous (read: not tested)
+
+If any of the above are issues, use the _serialize method. (see usage below)
+The server and the client must BOTH have the use_jsonclass
+configuration item turned on, and both must have access to the same
+libraries used by the objects, for this to work.
+
+If you have excessively nested arguments, it would be better to turn off the
+translation and manually invoke it on specific objects using
+jsonrpclib.jsonclass.dump / jsonrpclib.jsonclass.load (since the default
+behavior recursively goes through attributes and lists / dicts / tuples).
+
+[test_obj.py]
+
+ # This object is /very/ simple, and the system will look through the
+ # attributes and serialize what it can.
+ class TestObj(object):
+ foo = 'bar'
+
+ # This object requires __init__ params, so it uses the _serialize method
+ # and returns a tuple of init params and attribute values (the init params
+ # can be a dict or a list, but the attribute values must be a dict.)
+ class TestSerial(object):
+ foo = 'bar'
+ def __init__(self, *args):
+ self.args = args
+ def _serialize(self):
+ return (self.args, {'foo':self.foo,})
+
+[usage]
+
+ import jsonrpclib
+ import test_obj
+
+ jsonrpclib.config.use_jsonclass = True
+
+ testobj1 = test_obj.TestObj()
+ testobj2 = test_obj.TestSerial()
+ server = jsonrpclib.Server('http://localhost:8080')
+ # The 'ping' just returns whatever is sent
+ ping1 = server.ping(testobj1)
+ ping2 = server.ping(testobj2)
+ print jsonrpclib.history.request
+ # {"jsonrpc": "2.0", "params": [{"__jsonclass__": ["test_obj.TestSerial", ["foo"]]}], "id": "a0l976iv", "method": "ping"}
+ print jsonrpclib.history.response
+ # {'jsonrpc': '2.0', 'result': <test_obj.TestSerial object at 0x2744590>, 'id': 'a0l976iv'}
+
+To turn on this behaviour, just set jsonrpclib.config.use_jsonclass to True.
+If you want to use a different method for serialization, just set
+jsonrpclib.config.serialize_method to the method name. Finally, if you are
+using classes that you have defined in the implementation (as in, not a
+separate library), you'll need to add those (on BOTH the server and the
+client) using the jsonrpclib.config.classes.add() method.
+(Examples forthcoming.)
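+
+For now, a minimal sketch (the module and class names here are made
+up):
+
+ import jsonrpclib
+ from my_module import MyLocalClass # importable on BOTH sides
+
+ jsonrpclib.config.use_jsonclass = True
+ jsonrpclib.config.serialize_method = '_serialize'
+ jsonrpclib.config.classes.add(MyLocalClass)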
+
+Feedback on this "feature" is very, VERY much appreciated.
+
+Why JSON-RPC?
+-------------
+In my opinion, there are several reasons to choose JSON over XML for RPC:
+
+* Much simpler to read (I suppose this is opinion, but I know I'm right. :)
+* Size / Bandwidth - Main reason, a JSON object representation is just much smaller.
+* Parsing - JSON should be much quicker to parse than XML.
+* Easy class passing with jsonclass (when enabled)
+
+In the interest of being fair, there are also a few reasons to choose XML
+over JSON:
+
+* Your server doesn't do JSON (rather obvious)
+* Wider XML-RPC support across APIs (can we change this? :))
+* Libraries are more established, i.e. more stable (Let's change this too.)
+
+TESTS
+-----
+I've dropped almost-verbatim tests from the JSON-RPC spec 2.0 page.
+You can run them with:
+
+ python tests.py
+
+TODO
+----
+* Use HTTP error codes on SimpleJSONRPCServer
+* Test, test, test and optimize
\ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/SimpleJSONRPCServer.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/SimpleJSONRPCServer.py
new file mode 100755
index 00000000..d76da73e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/SimpleJSONRPCServer.py
@@ -0,0 +1,229 @@
+import jsonrpclib
+from jsonrpclib import Fault
+from jsonrpclib.jsonrpc import USE_UNIX_SOCKETS
+import SimpleXMLRPCServer
+import SocketServer
+import socket
+import logging
+import os
+import types
+import traceback
+import sys
+try:
+ import fcntl
+except ImportError:
+ # For Windows
+ fcntl = None
+
+def get_version(request):
+ # must be a dict
+ if 'jsonrpc' in request.keys():
+ return 2.0
+ if 'id' in request.keys():
+ return 1.0
+ return None
+
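+# A well-formed 2.0 request object looks roughly like
+# {"jsonrpc": "2.0", "method": "add", "params": [5, 6], "id": "abc123"},
+# while a 1.0 request carries an "id" but no "jsonrpc" key.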
+def validate_request(request):
+ if type(request) is not types.DictType:
+ fault = Fault(
+ -32600, 'Request must be {}, not %s.' % type(request)
+ )
+ return fault
+ rpcid = request.get('id', None)
+ version = get_version(request)
+ if not version:
+ fault = Fault(-32600, 'Request %s invalid.' % request, rpcid=rpcid)
+ return fault
+ request.setdefault('params', [])
+ method = request.get('method', None)
+ params = request.get('params')
+ param_types = (types.ListType, types.DictType, types.TupleType)
+ if not method or type(method) not in types.StringTypes or \
+ type(params) not in param_types:
+ fault = Fault(
+ -32600, 'Invalid request parameters or method.', rpcid=rpcid
+ )
+ return fault
+ return True
+
+class SimpleJSONRPCDispatcher(SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
+
+ def __init__(self, encoding=None):
+ SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self,
+ allow_none=True,
+ encoding=encoding)
+
+ def _marshaled_dispatch(self, data, dispatch_method = None):
+ response = None
+ try:
+ request = jsonrpclib.loads(data)
+ except Exception, e:
+ fault = Fault(-32700, 'Request %s invalid. (%s)' % (data, e))
+ response = fault.response()
+ return response
+ if not request:
+ fault = Fault(-32600, 'Request invalid -- no request data.')
+ return fault.response()
+ if type(request) is types.ListType:
+ # This SHOULD be a batch, by spec
+ responses = []
+ for req_entry in request:
+ result = validate_request(req_entry)
+ if type(result) is Fault:
+ responses.append(result.response())
+ continue
+ resp_entry = self._marshaled_single_dispatch(req_entry)
+ if resp_entry is not None:
+ responses.append(resp_entry)
+ if len(responses) > 0:
+ response = '[%s]' % ','.join(responses)
+ else:
+ response = ''
+ else:
+ result = validate_request(request)
+ if type(result) is Fault:
+ return result.response()
+ response = self._marshaled_single_dispatch(request)
+ return response
+
+ def _marshaled_single_dispatch(self, request):
+ # TODO - Use multiprocessing and skip the response if
+ # it is a notification
+ # Put in support for custom dispatcher here
+ # (See SimpleXMLRPCServer._marshaled_dispatch)
+ method = request.get('method')
+ params = request.get('params')
+ try:
+ response = self._dispatch(method, params)
+ except:
+ exc_type, exc_value, exc_tb = sys.exc_info()
+ fault = Fault(-32603, '%s:%s' % (exc_type, exc_value))
+ return fault.response()
+ if 'id' not in request.keys() or request['id'] == None:
+ # It's a notification
+ return None
+ try:
+ response = jsonrpclib.dumps(response,
+ methodresponse=True,
+ rpcid=request['id']
+ )
+ return response
+ except:
+ exc_type, exc_value, exc_tb = sys.exc_info()
+ fault = Fault(-32603, '%s:%s' % (exc_type, exc_value))
+ return fault.response()
+
+ def _dispatch(self, method, params):
+ func = None
+ try:
+ func = self.funcs[method]
+ except KeyError:
+ if self.instance is not None:
+ if hasattr(self.instance, '_dispatch'):
+ return self.instance._dispatch(method, params)
+ else:
+ try:
+ func = SimpleXMLRPCServer.resolve_dotted_attribute(
+ self.instance,
+ method,
+ True
+ )
+ except AttributeError:
+ pass
+ if func is not None:
+ try:
+ if type(params) is types.ListType:
+ response = func(*params)
+ else:
+ response = func(**params)
+ return response
+ except TypeError:
+ return Fault(-32602, 'Invalid parameters.')
+ except:
+ err_lines = traceback.format_exc().splitlines()
+ trace_string = '%s | %s' % (err_lines[-3], err_lines[-1])
+ fault = jsonrpclib.Fault(-32603, 'Server error: %s' %
+ trace_string)
+ return fault
+ else:
+ return Fault(-32601, 'Method %s not supported.' % method)
+
+class SimpleJSONRPCRequestHandler(
+ SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
+
+ def do_POST(self):
+ if not self.is_rpc_path_valid():
+ self.report_404()
+ return
+ try:
+ max_chunk_size = 10*1024*1024
+ size_remaining = int(self.headers["content-length"])
+ L = []
+ while size_remaining:
+ chunk_size = min(size_remaining, max_chunk_size)
+ L.append(self.rfile.read(chunk_size))
+ size_remaining -= len(L[-1])
+ data = ''.join(L)
+ response = self.server._marshaled_dispatch(data)
+ self.send_response(200)
+ except Exception, e:
+ self.send_response(500)
+ err_lines = traceback.format_exc().splitlines()
+ trace_string = '%s | %s' % (err_lines[-3], err_lines[-1])
+ fault = jsonrpclib.Fault(-32603, 'Server error: %s' % trace_string)
+ response = fault.response()
+ if response == None:
+ response = ''
+ self.send_header("Content-type", "application/json-rpc")
+ self.send_header("Content-length", str(len(response)))
+ self.end_headers()
+ self.wfile.write(response)
+ self.wfile.flush()
+ self.connection.shutdown(1)
+
+class SimpleJSONRPCServer(SocketServer.TCPServer, SimpleJSONRPCDispatcher):
+
+ allow_reuse_address = True
+
+ def __init__(self, addr, requestHandler=SimpleJSONRPCRequestHandler,
+ logRequests=True, encoding=None, bind_and_activate=True,
+ address_family=socket.AF_INET):
+ self.logRequests = logRequests
+ SimpleJSONRPCDispatcher.__init__(self, encoding)
+ # TCPServer.__init__ has an extra parameter on 2.6+, so
+ # check Python version and decide on how to call it
+ vi = sys.version_info
+ self.address_family = address_family
+ if USE_UNIX_SOCKETS and address_family == socket.AF_UNIX:
+ # Unix sockets can't be bound if they already exist in the
+ # filesystem. The convention of e.g. X11 is to unlink
+ # before binding again.
+ if os.path.exists(addr):
+ try:
+ os.unlink(addr)
+ except OSError:
+ logging.warning("Could not unlink socket %s", addr)
+ # if python 2.5 and lower
+ if vi[0] < 3 and vi[1] < 6:
+ SocketServer.TCPServer.__init__(self, addr, requestHandler)
+ else:
+ SocketServer.TCPServer.__init__(self, addr, requestHandler,
+ bind_and_activate)
+ if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
+ flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
+ flags |= fcntl.FD_CLOEXEC
+ fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
+
+class CGIJSONRPCRequestHandler(SimpleJSONRPCDispatcher):
+
+ def __init__(self, encoding=None):
+ SimpleJSONRPCDispatcher.__init__(self, encoding)
+
+ def handle_jsonrpc(self, request_text):
+ response = self._marshaled_dispatch(request_text)
+ print 'Content-Type: application/json-rpc'
+ print 'Content-Length: %d' % len(response)
+ print
+ sys.stdout.write(response)
+
+ handle_xmlrpc = handle_jsonrpc
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/__init__.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/__init__.py
new file mode 100755
index 00000000..6e884b83
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/__init__.py
@@ -0,0 +1,6 @@
+from jsonrpclib.config import Config
+config = Config.instance()
+from jsonrpclib.history import History
+history = History.instance()
+from jsonrpclib.jsonrpc import Server, MultiCall, Fault
+from jsonrpclib.jsonrpc import ProtocolError, loads, dumps
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/config.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/config.py
new file mode 100755
index 00000000..4d28f1b1
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/config.py
@@ -0,0 +1,38 @@
+import sys
+
+class LocalClasses(dict):
+ def add(self, cls):
+ self[cls.__name__] = cls
+
+class Config(object):
+ """
+ This is pretty much used exclusively for the 'jsonclass'
+ functionality... set use_jsonclass to False to turn it off.
+ You can change serialize_method and ignore_attribute, or use
+ classes.add(cls) to include "local" classes.
+ """
+ use_jsonclass = True
+ # Change to False to keep __jsonclass__ entries raw.
+ serialize_method = '_serialize'
+ # The serialize_method should be a string that references the
+ # method on a custom class object which is responsible for
+ # returning a tuple of the constructor arguments and a dict of
+ # attributes.
+ ignore_attribute = '_ignore'
+ # The ignore attribute should be a string that references the
+ # attribute on a custom class object which holds strings and / or
+ # references of the attributes the class translator should ignore.
+ classes = LocalClasses()
+ # The list of classes to use for jsonclass translation.
+ version = 2.0
+ # Version of the JSON-RPC spec to support
+ user_agent = 'jsonrpclib/0.1 (Python %s)' % \
+ '.'.join([str(ver) for ver in sys.version_info[0:3]])
+ # User agent to use for calls.
+ _instance = None
+
+ @classmethod
+ def instance(cls):
+ if not cls._instance:
+ cls._instance = cls()
+ return cls._instance
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/history.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/history.py
new file mode 100755
index 00000000..d11863dc
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/history.py
@@ -0,0 +1,40 @@
+class History(object):
+ """
+ This holds all the response and request objects for a
+ session. A server using this should call "clear" after
+ each request cycle in order to keep it from clogging
+ memory.
+ """
+ requests = []
+ responses = []
+ _instance = None
+
+ @classmethod
+ def instance(cls):
+ if not cls._instance:
+ cls._instance = cls()
+ return cls._instance
+
+ def add_response(self, response_obj):
+ self.responses.append(response_obj)
+
+ def add_request(self, request_obj):
+ self.requests.append(request_obj)
+
+ @property
+ def request(self):
+ if len(self.requests) == 0:
+ return None
+ else:
+ return self.requests[-1]
+
+ @property
+ def response(self):
+ if len(self.responses) == 0:
+ return None
+ else:
+ return self.responses[-1]
+
+ def clear(self):
+ del self.requests[:]
+ del self.responses[:]
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonclass.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonclass.py
new file mode 100755
index 00000000..298c3da3
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonclass.py
@@ -0,0 +1,145 @@
+import types
+import inspect
+import re
+import traceback
+
+from jsonrpclib import config
+
+iter_types = [
+ types.DictType,
+ types.ListType,
+ types.TupleType
+]
+
+string_types = [
+ types.StringType,
+ types.UnicodeType
+]
+
+numeric_types = [
+ types.IntType,
+ types.LongType,
+ types.FloatType
+]
+
+value_types = [
+ types.BooleanType,
+ types.NoneType
+]
+
+supported_types = iter_types+string_types+numeric_types+value_types
+invalid_module_chars = r'[^a-zA-Z0-9\_\.]'
+
+class TranslationError(Exception):
+ pass
+
+def dump(obj, serialize_method=None, ignore_attribute=None, ignore=[]):
+ if not serialize_method:
+ serialize_method = config.serialize_method
+ if not ignore_attribute:
+ ignore_attribute = config.ignore_attribute
+ obj_type = type(obj)
+ # Parse / return default "types"...
+ if obj_type in numeric_types+string_types+value_types:
+ return obj
+ if obj_type in iter_types:
+ if obj_type in (types.ListType, types.TupleType):
+ new_obj = []
+ for item in obj:
+ new_obj.append(dump(item, serialize_method,
+ ignore_attribute, ignore))
+ if obj_type is types.TupleType:
+ new_obj = tuple(new_obj)
+ return new_obj
+ # It's a dict...
+ else:
+ new_obj = {}
+ for key, value in obj.iteritems():
+ new_obj[key] = dump(value, serialize_method,
+ ignore_attribute, ignore)
+ return new_obj
+ # It's not a standard type, so it needs __jsonclass__
+ module_name = inspect.getmodule(obj).__name__
+ class_name = obj.__class__.__name__
+ json_class = class_name
+ if module_name not in ['', '__main__']:
+ json_class = '%s.%s' % (module_name, json_class)
+ return_obj = {"__jsonclass__":[json_class,]}
+ # If a serialization method is defined..
+ if serialize_method in dir(obj):
+ # Params can be a dict (keyword) or list (positional)
+ # Attrs MUST be a dict.
+ serialize = getattr(obj, serialize_method)
+ params, attrs = serialize()
+ return_obj['__jsonclass__'].append(params)
+ return_obj.update(attrs)
+ return return_obj
+ # Otherwise, try to figure it out
+ # Obviously, we can't assume to know anything about the
+ # parameters passed to __init__
+ return_obj['__jsonclass__'].append([])
+ attrs = {}
+ ignore_list = getattr(obj, ignore_attribute, [])+ignore
+ for attr_name, attr_value in obj.__dict__.iteritems():
+ if type(attr_value) in supported_types and \
+ attr_name not in ignore_list and \
+ attr_value not in ignore_list:
+ attrs[attr_name] = dump(attr_value, serialize_method,
+ ignore_attribute, ignore)
+ return_obj.update(attrs)
+ return return_obj
+
+def load(obj):
+ if type(obj) in string_types+numeric_types+value_types:
+ return obj
+ if type(obj) is types.ListType:
+ return_list = []
+ for entry in obj:
+ return_list.append(load(entry))
+ return return_list
+ # Otherwise, it's a dict type
+ if '__jsonclass__' not in obj.keys():
+ return_dict = {}
+ for key, value in obj.iteritems():
+ new_value = load(value)
+ return_dict[key] = new_value
+ return return_dict
+ # It's a dict, and it's a __jsonclass__
+ orig_module_name = obj['__jsonclass__'][0]
+ params = obj['__jsonclass__'][1]
+ if orig_module_name == '':
+ raise TranslationError('Module name empty.')
+ json_module_clean = re.sub(invalid_module_chars, '', orig_module_name)
+ if json_module_clean != orig_module_name:
+ raise TranslationError('Module name %s has invalid characters.' %
+ orig_module_name)
+ json_module_parts = json_module_clean.split('.')
+ json_class = None
+ if len(json_module_parts) == 1:
+ # Local class name -- probably means it won't work
+ if json_module_parts[0] not in config.classes.keys():
+ raise TranslationError('Unknown class or module %s.' %
+ json_module_parts[0])
+ json_class = config.classes[json_module_parts[0]]
+ else:
+ json_class_name = json_module_parts.pop()
+ json_module_tree = '.'.join(json_module_parts)
+ try:
+ temp_module = __import__(json_module_tree)
+ except ImportError:
+ raise TranslationError('Could not import %s from module %s.' %
+ (json_class_name, json_module_tree))
+ json_class = getattr(temp_module, json_class_name)
+ # Creating the object...
+ new_obj = None
+ if type(params) is types.ListType:
+ new_obj = json_class(*params)
+ elif type(params) is types.DictType:
+ new_obj = json_class(**params)
+ else:
+ raise TranslationError('Constructor args must be a dict or list.')
+ for key, value in obj.iteritems():
+ if key == '__jsonclass__':
+ continue
+ setattr(new_obj, key, value)
+ return new_obj
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonrpc.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonrpc.py
new file mode 100755
index 00000000..e11939ae
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonrpc.py
@@ -0,0 +1,556 @@
+"""
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+============================
+JSONRPC Library (jsonrpclib)
+============================
+
+This library is a JSON-RPC v.2 (proposed) implementation which
+follows the xmlrpclib API for portability between clients. It
+uses the same Server / ServerProxy, loads, dumps, etc. syntax,
+while providing features not present in XML-RPC like:
+
+* Keyword arguments
+* Notifications
+* Versioning
+* Batches and batch notifications
+
+Eventually, I'll add a SimpleXMLRPCServer compatible library,
+and other things to tie the thing off nicely. :)
+
+For a quick-start, just open a console and type the following,
+replacing the server address, method, and parameters
+appropriately.
+>>> import jsonrpclib
+>>> server = jsonrpclib.Server('http://localhost:8181')
+>>> server.add(5, 6)
+11
+>>> server._notify.add(5, 6)
+>>> batch = jsonrpclib.MultiCall(server)
+>>> batch.add(3, 50)
+>>> batch.add(2, 3)
+>>> batch._notify.add(3, 5)
+>>> batch()
+[53, 5]
+
+See http://code.google.com/p/jsonrpclib/ for more info.
+"""
+
+import types
+import sys
+from xmlrpclib import Transport as XMLTransport
+from xmlrpclib import SafeTransport as XMLSafeTransport
+from xmlrpclib import ServerProxy as XMLServerProxy
+from xmlrpclib import _Method as XML_Method
+import time
+import string
+import random
+
+# Library includes
+import jsonrpclib
+from jsonrpclib import config
+from jsonrpclib import history
+
+# JSON library importing
+cjson = None
+json = None
+try:
+ import cjson
+except ImportError:
+ try:
+ import json
+ except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ raise ImportError(
+ 'You must have the cjson, json, or simplejson ' +
+ 'module(s) available.'
+ )
+
+IDCHARS = string.ascii_lowercase+string.digits
+
+class UnixSocketMissing(Exception):
+ """
+ Just a properly named Exception if Unix Sockets usage is
+ attempted on a platform that doesn't support them (Windows)
+ """
+ pass
+
+#JSON Abstractions
+
+def jdumps(obj, encoding='utf-8'):
+ # Do 'serialize' test at some point for other classes
+ global cjson
+ if cjson:
+ return cjson.encode(obj)
+ else:
+ return json.dumps(obj, encoding=encoding)
+
+def jloads(json_string):
+ global cjson
+ if cjson:
+ return cjson.decode(json_string)
+ else:
+ return json.loads(json_string)
+
+
+# XMLRPClib re-implementations
+
+class ProtocolError(Exception):
+ pass
+
+class TransportMixIn(object):
+ """ Just extends the XMLRPC transport where necessary. """
+ user_agent = config.user_agent
+ # for Python 2.7 support
+ _connection = None
+
+ def send_content(self, connection, request_body):
+ connection.putheader("Content-Type", "application/json-rpc")
+ connection.putheader("Content-Length", str(len(request_body)))
+ connection.endheaders()
+ if request_body:
+ connection.send(request_body)
+
+ def getparser(self):
+ target = JSONTarget()
+ return JSONParser(target), target
+
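+# JSONParser/JSONTarget mimic the (parser, unmarshaller) pair that
+# xmlrpclib expects Transport.getparser() to return: feed() simply
+# accumulates the raw response body, and the target's close() hands
+# the joined string back to the transport.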
+class JSONParser(object):
+ def __init__(self, target):
+ self.target = target
+
+ def feed(self, data):
+ self.target.feed(data)
+
+ def close(self):
+ pass
+
+class JSONTarget(object):
+ def __init__(self):
+ self.data = []
+
+ def feed(self, data):
+ self.data.append(data)
+
+ def close(self):
+ return ''.join(self.data)
+
+class Transport(TransportMixIn, XMLTransport):
+ pass
+
+class SafeTransport(TransportMixIn, XMLSafeTransport):
+ pass
+from httplib import HTTP, HTTPConnection
+from socket import socket
+
+USE_UNIX_SOCKETS = False
+
+try:
+ from socket import AF_UNIX, SOCK_STREAM
+ USE_UNIX_SOCKETS = True
+except ImportError:
+ pass
+
+if USE_UNIX_SOCKETS:
+
+ class UnixHTTPConnection(HTTPConnection):
+ def connect(self):
+ self.sock = socket(AF_UNIX, SOCK_STREAM)
+ self.sock.connect(self.host)
+
+ class UnixHTTP(HTTP):
+ _connection_class = UnixHTTPConnection
+
+ class UnixTransport(TransportMixIn, XMLTransport):
+ def make_connection(self, host):
+ host, extra_headers, x509 = self.get_host_info(host)
+ return UnixHTTP(host)
+
+
+class ServerProxy(XMLServerProxy):
+ """
+ Unfortunately, much more of this class has to be copied since
+ so much of it does the serialization.
+ """
+
+ def __init__(self, uri, transport=None, encoding=None,
+ verbose=0, version=None):
+ import urllib
+ if not version:
+ version = config.version
+ self.__version = version
+ schema, uri = urllib.splittype(uri)
+ if schema not in ('http', 'https', 'unix'):
+ raise IOError('Unsupported JSON-RPC protocol.')
+ if schema == 'unix':
+ if not USE_UNIX_SOCKETS:
+ # Don't like the "generic" Exception...
+ raise UnixSocketMissing("Unix sockets not available.")
+ self.__host = uri
+ self.__handler = '/'
+ else:
+ self.__host, self.__handler = urllib.splithost(uri)
+            if not self.__handler:
+                # Default the handler path, as xmlrpclib does
+                self.__handler = '/'
+ if transport is None:
+ if schema == 'unix':
+ transport = UnixTransport()
+ elif schema == 'https':
+ transport = SafeTransport()
+ else:
+ transport = Transport()
+ self.__transport = transport
+ self.__encoding = encoding
+ self.__verbose = verbose
+
+ def _request(self, methodname, params, rpcid=None):
+ request = dumps(params, methodname, encoding=self.__encoding,
+ rpcid=rpcid, version=self.__version)
+ response = self._run_request(request)
+ check_for_errors(response)
+ return response['result']
+
+ def _request_notify(self, methodname, params, rpcid=None):
+ request = dumps(params, methodname, encoding=self.__encoding,
+ rpcid=rpcid, version=self.__version, notify=True)
+ response = self._run_request(request, notify=True)
+ check_for_errors(response)
+ return
+
+ def _run_request(self, request, notify=None):
+ history.add_request(request)
+
+ response = self.__transport.request(
+ self.__host,
+ self.__handler,
+ request,
+ verbose=self.__verbose
+ )
+
+ # Here, the XMLRPC library translates a single list
+ # response to the single value -- should we do the
+ # same, and require a tuple / list to be passed to
+ # the response object, or expect the Server to be
+ # outputting the response appropriately?
+
+ history.add_response(response)
+ if not response:
+ return None
+ return_obj = loads(response)
+ return return_obj
+
+ def __getattr__(self, name):
+ # Same as original, just with new _Method reference
+ return _Method(self._request, name)
+
+ @property
+ def _notify(self):
+ # Just like __getattr__, but with notify namespace.
+ return _Notify(self._request_notify)
+
+
+class _Method(XML_Method):
+
+ def __call__(self, *args, **kwargs):
+ if len(args) > 0 and len(kwargs) > 0:
+ raise ProtocolError('Cannot use both positional ' +
+ 'and keyword arguments (according to JSON-RPC spec.)')
+ if len(args) > 0:
+ return self.__send(self.__name, args)
+ else:
+ return self.__send(self.__name, kwargs)
+
+ def __getattr__(self, name):
+ self.__name = '%s.%s' % (self.__name, name)
+ return self
+ # The old method returned a new instance, but this seemed wasteful.
+ # The only thing that changes is the name.
+ #return _Method(self.__send, "%s.%s" % (self.__name, name))
+
+class _Notify(object):
+ def __init__(self, request):
+ self._request = request
+
+ def __getattr__(self, name):
+ return _Method(self._request, name)
+
+# Batch implementation
+
+class MultiCallMethod(object):
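+    """
+    One queued call in a batch: records the method name and parameters,
+    and serializes itself to a JSON-RPC request string via request().
+    """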
+
+ def __init__(self, method, notify=False):
+ self.method = method
+ self.params = []
+ self.notify = notify
+
+ def __call__(self, *args, **kwargs):
+ if len(kwargs) > 0 and len(args) > 0:
+ raise ProtocolError('JSON-RPC does not support both ' +
+ 'positional and keyword arguments.')
+ if len(kwargs) > 0:
+ self.params = kwargs
+ else:
+ self.params = args
+
+ def request(self, encoding=None, rpcid=None):
+ return dumps(self.params, self.method, version=2.0,
+ encoding=encoding, rpcid=rpcid, notify=self.notify)
+
+ def __repr__(self):
+ return '%s' % self.request()
+
+ def __getattr__(self, method):
+ new_method = '%s.%s' % (self.method, method)
+ self.method = new_method
+ return self
+
+class MultiCallNotify(object):
+
+ def __init__(self, multicall):
+ self.multicall = multicall
+
+ def __getattr__(self, name):
+ new_job = MultiCallMethod(name, notify=True)
+ self.multicall._job_list.append(new_job)
+ return new_job
+
+class MultiCallIterator(object):
+
+ def __init__(self, results):
+ self.results = results
+
+    def __iter__(self):
+        for i in range(len(self.results)):
+            yield self[i]
+
+ def __getitem__(self, i):
+ item = self.results[i]
+ check_for_errors(item)
+ return item['result']
+
+ def __len__(self):
+ return len(self.results)
+
+class MultiCall(object):
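+    """
+    Collects attribute-style calls (and _notify notifications) and sends
+    them to the server as a single JSON-RPC batch request when invoked.
+    """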
+
+ def __init__(self, server):
+ self._server = server
+ self._job_list = []
+
+ def _request(self):
+ if len(self._job_list) < 1:
+ # Should we alert? This /is/ pretty obvious.
+ return
+ request_body = '[ %s ]' % ','.join([job.request() for
+ job in self._job_list])
+ responses = self._server._run_request(request_body)
+ del self._job_list[:]
+ if not responses:
+ responses = []
+ return MultiCallIterator(responses)
+
+ @property
+ def _notify(self):
+ return MultiCallNotify(self)
+
+ def __getattr__(self, name):
+ new_job = MultiCallMethod(name)
+ self._job_list.append(new_job)
+ return new_job
+
+ __call__ = _request
+
+# These lines conform to xmlrpclib's "compatibility" line.
+# Not really sure if we should include these, but oh well.
+Server = ServerProxy
+
+class Fault(object):
+ # JSON-RPC error class
+ def __init__(self, code=-32000, message='Server error', rpcid=None):
+ self.faultCode = code
+ self.faultString = message
+ self.rpcid = rpcid
+
+ def error(self):
+ return {'code':self.faultCode, 'message':self.faultString}
+
+ def response(self, rpcid=None, version=None):
+ if not version:
+ version = config.version
+ if rpcid:
+ self.rpcid = rpcid
+ return dumps(
+ self, methodresponse=True, rpcid=self.rpcid, version=version
+ )
+
+ def __repr__(self):
+ return '<Fault %s: %s>' % (self.faultCode, self.faultString)
+
+def random_id(length=8):
+ return_id = ''
+ for i in range(length):
+ return_id += random.choice(IDCHARS)
+ return return_id
+
+class Payload(dict):
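+    """
+    Builds the JSON-RPC message dictionaries (request, notification,
+    response and error) for a given rpcid and protocol version.
+    """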
+ def __init__(self, rpcid=None, version=None):
+ if not version:
+ version = config.version
+ self.id = rpcid
+ self.version = float(version)
+
+ def request(self, method, params=[]):
+ if type(method) not in types.StringTypes:
+ raise ValueError('Method name must be a string.')
+ if not self.id:
+ self.id = random_id()
+ request = { 'id':self.id, 'method':method }
+ if params:
+ request['params'] = params
+ if self.version >= 2:
+ request['jsonrpc'] = str(self.version)
+ return request
+
+ def notify(self, method, params=[]):
+ request = self.request(method, params)
+ if self.version >= 2:
+ del request['id']
+ else:
+ request['id'] = None
+ return request
+
+ def response(self, result=None):
+ response = {'result':result, 'id':self.id}
+ if self.version >= 2:
+ response['jsonrpc'] = str(self.version)
+ else:
+ response['error'] = None
+ return response
+
+ def error(self, code=-32000, message='Server error.'):
+ error = self.response()
+ if self.version >= 2:
+ del error['result']
+ else:
+ error['result'] = None
+ error['error'] = {'code':code, 'message':message}
+ return error
+
+def dumps(params=[], methodname=None, methodresponse=None,
+ encoding=None, rpcid=None, version=None, notify=None):
+ """
+ This differs from the Python implementation in that it implements
+ the rpcid argument since the 2.0 spec requires it for responses.
+ """
+ if not version:
+ version = config.version
+ valid_params = (types.TupleType, types.ListType, types.DictType)
+    if type(methodname) in types.StringTypes and \
+        type(params) not in valid_params and \
+        not isinstance(params, Fault):
+        # If a method name is given but params is neither list-like nor
+        # a Fault, error out.
+ raise TypeError('Params must be a dict, list, tuple or Fault ' +
+ 'instance.')
+ # Begin parsing object
+ payload = Payload(rpcid=rpcid, version=version)
+ if not encoding:
+ encoding = 'utf-8'
+ if type(params) is Fault:
+ response = payload.error(params.faultCode, params.faultString)
+ return jdumps(response, encoding=encoding)
+ if type(methodname) not in types.StringTypes and methodresponse != True:
+ raise ValueError('Method name must be a string, or methodresponse '+
+ 'must be set to True.')
+    if config.use_jsonclass:
+ from jsonrpclib import jsonclass
+ params = jsonclass.dump(params)
+ if methodresponse is True:
+ if rpcid is None:
+ raise ValueError('A method response must have an rpcid.')
+ response = payload.response(params)
+ return jdumps(response, encoding=encoding)
+ request = None
+    if notify:
+ request = payload.notify(methodname, params)
+ else:
+ request = payload.request(methodname, params)
+ return jdumps(request, encoding=encoding)
+
+def loads(data):
+ """
+ This differs from the Python implementation, in that it returns
+ the request structure in Dict format instead of the method, params.
+ It will return a list in the case of a batch request / response.
+ """
+ if data == '':
+ # notification
+ return None
+ result = jloads(data)
+ # if the above raises an error, the implementing server code
+ # should return something like the following:
+ # { 'jsonrpc':'2.0', 'error': fault.error(), id: None }
+    if config.use_jsonclass:
+ from jsonrpclib import jsonclass
+ result = jsonclass.load(result)
+ return result
+
+def check_for_errors(result):
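+    """
+    Validates a parsed response: passes notifications (empty responses)
+    through, raises ProtocolError when an error object is present, and
+    otherwise returns the response dict unchanged.
+    """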
+ if not result:
+ # Notification
+ return result
+ if type(result) is not types.DictType:
+ raise TypeError('Response is not a dict.')
+ if 'jsonrpc' in result.keys() and float(result['jsonrpc']) > 2.0:
+ raise NotImplementedError('JSON-RPC version not yet supported.')
+ if 'result' not in result.keys() and 'error' not in result.keys():
+ raise ValueError('Response does not have a result or error key.')
+    if 'error' in result.keys() and result['error'] is not None:
+ code = result['error']['code']
+ message = result['error']['message']
+ raise ProtocolError((code, message))
+ return result
+
+def isbatch(result):
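+    """
+    Returns True when a parsed payload looks like a JSON-RPC 2.0 batch:
+    a non-empty list of dicts whose first entry carries a 'jsonrpc'
+    version of at least 2.
+    """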
+ if type(result) not in (types.ListType, types.TupleType):
+ return False
+ if len(result) < 1:
+ return False
+ if type(result[0]) is not types.DictType:
+ return False
+ if 'jsonrpc' not in result[0].keys():
+ return False
+ try:
+ version = float(result[0]['jsonrpc'])
+ except ValueError:
+ raise ProtocolError('"jsonrpc" key must be a float(able) value.')
+ if version < 2:
+ return False
+ return True
+
+def isnotification(request):
+ if 'id' not in request.keys():
+ # 2.0 notification
+ return True
+    if request['id'] is None:
+ # 1.0 notification
+ return True
+ return False
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/setup.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/setup.py
new file mode 100755
index 00000000..569b6367
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/setup.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+"""
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import distutils.core
+
+distutils.core.setup(
+ name = "jsonrpclib",
+ version = "0.1.3",
+ packages = ["jsonrpclib"],
+ author = "Josh Marshall",
+ author_email = "catchjosh@gmail.com",
+ url = "http://github.com/joshmarshall/jsonrpclib/",
+ license = "http://www.apache.org/licenses/LICENSE-2.0",
+ description = "This project is an implementation of the JSON-RPC v2.0 " +
+ "specification (backwards-compatible) as a client library.",
+)
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/LICENSE.txt b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/LICENSE.txt
new file mode 100755
index 00000000..eb0864bd
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/LICENSE.txt
@@ -0,0 +1,11 @@
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/MANIFEST.in b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/MANIFEST.in
new file mode 100755
index 00000000..42f4acf5
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/MANIFEST.in
@@ -0,0 +1,2 @@
+include *.txt
+include README.rst
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/PKG-INFO b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/PKG-INFO
new file mode 100755
index 00000000..9d0f3fca
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/PKG-INFO
@@ -0,0 +1,460 @@
+Metadata-Version: 1.1
+Name: jsonrpclib-pelix
+Version: 0.2.5
+Summary: This project is an implementation of the JSON-RPC v2.0 specification (backwards-compatible) as a client library, for Python 2.6+ and Python 3. This version is a fork of jsonrpclib by Josh Marshall, usable with Pelix remote services.
+Home-page: http://github.com/tcalmant/jsonrpclib/
+Author: Thomas Calmant
+Author-email: thomas.calmant+github@gmail.com
+License: Apache License 2.0
+Description: JSONRPClib (patched for Pelix)
+ ##############################
+
+ .. image:: https://pypip.in/license/jsonrpclib-pelix/badge.svg
+ :target: https://pypi.python.org/pypi/jsonrpclib-pelix/
+
+ .. image:: https://travis-ci.org/tcalmant/jsonrpclib.svg?branch=master
+ :target: https://travis-ci.org/tcalmant/jsonrpclib
+
+ .. image:: https://coveralls.io/repos/tcalmant/jsonrpclib/badge.svg?branch=master
+ :target: https://coveralls.io/r/tcalmant/jsonrpclib?branch=master
+
+
+ This library is an implementation of the JSON-RPC specification.
+ It supports both the original 1.0 specification, as well as the
+ new (proposed) 2.0 specification, which includes batch submission, keyword
+ arguments, etc.
+
+ It is licensed under the Apache License, Version 2.0
+ (http://www.apache.org/licenses/LICENSE-2.0.html).
+
+
+ About this version
+ ******************
+
+ This is a patched version of the original ``jsonrpclib`` project by
+ Josh Marshall, available at https://github.com/joshmarshall/jsonrpclib.
+
+ The suffix *-pelix* only indicates that this version works with Pelix Remote
+ Services, but it is **not** a Pelix specific implementation.
+
+ * This version adds support for Python 3, staying compatible with Python 2.
+        * It is now possible to use the dispatch_method argument while extending
+          the SimpleJSONRPCDispatcher, to use a custom dispatcher.
+          This allows Pelix Remote Services to use this package.
+        * It can use thread pools to control the number of threads spawned to handle
+          notification requests and client connections.
+ * The modifications added in other forks of this project have been added:
+
+ * From https://github.com/drdaeman/jsonrpclib:
+
+ * Improved JSON-RPC 1.0 support
+ * Less strict error response handling
+
+ * From https://github.com/tuomassalo/jsonrpclib:
+
+ * In case of a non-pre-defined error, raise an AppError and give access to
+ *error.data*
+
+ * From https://github.com/dejw/jsonrpclib:
+
+ * Custom headers can be sent with request and associated tests
+
+ * The support for Unix sockets has been removed, as it is not trivial to convert
+ to Python 3 (and I don't use them)
+ * This version cannot be installed with the original ``jsonrpclib``, as it uses
+ the same package name.
+
+
+ Summary
+ *******
+
+ This library implements the JSON-RPC 2.0 proposed specification in pure Python.
+ It is designed to be as compatible with the syntax of ``xmlrpclib`` as possible
+ (it extends where possible), so that projects using ``xmlrpclib`` could easily
+ be modified to use JSON and experiment with the differences.
+
+ It is backwards-compatible with the 1.0 specification, and supports all of the
+ new proposed features of 2.0, including:
+
+ * Batch submission (via MultiCall)
+ * Keyword arguments
+ * Notifications (both in a batch and 'normal')
+ * Class translation using the ``__jsonclass__`` key.
+
+ I've added a "SimpleJSONRPCServer", which is intended to emulate the
+ "SimpleXMLRPCServer" from the default Python distribution.
+
+
+ Requirements
+ ************
+
+ It supports ``cjson`` and ``simplejson``, and looks for the parsers in that
+ order (searching first for ``cjson``, then for the *built-in* ``json`` in 2.6+,
+ and then the ``simplejson`` external library).
+ One of these must be installed to use this library, although if you have a
+ standard distribution of 2.6+, you should already have one.
+ Keep in mind that ``cjson`` is supposed to be the quickest, I believe, so if
+ you are going for full-on optimization you may want to pick it up.
+
+        Since the library uses the ``contextlib`` module, you need at least
+        Python 2.5 installed.
+
+
+ Installation
+ ************
+
+ You can install this from PyPI with one of the following commands (sudo
+ may be required):
+
+ .. code-block:: console
+
+ easy_install jsonrpclib-pelix
+ pip install jsonrpclib-pelix
+
+ Alternatively, you can download the source from the GitHub repository
+ at http://github.com/tcalmant/jsonrpclib and manually install it
+ with the following commands:
+
+ .. code-block:: console
+
+ git clone git://github.com/tcalmant/jsonrpclib.git
+ cd jsonrpclib
+ python setup.py install
+
+
+ SimpleJSONRPCServer
+ *******************
+
+        This is identical in usage (or should be) to the SimpleXMLRPCServer in the
+        Python standard library. The main differences are that it also supports
+        notifications, batch calls, class translation (if left on), etc.
+ Note: The import line is slightly different from the regular SimpleXMLRPCServer,
+ since the SimpleJSONRPCServer is distributed within the ``jsonrpclib`` library.
+
+ .. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+
+ server = SimpleJSONRPCServer(('localhost', 8080))
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+ server.serve_forever()
+
+        To protect the server with SSL, use the following snippet:
+
+ .. code-block:: python
+
+            import ssl
+
+            from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+
+ # Setup the SSL socket
+ server = SimpleJSONRPCServer(('localhost', 8080), bind_and_activate=False)
+ server.socket = ssl.wrap_socket(server.socket, certfile='server.pem',
+ server_side=True)
+ server.server_bind()
+ server.server_activate()
+
+ # ... register functions
+ # Start the server
+ server.serve_forever()
+
+
+ Notification Thread Pool
+ ========================
+
+ By default, notification calls are handled in the request handling thread.
+ It is possible to use a thread pool to handle them, by giving it to the server
+ using the ``set_notification_pool()`` method:
+
+ .. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+ from jsonrpclib.threadpool import ThreadPool
+
+ # Setup the thread pool: between 0 and 10 threads
+ pool = ThreadPool(max_threads=10, min_threads=0)
+
+ # Don't forget to start it
+ pool.start()
+
+ # Setup the server
+            server = SimpleJSONRPCServer(('localhost', 8080))
+ server.set_notification_pool(pool)
+
+ # Register methods
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+
+ try:
+ server.serve_forever()
+ finally:
+ # Stop the thread pool (let threads finish their current task)
+ pool.stop()
+ server.set_notification_pool(None)
+
+
+ Threaded server
+ ===============
+
+        It is also possible to use a thread pool to handle client requests, using
+        the ``PooledJSONRPCServer`` class.
+        By default, this class uses a pool of 0 to 30 threads. A custom pool can be
+        given with the ``thread_pool`` parameter of the class constructor.
+
+ The notification pool and the request pool are different: by default, a server
+ with a request pool doesn't have a notification pool.
+
+ .. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import PooledJSONRPCServer
+ from jsonrpclib.threadpool import ThreadPool
+
+            # Setup the notification and request pools
+            notif_pool = ThreadPool(max_threads=10, min_threads=0)
+            request_pool = ThreadPool(max_threads=50, min_threads=10)
+
+            # Don't forget to start them
+            notif_pool.start()
+            request_pool.start()
+
+            # Setup the server
+            server = PooledJSONRPCServer(('localhost', 8080),
+                                         thread_pool=request_pool)
+            server.set_notification_pool(notif_pool)
+
+ # Register methods
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+
+ try:
+ server.serve_forever()
+ finally:
+ # Stop the thread pools (let threads finish their current task)
+ request_pool.stop()
+                notif_pool.stop()
+ server.set_notification_pool(None)
+
+ Client Usage
+ ************
+
+ This is (obviously) taken from a console session.
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080')
+ >>> server.add(5,6)
+ 11
+ >>> server.add(x=5, y=10)
+ 15
+ >>> server._notify.add(5,6)
+ # No result returned...
+ >>> batch = jsonrpclib.MultiCall(server)
+ >>> batch.add(5, 6)
+ >>> batch.ping({'key':'value'})
+ >>> batch._notify.add(4, 30)
+ >>> results = batch()
+ >>> for result in results:
+           ...     print(result)
+ 11
+ {'key': 'value'}
+ # Note that there are only two responses -- this is according to spec.
+
+ # Clean up
+ >>> server('close')()
+
+ # Using client history
+ >>> history = jsonrpclib.history.History()
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080', history=history)
+ >>> server.add(5,6)
+ 11
+ >>> print(history.request)
+ {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
+ "method": "add", "params": [5, 6]}
+ >>> print(history.response)
+ {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
+ "result": 11}
+
+ # Clean up
+ >>> server('close')()
+
+        If you need 1.0 functionality, there are a bunch of places you can pass that in,
+        although the best way is simply to give a specific configuration to
+        ``jsonrpclib.ServerProxy``:
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> jsonrpclib.config.DEFAULT.version
+ 2.0
+ >>> config = jsonrpclib.config.Config(version=1.0)
+ >>> history = jsonrpclib.history.History()
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080', config=config,
+ history=history)
+ >>> server.add(7, 10)
+ 17
+ >>> print(history.request)
+ {"id": "827b2923-5b37-49a5-8b36-e73920a16d32",
+ "method": "add", "params": [7, 10]}
+ >>> print(history.response)
+ {"id": "827b2923-5b37-49a5-8b36-e73920a16d32", "error": null, "result": 17}
+ >>> server('close')()
+
+        The equivalent ``loads`` and ``dumps`` functions also exist, although with minor
+        modifications. The ``dumps`` arguments are almost identical, but three are
+        added: ``rpcid`` for the 'id' key, ``version`` to specify the JSON-RPC
+        compatibility level, and ``notify`` if it's a request that you want to be a
+        notification.
+
+ Additionally, the ``loads`` method does not return the params and method like
+ ``xmlrpclib``, but instead a.) parses for errors, raising ProtocolErrors, and
+ b.) returns the entire structure of the request / response for manual parsing.
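+
+        For illustration, a minimal sketch of the two helpers (the ``rpcid``
+        value is an arbitrary example, and the response string stands in for
+        what a server would return):
+
+        .. code-block:: python
+
+            >>> import jsonrpclib
+            >>> # Build a JSON-RPC 2.0 request string by hand
+            >>> request = jsonrpclib.dumps([5, 6], 'add', rpcid='abc123',
+            ...                            version=2.0)
+            >>> # loads() returns the whole structure, not (params, method)
+            >>> response = jsonrpclib.loads(
+            ...     '{"jsonrpc": "2.0", "id": "abc123", "result": 11}')
+            >>> response['result']
+            11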
+
+
+ Additional headers
+ ******************
+
+        If your remote service requires custom headers in requests, you can pass them
+        as a ``headers`` keyword argument when creating the ``ServerProxy``:
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.ServerProxy("http://localhost:8080",
+ headers={'X-Test' : 'Test'})
+
+        You can also set additional request headers only for certain method invocations:
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.Server("http://localhost:8080")
+ >>> with server._additional_headers({'X-Test' : 'Test'}) as test_server:
+ ... test_server.ping(42)
+ ...
+           >>> # X-Test header will no longer be sent in requests
+
+ Of course ``_additional_headers`` contexts can be nested as well.
+
+
+ Class Translation
+ *****************
+
+ I've recently added "automatic" class translation support, although it is
+ turned off by default. This can be devastatingly slow if improperly used, so
+ the following is just a short list of things to keep in mind when using it.
+
+ * Keep It (the object) Simple Stupid. (for exceptions, keep reading.)
+ * Do not require init params (for exceptions, keep reading)
+ * Getter properties without setters could be dangerous (read: not tested)
+
+        If any of the above are issues, use the ``_serialize`` method (see usage below).
+        For this to work, the server and client must BOTH have the ``use_jsonclass``
+        configuration item on, and both must have access to the same libraries used
+        by the objects.
+
+ If you have excessively nested arguments, it would be better to turn off the
+ translation and manually invoke it on specific objects using
+ ``jsonrpclib.jsonclass.dump`` / ``jsonrpclib.jsonclass.load`` (since the default
+ behavior recursively goes through attributes and lists / dicts / tuples).
+
+ Sample file: *test_obj.py*
+
+ .. code-block:: python
+
+ # This object is /very/ simple, and the system will look through the
+ # attributes and serialize what it can.
+ class TestObj(object):
+ foo = 'bar'
+
+ # This object requires __init__ params, so it uses the _serialize method
+ # and returns a tuple of init params and attribute values (the init params
+ # can be a dict or a list, but the attribute values must be a dict.)
+ class TestSerial(object):
+ foo = 'bar'
+ def __init__(self, *args):
+ self.args = args
+ def _serialize(self):
+ return (self.args, {'foo':self.foo,})
+
+ * Sample usage
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> import test_obj
+
+ # History is used only to print the serialized form of beans
+ >>> history = jsonrpclib.history.History()
+ >>> testobj1 = test_obj.TestObj()
+ >>> testobj2 = test_obj.TestSerial()
+ >>> server = jsonrpclib.Server('http://localhost:8080', history=history)
+
+ # The 'ping' just returns whatever is sent
+ >>> ping1 = server.ping(testobj1)
+ >>> ping2 = server.ping(testobj2)
+
+ >>> print(history.request)
+ {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
+ "method": "ping", "params": [{"__jsonclass__":
+ ["test_obj.TestSerial", []], "foo": "bar"}
+ ]}
+ >>> print(history.response)
+ {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
+ "result": {"__jsonclass__": ["test_obj.TestSerial", []], "foo": "bar"}}
+
+        This behavior is turned on by default. To deactivate it, just set the
+        ``use_jsonclass`` member of a server ``Config`` to False.
+ If you want to use a per-class serialization method, set its name in the
+ ``serialize_method`` member of a server ``Config``.
+ Finally, if you are using classes that you have defined in the implementation
+ (as in, not a separate library), you'll need to add those (on BOTH the server
+ and the client) using the ``config.classes.add()`` method.
+
+ Feedback on this "feature" is very, VERY much appreciated.
+
+ Why JSON-RPC?
+ *************
+
+ In my opinion, there are several reasons to choose JSON over XML for RPC:
+
+ * Much simpler to read (I suppose this is opinion, but I know I'm right. :)
+ * Size / Bandwidth - Main reason, a JSON object representation is just much smaller.
+ * Parsing - JSON should be much quicker to parse than XML.
+ * Easy class passing with ``jsonclass`` (when enabled)
+
+ In the interest of being fair, there are also a few reasons to choose XML
+ over JSON:
+
+ * Your server doesn't do JSON (rather obvious)
+ * Wider XML-RPC support across APIs (can we change this? :))
+ * Libraries are more established, i.e. more stable (Let's change this too.)
+
+ Tests
+ *****
+
+ Tests are an almost-verbatim drop from the JSON-RPC specification 2.0 page.
+        They can be run using *unittest* or *nose*:
+
+ .. code-block:: console
+
+ python -m unittest discover tests
+ python3 -m unittest discover tests
+ nosetests tests
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.0
+Classifier: Programming Language :: Python :: 3.1
+Classifier: Programming Language :: Python :: 3.2
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/README.rst b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/README.rst
new file mode 100755
index 00000000..29da2708
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/README.rst
@@ -0,0 +1,438 @@
+JSONRPClib (patched for Pelix)
+##############################
+
+.. image:: https://pypip.in/license/jsonrpclib-pelix/badge.svg
+ :target: https://pypi.python.org/pypi/jsonrpclib-pelix/
+
+.. image:: https://travis-ci.org/tcalmant/jsonrpclib.svg?branch=master
+ :target: https://travis-ci.org/tcalmant/jsonrpclib
+
+.. image:: https://coveralls.io/repos/tcalmant/jsonrpclib/badge.svg?branch=master
+ :target: https://coveralls.io/r/tcalmant/jsonrpclib?branch=master
+
+
+This library is an implementation of the JSON-RPC specification.
+It supports both the original 1.0 specification, as well as the
+new (proposed) 2.0 specification, which includes batch submission, keyword
+arguments, etc.
+
+It is licensed under the Apache License, Version 2.0
+(http://www.apache.org/licenses/LICENSE-2.0.html).
+
+
+About this version
+******************
+
+This is a patched version of the original ``jsonrpclib`` project by
+Josh Marshall, available at https://github.com/joshmarshall/jsonrpclib.
+
+The suffix *-pelix* only indicates that this version works with Pelix Remote
+Services, but it is **not** a Pelix specific implementation.
+
+* This version adds support for Python 3, staying compatible with Python 2.
+* It is now possible to use the dispatch_method argument while extending
+  the SimpleJSONRPCDispatcher, to use a custom dispatcher.
+  This allows Pelix Remote Services to use this package.
+* It can use thread pools to control the number of threads spawned to handle
+  notification requests and client connections.
+* The modifications added in other forks of this project have been added:
+
+ * From https://github.com/drdaeman/jsonrpclib:
+
+ * Improved JSON-RPC 1.0 support
+ * Less strict error response handling
+
+ * From https://github.com/tuomassalo/jsonrpclib:
+
+ * In case of a non-pre-defined error, raise an AppError and give access to
+ *error.data*
+
+ * From https://github.com/dejw/jsonrpclib:
+
+ * Custom headers can be sent with request and associated tests
+
+* The support for Unix sockets has been removed, as it is not trivial to convert
+ to Python 3 (and I don't use them)
+* This version cannot be installed with the original ``jsonrpclib``, as it uses
+ the same package name.
+
+
+Summary
+*******
+
+This library implements the JSON-RPC 2.0 proposed specification in pure Python.
+It is designed to be as compatible with the syntax of ``xmlrpclib`` as possible
+(it extends where possible), so that projects using ``xmlrpclib`` could easily
+be modified to use JSON and experiment with the differences.
+
+It is backwards-compatible with the 1.0 specification, and supports all of the
+new proposed features of 2.0, including:
+
+* Batch submission (via MultiCall)
+* Keyword arguments
+* Notifications (both in a batch and 'normal')
+* Class translation using the ``__jsonclass__`` key.
+
+I've added a "SimpleJSONRPCServer", which is intended to emulate the
+"SimpleXMLRPCServer" from the default Python distribution.
+
+
+Requirements
+************
+
+It supports ``cjson`` and ``simplejson``, and looks for the parsers in that
+order (searching first for ``cjson``, then for the *built-in* ``json`` in 2.6+,
+and then the ``simplejson`` external library).
+One of these must be installed to use this library, although if you have a
+standard distribution of 2.6+, you should already have one.
+Keep in mind that ``cjson`` is supposed to be the quickest, I believe, so if
+you are going for full-on optimization you may want to pick it up.
+
+Since the library uses the ``contextlib`` module, you need at least
+Python 2.5 installed.
+
+
+Installation
+************
+
+You can install this from PyPI with one of the following commands (sudo
+may be required):
+
+.. code-block:: console
+
+ easy_install jsonrpclib-pelix
+ pip install jsonrpclib-pelix
+
+Alternatively, you can download the source from the GitHub repository
+at http://github.com/tcalmant/jsonrpclib and manually install it
+with the following commands:
+
+.. code-block:: console
+
+ git clone git://github.com/tcalmant/jsonrpclib.git
+ cd jsonrpclib
+ python setup.py install
+
+
+SimpleJSONRPCServer
+*******************
+
+This is identical in usage (or should be) to the SimpleXMLRPCServer in the
+Python standard library. The main differences are that it also supports
+notifications, batch calls, class translation (if left on), etc.
+Note: The import line is slightly different from the regular SimpleXMLRPCServer,
+since the SimpleJSONRPCServer is distributed within the ``jsonrpclib`` library.
+
+.. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+
+ server = SimpleJSONRPCServer(('localhost', 8080))
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+ server.serve_forever()
+
+To protect the server with SSL, use the following snippet:
+
+.. code-block:: python
+
+    import ssl
+
+    from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+
+ # Setup the SSL socket
+ server = SimpleJSONRPCServer(('localhost', 8080), bind_and_activate=False)
+ server.socket = ssl.wrap_socket(server.socket, certfile='server.pem',
+ server_side=True)
+ server.server_bind()
+ server.server_activate()
+
+ # ... register functions
+ # Start the server
+ server.serve_forever()
+
+
+Notification Thread Pool
+========================
+
+By default, notification calls are handled in the request handling thread.
+It is possible to use a thread pool to handle them, by giving it to the server
+using the ``set_notification_pool()`` method:
+
+.. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+ from jsonrpclib.threadpool import ThreadPool
+
+ # Setup the thread pool: between 0 and 10 threads
+ pool = ThreadPool(max_threads=10, min_threads=0)
+
+ # Don't forget to start it
+ pool.start()
+
+ # Setup the server
+    server = SimpleJSONRPCServer(('localhost', 8080))
+ server.set_notification_pool(pool)
+
+ # Register methods
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+
+ try:
+ server.serve_forever()
+ finally:
+ # Stop the thread pool (let threads finish their current task)
+ pool.stop()
+ server.set_notification_pool(None)
+
+
+Threaded server
+===============
+
+It is also possible to use a thread pool to handle client requests, using the
+``PooledJSONRPCServer`` class.
+By default, this class uses a pool of 0 to 30 threads. A custom pool can be
+given with the ``thread_pool`` parameter of the class constructor.
+
+The notification pool and the request pool are different: by default, a server
+with a request pool doesn't have a notification pool.
+
+.. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import PooledJSONRPCServer
+ from jsonrpclib.threadpool import ThreadPool
+
+    # Setup the notification and request pools
+    notif_pool = ThreadPool(max_threads=10, min_threads=0)
+    request_pool = ThreadPool(max_threads=50, min_threads=10)
+
+    # Don't forget to start them
+    notif_pool.start()
+    request_pool.start()
+
+    # Setup the server
+    server = PooledJSONRPCServer(('localhost', 8080),
+                                 thread_pool=request_pool)
+    server.set_notification_pool(notif_pool)
+
+ # Register methods
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+
+ try:
+ server.serve_forever()
+ finally:
+ # Stop the thread pools (let threads finish their current task)
+ request_pool.stop()
+        notif_pool.stop()
+ server.set_notification_pool(None)
+
+Client Usage
+************
+
+This is (obviously) taken from a console session.
+
+.. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080')
+ >>> server.add(5,6)
+ 11
+ >>> server.add(x=5, y=10)
+ 15
+ >>> server._notify.add(5,6)
+ # No result returned...
+ >>> batch = jsonrpclib.MultiCall(server)
+ >>> batch.add(5, 6)
+ >>> batch.ping({'key':'value'})
+ >>> batch._notify.add(4, 30)
+ >>> results = batch()
+ >>> for result in results:
+    ...     print(result)
+ 11
+ {'key': 'value'}
+ # Note that there are only two responses -- this is according to spec.
+
+ # Clean up
+ >>> server('close')()
+
+ # Using client history
+ >>> history = jsonrpclib.history.History()
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080', history=history)
+ >>> server.add(5,6)
+ 11
+ >>> print(history.request)
+ {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
+ "method": "add", "params": [5, 6]}
+ >>> print(history.response)
+ {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
+ "result": 11}
+
+ # Clean up
+ >>> server('close')()
+
+If you need 1.0 functionality, there are a bunch of places you can pass that in,
+although the best way is simply to give a specific configuration to
+``jsonrpclib.ServerProxy``:
+
+.. code-block:: python
+
+ >>> import jsonrpclib
+ >>> jsonrpclib.config.DEFAULT.version
+ 2.0
+ >>> config = jsonrpclib.config.Config(version=1.0)
+ >>> history = jsonrpclib.history.History()
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080', config=config,
+ history=history)
+ >>> server.add(7, 10)
+ 17
+ >>> print(history.request)
+ {"id": "827b2923-5b37-49a5-8b36-e73920a16d32",
+ "method": "add", "params": [7, 10]}
+ >>> print(history.response)
+ {"id": "827b2923-5b37-49a5-8b36-e73920a16d32", "error": null, "result": 17}
+ >>> server('close')()
+
+The equivalent ``loads`` and ``dumps`` functions also exist, although with minor
+modifications. The ``dumps`` arguments are almost identical, but three are
+added: ``rpcid`` for the 'id' key, ``version`` to specify the JSON-RPC
+compatibility level, and ``notify`` if it's a request that you want to be a
+notification.
+
+Additionally, the ``loads`` method does not return the params and method like
+``xmlrpclib``, but instead a.) parses for errors, raising ProtocolErrors, and
+b.) returns the entire structure of the request / response for manual parsing.
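+
+For illustration, a minimal sketch of the two helpers (the ``rpcid`` value is
+an arbitrary example, and the response string stands in for what a server
+would return):
+
+.. code-block:: python
+
+    >>> import jsonrpclib
+    >>> # Build a JSON-RPC 2.0 request string by hand
+    >>> request = jsonrpclib.dumps([5, 6], 'add', rpcid='abc123',
+    ...                            version=2.0)
+    >>> # loads() returns the whole structure, not (params, method)
+    >>> response = jsonrpclib.loads(
+    ...     '{"jsonrpc": "2.0", "id": "abc123", "result": 11}')
+    >>> response['result']
+    11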
+
+
+Additional headers
+******************
+
+If your remote service requires custom headers in requests, you can pass them
+as a ``headers`` keyword argument when creating the ``ServerProxy``:
+
+.. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.ServerProxy("http://localhost:8080",
+ headers={'X-Test' : 'Test'})
+
+You can also set additional request headers only for certain method invocations:
+
+.. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.Server("http://localhost:8080")
+ >>> with server._additional_headers({'X-Test' : 'Test'}) as test_server:
+ ... test_server.ping(42)
+ ...
+    >>> # X-Test header will no longer be sent in requests
+
+Of course ``_additional_headers`` contexts can be nested as well.
+
+
+Class Translation
+*****************
+
+I've recently added "automatic" class translation support, although it is
+turned off by default. This can be devastatingly slow if improperly used, so
+the following is just a short list of things to keep in mind when using it.
+
+* Keep It (the object) Simple Stupid. (for exceptions, keep reading.)
+* Do not require init params (for exceptions, keep reading)
+* Getter properties without setters could be dangerous (read: not tested)
+
+If any of the above are issues, use the ``_serialize`` method (see usage below).
+For this to work, the server and client must BOTH have the ``use_jsonclass``
+configuration item on, and both must have access to the same libraries used
+by the objects.
+
+If you have excessively nested arguments, it would be better to turn off the
+translation and manually invoke it on specific objects using
+``jsonrpclib.jsonclass.dump`` / ``jsonrpclib.jsonclass.load`` (since the default
+behavior recursively goes through attributes and lists / dicts / tuples).
+
+Sample file: *test_obj.py*
+
+.. code-block:: python
+
+ # This object is /very/ simple, and the system will look through the
+ # attributes and serialize what it can.
+ class TestObj(object):
+ foo = 'bar'
+
+ # This object requires __init__ params, so it uses the _serialize method
+ # and returns a tuple of init params and attribute values (the init params
+ # can be a dict or a list, but the attribute values must be a dict.)
+ class TestSerial(object):
+ foo = 'bar'
+ def __init__(self, *args):
+ self.args = args
+ def _serialize(self):
+ return (self.args, {'foo':self.foo,})
+
+* Sample usage
+
+.. code-block:: python
+
+ >>> import jsonrpclib
+ >>> import test_obj
+
+ # History is used only to print the serialized form of beans
+ >>> history = jsonrpclib.history.History()
+ >>> testobj1 = test_obj.TestObj()
+ >>> testobj2 = test_obj.TestSerial()
+ >>> server = jsonrpclib.Server('http://localhost:8080', history=history)
+
+ # The 'ping' just returns whatever is sent
+ >>> ping1 = server.ping(testobj1)
+ >>> ping2 = server.ping(testobj2)
+
+ >>> print(history.request)
+ {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
+ "method": "ping", "params": [{"__jsonclass__":
+ ["test_obj.TestSerial", []], "foo": "bar"}
+ ]}
+ >>> print(history.response)
+ {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
+ "result": {"__jsonclass__": ["test_obj.TestSerial", []], "foo": "bar"}}
+
+This behavior is turned on by default. To deactivate it, just set the
+``use_jsonclass`` member of a server ``Config`` to False.
+If you want to use a per-class serialization method, set its name in the
+``serialize_method`` member of a server ``Config``.
+Finally, if you are using classes that you have defined in the implementation
+(as in, not a separate library), you'll need to add those (on BOTH the server
+and the client) using the ``config.classes.add()`` method.
+
+Feedback on this "feature" is very, VERY much appreciated.
+
+Why JSON-RPC?
+*************
+
+In my opinion, there are several reasons to choose JSON over XML for RPC:
+
+* Much simpler to read (I suppose this is opinion, but I know I'm right. :)
+* Size / Bandwidth - Main reason, a JSON object representation is just much smaller.
+* Parsing - JSON should be much quicker to parse than XML.
+* Easy class passing with ``jsonclass`` (when enabled)
+
+In the interest of being fair, there are also a few reasons to choose XML
+over JSON:
+
+* Your server doesn't do JSON (rather obvious)
+* Wider XML-RPC support across APIs (can we change this? :))
+* Libraries are more established, i.e. more stable (Let's change this too.)
+
+Tests
+*****
+
+Tests are an almost-verbatim drop from the JSON-RPC specification 2.0 page.
+They can be run using *unittest* or *nose*:
+
+.. code-block:: console
+
+ python -m unittest discover tests
+ python3 -m unittest discover tests
+ nosetests tests
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/SimpleJSONRPCServer.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/SimpleJSONRPCServer.py
new file mode 100755
index 00000000..f7a7b652
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/SimpleJSONRPCServer.py
@@ -0,0 +1,602 @@
+#!/usr/bin/python
+# -- Content-Encoding: UTF-8 --
+"""
+Defines a request dispatcher, an HTTP request handler, an HTTP server and a
+CGI request handler.
+
+:authors: Josh Marshall, Thomas Calmant
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Module version
+__version_info__ = (0, 2, 5)
+__version__ = ".".join(str(x) for x in __version_info__)
+
+# Documentation strings format
+__docformat__ = "restructuredtext en"
+
+# ------------------------------------------------------------------------------
+# Local modules
+from jsonrpclib import Fault
+import jsonrpclib.config
+import jsonrpclib.utils as utils
+import jsonrpclib.threadpool
+
+# Standard library
+import logging
+import socket
+import sys
+import traceback
+
+# Prepare the logger
+_logger = logging.getLogger(__name__)
+
+try:
+ # Python 3
+ # pylint: disable=F0401,E0611
+ import xmlrpc.server as xmlrpcserver
+ import socketserver
+except (ImportError, AttributeError):
+ # Python 2 or IronPython
+ # pylint: disable=F0401,E0611
+ import SimpleXMLRPCServer as xmlrpcserver
+ import SocketServer as socketserver
+
+try:
+    # Unix-like systems
+ import fcntl
+except ImportError:
+    # fcntl is unavailable on Windows
+ # pylint: disable=C0103
+ fcntl = None
+
+# ------------------------------------------------------------------------------
+
+
+def get_version(request):
+ """
+ Computes the JSON-RPC version
+
+ :param request: A request dictionary
+ :return: The JSON-RPC version or None
+ """
+ if 'jsonrpc' in request:
+ return 2.0
+ elif 'id' in request:
+ return 1.0
+
+ return None
+
+
+def validate_request(request, json_config):
+ """
+ Validates the format of a request dictionary
+
+ :param request: A request dictionary
+ :param json_config: A JSONRPClib Config instance
+ :return: True if the dictionary is valid, else a Fault object
+ """
+ if not isinstance(request, utils.DictType):
+ # Invalid request type
+ fault = Fault(-32600, 'Request must be a dict, not {0}'
+ .format(type(request).__name__),
+ config=json_config)
+ _logger.warning("Invalid request content: %s", fault)
+ return fault
+
+ # Get the request ID
+ rpcid = request.get('id', None)
+
+ # Check request version
+ version = get_version(request)
+ if not version:
+ fault = Fault(-32600, 'Request {0} invalid.'.format(request),
+ rpcid=rpcid, config=json_config)
+ _logger.warning("No version in request: %s", fault)
+ return fault
+
+ # Default parameters: empty list
+ request.setdefault('params', [])
+
+ # Check parameters
+ method = request.get('method', None)
+ params = request.get('params')
+ param_types = (utils.ListType, utils.DictType, utils.TupleType)
+
+ if not method or not isinstance(method, utils.string_types) or \
+ not isinstance(params, param_types):
+ # Invalid type of method name or parameters
+ fault = Fault(-32600, 'Invalid request parameters or method.',
+ rpcid=rpcid, config=json_config)
+ _logger.warning("Invalid request content: %s", fault)
+ return fault
+
+ # Valid request
+ return True
+
+# ------------------------------------------------------------------------------
+
+
+class NoMulticallResult(Exception):
+ """
+ No result in multicall
+ """
+ pass
+
+
+class SimpleJSONRPCDispatcher(xmlrpcserver.SimpleXMLRPCDispatcher, object):
+ """
+ Mix-in class that dispatches JSON-RPC requests.
+
+ This class is used to register JSON-RPC method handlers
+ and then to dispatch them. This class doesn't need to be
+    instantiated directly when used by SimpleJSONRPCServer.
+ """
+ def __init__(self, encoding=None, config=jsonrpclib.config.DEFAULT):
+ """
+ Sets up the dispatcher with the given encoding.
+ None values are allowed.
+ """
+ xmlrpcserver.SimpleXMLRPCDispatcher.__init__(
+ self, allow_none=True, encoding=encoding or "UTF-8")
+ self.json_config = config
+
+ # Notification thread pool
+ self.__notification_pool = None
+
+ def set_notification_pool(self, thread_pool):
+ """
+ Sets the thread pool to use to handle notifications
+ """
+ self.__notification_pool = thread_pool
+
+ def _unmarshaled_dispatch(self, request, dispatch_method=None):
+ """
+ Loads the request dictionary (unmarshaled), calls the method(s)
+ accordingly and returns a JSON-RPC dictionary (not marshaled)
+
+ :param request: JSON-RPC request dictionary (or list of)
+ :param dispatch_method: Custom dispatch method (for method resolution)
+ :return: A JSON-RPC dictionary (or an array of) or None if the request
+ was a notification
+ :raise NoMulticallResult: No result in batch
+ """
+ if not request:
+ # Invalid request dictionary
+ fault = Fault(-32600, 'Request invalid -- no request data.',
+ config=self.json_config)
+ _logger.warning("Invalid request: %s", fault)
+ return fault.dump()
+
+ if isinstance(request, utils.ListType):
+ # This SHOULD be a batch, by spec
+ responses = []
+ for req_entry in request:
+ # Validate the request
+ result = validate_request(req_entry, self.json_config)
+ if isinstance(result, Fault):
+ responses.append(result.dump())
+ continue
+
+ # Call the method
+ resp_entry = self._marshaled_single_dispatch(req_entry,
+ dispatch_method)
+
+ # Store its result
+ if isinstance(resp_entry, Fault):
+ # pylint: disable=E1103
+ responses.append(resp_entry.dump())
+ elif resp_entry is not None:
+ responses.append(resp_entry)
+
+ if not responses:
+ # No non-None result
+ _logger.error("No result in Multicall")
+ raise NoMulticallResult("No result")
+
+ return responses
+
+ else:
+ # Single call
+ result = validate_request(request, self.json_config)
+ if isinstance(result, Fault):
+ return result.dump()
+
+ # Call the method
+ response = self._marshaled_single_dispatch(request,
+ dispatch_method)
+ if isinstance(response, Fault):
+ # pylint: disable=E1103
+ return response.dump()
+
+ return response
+
+ def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
+ """
+ Parses the request data (marshaled), calls method(s) and returns a
+ JSON string (marshaled)
+
+ :param data: A JSON request string
+ :param dispatch_method: Custom dispatch method (for method resolution)
+ :param path: Unused parameter, to keep compatibility with xmlrpclib
+ :return: A JSON-RPC response string (marshaled)
+ """
+ # Parse the request
+ try:
+ request = jsonrpclib.loads(data, self.json_config)
+ except Exception as ex:
+ # Parsing/loading error
+ fault = Fault(-32700, 'Request {0} invalid. ({1}:{2})'
+ .format(data, type(ex).__name__, ex),
+ config=self.json_config)
+ _logger.warning("Error parsing request: %s", fault)
+ return fault.response()
+
+ # Get the response dictionary
+ try:
+ response = self._unmarshaled_dispatch(request, dispatch_method)
+ if response is not None:
+ # Compute the string representation of the dictionary/list
+ return jsonrpclib.jdumps(response, self.encoding)
+ else:
+ # No result (notification)
+ return ''
+ except NoMulticallResult:
+ # Return an empty string (jsonrpclib internal behaviour)
+ return ''
+
+ def _marshaled_single_dispatch(self, request, dispatch_method=None):
+ """
+ Dispatches a single method call
+
+ :param request: A validated request dictionary
+ :param dispatch_method: Custom dispatch method (for method resolution)
+ :return: A JSON-RPC response dictionary, or None if it was a
+ notification request
+ """
+ method = request.get('method')
+ params = request.get('params')
+
+ # Prepare a request-specific configuration
+ if 'jsonrpc' not in request and self.json_config.version >= 2:
+            # JSON-RPC 1.0 request on a JSON-RPC 2.0 server
+ # => compatibility needed
+ config = self.json_config.copy()
+ config.version = 1.0
+ else:
+ # Keep server configuration as is
+ config = self.json_config
+
+ # Test if this is a notification request
+ is_notification = 'id' not in request or request['id'] in (None, '')
+ if is_notification and self.__notification_pool is not None:
+ # Use the thread pool for notifications
+ if dispatch_method is not None:
+ self.__notification_pool.enqueue(dispatch_method,
+ method, params)
+ else:
+ self.__notification_pool.enqueue(self._dispatch,
+ method, params, config)
+
+ # Return immediately
+ return None
+ else:
+ # Synchronous call
+ try:
+ # Call the method
+ if dispatch_method is not None:
+ response = dispatch_method(method, params)
+ else:
+ response = self._dispatch(method, params, config)
+ except Exception as ex:
+ # Return a fault
+ fault = Fault(-32603, '{0}:{1}'.format(type(ex).__name__, ex),
+ config=config)
+ _logger.error("Error calling method %s: %s", method, fault)
+ return fault.dump()
+
+ if is_notification:
+ # It's a notification, no result needed
+ # Do not use 'not id' as it might be the integer 0
+ return None
+
+ # Prepare a JSON-RPC dictionary
+ try:
+ return jsonrpclib.dump(response, rpcid=request['id'],
+ is_response=True, config=config)
+ except Exception as ex:
+ # JSON conversion exception
+ fault = Fault(-32603, '{0}:{1}'.format(type(ex).__name__, ex),
+ config=config)
+ _logger.error("Error preparing JSON-RPC result: %s", fault)
+ return fault.dump()
+
+ def _dispatch(self, method, params, config=None):
+ """
+ Default method resolver and caller
+
+ :param method: Name of the method to call
+ :param params: List of arguments to give to the method
+ :param config: Request-specific configuration
+ :return: The result of the method
+ """
+ config = config or self.json_config
+
+ func = None
+ try:
+ # Look into registered methods
+ func = self.funcs[method]
+ except KeyError:
+ if self.instance is not None:
+ # Try with the registered instance
+ try:
+ # Instance has a custom dispatcher
+ return getattr(self.instance, '_dispatch')(method, params)
+ except AttributeError:
+ # Resolve the method name in the instance
+ try:
+ func = xmlrpcserver.resolve_dotted_attribute(
+ self.instance, method, True)
+ except AttributeError:
+ # Unknown method
+ pass
+
+ if func is not None:
+ try:
+ # Call the method
+ if isinstance(params, utils.ListType):
+ return func(*params)
+ else:
+ return func(**params)
+ except TypeError as ex:
+ # Maybe the parameters are wrong
+ fault = Fault(-32602, 'Invalid parameters: {0}'.format(ex),
+ config=config)
+ _logger.warning("Invalid call parameters: %s", fault)
+ return fault
+ except:
+ # Method exception
+ err_lines = traceback.format_exc().splitlines()
+ trace_string = '{0} | {1}'.format(err_lines[-3], err_lines[-1])
+ fault = Fault(-32603, 'Server error: {0}'.format(trace_string),
+ config=config)
+ _logger.exception("Server-side exception: %s", fault)
+ return fault
+ else:
+ # Unknown method
+ fault = Fault(-32601, 'Method {0} not supported.'.format(method),
+ config=config)
+ _logger.warning("Unknown method: %s", fault)
+ return fault
+
+# ------------------------------------------------------------------------------
+
+
+class SimpleJSONRPCRequestHandler(xmlrpcserver.SimpleXMLRPCRequestHandler):
+ """
+ HTTP request handler.
+
+ The server that receives the requests must have a json_config member,
+ containing a JSONRPClib Config instance
+ """
+ def do_POST(self):
+ """
+ Handles POST requests
+ """
+ if not self.is_rpc_path_valid():
+ self.report_404()
+ return
+
+ # Retrieve the configuration
+ config = getattr(self.server, 'json_config', jsonrpclib.config.DEFAULT)
+
+ try:
+ # Read the request body
+ max_chunk_size = 10 * 1024 * 1024
+ size_remaining = int(self.headers["content-length"])
+ chunks = []
+ while size_remaining:
+ chunk_size = min(size_remaining, max_chunk_size)
+ raw_chunk = self.rfile.read(chunk_size)
+ if not raw_chunk:
+ break
+ chunks.append(utils.from_bytes(raw_chunk))
+ # Decrease by the number of bytes actually read, not by the
+ # length of the decoded string (they can differ)
+ size_remaining -= len(raw_chunk)
+ data = ''.join(chunks)
+
+ try:
+ # Decode content
+ data = self.decode_request_content(data)
+ if data is None:
+ # Unknown encoding, response has been sent
+ return
+ except AttributeError:
+ # decode_request_content is only available since Python 2.7
+ pass
+
+ # Execute the method
+ response = self.server._marshaled_dispatch(
+ data, getattr(self, '_dispatch', None), self.path)
+
+ # No exception: send a 200 OK
+ self.send_response(200)
+ except:
+ # Exception: send 500 Server Error
+ self.send_response(500)
+ err_lines = traceback.format_exc().splitlines()
+ trace_string = '{0} | {1}'.format(err_lines[-3], err_lines[-1])
+ fault = jsonrpclib.Fault(-32603, 'Server error: {0}'
+ .format(trace_string), config=config)
+ _logger.exception("Server-side error: %s", fault)
+ response = fault.response()
+
+ if response is None:
+ # Avoid sending None
+ response = ''
+
+ # Convert the response to the valid string format
+ response = utils.to_bytes(response)
+
+ # Send it
+ self.send_header("Content-type", config.content_type)
+ self.send_header("Content-length", str(len(response)))
+ self.end_headers()
+ if response:
+ self.wfile.write(response)
+
+# ------------------------------------------------------------------------------
+
+
+class SimpleJSONRPCServer(socketserver.TCPServer, SimpleJSONRPCDispatcher):
+ """
+ JSON-RPC server (and dispatcher)
+ """
+ # This simplifies server restart after error
+ allow_reuse_address = True
+
+ # pylint: disable=C0103
+ def __init__(self, addr, requestHandler=SimpleJSONRPCRequestHandler,
+ logRequests=True, encoding=None, bind_and_activate=True,
+ address_family=socket.AF_INET,
+ config=jsonrpclib.config.DEFAULT):
+ """
+ Sets up the server and the dispatcher
+
+ :param addr: The server listening address
+ :param requestHandler: Custom request handler
+ :param logRequests: Flag to (de)activate request logging
+ :param encoding: The dispatcher request encoding
+ :param bind_and_activate: If True, starts the server immediately
+ :param address_family: The server listening address family
+ :param config: A JSONRPClib Config instance
+ """
+ # Set up the dispatcher fields
+ SimpleJSONRPCDispatcher.__init__(self, encoding, config)
+
+ # Prepare the server configuration
+ # logRequests is used by SimpleXMLRPCRequestHandler
+ self.logRequests = logRequests
+ self.address_family = address_family
+ self.json_config = config
+
+ # Work on the request handler
+ class RequestHandlerWrapper(requestHandler, object):
+ """
+ Wraps the request handle to have access to the configuration
+ """
+ def __init__(self, *args, **kwargs):
+ """
+ Constructs the wrapper after having stored the configuration
+ """
+ self.config = config
+ super(RequestHandlerWrapper, self).__init__(*args, **kwargs)
+
+ # Set up the server, using the wrapper so that each request handler
+ # has access to the configuration
+ socketserver.TCPServer.__init__(self, addr, RequestHandlerWrapper,
+ bind_and_activate)
+
+ # POSIX-specific: set the close-on-exec flag on the server socket
+ # (the fcntl module is not available on Windows)
+ if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
+ flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
+ flags |= fcntl.FD_CLOEXEC
+ fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
+
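+# Usage sketch (address, port and the 'add' function are illustrative;
+# register_function is inherited from the XML-RPC dispatcher):
+#
+#   server = SimpleJSONRPCServer(('localhost', 8080))
+#   server.register_function(lambda a, b: a + b, 'add')
+#   server.serve_forever()
+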
+# ------------------------------------------------------------------------------
+
+
+class PooledJSONRPCServer(SimpleJSONRPCServer, socketserver.ThreadingMixIn):
+ """
+ JSON-RPC server based on a thread pool
+ """
+ def __init__(self, addr, requestHandler=SimpleJSONRPCRequestHandler,
+ logRequests=True, encoding=None, bind_and_activate=True,
+ address_family=socket.AF_INET,
+ config=jsonrpclib.config.DEFAULT, thread_pool=None):
+ """
+ Sets up the server and the dispatcher
+
+ :param addr: The server listening address
+ :param requestHandler: Custom request handler
+ :param logRequests: Flag to (de)activate request logging
+ :param encoding: The dispatcher request encoding
+ :param bind_and_activate: If True, starts the server immediately
+ :param address_family: The server listening address family
+ :param config: A JSONRPClib Config instance
+ :param thread_pool: A ThreadPool object. The pool must be started.
+ """
+ # Normalize the thread pool
+ if thread_pool is None:
+ # Start a thread pool with at most 30 threads and no minimum
+ thread_pool = jsonrpclib.threadpool.ThreadPool(
+ 30, 0, logname="PooledJSONRPCServer")
+ thread_pool.start()
+
+ # Store the thread pool
+ self.__request_pool = thread_pool
+
+ # Prepare the server
+ SimpleJSONRPCServer.__init__(self, addr, requestHandler, logRequests,
+ encoding, bind_and_activate,
+ address_family, config)
+
+ def process_request(self, request, client_address):
+ """
+ Handle a client request: queue it in the thread pool
+ """
+ self.__request_pool.enqueue(self.process_request_thread,
+ request, client_address)
+
+ def server_close(self):
+ """
+ Clean up the server
+ """
+ SimpleJSONRPCServer.server_close(self)
+ self.__request_pool.stop()
+
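+# Usage sketch (pool sizes and address are illustrative): as noted in the
+# constructor docstring, the pool must be started before it is handed over:
+#
+#   pool = jsonrpclib.threadpool.ThreadPool(10, 0)
+#   pool.start()
+#   server = PooledJSONRPCServer(('localhost', 8080), thread_pool=pool)
+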
+# ------------------------------------------------------------------------------
+
+
+class CGIJSONRPCRequestHandler(SimpleJSONRPCDispatcher):
+ """
+ JSON-RPC CGI handler (and dispatcher)
+ """
+ def __init__(self, encoding=None, config=jsonrpclib.config.DEFAULT):
+ """
+ Sets up the dispatcher
+
+ :param encoding: Dispatcher encoding
+ :param config: A JSONRPClib Config instance
+ """
+ SimpleJSONRPCDispatcher.__init__(self, encoding, config)
+
+ def handle_jsonrpc(self, request_text):
+ """
+ Handle a JSON-RPC request
+ """
+ response = self._marshaled_dispatch(request_text)
+ sys.stdout.write('Content-Type: {0}\r\n'
+ .format(self.json_config.content_type))
+ sys.stdout.write('Content-Length: {0:d}\r\n'.format(len(response)))
+ sys.stdout.write('\r\n')
+ sys.stdout.write(response)
+
+ # XML-RPC alias
+ handle_xmlrpc = handle_jsonrpc
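+
+# CGI usage sketch (assumes a standard CGI environment where the request
+# body arrives on stdin; pow is just an example function, and
+# register_function is inherited from the XML-RPC dispatcher):
+#
+#   handler = CGIJSONRPCRequestHandler()
+#   handler.register_function(pow)
+#   handler.handle_jsonrpc(sys.stdin.read())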
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/__init__.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/__init__.py
new file mode 100755
index 00000000..2c7dc1c5
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/__init__.py
@@ -0,0 +1,34 @@
+#!/usr/bin/python
+# -- Content-Encoding: UTF-8 --
+"""
+Aliases to ease access to jsonrpclib classes
+
+:authors: Josh Marshall, Thomas Calmant
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Easy access to utility methods and classes
+from jsonrpclib.jsonrpc import Server, ServerProxy
+from jsonrpclib.jsonrpc import MultiCall, Fault, ProtocolError, AppError
+from jsonrpclib.jsonrpc import loads, dumps, load, dump
+from jsonrpclib.jsonrpc import jloads, jdumps
+import jsonrpclib.history as history
+import jsonrpclib.utils as utils
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/config.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/config.py
new file mode 100755
index 00000000..d2c5a811
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/config.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -- Content-Encoding: UTF-8 --
+"""
+The configuration module.
+
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Module version
+__version_info__ = (0, 2, 5)
+__version__ = ".".join(str(x) for x in __version_info__)
+
+# Documentation strings format
+__docformat__ = "restructuredtext en"
+
+# ------------------------------------------------------------------------------
+
+import sys
+
+# ------------------------------------------------------------------------------
+
+
+class LocalClasses(dict):
+ """
+ Associates local classes with their names (used in the jsonclass module)
+ """
+ def add(self, cls, name=None):
+ """
+ Stores a local class
+
+ :param cls: A class
+ :param name: Custom name used in the __jsonclass__ attribute
+ """
+ if not name:
+ name = cls.__name__
+ self[name] = cls
+
+# ------------------------------------------------------------------------------
+
+
+class Config(object):
+ """
+ This is pretty much used exclusively for the 'jsonclass'
+ functionality... set use_jsonclass to False to turn it off.
+ You can change serialize_method and ignore_attribute, or use
+ the local_classes.add(class) to include "local" classes.
+ """
+ def __init__(self, version=2.0, content_type="application/json-rpc",
+ user_agent=None, use_jsonclass=True,
+ serialize_method='_serialize',
+ ignore_attribute='_ignore',
+ serialize_handlers=None):
+ """
+ Sets up a configuration of JSONRPClib
+
+ :param version: JSON-RPC specification version
+ :param content_type: HTTP content type header value
+ :param user_agent: The HTTP request user agent
+ :param use_jsonclass: Allow bean marshalling
+ :param serialize_method: A string that references the method on a
+ custom class object which is responsible for
+ returning a tuple of the arguments and a dict
+ of attributes.
+ :param ignore_attribute: A string that references the attribute on a
+ custom class object which holds strings and/or
+ references of the attributes the class
+ translator should ignore.
+ :param serialize_handlers: A dictionary of dump handler functions by
+ type for additional type support and for
+ overriding dump of built-in types in utils
+ """
+ # JSON-RPC specification
+ self.version = version
+
+ # Change to False to keep __jsonclass__ entries raw.
+ self.use_jsonclass = use_jsonclass
+
+ # it SHOULD be 'application/json-rpc'
+ # but MAY be 'application/json' or 'application/jsonrequest'
+ self.content_type = content_type
+
+ # Default user agent
+ if user_agent is None:
+ user_agent = 'jsonrpclib/{0} (Python {1})'.format(
+ __version__, '.'.join(str(ver)
+ for ver in sys.version_info[0:3]))
+ self.user_agent = user_agent
+
+ # The list of classes to use for jsonclass translation.
+ self.classes = LocalClasses()
+
+ # The serialize_method should be a string that references the
+ # method on a custom class object which is responsible for
+ # returning a tuple of the constructor arguments and a dict of
+ # attributes.
+ self.serialize_method = serialize_method
+
+ # The ignore attribute should be a string that references the
+ # attribute on a custom class object which holds strings and / or
+ # references of the attributes the class translator should ignore.
+ self.ignore_attribute = ignore_attribute
+
+ # The list of serialize handler functions for jsonclass dump.
+ # Used for handling additional types and overriding built-in types.
+ # Functions are expected to have the same parameters as jsonclass dump
+ # (possibility to call standard jsonclass dump function within).
+ self.serialize_handlers = serialize_handlers or {}
+
+ def copy(self):
+ """
+ Returns a shallow copy of this configuration bean
+
+ :return: A shallow copy of this configuration
+ """
+ new_config = Config(self.version, self.content_type, self.user_agent,
+ self.use_jsonclass, self.serialize_method,
+ self.ignore_attribute, None)
+ new_config.classes = self.classes.copy()
+ new_config.serialize_handlers = self.serialize_handlers.copy()
+ return new_config
+
+# Default configuration
+DEFAULT = Config()
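+
+# Usage sketch (MyBean is an illustrative class): a JSON-RPC 1.0
+# configuration with a local class registered for jsonclass marshalling:
+#
+#   config = Config(version=1.0)
+#   config.classes.add(MyBean)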
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/history.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/history.py
new file mode 100755
index 00000000..7062ab66
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/history.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+# -- Content-Encoding: UTF-8 --
+"""
+The history module.
+
+:authors: Josh Marshall, Thomas Calmant
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Module version
+__version_info__ = (0, 2, 5)
+__version__ = ".".join(str(x) for x in __version_info__)
+
+# Documentation strings format
+__docformat__ = "restructuredtext en"
+
+# ------------------------------------------------------------------------------
+
+
+class History(object):
+ """
+ This holds all the response and request objects for a
+ session. A server using this should call "clear" after
+ each request cycle in order to keep it from clogging
+ memory.
+ """
+ def __init__(self):
+ """
+ Sets up members
+ """
+ self.requests = []
+ self.responses = []
+
+ def add_response(self, response_obj):
+ """
+ Adds a response to the history
+
+ :param response_obj: Response content
+ """
+ self.responses.append(response_obj)
+
+ def add_request(self, request_obj):
+ """
+ Adds a request to the history
+
+ :param request_obj: A request object
+ """
+ self.requests.append(request_obj)
+
+ @property
+ def request(self):
+ """
+ Returns the latest stored request or None
+ """
+ try:
+ return self.requests[-1]
+
+ except IndexError:
+ return None
+
+ @property
+ def response(self):
+ """
+ Returns the latest stored response or None
+ """
+ try:
+ return self.responses[-1]
+
+ except IndexError:
+ return None
+
+ def clear(self):
+ """
+ Clears the history lists
+ """
+ del self.requests[:]
+ del self.responses[:]
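+
+# Usage sketch (URL and method are illustrative): give a History instance
+# to a ServerProxy to inspect the raw request/response of the last call:
+#
+#   history = History()
+#   server = ServerProxy('http://localhost:8080', history=history)
+#   server.ping()
+#   print(history.request, history.response)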
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonclass.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonclass.py
new file mode 100755
index 00000000..c7cc4c35
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonclass.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -- Content-Encoding: UTF-8 --
+"""
+The serialization module
+
+:authors: Josh Marshall, Thomas Calmant
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Module version
+__version_info__ = (0, 2, 5)
+__version__ = ".".join(str(x) for x in __version_info__)
+
+# Documentation strings format
+__docformat__ = "restructuredtext en"
+
+# ------------------------------------------------------------------------------
+
+# Local package
+import jsonrpclib.config
+import jsonrpclib.utils as utils
+
+# Standard library
+import inspect
+import re
+
+# ------------------------------------------------------------------------------
+
+# Supported transmitted code
+SUPPORTED_TYPES = (utils.DictType,) + utils.iterable_types \
+ + utils.primitive_types
+
+# Regex of invalid module characters
+INVALID_MODULE_CHARS = r'[^a-zA-Z0-9\_\.]'
+
+# ------------------------------------------------------------------------------
+
+
+class TranslationError(Exception):
+ """
+ Unmarshalling exception
+ """
+ pass
+
+
+def _slots_finder(clazz, fields_set):
+ """
+ Recursively visits the class hierarchy to find all slots
+
+ :param clazz: Class to analyze
+ :param fields_set: Set where to store __slots__ content
+ """
+ # ... class level
+ try:
+ fields_set.update(clazz.__slots__)
+ except AttributeError:
+ pass
+
+ # ... parent classes level
+ for base_class in clazz.__bases__:
+ _slots_finder(base_class, fields_set)
+
+
+def _find_fields(obj):
+ """
+ Returns the names of the fields of the given object
+
+ :param obj: An object to analyze
+ :return: A set of field names
+ """
+ # Find fields...
+ fields = set()
+
+ # ... using __dict__
+ try:
+ fields.update(obj.__dict__)
+ except AttributeError:
+ pass
+
+ # ... using __slots__
+ _slots_finder(obj.__class__, fields)
+ return fields
+
+
+def dump(obj, serialize_method=None, ignore_attribute=None, ignore=None,
+ config=jsonrpclib.config.DEFAULT):
+ """
+ Transforms the given object into a JSON-RPC compliant form.
+ Converts beans into dictionaries with a __jsonclass__ entry.
+ Doesn't change primitive types.
+
+ :param obj: An object to convert
+ :param serialize_method: Custom serialization method
+ :param ignore_attribute: Name of the object attribute containing the names
+ of members to ignore
+ :param ignore: A list of members to ignore
+ :param config: A JSONRPClib Config instance
+ :return: A JSON-RPC compliant object
+ """
+ # Normalize arguments
+ serialize_method = serialize_method or config.serialize_method
+ ignore_attribute = ignore_attribute or config.ignore_attribute
+ ignore = ignore or []
+
+ # Parse / return default "types"...
+ # Apply additional types, override built-in types
+ # (reminder: config.serialize_handlers is a dict)
+ try:
+ serializer = config.serialize_handlers[type(obj)]
+ except KeyError:
+ # Not a serializer
+ pass
+ else:
+ if serializer is not None:
+ return serializer(obj, serialize_method, ignore_attribute,
+ ignore, config)
+
+ # Primitive
+ if isinstance(obj, utils.primitive_types):
+ return obj
+
+ # Iterative
+ elif isinstance(obj, utils.iterable_types):
+ # List, set or tuple
+ return [dump(item, serialize_method, ignore_attribute, ignore, config)
+ for item in obj]
+
+ elif isinstance(obj, utils.DictType):
+ # Dictionary
+ return dict((key, dump(value, serialize_method,
+ ignore_attribute, ignore, config))
+ for key, value in obj.items())
+
+ # It's not a standard type, so it needs __jsonclass__
+ module_name = inspect.getmodule(type(obj)).__name__
+ json_class = obj.__class__.__name__
+
+ if module_name not in ('', '__main__'):
+ json_class = '{0}.{1}'.format(module_name, json_class)
+
+ # Keep the class name in the returned object
+ return_obj = {"__jsonclass__": [json_class]}
+
+ # If a serialization method is defined..
+ if hasattr(obj, serialize_method):
+ # Params can be a dict (keyword) or list (positional)
+ # Attrs MUST be a dict.
+ serialize = getattr(obj, serialize_method)
+ params, attrs = serialize()
+ return_obj['__jsonclass__'].append(params)
+ return_obj.update(attrs)
+ return return_obj
+
+ else:
+ # Otherwise, try to figure it out
+ # Obviously, we can't assume to know anything about the
+ # parameters passed to __init__
+ return_obj['__jsonclass__'].append([])
+
+ # Prepare filtering lists
+ known_types = SUPPORTED_TYPES + tuple(config.serialize_handlers)
+ ignore_list = getattr(obj, ignore_attribute, []) + ignore
+
+ # Find fields and filter them by name
+ fields = _find_fields(obj)
+ fields.difference_update(ignore_list)
+
+ # Dump field values
+ attrs = {}
+ for attr_name in fields:
+ attr_value = getattr(obj, attr_name)
+ if isinstance(attr_value, known_types) and \
+ attr_value not in ignore_list:
+ attrs[attr_name] = dump(attr_value, serialize_method,
+ ignore_attribute, ignore, config)
+ return_obj.update(attrs)
+ return return_obj
+
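+# For illustration (module/class names are examples): a bean such as an
+# instance of mymodule.Point with attributes x=1 and y=2, and no custom
+# serialize method, dumps to
+#   {"__jsonclass__": ["mymodule.Point", []], "x": 1, "y": 2}
+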
+# ------------------------------------------------------------------------------
+
+
+def load(obj, classes=None):
+ """
+ If 'obj' is a dictionary containing a __jsonclass__ entry, converts the
+ dictionary item into a bean of this class.
+
+ :param obj: An object from a JSON-RPC dictionary
+ :param classes: A custom {name: class} dictionary
+ :return: The loaded object
+ """
+ # Primitive
+ if isinstance(obj, utils.primitive_types):
+ return obj
+
+ # List, set or tuple
+ elif isinstance(obj, utils.iterable_types):
+ # This comes from a JSON parser, so it can only be a list...
+ return [load(entry, classes) for entry in obj]
+
+ # Otherwise, it's a dict type
+ elif '__jsonclass__' not in obj:
+ return dict((key, load(value, classes)) for key, value in obj.items())
+
+ # It's a dictionary, and it has a __jsonclass__
+ orig_module_name = obj['__jsonclass__'][0]
+ params = obj['__jsonclass__'][1]
+
+ # Validate the module name
+ if not orig_module_name:
+ raise TranslationError('Module name empty.')
+
+ json_module_clean = re.sub(INVALID_MODULE_CHARS, '', orig_module_name)
+ if json_module_clean != orig_module_name:
+ raise TranslationError('Module name {0} has invalid characters.'
+ .format(orig_module_name))
+
+ # Load the class
+ json_module_parts = json_module_clean.split('.')
+ json_class = None
+ if classes and len(json_module_parts) == 1:
+ # Bare class name: it can't be imported, so it must be found
+ # in the given classes dictionary
+ try:
+ json_class = classes[json_module_parts[0]]
+ except KeyError:
+ raise TranslationError('Unknown class or module {0}.'
+ .format(json_module_parts[0]))
+
+ else:
+ # Module + class
+ json_class_name = json_module_parts.pop()
+ json_module_tree = '.'.join(json_module_parts)
+ try:
+ # Use fromlist to load the module itself, not the package
+ temp_module = __import__(json_module_tree,
+ fromlist=[json_class_name])
+ except ImportError:
+ raise TranslationError('Could not import {0} from module {1}.'
+ .format(json_class_name, json_module_tree))
+
+ try:
+ json_class = getattr(temp_module, json_class_name)
+ except AttributeError:
+ raise TranslationError("Unknown class {0}.{1}."
+ .format(json_module_tree, json_class_name))
+
+ # Create the object
+ new_obj = None
+ if isinstance(params, utils.ListType):
+ try:
+ new_obj = json_class(*params)
+ except TypeError as ex:
+ raise TranslationError("Error instantiating {0}: {1}"
+ .format(json_class.__name__, ex))
+
+ elif isinstance(params, utils.DictType):
+ try:
+ new_obj = json_class(**params)
+ except TypeError as ex:
+ raise TranslationError("Error instantiating {0}: {1}"
+ .format(json_class.__name__, ex))
+
+ else:
+ raise TranslationError("Constructor args must be a dict or a list, "
+ "not {0}".format(type(params).__name__))
+
+ # Remove the class information, as it must be ignored during the
+ # reconstruction of the object
+ raw_jsonclass = obj.pop('__jsonclass__')
+
+ for key, value in obj.items():
+ # Recursive loading
+ setattr(new_obj, key, load(value, classes))
+
+ # Restore the class information for further usage
+ obj['__jsonclass__'] = raw_jsonclass
+
+ return new_obj
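+
+# For illustration, load() reverses the dump() form above:
+#   load({"__jsonclass__": ["mymodule.Point", []], "x": 1, "y": 2})
+# imports mymodule, calls Point() with the stored constructor arguments and
+# then sets the remaining entries ('x', 'y') as attributes on the instance.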
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonrpc.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonrpc.py
new file mode 100755
index 00000000..8ee902b0
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonrpc.py
@@ -0,0 +1,1192 @@
+#!/usr/bin/python
+# -- Content-Encoding: UTF-8 --
+"""
+============================
+JSONRPC Library (jsonrpclib)
+============================
+
+This library is a JSON-RPC v.2 (proposed) implementation which
+follows the xmlrpclib API for portability between clients. It
+uses the same Server / ServerProxy, loads, dumps, etc. syntax,
+while providing features not present in XML-RPC like:
+
+* Keyword arguments
+* Notifications
+* Versioning
+* Batches and batch notifications
+
+Eventually, I'll add a SimpleXMLRPCServer compatible library,
+and other things to tie the thing off nicely. :)
+
+For a quick-start, just open a console and type the following,
+replacing the server address, method, and parameters
+appropriately.
+>>> import jsonrpclib
+>>> server = jsonrpclib.Server('http://localhost:8181')
+>>> server.add(5, 6)
+11
+>>> server._notify.add(5, 6)
+>>> batch = jsonrpclib.MultiCall(server)
+>>> batch.add(3, 50)
+>>> batch.add(2, 3)
+>>> batch._notify.add(3, 5)
+>>> batch()
+[53, 5]
+
+See https://github.com/tcalmant/jsonrpclib for more info.
+
+:authors: Josh Marshall, Thomas Calmant
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Module version
+__version_info__ = (0, 2, 5)
+__version__ = ".".join(str(x) for x in __version_info__)
+
+# Documentation strings format
+__docformat__ = "restructuredtext en"
+
+# ------------------------------------------------------------------------------
+
+# Library includes
+import jsonrpclib.config
+import jsonrpclib.utils as utils
+
+# Standard library
+import contextlib
+import logging
+import sys
+import uuid
+
+# Create the logger
+_logger = logging.getLogger(__name__)
+
+try:
+ # Python 3
+ # pylint: disable=F0401,E0611
+ from urllib.parse import splittype
+ from urllib.parse import splithost
+ from xmlrpc.client import Transport as XMLTransport
+ from xmlrpc.client import SafeTransport as XMLSafeTransport
+ from xmlrpc.client import ServerProxy as XMLServerProxy
+ from xmlrpc.client import _Method as XML_Method
+
+except ImportError:
+ # Python 2
+ # pylint: disable=F0401,E0611
+ from urllib import splittype
+ from urllib import splithost
+ from xmlrpclib import Transport as XMLTransport
+ from xmlrpclib import SafeTransport as XMLSafeTransport
+ from xmlrpclib import ServerProxy as XMLServerProxy
+ from xmlrpclib import _Method as XML_Method
+
+# ------------------------------------------------------------------------------
+# JSON library import
+
+# JSON class serialization
+from jsonrpclib import jsonclass
+
+try:
+ # pylint: disable=F0401,E0611
+ # Using cjson
+ import cjson
+ _logger.debug("Using cjson as JSON library")
+
+ # Declare cjson methods
+ def jdumps(obj, encoding='utf-8'):
+ """
+ Serializes ``obj`` to a JSON formatted string, using cjson.
+ """
+ return cjson.encode(obj)
+
+ def jloads(json_string):
+ """
+ Deserializes ``json_string`` (a string containing a JSON document)
+ to a Python object, using cjson.
+ """
+ return cjson.decode(json_string)
+
+except ImportError:
+ # pylint: disable=F0401,E0611
+ # Use json or simplejson
+ try:
+ import json
+ _logger.debug("Using json as JSON library")
+
+ except ImportError:
+ try:
+ import simplejson as json
+ _logger.debug("Using simplejson as JSON library")
+ except ImportError:
+ _logger.error("No supported JSON library found")
+ raise ImportError('You must have the cjson, json, or simplejson '
+ 'module(s) available.')
+
+ # Declare json methods
+ if sys.version_info[0] < 3:
+ def jdumps(obj, encoding='utf-8'):
+ """
+ Serializes ``obj`` to a JSON formatted string.
+ """
+ # Python 2 (explicit encoding)
+ return json.dumps(obj, encoding=encoding)
+
+ else:
+ # Python 3
+ def jdumps(obj, encoding='utf-8'):
+ """
+ Serializes ``obj`` to a JSON formatted string.
+ """
+ # Python 3 (the encoding parameter has been removed)
+ return json.dumps(obj)
+
+ def jloads(json_string):
+ """
+ Deserializes ``json_string`` (a string containing a JSON document)
+ to a Python object.
+ """
+ return json.loads(json_string)
+
+# ------------------------------------------------------------------------------
+# XMLRPClib re-implementations
+
+
+class ProtocolError(Exception):
+ """
+ JSON-RPC error
+
+ ProtocolError.args[0] can be:
+ * an error message (string)
+ * a (code, message) tuple
+ """
+ pass
+
+
+class AppError(ProtocolError):
+ """
+ Application error: the error code is not in the pre-defined ones
+
+ AppError.args[0][0]: Error code
+ AppError.args[0][1]: Error message or trace
+ AppError.args[0][2]: Associated data
+ """
+ def data(self):
+ """
+ Retrieves the value found in the 'data' entry of the error, or None
+
+ :return: The data associated to the error, or None
+ """
+ return self.args[0][2]
+
+
+class JSONParser(object):
+ """
+ Default JSON parser
+ """
+ def __init__(self, target):
+ """
+ Associates the target loader to the parser
+
+ :param target: a JSONTarget instance
+ """
+ self.target = target
+
+ def feed(self, data):
+ """
+ Feeds the associated target with the given data
+ """
+ self.target.feed(data)
+
+ def close(self):
+ """
+ Does nothing
+ """
+ pass
+
+
+class JSONTarget(object):
+ """
+ Unmarshalls stream data to a string
+ """
+ def __init__(self):
+ """
+ Sets up the unmarshaller
+ """
+ self.data = []
+
+ def feed(self, data):
+ """
+ Stores the given raw data into a buffer
+ """
+ # Store raw data, as a chunk may end in the middle of a multi-byte character
+ self.data.append(data)
+
+ def close(self):
+ """
+ Unmarshalls the buffered data
+ """
+ if not self.data:
+ return ''
+ else:
+ # Use type to have a valid join (str vs. bytes)
+ data = type(self.data[0])().join(self.data)
+ try:
+ # Convert the whole final string
+ data = utils.from_bytes(data)
+ except:
+ # Try a pass-through
+ pass
+
+ return data
+
+
+class TransportMixIn(object):
+ """ Just extends the XMLRPC transport where necessary. """
+ # for Python 2.7 support
+ _connection = None
+
+ # List of non-overridable headers
+ # Use the configuration to change the content-type
+ readonly_headers = ('content-length', 'content-type')
+
+ def __init__(self, config=jsonrpclib.config.DEFAULT, context=None):
+ """
+ Sets up the transport
+
+ :param config: A JSONRPClib Config instance
+ """
+ # Store the configuration
+ self._config = config
+
+ # Store the SSL context
+ self.context = context
+
+ # Set up the user agent
+ self.user_agent = config.user_agent
+
+ # Additional headers: list of dictionaries
+ self.additional_headers = []
+
+ def push_headers(self, headers):
+ """
+ Adds a dictionary of headers to the additional headers list
+
+ :param headers: A dictionary
+ """
+ self.additional_headers.append(headers)
+
+ def pop_headers(self, headers):
+ """
+ Removes the given dictionary from the additional headers list.
+ Also validates that given headers are on top of the stack
+
+ :param headers: Headers to remove
+ :raise AssertionError: The given dictionary is not the latest stored
+ in the additional headers list
+ """
+ assert self.additional_headers[-1] == headers
+ self.additional_headers.pop()
+
+ def emit_additional_headers(self, connection):
+ """
+ Sends the additional headers as-is in the request, filtering out
+ the read-only ones
+
+ :param connection: The request connection
+ """
+ additional_headers = {}
+
+ # Prepare the merged dictionary
+ for headers in self.additional_headers:
+ additional_headers.update(headers)
+
+ # Remove forbidden keys
+ for forbidden in self.readonly_headers:
+ additional_headers.pop(forbidden, None)
+
+ # When the same header has been defined several times, the value
+ # pushed last has priority
+ for key, value in additional_headers.items():
+ key = str(key)
+ if key.lower() not in self.readonly_headers:
+ # Only accept replaceable headers
+ connection.putheader(str(key), str(value))
+
+ def send_content(self, connection, request_body):
+ """
+ Completes the request headers and sends the request body of a JSON-RPC
+ request over a HTTPConnection
+
+ :param connection: An HTTPConnection object
+ :param request_body: JSON-RPC request body
+ """
+ # Convert the body first
+ request_body = utils.to_bytes(request_body)
+
+ # "static" headers
+ connection.putheader("Content-Type", self._config.content_type)
+ connection.putheader("Content-Length", str(len(request_body)))
+
+ # Emit additional headers here in order not to override content-length
+ self.emit_additional_headers(connection)
+
+ connection.endheaders()
+ if request_body:
+ connection.send(request_body)
+
+ def getparser(self):
+ """
+ Create an instance of the parser, and attach it to an unmarshalling
+ object. Return both objects.
+
+ :return: The parser and unmarshaller instances
+ """
+ target = JSONTarget()
+ return JSONParser(target), target
+
+
+class Transport(TransportMixIn, XMLTransport):
+ """
+ Mixed-in HTTP transport
+ """
+ pass
+
+
+class SafeTransport(TransportMixIn, XMLSafeTransport):
+ """
+ Mixed-in HTTPS transport
+ """
+ pass
+
+# ------------------------------------------------------------------------------
+
+
+class ServerProxy(XMLServerProxy):
+ """
+ Unfortunately, much more of this class has to be copied since
+ so much of it does the serialization.
+ """
+ def __init__(self, uri, transport=None, encoding=None,
+ verbose=0, version=None, headers=None, history=None,
+ config=jsonrpclib.config.DEFAULT, context=None):
+ """
+ Sets up the server proxy
+
+ :param uri: Request URI
+ :param transport: Custom transport handler
+ :param encoding: Specified encoding
+ :param verbose: Log verbosity level
+ :param version: JSON-RPC specification version
+ :param headers: Custom additional headers for each request
+ :param history: History object (for tests)
+ :param config: A JSONRPClib Config instance
+ :param context: The optional SSLContext to use
+ """
+ # Store the configuration
+ self._config = config
+ self.__version = version or config.version
+
+ schema, uri = splittype(uri)
+ if schema not in ('http', 'https'):
+ _logger.error("jsonrpclib only support http(s) URIs, not %s",
+ schema)
+ raise IOError('Unsupported JSON-RPC protocol.')
+
+ self.__host, self.__handler = splithost(uri)
+ if not self.__handler:
+ # Not sure if this is in the JSON spec?
+ self.__handler = '/'
+
+ if transport is None:
+ if schema == 'https':
+ transport = SafeTransport(config=config, context=context)
+ else:
+ transport = Transport(config=config)
+ self.__transport = transport
+
+ self.__encoding = encoding
+ self.__verbose = verbose
+ self.__history = history
+
+ # Global custom headers are injected into Transport
+ self.__transport.push_headers(headers or {})
+
+ def _request(self, methodname, params, rpcid=None):
+ """
+ Calls a method on the remote server
+
+ :param methodname: Name of the method to call
+ :param params: Method parameters
+ :param rpcid: ID of the remote call
+ :return: The parsed result of the call
+ """
+ request = dumps(params, methodname, encoding=self.__encoding,
+ rpcid=rpcid, version=self.__version,
+ config=self._config)
+ response = self._run_request(request)
+ check_for_errors(response)
+ return response['result']
+
+ def _request_notify(self, methodname, params, rpcid=None):
+ """
+ Calls a method as a notification
+
+ :param methodname: Name of the method to call
+ :param params: Method parameters
+ :param rpcid: ID of the remote call
+ """
+ request = dumps(params, methodname, encoding=self.__encoding,
+ rpcid=rpcid, version=self.__version, notify=True,
+ config=self._config)
+ response = self._run_request(request, notify=True)
+ check_for_errors(response)
+
+ def _run_request(self, request, notify=False):
+ """
+ Sends the given request to the remote server
+
+ :param request: The request to send
+ :param notify: Notification request flag (unused)
+ :return: The response as a parsed JSON object
+ """
+ if self.__history is not None:
+ self.__history.add_request(request)
+
+ response = self.__transport.request(
+ self.__host,
+ self.__handler,
+ request,
+ verbose=self.__verbose
+ )
+
+ # Here, the XMLRPC library translates a single list
+ # response to the single value -- should we do the
+ # same, and require a tuple / list to be passed to
+ # the response object, or expect the Server to be
+ # outputting the response appropriately?
+
+ if self.__history is not None:
+ self.__history.add_response(response)
+
+ if not response:
+ return None
+ else:
+ return_obj = loads(response, self._config)
+ return return_obj
+
+ def __getattr__(self, name):
+ """
+ Returns a callable object to call the remote service
+ """
+ # Same as original, just with new _Method reference
+ return _Method(self._request, name)
+
+ def __close(self):
+ """
+ Closes the transport layer
+ """
+ try:
+ self.__transport.close()
+ except AttributeError:
+ # Not available in Python 2.6
+ pass
+
+ def __call__(self, attr):
+ """
+ A workaround to get special attributes on the ServerProxy
+ without interfering with the magic __getattr__
+
+ (code from xmlrpclib in Python 2.7)
+ """
+ if attr == "close":
+ return self.__close
+
+ elif attr == "transport":
+ return self.__transport
+
+ raise AttributeError("Attribute {0} not found".format(attr))
+
+ @property
+ def _notify(self):
+ """
+ Like __getattr__, but sending a notification request instead of a call
+ """
+ return _Notify(self._request_notify)
+
+ @contextlib.contextmanager
+ def _additional_headers(self, headers):
+ """
+ Allows to specify additional headers, to be added inside the with
+ block.
+ Example of usage:
+
+ >>> with client._additional_headers({'X-Test' : 'Test'}) as new_client:
+ ... new_client.method()
+ ...
+ >>> # Here old headers are restored
+ """
+ self.__transport.push_headers(headers)
+ yield self
+ self.__transport.pop_headers(headers)
+
+# ------------------------------------------------------------------------------
+
+
+class _Method(XML_Method):
+ """
+ Some magic to bind a JSON-RPC method to an RPC server.
+ """
+ def __call__(self, *args, **kwargs):
+ """
+ Sends an RPC request and returns the unmarshalled result
+ """
+ if args and kwargs:
+ raise ProtocolError("Cannot use both positional and keyword "
+ "arguments (according to JSON-RPC spec.)")
+ if args:
+ return self.__send(self.__name, args)
+ else:
+ return self.__send(self.__name, kwargs)
+
+ def __getattr__(self, name):
+ """
+ Returns a Method object for nested calls
+ """
+ if name == "__name__":
+ return self.__name
+ return _Method(self.__send, "{0}.{1}".format(self.__name, name))
+
+
+class _Notify(object):
+ """
+ Same as _Method, but to send notifications
+ """
+ def __init__(self, request):
+ """
+ Sets the method to call to send a request to the server
+ """
+ self._request = request
+
+ def __getattr__(self, name):
+ """
+ Returns a Method object, to be called as a notification
+ """
+ return _Method(self._request, name)
+
+# ------------------------------------------------------------------------------
+# Batch implementation
+
+
+class MultiCallMethod(object):
+ """
+ Stores calls made to a MultiCall object for batch execution
+ """
+ def __init__(self, method, notify=False, config=jsonrpclib.config.DEFAULT):
+ """
+ Sets up the store
+
+ :param method: Name of the method to call
+ :param notify: Notification flag
+ :param config: Request configuration
+ """
+ self.method = method
+ self.params = []
+ self.notify = notify
+ self._config = config
+
+ def __call__(self, *args, **kwargs):
+ """
+ Normalizes call parameters
+ """
+ if kwargs and args:
+ raise ProtocolError('JSON-RPC does not support both ' +
+ 'positional and keyword arguments.')
+ if kwargs:
+ self.params = kwargs
+ else:
+ self.params = args
+
+ def request(self, encoding=None, rpcid=None):
+ """
+ Returns the request object as JSON-formatted string
+ """
+ return dumps(self.params, self.method, version=2.0,
+ encoding=encoding, rpcid=rpcid, notify=self.notify,
+ config=self._config)
+
+ def __repr__(self):
+ """
+ String representation
+ """
+ return str(self.request())
+
+ def __getattr__(self, method):
+ """
+ Updates the object for a nested call
+ """
+ self.method = "{0}.{1}".format(self.method, method)
+ return self
+
+
+class MultiCallNotify(object):
+ """
+ Same as MultiCallMethod but for notifications
+ """
+ def __init__(self, multicall, config=jsonrpclib.config.DEFAULT):
+ """
+ Sets up the store
+
+ :param multicall: The parent MultiCall instance
+ :param config: Request configuration
+ """
+ self.multicall = multicall
+ self._config = config
+
+ def __getattr__(self, name):
+ """
+ Returns the MultiCallMethod to use as a notification
+ """
+ new_job = MultiCallMethod(name, notify=True, config=self._config)
+ self.multicall._job_list.append(new_job)
+ return new_job
+
+
+class MultiCallIterator(object):
+ """
+ Iterates over the results of a MultiCall.
+ Exceptions are raised in response to JSON-RPC faults
+ """
+ def __init__(self, results):
+ """
+ Sets up the results store
+ """
+ self.results = results
+
+ def __get_result(self, item):
+ """
+ Checks for error and returns the "real" result stored in a MultiCall
+ result.
+ """
+ check_for_errors(item)
+ return item['result']
+
+ def __iter__(self):
+ """
+ Iterates over all results
+ """
+ for item in self.results:
+ yield self.__get_result(item)
+
+ def __getitem__(self, i):
+ """
+ Returns the i-th object of the results
+ """
+ return self.__get_result(self.results[i])
+
+ def __len__(self):
+ """
+ Returns the number of results stored
+ """
+ return len(self.results)
+
+
+class MultiCall(object):
+ """
+ server -> an object used to boxcar method calls, where server should be a
+ ServerProxy object.
+
+ Methods can be added to the MultiCall using normal
+ method call syntax e.g.:
+
+ multicall = MultiCall(server_proxy)
+ multicall.add(2,3)
+ multicall.get_address("Guido")
+
+ To execute the multicall, call the MultiCall object e.g.:
+
+ add_result, address = multicall()
+ """
+ def __init__(self, server, config=jsonrpclib.config.DEFAULT):
+ """
+ Sets up the multicall
+
+ :param server: A ServerProxy object
+ :param config: Request configuration
+ """
+ self._server = server
+ self._job_list = []
+ self._config = config
+
+ def _request(self):
+ """
+ Sends the request to the server and returns the responses
+
+ :return: A MultiCallIterator object
+ """
+ if len(self._job_list) < 1:
+ # Should we alert? This /is/ pretty obvious.
+ return
+ request_body = "[ {0} ]".format(
+ ','.join(job.request() for job in self._job_list))
+ responses = self._server._run_request(request_body)
+ del self._job_list[:]
+ if not responses:
+ responses = []
+ return MultiCallIterator(responses)
+
+ @property
+ def _notify(self):
+ """
+ Prepares a notification call
+ """
+ return MultiCallNotify(self, self._config)
+
+ def __getattr__(self, name):
+ """
+ Registers a method call
+ """
+ new_job = MultiCallMethod(name, config=self._config)
+ self._job_list.append(new_job)
+ return new_job
+
+ __call__ = _request
+
+# These lines conform to xmlrpclib's "compatibility" line.
+# Not really sure if we should include these, but oh well.
+Server = ServerProxy
+
+# ------------------------------------------------------------------------------
+
+
+class Fault(object):
+ """
+ JSON-RPC error class
+ """
+ def __init__(self, code=-32000, message='Server error', rpcid=None,
+ config=jsonrpclib.config.DEFAULT, data=None):
+ """
+ Sets up the error description
+
+ :param code: Fault code
+ :param message: Associated message
+ :param rpcid: Request ID
+ :param config: A JSONRPClib Config instance
+ :param data: Extra information added to an error description
+ """
+ self.faultCode = code
+ self.faultString = message
+ self.rpcid = rpcid
+ self.config = config
+ self.data = data
+
+ def error(self):
+ """
+ Returns the error as a dictionary
+
+ :returns: A {'code', 'message'} dictionary
+ """
+ return {'code': self.faultCode, 'message': self.faultString,
+ 'data': self.data}
+
+ def response(self, rpcid=None, version=None):
+ """
+ Returns the error as a JSON-RPC response string
+
+ :param rpcid: Forced request ID
+ :param version: JSON-RPC version
+ :return: A JSON-RPC response string
+ """
+ if not version:
+ version = self.config.version
+
+ if rpcid:
+ self.rpcid = rpcid
+
+ return dumps(self, methodresponse=True, rpcid=self.rpcid,
+ version=version, config=self.config)
+
+ def dump(self, rpcid=None, version=None):
+ """
+ Returns the error as a JSON-RPC response dictionary
+
+ :param rpcid: Forced request ID
+ :param version: JSON-RPC version
+ :return: A JSON-RPC response dictionary
+ """
+ if not version:
+ version = self.config.version
+
+ if rpcid:
+ self.rpcid = rpcid
+
+ return dump(self, is_response=True, rpcid=self.rpcid,
+ version=version, config=self.config)
+
+ def __repr__(self):
+ """
+ String representation
+ """
+ return '<Fault {0}: {1}>'.format(self.faultCode, self.faultString)
+
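+# For illustration (default 2.0 configuration): Fault(-32601, 'Method not
+# found').dump(rpcid=1) produces roughly
+#   {"jsonrpc": "2.0", "id": 1,
+#    "error": {"code": -32601, "message": "Method not found"}}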
+
+class Payload(object):
+ """
+ JSON-RPC content handler
+ """
+ def __init__(self, rpcid=None, version=None,
+ config=jsonrpclib.config.DEFAULT):
+ """
+ Sets up the JSON-RPC handler
+
+ :param rpcid: Request ID
+ :param version: JSON-RPC version
+ :param config: A JSONRPClib Config instance
+ """
+ if not version:
+ version = config.version
+
+ self.id = rpcid
+ self.version = float(version)
+
+ def request(self, method, params=None):
+ """
+ Prepares a method call request
+
+ :param method: Method name
+ :param params: Method parameters
+ :return: A JSON-RPC request dictionary
+ """
+ if not isinstance(method, utils.string_types):
+ raise ValueError('Method name must be a string.')
+
+ if not self.id:
+ # Generate a request ID
+ self.id = str(uuid.uuid4())
+
+ request = {'id': self.id, 'method': method}
+ if params or self.version < 1.1:
+ request['params'] = params or []
+
+ if self.version >= 2:
+ request['jsonrpc'] = str(self.version)
+
+ return request
+
+ def notify(self, method, params=None):
+ """
+ Prepares a notification request
+
+ :param method: Notification name
+ :param params: Notification parameters
+ :return: A JSON-RPC notification dictionary
+ """
+ # Prepare the request dictionary
+ request = self.request(method, params)
+
+ # Remove the request ID, as it's a notification
+ if self.version >= 2:
+ del request['id']
+ else:
+ request['id'] = None
+
+ return request
+
+ def response(self, result=None):
+ """
+ Prepares a response dictionary
+
+ :param result: The result of method call
+ :return: A JSON-RPC response dictionary
+ """
+ response = {'result': result, 'id': self.id}
+
+ if self.version >= 2:
+ response['jsonrpc'] = str(self.version)
+ else:
+ response['error'] = None
+
+ return response
+
+ def error(self, code=-32000, message='Server error.', data=None):
+ """
+ Prepares an error dictionary
+
+ :param code: Error code
+ :param message: Error message
+ :return: A JSON-RPC error dictionary
+ """
+ error = self.response()
+ if self.version >= 2:
+ del error['result']
+ else:
+ error['result'] = None
+ error['error'] = {'code': code, 'message': message}
+ if data is not None:
+ error['error']['data'] = data
+ return error
+
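+# For illustration, the three payload shapes in a 2.0 configuration
+# (generated request IDs elided, method name and values are examples):
+#   request:      {"jsonrpc": "2.0", "id": "...", "method": "add", "params": [5, 6]}
+#   notification: {"jsonrpc": "2.0", "method": "add", "params": [5, 6]}
+#   response:     {"jsonrpc": "2.0", "id": "...", "result": 11}
+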
+# ------------------------------------------------------------------------------
+
+
+def dump(params=None, methodname=None, rpcid=None, version=None,
+ is_response=None, is_notify=None, config=jsonrpclib.config.DEFAULT):
+ """
+ Prepares a JSON-RPC dictionary (request, notification, response or error)
+
+ :param params: Method parameters (if a method name is given) or a Fault
+ :param methodname: Method name
+ :param rpcid: Request ID
+ :param version: JSON-RPC version
+ :param is_response: If True, this is a response dictionary
+ :param is_notify: If True, this is a notification request
+ :param config: A JSONRPClib Config instance
+ :return: A JSON-RPC dictionary
+ """
+ # Default version
+ if not version:
+ version = config.version
+
+ if not is_response and params is None:
+ params = []
+
+ # Validate method name and parameters
+ valid_params = [utils.TupleType, utils.ListType, utils.DictType, Fault]
+ if is_response:
+ valid_params.append(type(None))
+
+ if isinstance(methodname, utils.string_types) and \
+ not isinstance(params, tuple(valid_params)):
+ """
+ If a method, and params are not in a listish or a Fault,
+ error out.
+ """
+ raise TypeError("Params must be a dict, list, tuple "
+ "or Fault instance.")
+
+ # Prepares the JSON-RPC content
+ payload = Payload(rpcid=rpcid, version=version)
+
+ if isinstance(params, Fault):
+ # Prepare an error dictionary
+ # pylint: disable=E1103
+ return payload.error(params.faultCode, params.faultString, params.data)
+
+ if not isinstance(methodname, utils.string_types) and not is_response:
+ # Neither a request nor a response
+ raise ValueError('Method name must be a string, or is_response '
+ 'must be set to True.')
+
+ if config.use_jsonclass:
+ # Use jsonclass to convert the parameters
+ params = jsonclass.dump(params, config=config)
+
+ if is_response:
+ # Prepare a response dictionary
+ if rpcid is None:
+ # A response must have a request ID
+ raise ValueError('A method response must have an rpcid.')
+ return payload.response(params)
+
+ if is_notify:
+ # Prepare a notification dictionary
+ return payload.notify(methodname, params)
+ else:
+ # Prepare a method call dictionary
+ return payload.request(methodname, params)
+
+
+def dumps(params=None, methodname=None, methodresponse=None,
+ encoding=None, rpcid=None, version=None, notify=None,
+ config=jsonrpclib.config.DEFAULT):
+ """
+ Prepares a JSON-RPC request/response string
+
+ :param params: Method parameters (if a method name is given) or a Fault
+ :param methodname: Method name
+ :param methodresponse: If True, this is a response dictionary
+ :param encoding: Result string encoding
+ :param rpcid: Request ID
+ :param version: JSON-RPC version
+ :param notify: If True, this is a notification request
+ :param config: A JSONRPClib Config instance
+ :return: A JSON-RPC dictionary
+ """
+ # Prepare the dictionary
+ request = dump(params, methodname, rpcid, version, methodresponse, notify,
+ config)
+
+ # Returns it as a JSON string
+ return jdumps(request, encoding=encoding or "UTF-8")
+
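+# For illustration: dumps([5, 6], 'add', rpcid=1) returns a JSON string such
+# as '{"jsonrpc": "2.0", "id": 1, "method": "add", "params": [5, 6]}'
+# (key order depends on the underlying JSON library).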
+
+def load(data, config=jsonrpclib.config.DEFAULT):
+ """
+ Loads a JSON-RPC request/response dictionary. Calls jsonclass to load beans
+
+ :param data: A JSON-RPC dictionary
+ :param config: A JSONRPClib Config instance (or None for default values)
+ :return: A parsed dictionary or None
+ """
+ if data is None:
+ # Notification
+ return None
+
+ # If parsing the data raised an error, the implementing server code
+ # should return something like the following:
+ # {'jsonrpc': '2.0', 'error': fault.error(), 'id': None}
+ if config.use_jsonclass:
+ # Convert beans
+ data = jsonclass.load(data, config.classes)
+
+ return data
+
+
+def loads(data, config=jsonrpclib.config.DEFAULT):
+ """
+ Loads a JSON-RPC request/response string. Calls jsonclass to load beans
+
+ :param data: A JSON-RPC string
+ :param config: A JSONRPClib Config instance (or None for default values)
+ :return: A parsed dictionary or None
+ """
+ if data == '':
+ # Notification
+ return None
+
+ # Parse the JSON dictionary
+ result = jloads(data)
+
+ # Load the beans
+ return load(result, config)
+
+# ------------------------------------------------------------------------------
+
+
+def check_for_errors(result):
+ """
+ Checks if a result dictionary signals an error
+
+ :param result: A result dictionary
+ :raise TypeError: Invalid parameter
+ :raise NotImplementedError: Unknown JSON-RPC version
+ :raise ValueError: Invalid dictionary content
+ :raise ProtocolError: An error occurred on the server side
+ :return: The result parameter
+ """
+ if not result:
+ # Notification
+ return result
+
+ if not isinstance(result, utils.DictType):
+ # Invalid argument
+ raise TypeError('Response is not a dict.')
+
+ if 'jsonrpc' in result and float(result['jsonrpc']) > 2.0:
+ # Unknown JSON-RPC version
+ raise NotImplementedError('JSON-RPC version not yet supported.')
+
+ if 'result' not in result and 'error' not in result:
+ # Invalid dictionary content
+ raise ValueError('Response does not have a result or error key.')
+
+ if 'error' in result and result['error']:
+ # Server-side error
+ if 'code' in result['error']:
+ # Code + Message
+ code = result['error']['code']
+ try:
+ # Get the message (jsonrpclib)
+ message = result['error']['message']
+ except KeyError:
+ # Get the trace (jabsorb)
+ message = result['error'].get('trace', '<no error message>')
+
+ if -32700 <= code <= -32000:
+ # Pre-defined errors
+ # See http://www.jsonrpc.org/specification#error_object
+ raise ProtocolError((code, message))
+ else:
+ # Application error
+ data = result['error'].get('data', None)
+ raise AppError((code, message, data))
+
+ elif isinstance(result['error'], dict) and len(result['error']) == 1:
+ # Error with a single entry ('reason', ...): use its content
+ # next(iter(...)) works on Python 2 and 3, where keys() is a view
+ # and cannot be indexed
+ error_key = next(iter(result['error']))
+ raise ProtocolError(result['error'][error_key])
+
+ else:
+ # Use the raw error content
+ raise ProtocolError(result['error'])
+
+ return result
+
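+# Usage sketch (server proxy and method name are illustrative): application
+# errors surface as AppError, whose data() exposes the server-provided
+# 'data' entry:
+#
+#   try:
+#       server.some_method()
+#   except AppError as ex:
+#       print(ex.data())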
+
+def isbatch(request):
+ """
+ Tests if the given request is a batch call, i.e. a list of multiple calls
+
+ :param request: a JSON-RPC request object
+ :return: True if the request is a batch call
+ """
+ if not isinstance(request, (utils.ListType, utils.TupleType)):
+ # Not a list: not a batch call
+ return False
+ elif len(request) < 1:
+ # Empty list: not a batch call
+ return False
+ elif not isinstance(request[0], utils.DictType):
+ # One of the requests is not a dictionary, i.e. a JSON Object
+ # therefore it is not a valid JSON-RPC request
+ return False
+ elif 'jsonrpc' not in request[0].keys():
+ # No "jsonrpc" version in the JSON object: not a request
+ return False
+
+ try:
+ version = float(request[0]['jsonrpc'])
+ except ValueError:
+ # Bad version of JSON-RPC
+ raise ProtocolError('"jsonrpc" key must be a float(able) value.')
+
+ if version < 2:
+ # Batch calls were not supported before JSON-RPC 2.0
+ return False
+
+ return True
+
+
+def isnotification(request):
+ """
+ Tests if the given request is a notification
+
+ :param request: A request dictionary
+ :return: True if the request is a notification
+ """
+ if 'id' not in request:
+ # 2.0 notification
+ return True
+
+ if request['id'] is None:
+ # 1.0 notification
+ return True
+
+ return False
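+
+# For illustration:
+#   isbatch([{"jsonrpc": "2.0", "id": 1, "method": "a"}])  ->  True
+#   isnotification({"method": "ping"})                     ->  True  (no "id")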
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/threadpool.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/threadpool.py
new file mode 100755
index 00000000..3919c105
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/threadpool.py
@@ -0,0 +1,490 @@
+#!/usr/bin/env python
+# -- Content-Encoding: UTF-8 --
+"""
+Cached thread pool, inspired by the Pelix/iPOPO Thread Pool
+
+:author: Thomas Calmant
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Documentation strings format
+__docformat__ = "restructuredtext en"
+
+# Module version
+__version_info__ = (0, 2, 5)
+__version__ = ".".join(str(x) for x in __version_info__)
+
+# ------------------------------------------------------------------------------
+
+# Standard library
+import logging
+import threading
+
+try:
+ # Python 3
+ # pylint: disable=F0401
+ import queue
+except ImportError:
+ # Python 2
+ # pylint: disable=F0401
+ import Queue as queue
+
+# ------------------------------------------------------------------------------
+
+
+class EventData(object):
+ """
+ A threading event with some associated data
+ """
+ def __init__(self):
+ """
+ Sets up the event
+ """
+ self.__event = threading.Event()
+ self.__data = None
+ self.__exception = None
+
+ @property
+ def data(self):
+ """
+ Returns the associated value
+ """
+ return self.__data
+
+ @property
+ def exception(self):
+ """
+ Returns the exception used to stop the wait() method
+ """
+ return self.__exception
+
+ def clear(self):
+ """
+ Clears the event
+ """
+ self.__event.clear()
+ self.__data = None
+ self.__exception = None
+
+ def is_set(self):
+ """
+ Checks if the event is set
+ """
+ return self.__event.is_set()
+
+ def set(self, data=None):
+ """
+ Sets the event
+ """
+ self.__data = data
+ self.__exception = None
+ self.__event.set()
+
+ def raise_exception(self, exception):
+ """
+ Raises an exception in wait()
+
+ :param exception: An Exception object
+ """
+ self.__data = None
+ self.__exception = exception
+ self.__event.set()
+
+ def wait(self, timeout=None):
+ """
+ Waits for the event or for the timeout
+
+ :param timeout: Wait timeout (in seconds)
+ :return: True if the event has been set, else False
+ """
+ # The 'or' part is for Python 2.6
+ result = self.__event.wait(timeout) or self.__event.is_set()
+ # pylint: disable=E0702
+ # Pylint seems to miss the "is None" check below
+ if self.__exception is None:
+ return result
+ else:
+ raise self.__exception
+
+
+class FutureResult(object):
+ """
+ An object to wait for the result of a threaded execution
+ """
+ def __init__(self, logger=None):
+ """
+ Sets up the FutureResult object
+
+ :param logger: The Logger to use in case of error (optional)
+ """
+ self._logger = logger or logging.getLogger(__name__)
+ self._done_event = EventData()
+ self.__callback = None
+ self.__extra = None
+
+ def __notify(self):
+ """
+ Notify the given callback about the result of the execution
+ """
+ if self.__callback is not None:
+ try:
+ self.__callback(self._done_event.data,
+ self._done_event.exception,
+ self.__extra)
+ except Exception as ex:
+ self._logger.exception("Error calling back method: %s", ex)
+
+ def set_callback(self, method, extra=None):
+ """
+ Sets a callback method, called once the result has been computed or in
+ case of exception.
+
+ The callback method must have the following signature:
+ ``callback(result, exception, extra)``.
+
+ :param method: The method to call back in the end of the execution
+ :param extra: Extra parameter to be given to the callback method
+ """
+ self.__callback = method
+ self.__extra = extra
+ if self._done_event.is_set():
+ # The execution has already finished
+ self.__notify()
+
+ def execute(self, method, args, kwargs):
+ """
+ Executes the given method and stores its result.
+ The result is considered "done" even if the method raises an exception.
+
+ :param method: The method to execute
+ :param args: Method positional arguments
+ :param kwargs: Method keyword arguments
+ :raise Exception: The exception raised by the method
+ """
+ # Normalize arguments
+ if args is None:
+ args = []
+
+ if kwargs is None:
+ kwargs = {}
+
+ try:
+ # Call the method
+ result = method(*args, **kwargs)
+ except Exception as ex:
+ # Something went wrong: propagate to the event and to the caller
+ self._done_event.raise_exception(ex)
+ raise
+ else:
+ # Store the result
+ self._done_event.set(result)
+ finally:
+ # In any case: notify the callback (if any)
+ self.__notify()
+
+ def done(self):
+ """
+ Returns True if the job has finished, else False
+ """
+ return self._done_event.is_set()
+
+ def result(self, timeout=None):
+ """
+ Waits up to timeout for the result of the threaded job.
+ Returns the result immediately if the job has already finished.
+
+ :param timeout: The maximum time to wait for a result (in seconds)
+ :raise OSError: The timeout was reached before the job finished
+ :raise Exception: The exception encountered during the call, if any
+ """
+ if self._done_event.wait(timeout):
+ return self._done_event.data
+ else:
+ raise OSError("Timeout raised")
+
+# ------------------------------------------------------------------------------
+
+
+class ThreadPool(object):
+ """
+ Executes the tasks stored in a FIFO in a thread pool
+ """
+ def __init__(self, max_threads, min_threads=1, queue_size=0, timeout=60,
+ logname=None):
+ """
+ Sets up the thread pool.
+
+ Idle threads are kept alive for ``timeout`` seconds (60 by default).
+
+ :param max_threads: Maximum size of the thread pool
+ :param min_threads: Minimum size of the thread pool
+ :param queue_size: Size of the task queue (0 for infinite)
+ :param timeout: Queue timeout (in seconds, 60s by default)
+ :param logname: Name of the logger
+ :raise ValueError: Invalid number of threads
+ """
+ # Validate parameters
+ try:
+ max_threads = int(max_threads)
+ if max_threads < 1:
+ raise ValueError("Pool size must be greater than 0")
+ except (TypeError, ValueError) as ex:
+ raise ValueError("Invalid pool size: {0}".format(ex))
+
+ try:
+ min_threads = int(min_threads)
+ if min_threads < 0:
+ min_threads = 0
+ elif min_threads > max_threads:
+ min_threads = max_threads
+ except (TypeError, ValueError) as ex:
+ raise ValueError("Invalid pool size: {0}".format(ex))
+
+ # The logger
+ self._logger = logging.getLogger(logname or __name__)
+
+ # The loop control event
+ self._done_event = threading.Event()
+ self._done_event.set()
+
+ # The task queue
+ try:
+ queue_size = int(queue_size)
+ except (TypeError, ValueError):
+ # Not a valid integer
+ queue_size = 0
+
+ self._queue = queue.Queue(queue_size)
+ self._timeout = timeout
+ self.__lock = threading.RLock()
+
+ # The thread pool
+ self._min_threads = min_threads
+ self._max_threads = max_threads
+ self._threads = []
+
+ # Thread count
+ self._thread_id = 0
+
+ # Current number of threads, active and alive
+ self.__nb_threads = 0
+ self.__nb_active_threads = 0
+
+ def start(self):
+ """
+ Starts the thread pool. Does nothing if the pool is already started.
+ """
+ if not self._done_event.is_set():
+ # Stop event not set: we're running
+ return
+
+ # Clear the stop event
+ self._done_event.clear()
+
+ # Compute the number of threads to start to handle pending tasks
+ nb_pending_tasks = self._queue.qsize()
+ if nb_pending_tasks > self._max_threads:
+ nb_threads = self._max_threads
+ elif nb_pending_tasks < self._min_threads:
+ nb_threads = self._min_threads
+ else:
+ nb_threads = nb_pending_tasks
+
+ # Create the threads
+ for _ in range(nb_threads):
+ self.__start_thread()
+
+ def __start_thread(self):
+ """
+ Starts a new thread, if possible
+ """
+ with self.__lock:
+ if self.__nb_threads >= self._max_threads:
+ # Can't create more threads
+ return False
+
+ if self._done_event.is_set():
+ # We're stopped: do nothing
+ return False
+
+ # Prepare thread and start it
+ name = "{0}-{1}".format(self._logger.name, self._thread_id)
+ self._thread_id += 1
+
+ thread = threading.Thread(target=self.__run, name=name)
+ thread.daemon = True
+ self._threads.append(thread)
+ thread.start()
+ return True
+
+ def stop(self):
+ """
+ Stops the thread pool. Does nothing if the pool is already stopped.
+ """
+ if self._done_event.is_set():
+ # Stop event set: we're stopped
+ return
+
+ # Set the stop event
+ self._done_event.set()
+
+ with self.__lock:
+ # Add something in the queue (to unlock the join())
+ try:
+ for _ in self._threads:
+ self._queue.put(self._done_event, True, self._timeout)
+ except queue.Full:
+ # There is already something in the queue
+ pass
+
+ # Copy the list of threads to wait for
+ threads = self._threads[:]
+
+ # Join threads outside the lock
+ for thread in threads:
+ while thread.is_alive():
+ # Wait 3 seconds
+ thread.join(3)
+ if thread.is_alive():
+ # Thread is still alive: something might be wrong
+ self._logger.warning("Thread %s is still alive...",
+ thread.name)
+
+ # Clear storage
+ del self._threads[:]
+ self.clear()
+
+ def enqueue(self, method, *args, **kwargs):
+ """
+ Queues a task in the pool
+
+ :param method: Method to call
+ :return: A FutureResult object, to get the result of the task
+ :raise ValueError: Invalid method
+ :raise Full: The task queue is full
+ """
+ if not hasattr(method, '__call__'):
+ raise ValueError("{0} has no __call__ member."
+ .format(method.__name__))
+
+ # Prepare the future result object
+ future = FutureResult(self._logger)
+
+ # Use a lock, as we might be "resetting" the queue
+ with self.__lock:
+ # Add the task to the queue
+ self._queue.put((method, args, kwargs, future), True,
+ self._timeout)
+
+ if self.__nb_active_threads == self.__nb_threads:
+ # All threads are taken: start a new one
+ self.__start_thread()
+
+ return future
+
+ def clear(self):
+ """
+ Empties the current queue content.
+ Returns once the queue has been emptied.
+ """
+ with self.__lock:
+ # Empty the current queue
+ try:
+ while True:
+ self._queue.get_nowait()
+ self._queue.task_done()
+ except queue.Empty:
+ # Queue is now empty
+ pass
+
+ # Wait for the tasks currently executed
+ self.join()
+
+ def join(self, timeout=None):
+ """
+ Waits for all the tasks to be executed
+
+ :param timeout: Maximum time to wait (in seconds)
+ :return: True if the queue has been emptied, else False
+ """
+ if self._queue.empty():
+ # Nothing to wait for...
+ return True
+ elif timeout is None:
+ # Use the original join
+ self._queue.join()
+ return True
+ else:
+ # Wait for the condition
+ with self._queue.all_tasks_done:
+ self._queue.all_tasks_done.wait(timeout)
+ return not bool(self._queue.unfinished_tasks)
+
+ def __run(self):
+ """
+ The main loop
+ """
+ with self.__lock:
+ self.__nb_threads += 1
+
+ while not self._done_event.is_set():
+ try:
+ # Wait for an action (blocking)
+ task = self._queue.get(True, self._timeout)
+ if task is self._done_event:
+ # Stop event in the queue: get out
+ self._queue.task_done()
+ with self.__lock:
+ self.__nb_threads -= 1
+ return
+ except queue.Empty:
+ # Nothing to do yet
+ pass
+ else:
+ with self.__lock:
+ self.__nb_active_threads += 1
+
+ # Extract elements
+ method, args, kwargs, future = task
+ try:
+ # Call the method
+ future.execute(method, args, kwargs)
+ except Exception as ex:
+ self._logger.exception("Error executing %s: %s",
+ method.__name__, ex)
+ finally:
+ # Mark the action as executed
+ self._queue.task_done()
+
+ # Thread is not active anymore
+ self.__nb_active_threads -= 1
+
+ # Clean up thread if necessary
+ with self.__lock:
+ if self.__nb_threads > self._min_threads:
+ # No more work for this thread, and we're above the
+ # minimum number of threads: stop this one
+ self.__nb_threads -= 1
+ return
+
+ with self.__lock:
+ # Thread stops
+ self.__nb_threads -= 1
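+
+# Illustrative usage sketch (not part of the original module): submit a task
+# to the pool and wait on its FutureResult:
+#
+#     pool = ThreadPool(max_threads=4, min_threads=1)
+#     pool.start()
+#     future = pool.enqueue(pow, 2, 10)
+#     print(future.result(timeout=5))  # -> 1024
+#     pool.stop()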
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/utils.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/utils.py
new file mode 100755
index 00000000..01b71fce
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/utils.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+# -- Content-Encoding: UTF-8 --
+"""
+Utility methods, for compatibility between Python versions
+
+:author: Thomas Calmant
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Module version
+__version_info__ = (0, 2, 5)
+__version__ = ".".join(str(x) for x in __version_info__)
+
+# Documentation strings format
+__docformat__ = "restructuredtext en"
+
+# ------------------------------------------------------------------------------
+
+import sys
+
+# ------------------------------------------------------------------------------
+
+if sys.version_info[0] < 3:
+ # Python 2
+ import types
+ try:
+ string_types = (
+ types.StringType,
+ types.UnicodeType
+ )
+ except NameError:
+ # Python built without unicode support
+ string_types = (types.StringType,)
+
+ numeric_types = (
+ types.IntType,
+ types.LongType,
+ types.FloatType
+ )
+
+ def to_bytes(string):
+ """
+ Converts the given string into bytes
+ """
+ if type(string) is unicode:
+ return str(string)
+ return string
+
+ def from_bytes(data):
+ """
+ Converts the given bytes into a string
+ """
+ if type(data) is str:
+ return data
+ return str(data)
+
+else:
+ # Python 3
+ string_types = (
+ bytes,
+ str
+ )
+
+ numeric_types = (
+ int,
+ float
+ )
+
+ def to_bytes(string):
+ """
+ Converts the given string into bytes
+ """
+ if type(string) is bytes:
+ return string
+ return bytes(string, "UTF-8")
+
+ def from_bytes(data):
+ """
+ Converts the given bytes into a string
+ """
+ if type(data) is str:
+ return data
+ return str(data, "UTF-8")
+
+# ------------------------------------------------------------------------------
+# Common
+
+DictType = dict
+
+ListType = list
+TupleType = tuple
+
+iterable_types = (
+ list,
+ set, frozenset,
+ tuple
+)
+
+value_types = (
+ bool,
+ type(None)
+)
+
+primitive_types = string_types + numeric_types + value_types
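+
+# Illustrative behaviour (not part of the original module):
+#
+#     to_bytes("abc")     # -> b"abc" on Python 3, "abc" on Python 2
+#     from_bytes(b"abc")  # -> "abc" on both versions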
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/PKG-INFO b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/PKG-INFO
new file mode 100755
index 00000000..9d0f3fca
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/PKG-INFO
@@ -0,0 +1,460 @@
+Metadata-Version: 1.1
+Name: jsonrpclib-pelix
+Version: 0.2.5
+Summary: This project is an implementation of the JSON-RPC v2.0 specification (backwards-compatible) as a client library, for Python 2.6+ and Python 3.This version is a fork of jsonrpclib by Josh Marshall, usable with Pelix remote services.
+Home-page: http://github.com/tcalmant/jsonrpclib/
+Author: Thomas Calmant
+Author-email: thomas.calmant+github@gmail.com
+License: Apache License 2.0
+Description: JSONRPClib (patched for Pelix)
+ ##############################
+
+ .. image:: https://pypip.in/license/jsonrpclib-pelix/badge.svg
+ :target: https://pypi.python.org/pypi/jsonrpclib-pelix/
+
+ .. image:: https://travis-ci.org/tcalmant/jsonrpclib.svg?branch=master
+ :target: https://travis-ci.org/tcalmant/jsonrpclib
+
+ .. image:: https://coveralls.io/repos/tcalmant/jsonrpclib/badge.svg?branch=master
+ :target: https://coveralls.io/r/tcalmant/jsonrpclib?branch=master
+
+
+ This library is an implementation of the JSON-RPC specification.
+ It supports both the original 1.0 specification, as well as the
+ new (proposed) 2.0 specification, which includes batch submission, keyword
+ arguments, etc.
+
+ It is licensed under the Apache License, Version 2.0
+ (http://www.apache.org/licenses/LICENSE-2.0.html).
+
+
+ About this version
+ ******************
+
+ This is a patched version of the original ``jsonrpclib`` project by
+ Josh Marshall, available at https://github.com/joshmarshall/jsonrpclib.
+
+ The suffix *-pelix* only indicates that this version works with Pelix Remote
+ Services, but it is **not** a Pelix specific implementation.
+
+ * This version adds support for Python 3, staying compatible with Python 2.
+ * It is now possible to use the dispatch_method argument while extending
+ the SimpleJSONRPCDispatcher, to use a custom dispatcher.
+ This allows Pelix Remote Services to use this package.
+ * It can use thread pools to control the number of threads spawned to handle
+ notification requests and client connections.
+ * The modifications added in other forks of this project have been added:
+
+ * From https://github.com/drdaeman/jsonrpclib:
+
+ * Improved JSON-RPC 1.0 support
+ * Less strict error response handling
+
+ * From https://github.com/tuomassalo/jsonrpclib:
+
+ * In case of a non-pre-defined error, raise an AppError and give access to
+ *error.data*
+
+ * From https://github.com/dejw/jsonrpclib:
+
+ * Custom headers can be sent with request and associated tests
+
+ * The support for Unix sockets has been removed, as it is not trivial to convert
+ to Python 3 (and I don't use them)
+ * This version cannot be installed with the original ``jsonrpclib``, as it uses
+ the same package name.
+
+
+ Summary
+ *******
+
+ This library implements the JSON-RPC 2.0 proposed specification in pure Python.
+ It is designed to be as compatible with the syntax of ``xmlrpclib`` as possible
+ (it extends where possible), so that projects using ``xmlrpclib`` could easily
+ be modified to use JSON and experiment with the differences.
+
+ It is backwards-compatible with the 1.0 specification, and supports all of the
+ new proposed features of 2.0, including:
+
+ * Batch submission (via MultiCall)
+ * Keyword arguments
+ * Notifications (both in a batch and 'normal')
+ * Class translation using the ``__jsonclass__`` key.
+
+ I've added a "SimpleJSONRPCServer", which is intended to emulate the
+ "SimpleXMLRPCServer" from the default Python distribution.
+
+
+ Requirements
+ ************
+
+ It supports ``cjson`` and ``simplejson``, and looks for the parsers in that
+ order (searching first for ``cjson``, then for the *built-in* ``json`` in 2.6+,
+ and then the ``simplejson`` external library).
+ One of these must be installed to use this library, although if you have a
+ standard distribution of 2.6+, you should already have one.
+ Keep in mind that ``cjson`` is supposed to be the quickest, I believe, so if
+ you are going for full-on optimization you may want to pick it up.
+
+ Since the library uses the ``contextlib`` module, you should have at least
+ Python 2.5 installed.
+
+
+ Installation
+ ************
+
+ You can install this from PyPI with one of the following commands (sudo
+ may be required):
+
+ .. code-block:: console
+
+ easy_install jsonrpclib-pelix
+ pip install jsonrpclib-pelix
+
+ Alternatively, you can download the source from the GitHub repository
+ at http://github.com/tcalmant/jsonrpclib and manually install it
+ with the following commands:
+
+ .. code-block:: console
+
+ git clone git://github.com/tcalmant/jsonrpclib.git
+ cd jsonrpclib
+ python setup.py install
+
+
+ SimpleJSONRPCServer
+ *******************
+
+ This is identical in usage (or should be) to the SimpleXMLRPCServer in the
+ Python standard library. The main differences are that it additionally
+ supports notifications, batch calls, class translation (if left on),
+ etc.
+ Note: The import line is slightly different from the regular SimpleXMLRPCServer,
+ since the SimpleJSONRPCServer is distributed within the ``jsonrpclib`` library.
+
+ .. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+
+ server = SimpleJSONRPCServer(('localhost', 8080))
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+ server.serve_forever()
+
+ To protect the server with SSL, use the following snippet:
+
+ .. code-block:: python
+
+ import ssl
+
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+
+ # Setup the SSL socket
+ server = SimpleJSONRPCServer(('localhost', 8080), bind_and_activate=False)
+ server.socket = ssl.wrap_socket(server.socket, certfile='server.pem',
+ server_side=True)
+ server.server_bind()
+ server.server_activate()
+
+ # ... register functions
+ # Start the server
+ server.serve_forever()
+
+
+ Notification Thread Pool
+ ========================
+
+ By default, notification calls are handled in the request handling thread.
+ It is possible to use a thread pool to handle them, by giving it to the server
+ using the ``set_notification_pool()`` method:
+
+ .. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+ from jsonrpclib.threadpool import ThreadPool
+
+ # Setup the thread pool: between 0 and 10 threads
+ pool = ThreadPool(max_threads=10, min_threads=0)
+
+ # Don't forget to start it
+ pool.start()
+
+ # Setup the server
+ server = SimpleJSONRPCServer(('localhost', 8080))
+ server.set_notification_pool(pool)
+
+ # Register methods
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+
+ try:
+ server.serve_forever()
+ finally:
+ # Stop the thread pool (let threads finish their current task)
+ pool.stop()
+ server.set_notification_pool(None)
+
+
+ Threaded server
+ ===============
+
+ It is also possible to use a thread pool to handle client requests, using
+ the ``PooledJSONRPCServer`` class.
+ By default, this class uses a pool of 0 to 30 threads. A custom pool can be given
+ with the ``thread_pool`` parameter of the class constructor.
+
+ The notification pool and the request pool are different: by default, a server
+ with a request pool doesn't have a notification pool.
+
+ .. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import PooledJSONRPCServer
+ from jsonrpclib.threadpool import ThreadPool
+
+ # Setup the notification and request pools
+ notif_pool = ThreadPool(max_threads=10, min_threads=0)
+ request_pool = ThreadPool(max_threads=50, min_threads=10)
+
+ # Don't forget to start them
+ notif_pool.start()
+ request_pool.start()
+
+ # Setup the server
+ server = PooledJSONRPCServer(('localhost', 8080),
+ thread_pool=request_pool)
+ server.set_notification_pool(notif_pool)
+
+ # Register methods
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+
+ try:
+ server.serve_forever()
+ finally:
+ # Stop the thread pools (let threads finish their current task)
+ request_pool.stop()
+ notif_pool.stop()
+ server.set_notification_pool(None)
+
+ Client Usage
+ ************
+
+ This is (obviously) taken from a console session.
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080')
+ >>> server.add(5,6)
+ 11
+ >>> server.add(x=5, y=10)
+ 15
+ >>> server._notify.add(5,6)
+ # No result returned...
+ >>> batch = jsonrpclib.MultiCall(server)
+ >>> batch.add(5, 6)
+ >>> batch.ping({'key':'value'})
+ >>> batch._notify.add(4, 30)
+ >>> results = batch()
+ >>> for result in results:
+ ...     print(result)
+ 11
+ {'key': 'value'}
+ # Note that there are only two responses -- this is according to spec.
+
+ # Clean up
+ >>> server('close')()
+
+ # Using client history
+ >>> history = jsonrpclib.history.History()
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080', history=history)
+ >>> server.add(5,6)
+ 11
+ >>> print(history.request)
+ {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
+ "method": "add", "params": [5, 6]}
+ >>> print(history.response)
+ {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
+ "result": 11}
+
+ # Clean up
+ >>> server('close')()
+
+ If you need 1.0 functionality, there are several places where you can request
+ it, but the simplest is to give a specific configuration to
+ ``jsonrpclib.ServerProxy``:
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> jsonrpclib.config.DEFAULT.version
+ 2.0
+ >>> config = jsonrpclib.config.Config(version=1.0)
+ >>> history = jsonrpclib.history.History()
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080', config=config,
+ history=history)
+ >>> server.add(7, 10)
+ 17
+ >>> print(history.request)
+ {"id": "827b2923-5b37-49a5-8b36-e73920a16d32",
+ "method": "add", "params": [7, 10]}
+ >>> print(history.response)
+ {"id": "827b2923-5b37-49a5-8b36-e73920a16d32", "error": null, "result": 17}
+ >>> server('close')()
+
+ The equivalent ``loads`` and ``dumps`` functions also exist, although with minor
+ modifications. The ``dumps`` arguments are almost identical, but it adds three
+ arguments: ``rpcid`` for the 'id' key, ``version`` to specify the JSON-RPC
+ compatibility, and ``notify`` if it's a request that you want to be a
+ notification.
+
+ Additionally, the ``loads`` method does not return the params and method like
+ ``xmlrpclib``, but instead a.) parses for errors, raising ProtocolErrors, and
+ b.) returns the entire structure of the request / response for manual parsing.
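+
+ For instance, a minimal sketch using the extra ``dumps`` arguments described
+ above (an illustration; check the exact signature against the installed
+ version):
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> request = jsonrpclib.jsonrpc.dumps([5, 6], 'add', rpcid='1', version=2.0)
+ >>> # "request" is now a JSON-RPC 2.0 request string for method "add"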
+
+
+ Additional headers
+ ******************
+
+ If your remote service requires custom headers in requests, you can pass them
+ as a ``headers`` keyword argument when creating the ``ServerProxy``:
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.ServerProxy("http://localhost:8080",
+ headers={'X-Test' : 'Test'})
+
+ You can also add request headers only for certain method invocations:
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.Server("http://localhost:8080")
+ >>> with server._additional_headers({'X-Test' : 'Test'}) as test_server:
+ ... test_server.ping(42)
+ ...
+ >>> # The X-Test header will no longer be sent in requests
+
+ Of course ``_additional_headers`` contexts can be nested as well.
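+
+ For instance (an illustrative sketch, following the example above):
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.Server("http://localhost:8080")
+ >>> with server._additional_headers({'X-Level-1': 'a'}) as s1:
+ ...     with s1._additional_headers({'X-Level-2': 'b'}) as s2:
+ ...         s2.ping(42)  # sent with both X-Level-1 and X-Level-2
+ ...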
+
+
+ Class Translation
+ *****************
+
+ I've recently added "automatic" class translation support, although it is
+ turned off by default. This can be devastatingly slow if improperly used, so
+ the following is just a short list of things to keep in mind when using it.
+
+ * Keep It (the object) Simple Stupid. (for exceptions, keep reading.)
+ * Do not require init params (for exceptions, keep reading)
+ * Getter properties without setters could be dangerous (read: not tested)
+
+ If any of the above are issues, use the _serialize method. (see usage below)
+ The server and client must BOTH have the ``use_jsonclass`` configuration item
+ turned on, and they must both have access to the same libraries used by the
+ objects, for this to work.
+
+ If you have excessively nested arguments, it would be better to turn off the
+ translation and manually invoke it on specific objects using
+ ``jsonrpclib.jsonclass.dump`` / ``jsonrpclib.jsonclass.load`` (since the default
+ behavior recursively goes through attributes and lists / dicts / tuples).
+
+ Sample file: *test_obj.py*
+
+ .. code-block:: python
+
+ # This object is /very/ simple, and the system will look through the
+ # attributes and serialize what it can.
+ class TestObj(object):
+ foo = 'bar'
+
+ # This object requires __init__ params, so it uses the _serialize method
+ # and returns a tuple of init params and attribute values (the init params
+ # can be a dict or a list, but the attribute values must be a dict.)
+ class TestSerial(object):
+ foo = 'bar'
+ def __init__(self, *args):
+ self.args = args
+ def _serialize(self):
+ return (self.args, {'foo':self.foo,})
+
+ * Sample usage
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> import test_obj
+
+ # History is used only to print the serialized form of beans
+ >>> history = jsonrpclib.history.History()
+ >>> testobj1 = test_obj.TestObj()
+ >>> testobj2 = test_obj.TestSerial()
+ >>> server = jsonrpclib.Server('http://localhost:8080', history=history)
+
+ # The 'ping' just returns whatever is sent
+ >>> ping1 = server.ping(testobj1)
+ >>> ping2 = server.ping(testobj2)
+
+ >>> print(history.request)
+ {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
+ "method": "ping", "params": [{"__jsonclass__":
+ ["test_obj.TestSerial", []], "foo": "bar"}
+ ]}
+ >>> print(history.response)
+ {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
+ "result": {"__jsonclass__": ["test_obj.TestSerial", []], "foo": "bar"}}
+
+ This behavior is turned on by default. To deactivate it, just set the
+ ``use_jsonclass`` member of a server ``Config`` to False.
+ If you want to use a per-class serialization method, set its name in the
+ ``serialize_method`` member of a server ``Config``.
+ Finally, if you are using classes that you have defined in the implementation
+ (as in, not a separate library), you'll need to add those (on BOTH the server
+ and the client) using the ``config.classes.add()`` method.
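+
+ A minimal configuration sketch tying these three knobs together (an
+ illustration, reusing the *test_obj* module shown above):
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> import test_obj
+ >>> config = jsonrpclib.config.Config()
+ >>> config.use_jsonclass = True              # leave translation on
+ >>> config.serialize_method = '_serialize'   # per-class serializer name
+ >>> config.classes.add(test_obj.TestSerial)  # register the custom class
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080', config=config)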
+
+ Feedback on this "feature" is very, VERY much appreciated.
+
+ Why JSON-RPC?
+ *************
+
+ In my opinion, there are several reasons to choose JSON over XML for RPC:
+
+ * Much simpler to read (I suppose this is opinion, but I know I'm right. :)
+ * Size / Bandwidth - Main reason, a JSON object representation is just much smaller.
+ * Parsing - JSON should be much quicker to parse than XML.
+ * Easy class passing with ``jsonclass`` (when enabled)
+
+ In the interest of being fair, there are also a few reasons to choose XML
+ over JSON:
+
+ * Your server doesn't do JSON (rather obvious)
+ * Wider XML-RPC support across APIs (can we change this? :))
+ * Libraries are more established, i.e. more stable (Let's change this too.)
+
+ Tests
+ *****
+
+ Tests are an almost-verbatim drop from the JSON-RPC specification 2.0 page.
+ They can be run using *unittest* or *nosetests*:
+
+ .. code-block:: console
+
+ python -m unittest discover tests
+ python3 -m unittest discover tests
+ nosetests tests
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.0
+Classifier: Programming Language :: Python :: 3.1
+Classifier: Programming Language :: Python :: 3.2
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/SOURCES.txt b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/SOURCES.txt
new file mode 100755
index 00000000..f5714032
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/SOURCES.txt
@@ -0,0 +1,17 @@
+LICENSE.txt
+MANIFEST.in
+README.rst
+setup.cfg
+setup.py
+jsonrpclib/SimpleJSONRPCServer.py
+jsonrpclib/__init__.py
+jsonrpclib/config.py
+jsonrpclib/history.py
+jsonrpclib/jsonclass.py
+jsonrpclib/jsonrpc.py
+jsonrpclib/threadpool.py
+jsonrpclib/utils.py
+jsonrpclib_pelix.egg-info/PKG-INFO
+jsonrpclib_pelix.egg-info/SOURCES.txt
+jsonrpclib_pelix.egg-info/dependency_links.txt
+jsonrpclib_pelix.egg-info/top_level.txt \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/dependency_links.txt b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/dependency_links.txt
new file mode 100755
index 00000000..8b137891
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/top_level.txt b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/top_level.txt
new file mode 100755
index 00000000..1410b2ff
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/top_level.txt
@@ -0,0 +1 @@
+jsonrpclib
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.cfg b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.cfg
new file mode 100755
index 00000000..26c67942
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.cfg
@@ -0,0 +1,8 @@
+[bdist_wheel]
+universal = 1
+
+[egg_info]
+tag_date = 0
+tag_svn_revision = 0
+tag_build =
+
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.py
new file mode 100755
index 00000000..a64f2fb0
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+# -- Content-Encoding: UTF-8 --
+"""
+Installation script
+
+:authors: Josh Marshall, Thomas Calmant
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Module version
+__version_info__ = (0, 2, 5)
+__version__ = ".".join(str(x) for x in __version_info__)
+
+# Documentation strings format
+__docformat__ = "restructuredtext en"
+
+# ------------------------------------------------------------------------------
+
+import sys
+
+try:
+ from setuptools import setup
+except ImportError:
+ from distutils.core import setup
+
+# ------------------------------------------------------------------------------
+
+setup(
+ name="jsonrpclib-pelix",
+ version=__version__,
+ license="Apache License 2.0",
+ author="Thomas Calmant",
+ author_email="thomas.calmant+github@gmail.com",
+ url="http://github.com/tcalmant/jsonrpclib/",
+ description=
+ "This project is an implementation of the JSON-RPC v2.0 specification "
+ "(backwards-compatible) as a client library, for Python 2.6+ and Python 3."
+ "This version is a fork of jsonrpclib by Josh Marshall, "
+ "usable with Pelix remote services.",
+ long_description=open("README.rst").read(),
+ packages=["jsonrpclib"],
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.0',
+ 'Programming Language :: Python :: 3.1',
+ 'Programming Language :: Python :: 3.2',
+ 'Programming Language :: Python :: 3.3',
+ 'Programming Language :: Python :: 3.4'],
+ tests_require=['unittest2'] if sys.version_info < (2, 7) else []
+)
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ACKS b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ACKS
new file mode 100755
index 00000000..44519d17
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ACKS
@@ -0,0 +1,6 @@
+Thanks to the following people for help with lockfile.
+
+ Scott Dial
+ Ben Finney
+ Frank Niessink
+ Konstantin Veretennicov
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/AUTHORS b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/AUTHORS
new file mode 100755
index 00000000..fda721cd
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/AUTHORS
@@ -0,0 +1,5 @@
+(no author) <(no author)>
+Elmo Todurov <elmo.todurov@skype.net>
+Julien Danjou <julien@danjou.info>
+Skip Montanaro <skip@pobox.com>
+skip.montanaro <skip.montanaro@gmail.com>
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ChangeLog b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ChangeLog
new file mode 100755
index 00000000..3ba36a7d
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ChangeLog
@@ -0,0 +1,165 @@
+CHANGES
+=======
+
+0.10.2
+------
+
+* Fix package name
+
+0.10.1
+------
+
+* Add missing cover env in tox
+
+0.10.0
+------
+
+* Fix documentation bug report address
+* Add py34 in tox
+* Remove old diff file
+* Add .gitreview, tox targets and use pbr
+* fix for timeout=0
+* remove 2.5, 3.1 and 3.4 from the list for the time being - may get added back later
+* Bugfix: locking two different files in the same directory caused an error during the last unlock
+* typo
+
+0.9.1
+-----
+
+* ignore dist dir
+* update to python 3 imports
+* python 3 tweaks
+* python 3 tweaks
+* ignore Emacs backups
+* note nose as a dependency
+* remove this test file - way incompatible with current code
+* stuff to ignore
+* Add py33, py34, delete py24, py25
+* Update source location
+* merge delete
+* merge delete
+* more merge stuff
+* this didn't come across with svn merge
+* all screwed up now
+* merge
+* merge
+* Make it clear that the path and threaded attributes to SymlinkLockFile and MkdirLockFile have the same constraints as for LinkLockFile. In particular, the directory which will contain path must be writable
+* add pidlockfile test stuff from Ben Finney - still a few problems - maybe I can get him to solve them :-)
+* ignore Sphinx build directory
+* Catch up on a little documentation
+* adapt decorator patch from issue 5
+* Allow timeout in constructor - resolves issue 3
+* add info to raise statements - from issue 6, yyurevich@jellycrystal.com
+* add useful repr() - from issue 6, yyurevich@jellycrystal.com
+* add symlinklockfile module
+* + py24
+* good for the branch? must be good for the trunk
+* add tox stuff, ignore dist dir
+* new version, move to Google Code
+*
+*
+* * Thread support is currently broken. This is more likely because of problems in this module, but suppress those tests for now just the same
+* By the nature of what it's trying to do PIDLockFile doesn't support threaded operation
+* defer creating testdb until we've instantiated a SQLiteLockFile instance
+* tweak unique_name slightly
+* Specify mode in octal
+* update to match pidlockfile change
+* missing import
+* I think I finally have this correct
+* patch pidlockfile module too
+* use abs import here as well
+* *argh*
+* Update to elide new import syntax
+* * Move future import for division where it's used. * Use __absolute_import__ to spell relative imports
+* Some PIDLockFile tests are failing. Check in anyway so others can consider the problems
+* Account for fact that Thread objects in Python 2.4 and earlier do not have an ident attribute
+* Make this a daemon thread so if things go awry the test run won't hang
+* * Add pidlockfile (not quite working properly) * Rearrange MANIFEST.in slightly to include test directory
+* Split those test methods which try both threaded and non-threaded naming schemes. More to do. Obviously you need to have test cases when using the non-threaded naming scheme from multiple threads
+* acknowledge Ben and Frank, alphabetize list
+* I don't think these are needed any longer - they came back during the hg->svn conversion
+* grand renaming: "filelock" -> "lockfile" & "FileLock" -> "LockFile"
+* Update for packages
+* Avoid using the backwards compatibility functions for FileLock. That object is not deprecated
+* how does the test dir keep sneaking into MANIFEST? also, include 2.4.diff in dist
+* update for new structure, use of ident attr
+* adjust build setup
+* move test helpers into test dir
+* first cut at packagized lockfile
+* Protect some more complex locking stuff so if they fail we don't deadlock
+* merge r75 from head
+* * One implementation of tname, not two - make it an instance attribute as a result
+* beginnings of a packagized lockfile
+* get the structure right
+* start over with the branches..
+* hmmm
+* hmmm
+* get us back to lockfile 0.8
+* r72 from hg
+* r70 from hg
+* r69 from hg
+* r68 from hg
+* r67 from hg
+* r66 from hg
+* r65 from hg
+* r64 from hg
+* r64 from hg
+* r63 from hg
+* r62 from hg
+* r61 from hg
+* r60 from hg
+* r59 from hg
+* r58 from hg
+* r57 from hg
+* r56 from hg
+* r55 from hg
+* r54 from hg
+* r53 from hg
+* r52 from hg
+* r51 from hg
+* r50 from hg
+* r49 from hg
+* r47 from hg
+* r46 from hg
+* r45 from hg
+* r44 from hg
+* r43 from hg
+* r42 from hg
+* r41 from hg
+* r38 from hg
+* r37 from hg
+* r36 from hg
+* r35 from hg
+* r34 from hg
+* r33 from hg
+* r32 from hg
+* r31 from hg
+* r29 from hg
+* r28 from hg
+* r27 from hg
+* r26 from hg
+* r25 from hg
+* r24 from hg
+* r23 from hg
+* r22 from hg
+* r21 from hg
+* r20 from hg
+* r19 from hg
+* r18 from hg
+* r16 from hg
+* r14 from hg
+* r13 from hg
+* r12 from hg
+* r11 from hg
+* r10 from hg
+* r9 from hg
+* r8 from hg
+* r7 from hg
+* r6 from hg
+* r5 from hg
+* r4 from hg
+* r3 from hg
+* r2 from hg
+* r1 from hg
+* r0 from hg
+* Initial directory structure
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/LICENSE b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/LICENSE
new file mode 100755
index 00000000..610c0793
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/LICENSE
@@ -0,0 +1,21 @@
+This is the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+Copyright (c) 2007 Skip Montanaro.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/PKG-INFO b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/PKG-INFO
new file mode 100755
index 00000000..9f72376f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/PKG-INFO
@@ -0,0 +1,51 @@
+Metadata-Version: 1.1
+Name: lockfile
+Version: 0.10.2
+Summary: Platform-independent file locking module
+Home-page: http://launchpad.net/pylockfile
+Author: OpenStack
+Author-email: openstack-dev@lists.openstack.org
+License: UNKNOWN
+Description: The lockfile package exports a LockFile class which provides a simple API for
+ locking files. Unlike the Windows msvcrt.locking function, the fcntl.lockf
+ and flock functions, and the deprecated posixfile module, the API is
+ identical across both Unix (including Linux and Mac) and Windows platforms.
+ The lock mechanism relies on the atomic nature of the link (on Unix) and
+ mkdir (on Windows) system calls. An implementation based on SQLite is also
+ provided, more as a demonstration of the possibilities it provides than as
+ production-quality code.
+
+ Note: In version 0.9 the API changed in two significant ways:
+
+ * It changed from a module defining several classes to a package containing
+ several modules, each defining a single class.
+
+ * Where classes had been named SomethingFileLock before the last two words
+ have been reversed, so that class is now SomethingLockFile.
+
+ The previous module-level definitions of LinkFileLock, MkdirFileLock and
+ SQLiteFileLock will be retained until the 1.0 release.
+
+ Available on GitHub from:
+
+ git://github.com/smontanaro/pylockfile.git
+
+ To install:
+
+ python setup.py install
+
+
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Operating System :: MacOS
+Classifier: Operating System :: Microsoft :: Windows :: Windows NT/2000
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/README b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/README
new file mode 100755
index 00000000..5f7acbc4
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/README
@@ -0,0 +1,27 @@
+The lockfile package exports a LockFile class which provides a simple API for
+locking files. Unlike the Windows msvcrt.locking function, the fcntl.lockf
+and flock functions, and the deprecated posixfile module, the API is
+identical across both Unix (including Linux and Mac) and Windows platforms.
+The lock mechanism relies on the atomic nature of the link (on Unix) and
+mkdir (on Windows) system calls. An implementation based on SQLite is also
+provided, more as a demonstration of the possibilities it provides than as
+production-quality code.
+
+Note: In version 0.9 the API changed in two significant ways:
+
+ * It changed from a module defining several classes to a package containing
+ several modules, each defining a single class.
+
+ * Where classes had been named SomethingFileLock before the last two words
+ have been reversed, so that class is now SomethingLockFile.
+
+The previous module-level definitions of LinkFileLock, MkdirFileLock and
+SQLiteFileLock will be retained until the 1.0 release.
+
+Available on GitHub from:
+
+ git://github.com/smontanaro/pylockfile.git
+
+To install:
+
+ python setup.py install
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/RELEASE-NOTES b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/RELEASE-NOTES
new file mode 100755
index 00000000..8b452ed1
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/RELEASE-NOTES
@@ -0,0 +1,50 @@
+Version 0.9.1
+=============
+
+* This release moves the source location to Google Code.
+
+* Threaded support is currently broken. (It might not actually be broken.
+ It might just be the tests which are broken.)
+
+Version 0.9
+===========
+
+* The lockfile module was reorganized into a package.
+
+* The names of the three main classes have changed as follows:
+
+ LinkFileLock -> LinkLockFile
+ MkdirFileLock -> MkdirLockFile
+ SQLiteFileLock -> SQLiteLockFile
+
+* A PIDLockFile class was added.
+
+Version 0.3
+===========
+
+* Fix 2.4.diff file error.
+
+* More documentation updates.
+
+Version 0.2
+===========
+
+* Added 2.4.diff file to patch lockfile to work with Python 2.4 (removes use
+ of with statement).
+
+* Renamed _FileLock base class to LockBase to expose it (and its docstrings)
+ to pydoc.
+
+* Got rid of time.sleep() calls in tests (thanks to Konstantin
+ Veretennicov).
+
+* Use thread.get_ident() as the thread discriminator.
+
+* Updated documentation a bit.
+
+* Added RELEASE-NOTES.
+
+Version 0.1
+===========
+
+* First release - All basic functionality there.
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/Makefile b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/Makefile
new file mode 100755
index 00000000..1b1e8d28
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/Makefile
@@ -0,0 +1,73 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d .build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html web pickle htmlhelp latex changes linkcheck
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " pickle to make pickle files (usable by e.g. sphinx-web)"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " changes to make an overview over all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+
+clean:
+ -rm -rf .build/*
+
+html:
+ mkdir -p .build/html .build/doctrees
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) .build/html
+ @echo
+ @echo "Build finished. The HTML pages are in .build/html."
+
+pickle:
+ mkdir -p .build/pickle .build/doctrees
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) .build/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files or run"
+ @echo " sphinx-web .build/pickle"
+ @echo "to start the sphinx-web server."
+
+web: pickle
+
+htmlhelp:
+ mkdir -p .build/htmlhelp .build/doctrees
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) .build/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in .build/htmlhelp."
+
+latex:
+ mkdir -p .build/latex .build/doctrees
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) .build/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in .build/latex."
+ @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+ "run these through (pdf)latex."
+
+changes:
+ mkdir -p .build/changes .build/doctrees
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) .build/changes
+ @echo
+ @echo "The overview file is in .build/changes."
+
+linkcheck:
+ mkdir -p .build/linkcheck .build/doctrees
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) .build/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in .build/linkcheck/output.txt."
+
+html.zip: html
+ (cd .build/html ; zip -r ../../$@ *)
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/conf.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/conf.py
new file mode 100755
index 00000000..623edcb5
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/conf.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+#
+# lockfile documentation build configuration file, created by
+# sphinx-quickstart on Sat Sep 13 17:54:17 2008.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# The contents of this file are pickled, so don't put values in the namespace
+# that aren't pickleable (module imports are okay, they're removed automatically).
+#
+# All configuration values have a default value; values that are commented out
+# serve to show the default value.
+
+import sys, os
+
+# If your extensions are in another directory, add it here. If the directory
+# is relative to the documentation root, use os.path.abspath to make it
+# absolute, like shown here.
+#sys.path.append(os.path.abspath('some/directory'))
+
+# General configuration
+# ---------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = []
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['.templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General substitutions.
+project = 'lockfile'
+copyright = '2008, Skip Montanaro'
+
+# The default replacements for |version| and |release|, also used in various
+# other places throughout the built documents.
+#
+# The short X.Y version.
+version = '0.3'
+# The full version, including alpha/beta/rc tags.
+release = '0.3'
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directories, that shouldn't be searched
+# for source files.
+#exclude_dirs = []
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+
+# Options for HTML output
+# -----------------------
+
+# The style sheet to use for HTML and HTML Help pages. A file of that name
+# must exist either in Sphinx' static/ path, or in one of the custom paths
+# given in html_static_path.
+html_style = 'default.css'
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (within the static path) to place at the top of
+# the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+#html_static_path = ['.static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, the reST sources are included in the HTML build as _sources/<name>.
+#html_copy_source = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'lockfiledoc'
+
+
+# Options for LaTeX output
+# ------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, document class [howto/manual]).
+latex_documents = [
+ ('lockfile', 'lockfile.tex', 'lockfile Documentation',
+ 'Skip Montanaro', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/index.rst b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/index.rst
new file mode 100755
index 00000000..f76173dc
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/index.rst
@@ -0,0 +1,275 @@
+
+:mod:`lockfile` --- Platform-independent file locking
+=====================================================
+
+.. module:: lockfile
+ :synopsis: Platform-independent file locking
+.. moduleauthor:: Skip Montanaro <skip@pobox.com>
+.. sectionauthor:: Skip Montanaro <skip@pobox.com>
+
+
+.. note::
+
+ This package is pre-release software. Between versions 0.8 and 0.9 it
+ was changed from a module to a package. It is quite possible that the
+ API and implementation will change again in important ways as people test
+ it and provide feedback and bug fixes. In particular, if the mkdir-based
+ locking scheme is sufficient for both Windows and Unix platforms, the
+ link-based scheme may be deleted so that only a single locking scheme is
+ used, providing cross-platform lockfile cooperation.
+
+.. note::
+
+ The implementation uses the `with` statement, both in the tests and in the
+ main code, so will only work out-of-the-box with Python 2.5 or later.
+ However, the use of the `with` statement is minimal, so if you apply the
+ patch in the included 2.4.diff file you can use it with Python 2.4. It's
+ possible that it will work in Python 2.3 with that patch applied as well,
+ though the doctest code relies on APIs new in 2.4, so will have to be
+ rewritten somewhat to allow testing on 2.3. As they say, patches welcome.
+ ``;-)``
+
+The :mod:`lockfile` package exports a :class:`LockFile` class which provides
+a simple API for locking files. Unlike the Windows :func:`msvcrt.locking`
+function, the Unix :func:`fcntl.flock`, :func:`fcntl.lockf` and the
+deprecated :mod:`posixfile` module, the API is identical across both Unix
+(including Linux and Mac) and Windows platforms. The lock mechanism relies
+on the atomic nature of the :func:`link` (on Unix) and :func:`mkdir` (on
+Windows) system calls. It also contains several lock-method-specific
+modules: :mod:`lockfile.linklockfile`, :mod:`lockfile.mkdirlockfile`, and
+:mod:`lockfile.sqlitelockfile`, each one exporting a single class. For
+backwards compatibility with versions before 0.9 the :class:`LinkFileLock`,
+:class:`MkdirFileLock` and :class:`SQLiteFileLock` objects are exposed as
+attributes of the top-level lockfile package, though this use was deprecated
+starting with version 0.9 and will be removed in version 1.0.
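+
+As a quick illustration, the preferred spelling (0.9 and later) imports each
+class from its own module; the deprecated top-level factories still work,
+but emit a ``DeprecationWarning``::
+
+    from lockfile.mkdirlockfile import MkdirLockFile   # preferred
+    from lockfile import MkdirFileLock                 # deprecated factory
+
+    lock = MkdirLockFile("/some/file/or/other", threaded=False)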
+
+.. note::
+
+ The current implementation uses :func:`os.link` on Unix, but since that
+ function is unavailable on Windows it uses :func:`os.mkdir` there. At
+   this point it's not clear whether the :func:`os.mkdir` approach would
+   suffice on Unix systems. If it proves to be adequate on Unix then
+ the implementation could be simplified and truly cross-platform locking
+ would be possible.
+
+.. note::
+
+ The current implementation doesn't provide for shared vs. exclusive
+ locks. It should be possible for multiple reader processes to hold the
+ lock at the same time.
+
+The module defines the following exceptions:
+
+.. exception:: Error
+
+ This is the base class for all exceptions raised by the :class:`LockFile`
+ class.
+
+.. exception:: LockError
+
+ This is the base class for all exceptions raised when attempting to lock
+ a file.
+
+.. exception:: UnlockError
+
+ This is the base class for all exceptions raised when attempting to
+ unlock a file.
+
+.. exception:: LockTimeout
+
+ This exception is raised if the :func:`LockFile.acquire` method is
+ called with a timeout which expires before an existing lock is released.
+
+.. exception:: AlreadyLocked
+
+ This exception is raised if the :func:`LockFile.acquire` detects a
+ file is already locked when in non-blocking mode.
+
+.. exception:: LockFailed
+
+ This exception is raised if the :func:`LockFile.acquire` detects some
+ other condition (such as a non-writable directory) which prevents it from
+ creating its lock file.
+
+.. exception:: NotLocked
+
+ This exception is raised if the file is not locked when
+ :func:`LockFile.release` is called.
+
+.. exception:: NotMyLock
+
+ This exception is raised if the file is locked by another thread or
+ process when :func:`LockFile.release` is called.
+
+The following classes are provided:
+
+.. class:: linklockfile.LinkLockFile(path, threaded=True)
+
+ This class uses the :func:`link(2)` system call as the basic lock
+ mechanism. *path* is an object in the file system to be locked. It need
+ not exist, but its directory must exist and be writable at the time the
+ :func:`acquire` and :func:`release` methods are called. *threaded* is
+ optional, but when set to :const:`True` locks will be distinguished
+ between threads in the same process.
+
+.. class:: symlinklockfile.SymlinkLockFile(path, threaded=True)
+
+ This class uses the :func:`symlink(2)` system call as the basic lock
+ mechanism. The parameters have the same meaning and constraints as for
+ the :class:`LinkLockFile` class.
+
+.. class:: mkdirlockfile.MkdirLockFile(path, threaded=True)
+
+ This class uses the :func:`mkdir(2)` system call as the basic lock
+ mechanism. The parameters have the same meaning and constraints as for
+ the :class:`LinkLockFile` class.
+
+.. class:: sqlitelockfile.SQLiteLockFile(path, threaded=True)
+
+ This class uses the :mod:`sqlite3` module to implement the lock
+ mechanism. The parameters have the same meaning as for the
+ :class:`LinkLockFile` class.
+
+.. class:: LockBase(path, threaded=True)
+
+ This is the base class for all concrete implementations and is available
+ at the lockfile package level so programmers can implement other locking
+ schemes.
+
+.. function:: locked(path, timeout=None)
+
+   This function provides a decorator which ensures the decorated function
+   is always called with the lock held.
+
+By default, the :class:`LockFile` object refers to the
+:class:`mkdirlockfile.MkdirLockFile` class on Windows. On all other
+platforms it refers to the :class:`linklockfile.LinkLockFile` class.
+
+When locking a file the :class:`linklockfile.LinkLockFile` class creates a
+uniquely named hard link to an empty lock file. The name of that hard link
+encodes the hostname, the process id, and, if locks between threads are
+distinguished, the thread identifier. For example, if you want to lock
+access to a file named "README", the lock file is named "README.lock".
+With per-thread locks enabled the hard link is named HOSTNAME-THREADID-PID.
+With only per-process locks enabled the hard link is named HOSTNAME--PID.
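+
+A rough sketch of the resulting names (the exact values embed the hostname,
+the process id and, with per-thread locks, the thread id, so they will vary
+from machine to machine)::
+
+    from lockfile import LockFile
+
+    lock = LockFile("README")
+    lock.acquire()
+    print lock.lock_file     # e.g. /path/to/README.lock
+    print lock.unique_name   # the uniquely named hard link described above
+    lock.release()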
+
+When using the :class:`mkdirlockfile.MkdirLockFile` class the lock file is a
+directory. Referring to the example above, README.lock will be a directory
+and HOSTNAME-THREADID-PID will be an empty file within that directory.
+
+.. seealso::
+
+ Module :mod:`msvcrt`
+ Provides the :func:`locking` function, the standard Windows way of
+ locking (parts of) a file.
+
+ Module :mod:`posixfile`
+ The deprecated (since Python 1.5) way of locking files on Posix systems.
+
+ Module :mod:`fcntl`
+ Provides the current best way to lock files on Unix systems
+ (:func:`lockf` and :func:`flock`).
+
+LockFile Objects
+----------------
+
+:class:`LockFile` objects support the context manager protocol used by the
+`with` statement. The timeout option is not supported when used in this
+fashion. While support for timeouts could be implemented, there is no
+support for handling the eventual :exc:`LockTimeout` exceptions raised by
+the :func:`__enter__` method, so you would have to protect the `with`
+statement with a `try` statement. The resulting construct would not be any
+simpler than just using a `try` statement in the first place.
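+
+In other words, the `with` form is shorthand for the explicit construct
+sketched below; only the explicit form lets you pass a timeout and handle
+the exception yourself (``do_something`` is a stand-in for your own code)::
+
+    lock.acquire()       # "with lock:" does this, with no timeout
+    try:
+        do_something()
+    finally:
+        lock.release()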
+
+:class:`LockFile` has the following user-visible methods:
+
+.. method:: LockFile.acquire(timeout=None)
+
+ Lock the file associated with the :class:`LockFile` object. If the
+ *timeout* is omitted or :const:`None` the caller will block until the
+ file is unlocked by the object currently holding the lock. If the
+ *timeout* is zero or a negative number the :exc:`AlreadyLocked` exception
+ will be raised if the file is currently locked by another process or
+ thread. If the *timeout* is positive, the caller will block for that
+ many seconds waiting for the lock to be released. If the lock is not
+ released within that period the :exc:`LockTimeout` exception will be
+ raised.
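+
+   For example, given a :class:`LockFile` instance ``lock``, the three
+   modes look like this (a sketch; the timeout values are arbitrary)::
+
+      lock.acquire()            # block until the lock can be acquired
+      lock.acquire(timeout=0)   # raise AlreadyLocked if held elsewhere
+      lock.acquire(timeout=30)  # wait up to 30 seconds, then LockTimeout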
+
+.. method:: LockFile.release()
+
+ Unlock the file associated with the :class:`LockFile` object. If the
+ file is not currently locked, the :exc:`NotLocked` exception is raised.
+ If the file is locked by another thread or process the :exc:`NotMyLock`
+ exception is raised.
+
+.. method:: LockFile.is_locked()
+
+ Return the status of the lock on the current file. If any process or
+ thread (including the current one) is locking the file, :const:`True` is
+ returned, otherwise :const:`False` is returned.
+
+.. method:: LockFile.break_lock()
+
+ If the file is currently locked, break it.
+
+.. method:: LockFile.i_am_locking()
+
+   Return :const:`True` if the caller holds the lock.
+
+Examples
+--------
+
+This example is the "hello world" for the :mod:`lockfile` package::
+
+ from lockfile import LockFile
+ lock = LockFile("/some/file/or/other")
+ with lock:
+ print lock.path, 'is locked.'
+
+To use this with Python 2.4, you can execute::
+
+ from lockfile import LockFile
+ lock = LockFile("/some/file/or/other")
+ lock.acquire()
+ print lock.path, 'is locked.'
+ lock.release()
+
+If you don't want to wait forever, you might try::
+
+ from lockfile import LockFile
+ lock = LockFile("/some/file/or/other")
+ while not lock.i_am_locking():
+ try:
+ lock.acquire(timeout=60) # wait up to 60 seconds
+ except LockTimeout:
+ lock.break_lock()
+ lock.acquire()
+ print "I locked", lock.path
+ lock.release()
+
+You can also ensure that a lock is always held when appropriately decorated
+functions are called::
+
+ from lockfile import locked
+ @locked("/tmp/mylock")
+ def func(a, b):
+ return a + b
+
+Other Libraries
+---------------
+
+The idea of implementing advisory locking with a standard API is not new
+with :mod:`lockfile`. There are a number of other libraries available:
+
+* locknix - http://pypi.python.org/pypi/locknix - Unix only
+* mx.MiscLockFile - from Marc André Lemburg, part of the mx.Base
+ distribution - cross-platform.
+* Twisted - http://twistedmatrix.com/trac/browser/trunk/twisted/python/lockfile.py
+* zc.lockfile - http://pypi.python.org/pypi/zc.lockfile
+
+
+Contacting the Author
+---------------------
+
+If you encounter any problems with ``lockfile``, would like help or want to
+submit a patch, check http://launchpad.net/pylockfile
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/PKG-INFO b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/PKG-INFO
new file mode 100755
index 00000000..9f72376f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/PKG-INFO
@@ -0,0 +1,51 @@
+Metadata-Version: 1.1
+Name: lockfile
+Version: 0.10.2
+Summary: Platform-independent file locking module
+Home-page: http://launchpad.net/pylockfile
+Author: OpenStack
+Author-email: openstack-dev@lists.openstack.org
+License: UNKNOWN
+Description: The lockfile package exports a LockFile class which provides a simple API for
+ locking files. Unlike the Windows msvcrt.locking function, the fcntl.lockf
+ and flock functions, and the deprecated posixfile module, the API is
+ identical across both Unix (including Linux and Mac) and Windows platforms.
+ The lock mechanism relies on the atomic nature of the link (on Unix) and
+ mkdir (on Windows) system calls. An implementation based on SQLite is also
+ provided, more as a demonstration of the possibilities it provides than as
+ production-quality code.
+
+ Note: In version 0.9 the API changed in two significant ways:
+
+ * It changed from a module defining several classes to a package containing
+ several modules, each defining a single class.
+
+        * Where classes had been named SomethingFileLock before, the last two words
+ have been reversed, so that class is now SomethingLockFile.
+
+ The previous module-level definitions of LinkFileLock, MkdirFileLock and
+ SQLiteFileLock will be retained until the 1.0 release.
+
+ Available on GitHub from:
+
+ git://github.com/smontanaro/pylockfile.git
+
+ To install:
+
+ python setup.py install
+
+
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Operating System :: MacOS
+Classifier: Operating System :: Microsoft :: Windows :: Windows NT/2000
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/SOURCES.txt b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/SOURCES.txt
new file mode 100755
index 00000000..4b289f3a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/SOURCES.txt
@@ -0,0 +1,26 @@
+ACKS
+AUTHORS
+ChangeLog
+LICENSE
+README
+RELEASE-NOTES
+setup.cfg
+setup.py
+test-requirements.txt
+tox.ini
+doc/source/Makefile
+doc/source/conf.py
+doc/source/index.rst
+lockfile/__init__.py
+lockfile/linklockfile.py
+lockfile/mkdirlockfile.py
+lockfile/pidlockfile.py
+lockfile/sqlitelockfile.py
+lockfile/symlinklockfile.py
+lockfile.egg-info/PKG-INFO
+lockfile.egg-info/SOURCES.txt
+lockfile.egg-info/dependency_links.txt
+lockfile.egg-info/not-zip-safe
+lockfile.egg-info/top_level.txt
+test/compliancetest.py
+test/test_lockfile.py
\ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/dependency_links.txt b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/dependency_links.txt
new file mode 100755
index 00000000..8b137891
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/not-zip-safe b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/not-zip-safe
new file mode 100755
index 00000000..8b137891
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/top_level.txt b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/top_level.txt
new file mode 100755
index 00000000..5a13159a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/top_level.txt
@@ -0,0 +1 @@
+lockfile
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/__init__.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/__init__.py
new file mode 100755
index 00000000..d905af96
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/__init__.py
@@ -0,0 +1,326 @@
+"""
+lockfile.py - Platform-independent advisory file locks.
+
+Requires Python 2.5 unless you apply 2.4.diff
+Locking is done on a per-thread basis instead of a per-process basis.
+
+Usage:
+
+>>> lock = LockFile('somefile')
+>>> try:
+... lock.acquire()
+... except AlreadyLocked:
+... print 'somefile', 'is locked already.'
+... except LockFailed:
+... print 'somefile', 'can\\'t be locked.'
+... else:
+... print 'got lock'
+got lock
+>>> print lock.is_locked()
+True
+>>> lock.release()
+
+>>> lock = LockFile('somefile')
+>>> print lock.is_locked()
+False
+>>> with lock:
+... print lock.is_locked()
+True
+>>> print lock.is_locked()
+False
+
+>>> lock = LockFile('somefile')
+>>> # It is okay to lock twice from the same thread...
+>>> with lock:
+... lock.acquire()
+...
+>>> # Though no counter is kept, so you can't unlock multiple times...
+>>> print lock.is_locked()
+False
+
+Exceptions:
+
+ Error - base class for other exceptions
+ LockError - base class for all locking exceptions
+ AlreadyLocked - Another thread or process already holds the lock
+ LockFailed - Lock failed for some other reason
+ UnlockError - base class for all unlocking exceptions
+    NotLocked - File was not locked.
+ NotMyLock - File was locked but not by the current thread/process
+"""
+
+from __future__ import absolute_import
+
+import sys
+import socket
+import os
+import threading
+import time
+import urllib
+import warnings
+import functools
+
+# Work with PEP8 and non-PEP8 versions of threading module.
+if not hasattr(threading, "current_thread"):
+ threading.current_thread = threading.currentThread
+if not hasattr(threading.Thread, "get_name"):
+ threading.Thread.get_name = threading.Thread.getName
+
+__all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked',
+ 'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock',
+ 'LinkLockFile', 'MkdirLockFile', 'SQLiteLockFile',
+ 'LockBase', 'locked']
+
+class Error(Exception):
+ """
+ Base class for other exceptions.
+
+ >>> try:
+ ... raise Error
+ ... except Exception:
+ ... pass
+ """
+ pass
+
+class LockError(Error):
+ """
+ Base class for error arising from attempts to acquire the lock.
+
+ >>> try:
+ ... raise LockError
+ ... except Error:
+ ... pass
+ """
+ pass
+
+class LockTimeout(LockError):
+ """Raised when lock creation fails within a user-defined period of time.
+
+ >>> try:
+ ... raise LockTimeout
+ ... except LockError:
+ ... pass
+ """
+ pass
+
+class AlreadyLocked(LockError):
+ """Some other thread/process is locking the file.
+
+ >>> try:
+ ... raise AlreadyLocked
+ ... except LockError:
+ ... pass
+ """
+ pass
+
+class LockFailed(LockError):
+ """Lock file creation failed for some other reason.
+
+ >>> try:
+ ... raise LockFailed
+ ... except LockError:
+ ... pass
+ """
+ pass
+
+class UnlockError(Error):
+ """
+ Base class for errors arising from attempts to release the lock.
+
+ >>> try:
+ ... raise UnlockError
+ ... except Error:
+ ... pass
+ """
+ pass
+
+class NotLocked(UnlockError):
+ """Raised when an attempt is made to unlock an unlocked file.
+
+ >>> try:
+ ... raise NotLocked
+ ... except UnlockError:
+ ... pass
+ """
+ pass
+
+class NotMyLock(UnlockError):
+ """Raised when an attempt is made to unlock a file someone else locked.
+
+ >>> try:
+ ... raise NotMyLock
+ ... except UnlockError:
+ ... pass
+ """
+ pass
+
+class LockBase:
+ """Base class for platform-specific lock classes."""
+ def __init__(self, path, threaded=True, timeout=None):
+ """
+ >>> lock = LockBase('somefile')
+ >>> lock = LockBase('somefile', threaded=False)
+ """
+ self.path = path
+ self.lock_file = os.path.abspath(path) + ".lock"
+ self.hostname = socket.gethostname()
+ self.pid = os.getpid()
+ if threaded:
+ t = threading.current_thread()
+ # Thread objects in Python 2.4 and earlier do not have ident
+            # attrs. Work around that.
+ ident = getattr(t, "ident", hash(t))
+ self.tname = "-%x" % (ident & 0xffffffff)
+ else:
+ self.tname = ""
+ dirname = os.path.dirname(self.lock_file)
+
+ # unique name is mostly about the current process, but must
+ # also contain the path -- otherwise, two adjacent locked
+ # files conflict (one file gets locked, creating lock-file and
+ # unique file, the other one gets locked, creating lock-file
+ # and overwriting the already existing lock-file, then one
+ # gets unlocked, deleting both lock-file and unique file,
+        # finally the last lock errors out upon releasing).
+ self.unique_name = os.path.join(dirname,
+ "%s%s.%s%s" % (self.hostname,
+ self.tname,
+ self.pid,
+ hash(self.path)))
+ self.timeout = timeout
+
+ def acquire(self, timeout=None):
+ """
+ Acquire the lock.
+
+ * If timeout is omitted (or None), wait forever trying to lock the
+ file.
+
+ * If timeout > 0, try to acquire the lock for that many seconds. If
+ the lock period expires and the file is still locked, raise
+ LockTimeout.
+
+ * If timeout <= 0, raise AlreadyLocked immediately if the file is
+ already locked.
+ """
+        raise NotImplementedError("implement in subclass")
+
+ def release(self):
+ """
+ Release the lock.
+
+ If the file is not locked, raise NotLocked.
+ """
+        raise NotImplementedError("implement in subclass")
+
+ def is_locked(self):
+ """
+ Tell whether or not the file is locked.
+ """
+        raise NotImplementedError("implement in subclass")
+
+ def i_am_locking(self):
+ """
+ Return True if this object is locking the file.
+ """
+        raise NotImplementedError("implement in subclass")
+
+ def break_lock(self):
+ """
+ Remove a lock. Useful if a locking thread failed to unlock.
+ """
+        raise NotImplementedError("implement in subclass")
+
+ def __enter__(self):
+ """
+ Context manager support.
+ """
+ self.acquire()
+ return self
+
+ def __exit__(self, *_exc):
+ """
+ Context manager support.
+ """
+ self.release()
+
+ def __repr__(self):
+ return "<%s: %r -- %r>" % (self.__class__.__name__, self.unique_name,
+ self.path)
+
+def _fl_helper(cls, mod, *args, **kwds):
+ warnings.warn("Import from %s module instead of lockfile package" % mod,
+ DeprecationWarning, stacklevel=2)
+ # This is a bit funky, but it's only for awhile. The way the unit tests
+ # are constructed this function winds up as an unbound method, so it
+ # actually takes three args, not two. We want to toss out self.
+ if not isinstance(args[0], str):
+ # We are testing, avoid the first arg
+ args = args[1:]
+ if len(args) == 1 and not kwds:
+ kwds["threaded"] = True
+ return cls(*args, **kwds)
+
+def LinkFileLock(*args, **kwds):
+ """Factory function provided for backwards compatibility.
+
+ Do not use in new code. Instead, import LinkLockFile from the
+ lockfile.linklockfile module.
+ """
+ from . import linklockfile
+ return _fl_helper(linklockfile.LinkLockFile, "lockfile.linklockfile",
+ *args, **kwds)
+
+def MkdirFileLock(*args, **kwds):
+ """Factory function provided for backwards compatibility.
+
+ Do not use in new code. Instead, import MkdirLockFile from the
+ lockfile.mkdirlockfile module.
+ """
+ from . import mkdirlockfile
+ return _fl_helper(mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile",
+ *args, **kwds)
+
+def SQLiteFileLock(*args, **kwds):
+ """Factory function provided for backwards compatibility.
+
+ Do not use in new code. Instead, import SQLiteLockFile from the
+    lockfile.sqlitelockfile module.
+ """
+ from . import sqlitelockfile
+ return _fl_helper(sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile",
+ *args, **kwds)
+
+def locked(path, timeout=None):
+ """Decorator which enables locks for decorated function.
+
+ Arguments:
+ - path: path for lockfile.
+ - timeout (optional): Timeout for acquiring lock.
+
+ Usage:
+ @locked('/var/run/myname', timeout=0)
+ def myname(...):
+ ...
+ """
+ def decor(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ lock = FileLock(path, timeout=timeout)
+ lock.acquire()
+ try:
+ return func(*args, **kwargs)
+ finally:
+ lock.release()
+ return wrapper
+ return decor
+
+if hasattr(os, "link"):
+ from . import linklockfile as _llf
+ LockFile = _llf.LinkLockFile
+else:
+ from . import mkdirlockfile as _mlf
+ LockFile = _mlf.MkdirLockFile
+
+FileLock = LockFile
+
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/linklockfile.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/linklockfile.py
new file mode 100755
index 00000000..9c506734
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/linklockfile.py
@@ -0,0 +1,73 @@
+from __future__ import absolute_import
+
+import time
+import os
+
+from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
+ AlreadyLocked)
+
+class LinkLockFile(LockBase):
+ """Lock access to a file using atomic property of link(2).
+
+ >>> lock = LinkLockFile('somefile')
+ >>> lock = LinkLockFile('somefile', threaded=False)
+ """
+
+ def acquire(self, timeout=None):
+ try:
+ open(self.unique_name, "wb").close()
+ except IOError:
+ raise LockFailed("failed to create %s" % self.unique_name)
+
+        timeout = timeout if timeout is not None else self.timeout
+ end_time = time.time()
+ if timeout is not None and timeout > 0:
+ end_time += timeout
+
+ while True:
+ # Try and create a hard link to it.
+ try:
+ os.link(self.unique_name, self.lock_file)
+ except OSError:
+ # Link creation failed. Maybe we've double-locked?
+ nlinks = os.stat(self.unique_name).st_nlink
+ if nlinks == 2:
+ # The original link plus the one I created == 2. We're
+ # good to go.
+ return
+ else:
+ # Otherwise the lock creation failed.
+ if timeout is not None and time.time() > end_time:
+ os.unlink(self.unique_name)
+ if timeout > 0:
+ raise LockTimeout("Timeout waiting to acquire"
+ " lock for %s" %
+ self.path)
+ else:
+ raise AlreadyLocked("%s is already locked" %
+ self.path)
+                    time.sleep(timeout / 10 if timeout is not None else 0.1)
+ else:
+ # Link creation succeeded. We're good to go.
+ return
+
+ def release(self):
+ if not self.is_locked():
+ raise NotLocked("%s is not locked" % self.path)
+ elif not os.path.exists(self.unique_name):
+ raise NotMyLock("%s is locked, but not by me" % self.path)
+ os.unlink(self.unique_name)
+ os.unlink(self.lock_file)
+
+ def is_locked(self):
+ return os.path.exists(self.lock_file)
+
+ def i_am_locking(self):
+ return (self.is_locked() and
+ os.path.exists(self.unique_name) and
+ os.stat(self.unique_name).st_nlink == 2)
+
+ def break_lock(self):
+ if os.path.exists(self.lock_file):
+ os.unlink(self.lock_file)
+
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/mkdirlockfile.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/mkdirlockfile.py
new file mode 100755
index 00000000..8d2c801f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/mkdirlockfile.py
@@ -0,0 +1,83 @@
+from __future__ import absolute_import, division
+
+import time
+import os
+import sys
+import errno
+
+from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
+ AlreadyLocked)
+
+class MkdirLockFile(LockBase):
+ """Lock file by creating a directory."""
+ def __init__(self, path, threaded=True, timeout=None):
+ """
+ >>> lock = MkdirLockFile('somefile')
+ >>> lock = MkdirLockFile('somefile', threaded=False)
+ """
+ LockBase.__init__(self, path, threaded, timeout)
+ # Lock file itself is a directory. Place the unique file name into
+ # it.
+ self.unique_name = os.path.join(self.lock_file,
+ "%s.%s%s" % (self.hostname,
+ self.tname,
+ self.pid))
+
+ def acquire(self, timeout=None):
+        timeout = timeout if timeout is not None else self.timeout
+ end_time = time.time()
+ if timeout is not None and timeout > 0:
+ end_time += timeout
+
+ if timeout is None:
+ wait = 0.1
+ else:
+ wait = max(0, timeout / 10)
+
+ while True:
+ try:
+ os.mkdir(self.lock_file)
+ except OSError:
+ err = sys.exc_info()[1]
+ if err.errno == errno.EEXIST:
+ # Already locked.
+ if os.path.exists(self.unique_name):
+ # Already locked by me.
+ return
+ if timeout is not None and time.time() > end_time:
+ if timeout > 0:
+ raise LockTimeout("Timeout waiting to acquire"
+ " lock for %s" %
+ self.path)
+ else:
+ # Someone else has the lock.
+ raise AlreadyLocked("%s is already locked" %
+ self.path)
+ time.sleep(wait)
+ else:
+ # Couldn't create the lock for some other reason
+ raise LockFailed("failed to create %s" % self.lock_file)
+ else:
+ open(self.unique_name, "wb").close()
+ return
+
+ def release(self):
+ if not self.is_locked():
+ raise NotLocked("%s is not locked" % self.path)
+ elif not os.path.exists(self.unique_name):
+ raise NotMyLock("%s is locked, but not by me" % self.path)
+ os.unlink(self.unique_name)
+ os.rmdir(self.lock_file)
+
+ def is_locked(self):
+ return os.path.exists(self.lock_file)
+
+ def i_am_locking(self):
+ return (self.is_locked() and
+ os.path.exists(self.unique_name))
+
+ def break_lock(self):
+ if os.path.exists(self.lock_file):
+ for name in os.listdir(self.lock_file):
+ os.unlink(os.path.join(self.lock_file, name))
+ os.rmdir(self.lock_file)
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/pidlockfile.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/pidlockfile.py
new file mode 100755
index 00000000..e92f9ead
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/pidlockfile.py
@@ -0,0 +1,193 @@
+# -*- coding: utf-8 -*-
+
+# pidlockfile.py
+#
+# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Python Software Foundation License, version 2 or
+# later as published by the Python Software Foundation.
+# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
+
+""" Lockfile behaviour implemented via Unix PID files.
+ """
+
+from __future__ import absolute_import
+
+import os
+import sys
+import errno
+import time
+
+from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock,
+ LockTimeout)
+
+
+class PIDLockFile(LockBase):
+ """ Lockfile implemented as a Unix PID file.
+
+ The lock file is a normal file named by the attribute `path`.
+ A lock's PID file contains a single line of text, containing
+ the process ID (PID) of the process that acquired the lock.
+
+ >>> lock = PIDLockFile('somefile')
+ >>> lock = PIDLockFile('somefile')
+ """
+
+ def __init__(self, path, threaded=False, timeout=None):
+ # pid lockfiles don't support threaded operation, so always force
+ # False as the threaded arg.
+ LockBase.__init__(self, path, False, timeout)
+ dirname = os.path.dirname(self.lock_file)
+ basename = os.path.split(self.path)[-1]
+ self.unique_name = self.path
+
+ def read_pid(self):
+ """ Get the PID from the lock file.
+ """
+ return read_pid_from_pidfile(self.path)
+
+ def is_locked(self):
+ """ Test if the lock is currently held.
+
+ The lock is held if the PID file for this lock exists.
+
+ """
+ return os.path.exists(self.path)
+
+ def i_am_locking(self):
+ """ Test if the lock is held by the current process.
+
+ Returns ``True`` if the current process ID matches the
+ number stored in the PID file.
+ """
+ return self.is_locked() and os.getpid() == self.read_pid()
+
+ def acquire(self, timeout=None):
+ """ Acquire the lock.
+
+ Creates the PID file for this lock, or raises an error if
+ the lock could not be acquired.
+ """
+
+        timeout = timeout if timeout is not None else self.timeout
+ end_time = time.time()
+ if timeout is not None and timeout > 0:
+ end_time += timeout
+
+ while True:
+ try:
+ write_pid_to_pidfile(self.path)
+ except OSError as exc:
+ if exc.errno == errno.EEXIST:
+ # The lock creation failed. Maybe sleep a bit.
+ if timeout is not None and time.time() > end_time:
+ if timeout > 0:
+ raise LockTimeout("Timeout waiting to acquire"
+ " lock for %s" %
+ self.path)
+ else:
+ raise AlreadyLocked("%s is already locked" %
+ self.path)
+                    time.sleep(timeout / 10 if timeout is not None else 0.1)
+ else:
+ raise LockFailed("failed to create %s" % self.path)
+ else:
+ return
+
+ def release(self):
+ """ Release the lock.
+
+ Removes the PID file to release the lock, or raises an
+ error if the current process does not hold the lock.
+
+ """
+ if not self.is_locked():
+ raise NotLocked("%s is not locked" % self.path)
+ if not self.i_am_locking():
+ raise NotMyLock("%s is locked, but not by me" % self.path)
+ remove_existing_pidfile(self.path)
+
+ def break_lock(self):
+ """ Break an existing lock.
+
+ Removes the PID file if it already exists, otherwise does
+ nothing.
+
+ """
+ remove_existing_pidfile(self.path)
+
+def read_pid_from_pidfile(pidfile_path):
+ """ Read the PID recorded in the named PID file.
+
+ Read and return the numeric PID recorded as text in the named
+ PID file. If the PID file cannot be read, or if the content is
+ not a valid PID, return ``None``.
+
+ """
+ pid = None
+ try:
+ pidfile = open(pidfile_path, 'r')
+ except IOError:
+ pass
+ else:
+ # According to the FHS 2.3 section on PID files in /var/run:
+ #
+ # The file must consist of the process identifier in
+ # ASCII-encoded decimal, followed by a newline character.
+ #
+ # Programs that read PID files should be somewhat flexible
+ # in what they accept; i.e., they should ignore extra
+ # whitespace, leading zeroes, absence of the trailing
+ # newline, or additional lines in the PID file.
+
+ line = pidfile.readline().strip()
+ try:
+ pid = int(line)
+ except ValueError:
+ pass
+ pidfile.close()
+
+ return pid
+
+
+def write_pid_to_pidfile(pidfile_path):
+ """ Write the PID in the named PID file.
+
+ Get the numeric process ID (“PID”) of the current process
+ and write it to the named file as a line of text.
+
+ """
+ open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
+ open_mode = 0o644
+ pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
+ pidfile = os.fdopen(pidfile_fd, 'w')
+
+ # According to the FHS 2.3 section on PID files in /var/run:
+ #
+ # The file must consist of the process identifier in
+ # ASCII-encoded decimal, followed by a newline character. For
+ # example, if crond was process number 25, /var/run/crond.pid
+ # would contain three characters: two, five, and newline.
+
+ pid = os.getpid()
+ line = "%(pid)d\n" % vars()
+ pidfile.write(line)
+ pidfile.close()
+
+
+def remove_existing_pidfile(pidfile_path):
+ """ Remove the named PID file if it exists.
+
+    Removing a PID file that doesn't exist puts us in the
+ desired state, so we ignore the condition if the file does not
+ exist.
+
+ """
+ try:
+ os.remove(pidfile_path)
+ except OSError as exc:
+ if exc.errno == errno.ENOENT:
+ pass
+ else:
+ raise
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/sqlitelockfile.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/sqlitelockfile.py
new file mode 100755
index 00000000..7dee4a85
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/sqlitelockfile.py
@@ -0,0 +1,155 @@
+from __future__ import absolute_import, division
+
+import time
+import os
+
+try:
+ unicode
+except NameError:
+ unicode = str
+
+from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked
+
+class SQLiteLockFile(LockBase):
+ "Demonstrate SQL-based locking."
+
+ testdb = None
+
+ def __init__(self, path, threaded=True, timeout=None):
+ """
+ >>> lock = SQLiteLockFile('somefile')
+ >>> lock = SQLiteLockFile('somefile', threaded=False)
+ """
+ LockBase.__init__(self, path, threaded, timeout)
+ self.lock_file = unicode(self.lock_file)
+ self.unique_name = unicode(self.unique_name)
+
+ if SQLiteLockFile.testdb is None:
+ import tempfile
+ _fd, testdb = tempfile.mkstemp()
+ os.close(_fd)
+ os.unlink(testdb)
+ del _fd, tempfile
+ SQLiteLockFile.testdb = testdb
+
+ import sqlite3
+ self.connection = sqlite3.connect(SQLiteLockFile.testdb)
+
+ c = self.connection.cursor()
+ try:
+ c.execute("create table locks"
+ "("
+ " lock_file varchar(32),"
+ " unique_name varchar(32)"
+ ")")
+ except sqlite3.OperationalError:
+ pass
+ else:
+ self.connection.commit()
+ import atexit
+ atexit.register(os.unlink, SQLiteLockFile.testdb)
+
+ def acquire(self, timeout=None):
+        timeout = timeout if timeout is not None else self.timeout
+ end_time = time.time()
+ if timeout is not None and timeout > 0:
+ end_time += timeout
+
+ if timeout is None:
+ wait = 0.1
+ elif timeout <= 0:
+ wait = 0
+ else:
+ wait = timeout / 10
+
+ cursor = self.connection.cursor()
+
+ while True:
+ if not self.is_locked():
+ # Not locked. Try to lock it.
+ cursor.execute("insert into locks"
+ " (lock_file, unique_name)"
+ " values"
+ " (?, ?)",
+ (self.lock_file, self.unique_name))
+ self.connection.commit()
+
+ # Check to see if we are the only lock holder.
+ cursor.execute("select * from locks"
+ " where unique_name = ?",
+ (self.unique_name,))
+ rows = cursor.fetchall()
+ if len(rows) > 1:
+ # Nope. Someone else got there. Remove our lock.
+ cursor.execute("delete from locks"
+ " where unique_name = ?",
+ (self.unique_name,))
+ self.connection.commit()
+ else:
+ # Yup. We're done, so go home.
+ return
+ else:
+ # Check to see if we are the only lock holder.
+ cursor.execute("select * from locks"
+ " where unique_name = ?",
+ (self.unique_name,))
+ rows = cursor.fetchall()
+ if len(rows) == 1:
+ # We're the locker, so go home.
+ return
+
+ # Maybe we should wait a bit longer.
+ if timeout is not None and time.time() > end_time:
+ if timeout > 0:
+ # No more waiting.
+ raise LockTimeout("Timeout waiting to acquire"
+ " lock for %s" %
+ self.path)
+ else:
+ # Someone else has the lock and we are impatient..
+ raise AlreadyLocked("%s is already locked" % self.path)
+
+ # Well, okay. We'll give it a bit longer.
+ time.sleep(wait)
+
+ def release(self):
+ if not self.is_locked():
+ raise NotLocked("%s is not locked" % self.path)
+ if not self.i_am_locking():
+ raise NotMyLock("%s is locked, but not by me (by %s)" %
+ (self.unique_name, self._who_is_locking()))
+ cursor = self.connection.cursor()
+ cursor.execute("delete from locks"
+ " where unique_name = ?",
+ (self.unique_name,))
+ self.connection.commit()
+
+ def _who_is_locking(self):
+ cursor = self.connection.cursor()
+ cursor.execute("select unique_name from locks"
+ " where lock_file = ?",
+ (self.lock_file,))
+ return cursor.fetchone()[0]
+
+ def is_locked(self):
+ cursor = self.connection.cursor()
+ cursor.execute("select * from locks"
+ " where lock_file = ?",
+ (self.lock_file,))
+ rows = cursor.fetchall()
+        return bool(rows)
+
+ def i_am_locking(self):
+ cursor = self.connection.cursor()
+ cursor.execute("select * from locks"
+ " where lock_file = ?"
+ " and unique_name = ?",
+ (self.lock_file, self.unique_name))
+        return bool(cursor.fetchall())
+
+ def break_lock(self):
+ cursor = self.connection.cursor()
+ cursor.execute("delete from locks"
+ " where lock_file = ?",
+ (self.lock_file,))
+ self.connection.commit()
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/symlinklockfile.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/symlinklockfile.py
new file mode 100755
index 00000000..57551a36
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/symlinklockfile.py
@@ -0,0 +1,69 @@
+from __future__ import absolute_import
+
+import time
+import os
+
+from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
+ AlreadyLocked)
+
+class SymlinkLockFile(LockBase):
+ """Lock access to a file using symlink(2)."""
+
+ def __init__(self, path, threaded=True, timeout=None):
+ # super(SymlinkLockFile).__init(...)
+ LockBase.__init__(self, path, threaded, timeout)
+ # split it back!
+ self.unique_name = os.path.split(self.unique_name)[1]
+
+ def acquire(self, timeout=None):
+ # Hopefully unnecessary for symlink.
+ #try:
+ # open(self.unique_name, "wb").close()
+ #except IOError:
+ # raise LockFailed("failed to create %s" % self.unique_name)
+        timeout = timeout if timeout is not None else self.timeout
+ end_time = time.time()
+ if timeout is not None and timeout > 0:
+ end_time += timeout
+
+ while True:
+ # Try and create a symbolic link to it.
+ try:
+ os.symlink(self.unique_name, self.lock_file)
+ except OSError:
+ # Link creation failed. Maybe we've double-locked?
+ if self.i_am_locking():
+                # Linked to our unique name. Proceed.
+ return
+ else:
+ # Otherwise the lock creation failed.
+ if timeout is not None and time.time() > end_time:
+ if timeout > 0:
+ raise LockTimeout("Timeout waiting to acquire"
+ " lock for %s" %
+ self.path)
+ else:
+ raise AlreadyLocked("%s is already locked" %
+ self.path)
+ time.sleep(timeout/10 if timeout is not None else 0.1)
+ else:
+ # Link creation succeeded. We're good to go.
+ return
+
+ def release(self):
+ if not self.is_locked():
+ raise NotLocked("%s is not locked" % self.path)
+ elif not self.i_am_locking():
+ raise NotMyLock("%s is locked, but not by me" % self.path)
+ os.unlink(self.lock_file)
+
+ def is_locked(self):
+ return os.path.islink(self.lock_file)
+
+ def i_am_locking(self):
+ return os.path.islink(self.lock_file) and \
+ os.readlink(self.lock_file) == self.unique_name
+
+ def break_lock(self):
+ if os.path.islink(self.lock_file): # exists && link
+ os.unlink(self.lock_file)
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.cfg b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.cfg
new file mode 100755
index 00000000..c1fb3984
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.cfg
@@ -0,0 +1,39 @@
+[metadata]
+name = lockfile
+summary = Platform-independent file locking module
+description-file =
+ README
+author = OpenStack
+author-email = openstack-dev@lists.openstack.org
+home-page = http://launchpad.net/pylockfile
+classifier =
+ Intended Audience :: Developers
+ License :: OSI Approved :: MIT License
+ Operating System :: POSIX :: Linux
+ Operating System :: MacOS
+ Operating System :: Microsoft :: Windows :: Windows NT/2000
+ Operating System :: POSIX
+ Programming Language :: Python
+ Programming Language :: Python :: 2
+ Programming Language :: Python :: 2.7
+ Programming Language :: Python :: 2.6
+ Programming Language :: Python :: 3
+ Programming Language :: Python :: 3.3
+ Topic :: Software Development :: Libraries :: Python Modules
+
+[files]
+packages = lockfile
+
+[pbr]
+warnerrors = true
+
+[build_sphinx]
+source-dir = doc/source
+build-dir = doc/build
+all_files = 1
+
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.py
new file mode 100755
index 00000000..73637574
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
+import setuptools
+
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+ import multiprocessing # noqa
+except ImportError:
+ pass
+
+setuptools.setup(
+ setup_requires=['pbr'],
+ pbr=True)
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test-requirements.txt b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test-requirements.txt
new file mode 100755
index 00000000..2e087ff1
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test-requirements.txt
@@ -0,0 +1,2 @@
+nose
+sphinx>=1.1.2,!=1.2.0,<1.3
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/compliancetest.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/compliancetest.py
new file mode 100755
index 00000000..e0258b11
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/compliancetest.py
@@ -0,0 +1,261 @@
+import os
+import threading
+import shutil
+
+import lockfile
+
+class ComplianceTest(object):
+ def __init__(self):
+ self.saved_class = lockfile.LockFile
+
+ def _testfile(self):
+ """Return platform-appropriate file. Helper for tests."""
+ import tempfile
+ return os.path.join(tempfile.gettempdir(), 'trash-%s' % os.getpid())
+
+ def setup(self):
+ lockfile.LockFile = self.class_to_test
+
+ def teardown(self):
+ try:
+ tf = self._testfile()
+ if os.path.isdir(tf):
+ shutil.rmtree(tf)
+ elif os.path.isfile(tf):
+ os.unlink(tf)
+ elif not os.path.exists(tf):
+ pass
+ else:
+ raise SystemError("unrecognized file: %s" % tf)
+ finally:
+ lockfile.LockFile = self.saved_class
+
+ def _test_acquire_helper(self, tbool):
+ # As simple as it gets.
+ lock = lockfile.LockFile(self._testfile(), threaded=tbool)
+ lock.acquire()
+ assert lock.i_am_locking()
+ lock.release()
+ assert not lock.is_locked()
+
+## def test_acquire_basic_threaded(self):
+## self._test_acquire_helper(True)
+
+ def test_acquire_basic_unthreaded(self):
+ self._test_acquire_helper(False)
+
+ def _test_acquire_no_timeout_helper(self, tbool):
+ # No timeout test
+ e1, e2 = threading.Event(), threading.Event()
+ t = _in_thread(self._lock_wait_unlock, e1, e2)
+ e1.wait() # wait for thread t to acquire lock
+ lock2 = lockfile.LockFile(self._testfile(), threaded=tbool)
+ assert lock2.is_locked()
+ if tbool:
+ assert not lock2.i_am_locking()
+ else:
+ assert lock2.i_am_locking()
+
+ try:
+ lock2.acquire(timeout=-1)
+ except lockfile.AlreadyLocked:
+ pass
+ else:
+ lock2.release()
+ raise AssertionError("did not raise AlreadyLocked in"
+ " thread %s" %
+ threading.current_thread().get_name())
+
+ try:
+ lock2.acquire(timeout=0)
+ except lockfile.AlreadyLocked:
+ pass
+ else:
+ lock2.release()
+ raise AssertionError("did not raise AlreadyLocked in"
+ " thread %s" %
+ threading.current_thread().get_name())
+
+ e2.set() # tell thread t to release lock
+ t.join()
+
+## def test_acquire_no_timeout_threaded(self):
+## self._test_acquire_no_timeout_helper(True)
+
+## def test_acquire_no_timeout_unthreaded(self):
+## self._test_acquire_no_timeout_helper(False)
+
+ def _test_acquire_timeout_helper(self, tbool):
+ # Timeout test
+ e1, e2 = threading.Event(), threading.Event()
+ t = _in_thread(self._lock_wait_unlock, e1, e2)
+ e1.wait() # wait for thread t to acquire lock
+ lock2 = lockfile.LockFile(self._testfile(), threaded=tbool)
+ assert lock2.is_locked()
+ try:
+ lock2.acquire(timeout=0.1)
+ except lockfile.LockTimeout:
+ pass
+ else:
+ lock2.release()
+ raise AssertionError("did not raise LockTimeout in thread %s" %
+ threading.current_thread().get_name())
+
+ e2.set()
+ t.join()
+
+ def test_acquire_timeout_threaded(self):
+ self._test_acquire_timeout_helper(True)
+
+ def test_acquire_timeout_unthreaded(self):
+ self._test_acquire_timeout_helper(False)
+
+ def _test_context_timeout_helper(self, tbool):
+ # Timeout test
+ e1, e2 = threading.Event(), threading.Event()
+ t = _in_thread(self._lock_wait_unlock, e1, e2)
+ e1.wait() # wait for thread t to acquire lock
+ lock2 = lockfile.LockFile(self._testfile(), threaded=tbool,
+ timeout=0.2)
+ assert lock2.is_locked()
+ try:
+ lock2.acquire()
+ except lockfile.LockTimeout:
+ pass
+ else:
+ lock2.release()
+ raise AssertionError("did not raise LockTimeout in thread %s" %
+ threading.current_thread().get_name())
+
+ e2.set()
+ t.join()
+
+ def test_context_timeout_unthreaded(self):
+ self._test_context_timeout_helper(False)
+
+ def _test_release_basic_helper(self, tbool):
+ lock = lockfile.LockFile(self._testfile(), threaded=tbool)
+ lock.acquire()
+ assert lock.is_locked()
+ lock.release()
+ assert not lock.is_locked()
+ assert not lock.i_am_locking()
+ try:
+ lock.release()
+ except lockfile.NotLocked:
+ pass
+ except lockfile.NotMyLock:
+ raise AssertionError('unexpected exception: %s' %
+ lockfile.NotMyLock)
+ else:
+ raise AssertionError('erroneously unlocked file')
+
+## def test_release_basic_threaded(self):
+## self._test_release_basic_helper(True)
+
+ def test_release_basic_unthreaded(self):
+ self._test_release_basic_helper(False)
+
+## def test_release_from_thread(self):
+## e1, e2 = threading.Event(), threading.Event()
+## t = _in_thread(self._lock_wait_unlock, e1, e2)
+## e1.wait()
+## lock2 = lockfile.LockFile(self._testfile(), threaded=False)
+## assert not lock2.i_am_locking()
+## try:
+## lock2.release()
+## except lockfile.NotMyLock:
+## pass
+## else:
+## raise AssertionError('erroneously unlocked a file locked'
+## ' by another thread.')
+## e2.set()
+## t.join()
+
+ def _test_is_locked_helper(self, tbool):
+ lock = lockfile.LockFile(self._testfile(), threaded=tbool)
+ lock.acquire(timeout=2)
+ assert lock.is_locked()
+ lock.release()
+ assert not lock.is_locked(), "still locked after release!"
+
+## def test_is_locked_threaded(self):
+## self._test_is_locked_helper(True)
+
+ def test_is_locked_unthreaded(self):
+ self._test_is_locked_helper(False)
+
+## def test_i_am_locking_threaded(self):
+## self._test_i_am_locking_helper(True)
+
+ def test_i_am_locking_unthreaded(self):
+ self._test_i_am_locking_helper(False)
+
+ def _test_i_am_locking_helper(self, tbool):
+ lock1 = lockfile.LockFile(self._testfile(), threaded=tbool)
+ assert not lock1.is_locked()
+ lock1.acquire()
+ try:
+ assert lock1.i_am_locking()
+ lock2 = lockfile.LockFile(self._testfile(), threaded=tbool)
+ assert lock2.is_locked()
+ if tbool:
+ assert not lock2.i_am_locking()
+ finally:
+ lock1.release()
+
+ def _test_break_lock_helper(self, tbool):
+ lock = lockfile.LockFile(self._testfile(), threaded=tbool)
+ lock.acquire()
+ assert lock.is_locked()
+ lock2 = lockfile.LockFile(self._testfile(), threaded=tbool)
+ assert lock2.is_locked()
+ lock2.break_lock()
+ assert not lock2.is_locked()
+ try:
+ lock.release()
+ except lockfile.NotLocked:
+ pass
+ else:
+ raise AssertionError('break lock failed')
+
+## def test_break_lock_threaded(self):
+## self._test_break_lock_helper(True)
+
+ def test_break_lock_unthreaded(self):
+ self._test_break_lock_helper(False)
+
+ def _lock_wait_unlock(self, event1, event2):
+ """Lock from another thread. Helper for tests."""
+ l = lockfile.LockFile(self._testfile())
+ l.acquire()
+ try:
+ event1.set() # we're in,
+ event2.wait() # wait for boss's permission to leave
+ finally:
+ l.release()
+
+ def test_enter(self):
+ lock = lockfile.LockFile(self._testfile())
+ lock.acquire()
+ try:
+ assert lock.is_locked(), "Not locked after acquire!"
+ finally:
+ lock.release()
+ assert not lock.is_locked(), "still locked after release!"
+
+ def test_decorator(self):
+ @lockfile.locked(self._testfile())
+ def func(a, b):
+ return a + b
+ assert func(4, 3) == 7
+
+def _in_thread(func, *args, **kwargs):
+    """Execute func(*args, **kwargs) in a new daemon thread. Helper for tests."""
+ def _f():
+ func(*args, **kwargs)
+ t = threading.Thread(target=_f, name='/*/*')
+ t.setDaemon(True)
+ t.start()
+ return t
+
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/test_lockfile.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/test_lockfile.py
new file mode 100755
index 00000000..e1f4f72f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/test_lockfile.py
@@ -0,0 +1,36 @@
+import sys
+
+import lockfile.linklockfile
+import lockfile.mkdirlockfile
+import lockfile.pidlockfile
+import lockfile.symlinklockfile
+
+from compliancetest import ComplianceTest
+
+class TestLinkLockFile(ComplianceTest):
+ class_to_test = lockfile.linklockfile.LinkLockFile
+
+class TestSymlinkLockFile(ComplianceTest):
+ class_to_test = lockfile.symlinklockfile.SymlinkLockFile
+
+class TestMkdirLockFile(ComplianceTest):
+ class_to_test = lockfile.mkdirlockfile.MkdirLockFile
+
+class TestPIDLockFile(ComplianceTest):
+ class_to_test = lockfile.pidlockfile.PIDLockFile
+
+# Check backwards compatibility
+class TestLinkFileLock(ComplianceTest):
+ class_to_test = lockfile.LinkFileLock
+
+class TestMkdirFileLock(ComplianceTest):
+ class_to_test = lockfile.MkdirFileLock
+
+try:
+ import sqlite3
+except ImportError:
+ pass
+else:
+ import lockfile.sqlitelockfile
+ class TestSQLiteLockFile(ComplianceTest):
+ class_to_test = lockfile.sqlitelockfile.SQLiteLockFile
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/tox.ini b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/tox.ini
new file mode 100755
index 00000000..b0a868a3
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/tox.ini
@@ -0,0 +1,28 @@
+# content of: tox.ini , put in same dir as setup.py
+[tox]
+envlist = py26,py27,py32,py33,py34
+
+[testenv]
+deps = -r{toxinidir}/test-requirements.txt
+commands=nosetests
+
+[testenv:venv]
+commands = {posargs}
+
+[testenv:pep8]
+deps = flake8
+commands = flake8
+
+[testenv:docs]
+commands = python setup.py build_sphinx
+
+[testenv:cover]
+deps = {[testenv]deps}
+ coverage
+commands =
+ nosetests --with-coverage --cover-erase --cover-package=lockfile --cover-inclusive []
+
+[flake8]
+ignore = E121,E123,E128,E221,E226,E261,E265,E301,E302,E713,F401,F841,W291,W293,W391
+exclude=.venv,.git,.tox,dist,doc
+show-source = True
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/ChangeLog b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/ChangeLog
new file mode 100755
index 00000000..4975f781
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/ChangeLog
@@ -0,0 +1,380 @@
+Version 2.0.5
+=============
+
+:Released: 2015-02-02
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Refine compatibility of exceptions for file operations.
+* Specify the text encoding when opening the changelog file.
+
+
+Version 2.0.4
+=============
+
+:Released: 2015-01-23
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Record version info via Setuptools commands.
+* Remove the custom Setuptools entry points.
+ This closes Alioth bug#314948.
+
+
+Version 2.0.3
+=============
+
+:Released: 2015-01-14
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Break circular import dependency for ‘setup.py’.
+* Refactor all initial metadata functionality to ‘daemon._metadata’.
+* Distribute ‘version’ (and its tests) only in source, not install.
+* Build a “universal” (Python 2 and Python 3) wheel.
+
+
+Version 2.0.2
+=============
+
+:Released: 2015-01-13
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Declare test-time dependency on recent ‘unittest2’.
+* Declare packaging-time dependency on ‘docutils’ library.
+* Include unit tests for ‘version’ module with source distribution.
+* Record version info consistent with distribution metadata.
+
+
+Version 2.0.1
+=============
+
+:Released: 2015-01-11
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Include the ‘version’ module with source distribution.
+
+
+Version 2.0
+===========
+
+:Released: 2015-01-10
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Support both Python 3 (version 3.2 or later) and Python 2 (version
+ 2.7 or later).
+* Document the API of all functions comprehensively in docstrings.
+* Add a hacking guide for developers.
+* Add explicit credit for contributors.
+* Document the security impact of the default umask.
+
+* Specify explicit text or binary mode when opening files.
+* Preserve exception context in custom exceptions.
+
+* Declare compatibility with current Python versions.
+* Depend on Python 3 compatible libraries.
+* Update package homepage to Alioth hosted project page.
+* Use ‘pydoc.splitdoc’ to get package description text.
+* Remove ASCII translation of package description, not needed now the
+ docstring is a proper Unicode text value.
+* Include test suite with source distribution.
+* Move package metadata to ‘daemon/_metadata.py’.
+* Migrate to JSON (instead of Python) for serialised version info.
+* Add unit tests for metadata.
+* Store and retrieve version info in Setuptools metadata.
+
+* Migrate to ‘str.format’ for interpolation of values into text.
+* Migrate to ‘mock’ library for mock objects in tests.
+* Migrate to ‘testscenarios’ library for unit test scenarios.
+* Migrate to ‘unittest2’ library for back-ported improvements.
+ Remove custom test suite creation.
+* Discriminate Python 2-and-3 compatible usage of dict methods.
+* Discriminate Python 2-and-3 compatible bytes versus text.
+* Declare explicit absolute and relative imports.
+* Discriminate between different ‘fileno’ method behaviours.
+ In Python 3, ‘StringIO.fileno’ is callable but raises an exception.
+* Migrate to built-in ‘next’ function.
+* Wrap the ‘fromlist’ parameter of ‘__import__’ for Python 3
+ compatibility.
+* Wrap function introspection for Python 3 compatibility.
+* Wrap standard library imports where names changed in Python 3.
+
+
+Version 1.6.1
+=============
+
+:Released: 2014-08-04
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Use unambiguous “except FooType as foo” syntax.
+ This is to ease the port to Python 3, where the ambiguous comma
+ usage is an error.
+* Ensure a ‘basestring’ name bound to the base type for strings.
+ This is to allow checks to work on Python 2 and 3.
+* Specify versions of Python supported, as trove classifiers.
+
+* Update copyright notices.
+* Add editor hints for most files.
+* Distinguish continuation-line indentation versus block indentation.
+
+* Use unicode literals by default, specifying bytes where necessary.
+ This is to ease the port to Python 3, where the default string type
+ is unicode.
+* Update copyright notices.
+* Update the GPL license file to version 3, as declared in our
+ copyright notices.
+
+* Change license of library code to Apache License 2.0. Rationale at
+ <URL:http://wiki.python.org/moin/PythonSoftwareFoundationLicenseFaq#Contributing_Code_to_Python>.
+
+
+Version 1.6
+===========
+
+:Released: 2010-05-10
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Use absolute imports to disambiguate provenance of names.
+* setup.py: Require ‘lockfile >=0.9’.
+* daemon/pidfile.py: Renamed from ‘daemon/pidlockfile.py’. Change
+ references elsewhere to use this new name.
+* test/test_pidfile.py: Renamed from ‘test/test_pidlockfile.py’.
+ Change references elsewhere to use this new name.
+* daemon/pidfile.py: Remove functionality now migrated to ‘lockfile’
+ library.
+
+* FAQ: Add some entries and re-structure the document.
+
+* Use ‘unicode’ data type for all text values.
+* Prepare for Python 3 upgrade by tweaking some names and imports.
+
+* MANIFEST.in: Include the documentation in the distribution.
+
+
+Version 1.5.5
+=============
+
+:Released: 2010-03-02
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Stop using ‘pkg_resources’ and revert to pre-1.5.3 version-string
+ handling, until a better way that doesn't break everyone else's
+ installation can be found.
+
+
+Version 1.5.4
+=============
+
+:Released: 2010-02-27
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* MANIFEST.in: Explicitly include version data file, otherwise
+ everything breaks for users of the sdist.
+
+
+Version 1.5.3
+=============
+
+:Released: 2010-02-26
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* daemon/daemon.py: Invoke the pidfile context manager's ‘__exit__’
+ method with the correct arguments (as per
+ <URL:http://docs.python.org/library/stdtypes.html#typecontextmanager>).
+ Thanks to Ludvig Ericson for the bug report.
+* version: New plain-text data file to store project version string.
+* setup.py: Read version string from data file.
+* daemon/version/__init__.py: Query version string with ‘pkg_resources’.
+
+* Add ‘pylint’ configuration for this project.
+* Update copyright notices.
+
+
+Version 1.5.2
+=============
+
+:Released: 2009-10-24
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Ensure we only prevent core dumps if ‘prevent_core’ is true.
+ Thanks to Denis Bilenko for reporting the lacking implementation of
+ this documented option.
+
+* Add initial Frequently Asked Questions document.
+
+
+Version 1.5.1
+=============
+
+:Released: 2009-09-26
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Make a separate collection of DaemonRunner test scenarios.
+* Handle a start request with a timeout on the PID file lock acquire.
+
+* Implement ‘TimeoutPIDLockFile’ to specify a timeout in advance of
+ lock acquisition.
+* Use lock with timeout for ‘DaemonRunner’.
+
+
+Version 1.5
+===========
+
+:Released: 2009-09-24
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Make a separate collection of PIDLockFile test scenarios.
+
+* Raise specific errors on ‘DaemonRunner’ failures.
+* Distinguish different conditions on reading and parsing PID file.
+* Refactor code to ‘_terminate_daemon_process’ method.
+* Improve explanations in comments and docstrings.
+* Don't set pidfile at all if no path specified to constructor.
+* Write the PID file using correct OS locking and permissions.
+* Close the PID file after writing.
+* Implement ‘PIDLockFile’ as subclass of ‘lockfile.LinkFileLock’.
+* Remove redundant checks for file existence.
+
+* Manage the excluded file descriptors as a set (not a list).
+* Only inspect the file descriptor of streams if they actually have
+ one (via a ‘fileno’ method) when determining which file descriptors
+ to close. Thanks to Ask Solem for revealing this bug.
+
+
+Version 1.4.8
+=============
+
+:Released: 2009-09-17
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Remove child-exit signal (‘SIGCLD’, ‘SIGCHLD’) from default signal
+ map. Thanks to Joel Martin for pinpointing this issue.
+* Document requirement for ensuring any operating-system specific
+ signal handlers are considered.
+* Refactor ‘fork_then_exit_parent’ functionality to avoid duplicate
+ code.
+* Remove redundant imports.
+* Remove unused code from unit test suite scaffold.
+* Add specific license terms for unit test suite scaffold.
+
+
+Version 1.4.7
+=============
+
+:Released: 2009-09-03
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Fix keywords argument for distribution setup.
+* Exclude ‘test’ package from distribution installation.
+
+
+Version 1.4.6
+=============
+
+:Released: 2009-06-21
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Update documentation for changes from latest PEP 3143 revision.
+* Implement DaemonContext.is_open method.
+
+
+Version 1.4.5
+=============
+
+:Released: 2009-05-17
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Register DaemonContext.close method for atexit processing.
+* Move PID file cleanup to close method.
+* Improve docstrings by reference to, and copy from, PEP 3143.
+* Use mock checking capabilities of newer ‘MiniMock’ library.
+* Automate building a versioned distribution tarball.
+* Include developer documentation files in source distribution.
+
+
+Version 1.4.4
+=============
+
+:Released: 2009-03-26
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Conform to current PEP version, now released as PEP 3143 “Standard
+ daemon process library”.
+* Ensure UID and GID are set in correct order.
+* Delay closing all open files until just before re-binding standard
+ streams.
+* Redirect standard streams to null device by default.
+
+
+Version 1.4.3
+=============
+
+:Released: 2009-03-19
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Close the PID file context on exit.
+
+
+Version 1.4.2
+=============
+
+:Released: 2009-03-18
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Context manager methods for DaemonContext.
+
+
+Version 1.4.1
+=============
+
+:Released: 2009-03-18
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Improvements to docstrings.
+* Further conformance with draft PEP.
+
+
+Version 1.4
+===========
+
+:Released: 2009-03-17
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Implement the interface from a draft PEP for process daemonisation.
+* Complete statement coverage from unit test suite.
+
+
+Version 1.3
+===========
+
+:Released: 2009-03-12
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Separate controller (now ‘DaemonRunner’) from daemon process
+ context (now ‘DaemonContext’).
+* Fix many corner cases and bugs.
+* Huge increase in unit test suite.
+
+
+Version 1.2
+===========
+
+:Released: 2009-01-27
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Initial release of this project forked from ‘bda.daemon’. Thanks,
+ Robert Niederreiter.
+* Refactor some functionality out to helper functions.
+* Begin unit test suite.
+
+
+..
+ This is free software: you may copy, modify, and/or distribute this work
+ under the terms of the Apache License version 2.0 as published by the
+ Apache Software Foundation.
+ No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+..
+ Local variables:
+ coding: utf-8
+ mode: text
+ mode: rst
+ End:
+ vim: fileencoding=utf-8 filetype=rst :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.ASF-2 b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.ASF-2
new file mode 100755
index 00000000..d6456956
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.ASF-2
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.GPL-3 b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.GPL-3
new file mode 100755
index 00000000..94a9ed02
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.GPL-3
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/MANIFEST.in b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/MANIFEST.in
new file mode 100755
index 00000000..d3d4341e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/MANIFEST.in
@@ -0,0 +1,7 @@
+include MANIFEST.in
+include LICENSE.*
+include ChangeLog
+recursive-include doc *
+include version.py
+include test_version.py
+recursive-include test *.py
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/PKG-INFO b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/PKG-INFO
new file mode 100755
index 00000000..fd81f509
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/PKG-INFO
@@ -0,0 +1,38 @@
+Metadata-Version: 1.1
+Name: python-daemon
+Version: 2.0.5
+Summary: Library to implement a well-behaved Unix daemon process.
+Home-page: https://alioth.debian.org/projects/python-daemon/
+Author: Ben Finney
+Author-email: ben+python@benfinney.id.au
+License: Apache-2
+Description: This library implements the well-behaved daemon specification of
+ :pep:`3143`, “Standard daemon process library”.
+
+ A well-behaved Unix daemon process is tricky to get right, but the
+ required steps are much the same for every daemon program. A
+ `DaemonContext` instance holds the behaviour and configured
+ process environment for the program; use the instance as a context
+ manager to enter a daemon state.
+
+ Simple example of usage::
+
+ import daemon
+
+ from spam import do_main_program
+
+ with daemon.DaemonContext():
+ do_main_program()
+
+ Customisation of the steps to become a daemon is available by
+ setting options on the `DaemonContext` instance; see the
+ documentation for that class for each option.
+Keywords: daemon,fork,unix
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/__init__.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/__init__.py
new file mode 100755
index 00000000..4731a6ef
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/__init__.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+
+# daemon/__init__.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2009–2015 Ben Finney <ben+python@benfinney.id.au>
+# Copyright © 2006 Robert Niederreiter
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Library to implement a well-behaved Unix daemon process.
+
+ This library implements the well-behaved daemon specification of
+ :pep:`3143`, “Standard daemon process library”.
+
+ A well-behaved Unix daemon process is tricky to get right, but the
+ required steps are much the same for every daemon program. A
+ `DaemonContext` instance holds the behaviour and configured
+ process environment for the program; use the instance as a context
+ manager to enter a daemon state.
+
+ Simple example of usage::
+
+ import daemon
+
+ from spam import do_main_program
+
+ with daemon.DaemonContext():
+ do_main_program()
+
+ Customisation of the steps to become a daemon is available by
+ setting options on the `DaemonContext` instance; see the
+ documentation for that class for each option.
+
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+from .daemon import DaemonContext
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/_metadata.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/_metadata.py
new file mode 100755
index 00000000..6d22a2b7
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/_metadata.py
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+
+# daemon/_metadata.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Package metadata for the ‘python-daemon’ distribution. """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import json
+import re
+import collections
+import datetime
+
+import pkg_resources
+
+
+distribution_name = "python-daemon"
+version_info_filename = "version_info.json"
+
+def get_distribution_version_info(filename=version_info_filename):
+ """ Get the version info from the installed distribution.
+
+ :param filename: Base filename of the version info resource.
+ :return: The version info as a mapping of fields. If the
+ distribution is not available, the mapping is empty.
+
+ The version info is stored as a metadata file in the
+ distribution.
+
+ """
+ version_info = {
+ 'release_date': "UNKNOWN",
+ 'version': "UNKNOWN",
+ 'maintainer': "UNKNOWN",
+ }
+
+ try:
+ distribution = pkg_resources.get_distribution(distribution_name)
+ except pkg_resources.DistributionNotFound:
+ distribution = None
+
+ if distribution is not None:
+ if distribution.has_metadata(version_info_filename):
+ content = distribution.get_metadata(version_info_filename)
+ version_info = json.loads(content)
+
+ return version_info
+
+version_info = get_distribution_version_info()
+
+version_installed = version_info['version']
+
+
+rfc822_person_regex = re.compile(
+ "^(?P<name>[^<]+) <(?P<email>[^>]+)>$")
+
+ParsedPerson = collections.namedtuple('ParsedPerson', ['name', 'email'])
+
+def parse_person_field(value):
+ """ Parse a person field into name and email address.
+
+ :param value: The text value specifying a person.
+ :return: A 2-tuple (name, email) for the person's details.
+
+ If the `value` does not match a standard person with email
+ address, the `email` item is ``None``.
+
+ """
+    result = ParsedPerson(name=None, email=None)
+
+    if value:
+        match = rfc822_person_regex.match(value)
+        if match is not None:
+            result = ParsedPerson(
+                name=match.group('name'),
+                email=match.group('email'))
+        else:
+            result = ParsedPerson(name=value, email=None)
+
+ return result
+
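+# Illustrative examples (values assumed, not taken from real metadata):
+#
+#     parse_person_field("Ben Finney <ben@example.com>")
+#         -> ParsedPerson(name='Ben Finney', email='ben@example.com')
+#     parse_person_field("Ben Finney")
+#         -> ParsedPerson(name='Ben Finney', email=None)
+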
+author_name = "Ben Finney"
+author_email = "ben+python@benfinney.id.au"
+author = "{name} <{email}>".format(name=author_name, email=author_email)
+
+
+class YearRange:
+ """ A range of years spanning a period. """
+
+ def __init__(self, begin, end=None):
+ self.begin = begin
+ self.end = end
+
+ def __unicode__(self):
+ text = "{range.begin:04d}".format(range=self)
+ if self.end is not None:
+ if self.end > self.begin:
+ text = "{range.begin:04d}–{range.end:04d}".format(range=self)
+ return text
+
+ __str__ = __unicode__
+
+
+def make_year_range(begin_year, end_date=None):
+ """ Construct the year range given a start and possible end date.
+
+ :param begin_date: The beginning year (text) for the range.
+ :param end_date: The end date (text, ISO-8601 format) for the
+ range, or a non-date token string.
+ :return: The range of years as a `YearRange` instance.
+
+ If the `end_date` is not a valid ISO-8601 date string, the
+ range has ``None`` for the end year.
+
+ """
+ begin_year = int(begin_year)
+
+ try:
+ end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
+ except (TypeError, ValueError):
+ # Specified end_date value is not a valid date.
+ end_year = None
+ else:
+ end_year = end_date.year
+
+ year_range = YearRange(begin=begin_year, end=end_year)
+
+ return year_range
+
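+# Illustrative (assumed) values: make_year_range("2001", "2015-01-10")
+# formats as "2001–2015", while make_year_range("2001", "UNKNOWN")
+# formats as just "2001", because the end token is not a valid
+# ISO-8601 date.
+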
+copyright_year_begin = "2001"
+build_date = version_info['release_date']
+copyright_year_range = make_year_range(copyright_year_begin, build_date)
+
+copyright = "Copyright © {year_range} {author} and others".format(
+ year_range=copyright_year_range, author=author)
+license = "Apache-2"
+url = "https://alioth.debian.org/projects/python-daemon/"
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/daemon.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/daemon.py
new file mode 100755
index 00000000..07810cf1
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/daemon.py
@@ -0,0 +1,926 @@
+# -*- coding: utf-8 -*-
+
+# daemon/daemon.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+# Copyright © 2007–2008 Robert Niederreiter, Jens Klein
+# Copyright © 2004–2005 Chad J. Schroeder
+# Copyright © 2003 Clark Evans
+# Copyright © 2002 Noah Spurrier
+# Copyright © 2001 Jürgen Hermann
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Daemon process behaviour.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import os
+import sys
+import resource
+import errno
+import signal
+import socket
+import atexit
+try:
+ # Python 2 has both ‘str’ (bytes) and ‘unicode’ (text).
+ basestring = basestring
+ unicode = unicode
+except NameError:
+ # Python 3 names the Unicode data type ‘str’.
+ basestring = str
+ unicode = str
+
+
+class DaemonError(Exception):
+ """ Base exception class for errors from this module. """
+
+ def __init__(self, *args, **kwargs):
+ self._chain_from_context()
+
+ super(DaemonError, self).__init__(*args, **kwargs)
+
+ def _chain_from_context(self):
+ _chain_exception_from_existing_exception_context(self, as_cause=True)
+
+
+class DaemonOSEnvironmentError(DaemonError, OSError):
+ """ Exception raised when daemon OS environment setup receives error. """
+
+
+class DaemonProcessDetachError(DaemonError, OSError):
+ """ Exception raised when process detach fails. """
+
+
+class DaemonContext:
+ """ Context for turning the current program into a daemon process.
+
+ A `DaemonContext` instance represents the behaviour settings and
+ process context for the program when it becomes a daemon. The
+ behaviour and environment is customised by setting options on the
+ instance, before calling the `open` method.
+
+ Each option can be passed as a keyword argument to the `DaemonContext`
+ constructor, or subsequently altered by assigning to an attribute on
+ the instance at any time prior to calling `open`. That is, for
+ options named `wibble` and `wubble`, the following invocation::
+
+ foo = daemon.DaemonContext(wibble=bar, wubble=baz)
+ foo.open()
+
+ is equivalent to::
+
+ foo = daemon.DaemonContext()
+ foo.wibble = bar
+ foo.wubble = baz
+ foo.open()
+
+ The following options are defined.
+
+ `files_preserve`
+ :Default: ``None``
+
+ List of files that should *not* be closed when starting the
+ daemon. If ``None``, all open file descriptors will be closed.
+
+ Elements of the list are file descriptors (as returned by a file
+ object's `fileno()` method) or Python `file` objects. Each
+ specifies a file that is not to be closed during daemon start.
+
+ `chroot_directory`
+ :Default: ``None``
+
+ Full path to a directory to set as the effective root directory of
+ the process. If ``None``, specifies that the root directory is not
+ to be changed.
+
+ `working_directory`
+ :Default: ``'/'``
+
+ Full path of the working directory to which the process should
+ change on daemon start.
+
+ Since a filesystem cannot be unmounted if a process has its
+ current working directory on that filesystem, this should either
+ be left at default or set to a directory that is a sensible “home
+ directory” for the daemon while it is running.
+
+ `umask`
+ :Default: ``0``
+
+ File access creation mask (“umask”) to set for the process on
+ daemon start.
+
+ A daemon should not rely on the parent process's umask value,
+ which is beyond its control and may prevent creating a file with
+ the required access mode. So when the daemon context opens, the
+ umask is set to an explicit known value.
+
+ If the conventional value of 0 is too open, consider setting a
+ value such as 0o022, 0o027, 0o077, or another specific value.
+ Otherwise, ensure the daemon creates every file with an
+ explicit access mode for the purpose.
+
+ `pidfile`
+ :Default: ``None``
+
+ Context manager for a PID lock file. When the daemon context opens
+ and closes, it enters and exits the `pidfile` context manager.
+
+ `detach_process`
+ :Default: ``None``
+
+ If ``True``, detach the process context when opening the daemon
+ context; if ``False``, do not detach.
+
+ If unspecified (``None``) during initialisation of the instance,
+ this will be set to ``True`` by default, and ``False`` only if
+ detaching the process is determined to be redundant; for example,
+ in the case when the process was started by `init`, by `initd`, or
+ by `inetd`.
+
+ `signal_map`
+ :Default: system-dependent
+
+ Mapping from operating system signals to callback actions.
+
+ The mapping is used when the daemon context opens, and determines
+ the action for each signal's signal handler:
+
+ * A value of ``None`` will ignore the signal (by setting the
+ signal action to ``signal.SIG_IGN``).
+
+ * A string value will be used as the name of an attribute on the
+ ``DaemonContext`` instance. The attribute's value will be used
+ as the action for the signal handler.
+
+ * Any other value will be used as the action for the
+ signal handler. See the ``signal.signal`` documentation
+ for details of the signal handler interface.
+
+ The default value depends on which signals are defined on the
+ running system. Each item from the list below whose signal is
+ actually defined in the ``signal`` module will appear in the
+ default map:
+
+ * ``signal.SIGTTIN``: ``None``
+
+ * ``signal.SIGTTOU``: ``None``
+
+ * ``signal.SIGTSTP``: ``None``
+
+ * ``signal.SIGTERM``: ``'terminate'``
+
+ Depending on how the program will interact with its child
+ processes, it may need to specify a signal map that
+ includes the ``signal.SIGCHLD`` signal (received when a
+ child process exits). See the specific operating system's
+ documentation for more detail on how to determine what
+ circumstances dictate the need for signal handlers.
+
+ `uid`
+ :Default: ``os.getuid()``
+
+ `gid`
+ :Default: ``os.getgid()``
+
+ The user ID (“UID”) value and group ID (“GID”) value to switch
+ the process to on daemon start.
+
+ The default values, the real UID and GID of the process, will
+ relinquish any effective privilege elevation inherited by the
+ process.
+
+ `prevent_core`
+ :Default: ``True``
+
+ If true, prevents the generation of core files, in order to avoid
+ leaking sensitive information from daemons run as `root`.
+
+ `stdin`
+ :Default: ``None``
+
+ `stdout`
+ :Default: ``None``
+
+ `stderr`
+ :Default: ``None``
+
+ Each of `stdin`, `stdout`, and `stderr` is a file-like object
+ which will be used as the new file for the standard I/O stream
+ `sys.stdin`, `sys.stdout`, and `sys.stderr` respectively. The file
+ should therefore be open, with a minimum of mode 'r' in the case
+        of `stdin`, and a minimum of mode 'w+' in the case of `stdout` and
+ `stderr`.
+
+ If the object has a `fileno()` method that returns a file
+ descriptor, the corresponding file will be excluded from being
+ closed during daemon start (that is, it will be treated as though
+ it were listed in `files_preserve`).
+
+ If ``None``, the corresponding system stream is re-bound to the
+ file named by `os.devnull`.
+
+ """
+
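+    # A minimal sketch of option customisation (the directory, umask, and
+    # `main_loop` entry point below are illustrative, not part of the
+    # library):
+    #
+    #     import daemon
+    #
+    #     context = daemon.DaemonContext(
+    #             working_directory='/var/lib/example',
+    #             umask=0o022)
+    #     with context:
+    #         main_loop()
+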
+ __metaclass__ = type
+
+ def __init__(
+ self,
+ chroot_directory=None,
+ working_directory="/",
+ umask=0,
+ uid=None,
+ gid=None,
+ prevent_core=True,
+ detach_process=None,
+ files_preserve=None,
+ pidfile=None,
+ stdin=None,
+ stdout=None,
+ stderr=None,
+ signal_map=None,
+ ):
+ """ Set up a new instance. """
+ self.chroot_directory = chroot_directory
+ self.working_directory = working_directory
+ self.umask = umask
+ self.prevent_core = prevent_core
+ self.files_preserve = files_preserve
+ self.pidfile = pidfile
+ self.stdin = stdin
+ self.stdout = stdout
+ self.stderr = stderr
+
+ if uid is None:
+ uid = os.getuid()
+ self.uid = uid
+ if gid is None:
+ gid = os.getgid()
+ self.gid = gid
+
+ if detach_process is None:
+ detach_process = is_detach_process_context_required()
+ self.detach_process = detach_process
+
+ if signal_map is None:
+ signal_map = make_default_signal_map()
+ self.signal_map = signal_map
+
+ self._is_open = False
+
+ @property
+ def is_open(self):
+ """ ``True`` if the instance is currently open. """
+ return self._is_open
+
+ def open(self):
+ """ Become a daemon process.
+
+ :return: ``None``.
+
+ Open the daemon context, turning the current program into a daemon
+ process. This performs the following steps:
+
+ * If this instance's `is_open` property is true, return
+ immediately. This makes it safe to call `open` multiple times on
+ an instance.
+
+ * If the `prevent_core` attribute is true, set the resource limits
+ for the process to prevent any core dump from the process.
+
+ * If the `chroot_directory` attribute is not ``None``, set the
+ effective root directory of the process to that directory (via
+ `os.chroot`).
+
+ This allows running the daemon process inside a “chroot gaol”
+ as a means of limiting the system's exposure to rogue behaviour
+ by the process. Note that the specified directory needs to
+ already be set up for this purpose.
+
+ * Set the process UID and GID to the `uid` and `gid` attribute
+ values.
+
+ * Close all open file descriptors. This excludes those listed in
+ the `files_preserve` attribute, and those that correspond to the
+ `stdin`, `stdout`, or `stderr` attributes.
+
+ * Change current working directory to the path specified by the
+ `working_directory` attribute.
+
+ * Reset the file access creation mask to the value specified by
+ the `umask` attribute.
+
+ * If the `detach_process` option is true, detach the current
+ process into its own process group, and disassociate from any
+ controlling terminal.
+
+ * Set signal handlers as specified by the `signal_map` attribute.
+
+ * If any of the attributes `stdin`, `stdout`, `stderr` are not
+ ``None``, bind the system streams `sys.stdin`, `sys.stdout`,
+ and/or `sys.stderr` to the files represented by the
+ corresponding attributes. Where the attribute has a file
+ descriptor, the descriptor is duplicated (instead of re-binding
+ the name).
+
+ * If the `pidfile` attribute is not ``None``, enter its context
+ manager.
+
+ * Mark this instance as open (for the purpose of future `open` and
+ `close` calls).
+
+ * Register the `close` method to be called during Python's exit
+ processing.
+
+ When the function returns, the running program is a daemon
+ process.
+
+ """
+ if self.is_open:
+ return
+
+ if self.chroot_directory is not None:
+ change_root_directory(self.chroot_directory)
+
+ if self.prevent_core:
+ prevent_core_dump()
+
+ change_file_creation_mask(self.umask)
+ change_working_directory(self.working_directory)
+ change_process_owner(self.uid, self.gid)
+
+ if self.detach_process:
+ detach_process_context()
+
+ signal_handler_map = self._make_signal_handler_map()
+ set_signal_handlers(signal_handler_map)
+
+ exclude_fds = self._get_exclude_file_descriptors()
+ close_all_open_files(exclude=exclude_fds)
+
+ redirect_stream(sys.stdin, self.stdin)
+ redirect_stream(sys.stdout, self.stdout)
+ redirect_stream(sys.stderr, self.stderr)
+
+ if self.pidfile is not None:
+ self.pidfile.__enter__()
+
+ self._is_open = True
+
+ register_atexit_function(self.close)
+
+ def __enter__(self):
+ """ Context manager entry point. """
+ self.open()
+ return self
+
+ def close(self):
+ """ Exit the daemon process context.
+
+ :return: ``None``.
+
+ Close the daemon context. This performs the following steps:
+
+ * If this instance's `is_open` property is false, return
+ immediately. This makes it safe to call `close` multiple times
+ on an instance.
+
+ * If the `pidfile` attribute is not ``None``, exit its context
+ manager.
+
+ * Mark this instance as closed (for the purpose of future `open`
+ and `close` calls).
+
+ """
+ if not self.is_open:
+ return
+
+ if self.pidfile is not None:
+ # Follow the interface for telling a context manager to exit,
+ # <URL:http://docs.python.org/library/stdtypes.html#typecontextmanager>.
+ self.pidfile.__exit__(None, None, None)
+
+ self._is_open = False
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ """ Context manager exit point. """
+ self.close()
+
+ def terminate(self, signal_number, stack_frame):
+ """ Signal handler for end-process signals.
+
+ :param signal_number: The OS signal number received.
+ :param stack_frame: The frame object at the point the
+ signal was received.
+ :return: ``None``.
+
+ Signal handler for the ``signal.SIGTERM`` signal. Performs the
+ following step:
+
+ * Raise a ``SystemExit`` exception explaining the signal.
+
+ """
+ exception = SystemExit(
+ "Terminating on signal {signal_number!r}".format(
+ signal_number=signal_number))
+ raise exception
+
+ def _get_exclude_file_descriptors(self):
+ """ Get the set of file descriptors to exclude closing.
+
+ :return: A set containing the file descriptors for the
+ files to be preserved.
+
+ The file descriptors to be preserved are those from the
+ items in `files_preserve`, and also each of `stdin`,
+ `stdout`, and `stderr`. For each item:
+
+ * If the item is ``None``, it is omitted from the return
+ set.
+
+ * If the item's ``fileno()`` method returns a value, that
+ value is in the return set.
+
+ * Otherwise, the item is in the return set verbatim.
+
+ """
+ files_preserve = self.files_preserve
+ if files_preserve is None:
+ files_preserve = []
+ files_preserve.extend(
+ item for item in [self.stdin, self.stdout, self.stderr]
+ if hasattr(item, 'fileno'))
+
+ exclude_descriptors = set()
+ for item in files_preserve:
+ if item is None:
+ continue
+ file_descriptor = _get_file_descriptor(item)
+ if file_descriptor is not None:
+ exclude_descriptors.add(file_descriptor)
+ else:
+ exclude_descriptors.add(item)
+
+ return exclude_descriptors
+
+ def _make_signal_handler(self, target):
+ """ Make the signal handler for a specified target object.
+
+ :param target: A specification of the target for the
+ handler; see below.
+ :return: The value for use by `signal.signal()`.
+
+ If `target` is ``None``, return ``signal.SIG_IGN``. If `target`
+ is a text string, return the attribute of this instance named
+ by that string. Otherwise, return `target` itself.
+
+ """
+ if target is None:
+ result = signal.SIG_IGN
+ elif isinstance(target, unicode):
+ name = target
+ result = getattr(self, name)
+ else:
+ result = target
+
+ return result
+
+ def _make_signal_handler_map(self):
+ """ Make the map from signals to handlers for this instance.
+
+ :return: The constructed signal map for this instance.
+
+ Construct a map from signal numbers to handlers for this
+ context instance, suitable for passing to
+ `set_signal_handlers`.
+
+ """
+ signal_handler_map = dict(
+ (signal_number, self._make_signal_handler(target))
+ for (signal_number, target) in self.signal_map.items())
+ return signal_handler_map
+
+
+def _get_file_descriptor(obj):
+ """ Get the file descriptor, if the object has one.
+
+ :param obj: The object expected to be a file-like object.
+ :return: The file descriptor iff the file supports it; otherwise
+ ``None``.
+
+ The object may be a non-file object. It may also be a
+ file-like object with no support for a file descriptor. In
+ either case, return ``None``.
+
+ """
+ file_descriptor = None
+ if hasattr(obj, 'fileno'):
+ try:
+ file_descriptor = obj.fileno()
+ except ValueError:
+ # The item doesn't support a file descriptor.
+ pass
+
+ return file_descriptor
+
+
+def change_working_directory(directory):
+ """ Change the working directory of this process.
+
+ :param directory: The target directory path.
+ :return: ``None``.
+
+ """
+ try:
+ os.chdir(directory)
+ except Exception as exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change working directory ({exc})".format(exc=exc))
+ raise error
+
+
+def change_root_directory(directory):
+ """ Change the root directory of this process.
+
+ :param directory: The target directory path.
+ :return: ``None``.
+
+ Set the current working directory, then the process root directory,
+ to the specified `directory`. Requires appropriate OS privileges
+ for this process.
+
+ """
+ try:
+ os.chdir(directory)
+ os.chroot(directory)
+ except Exception as exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change root directory ({exc})".format(exc=exc))
+ raise error
+
+
+def change_file_creation_mask(mask):
+ """ Change the file creation mask for this process.
+
+ :param mask: The numeric file creation mask to set.
+ :return: ``None``.
+
+ """
+ try:
+ os.umask(mask)
+ except Exception as exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change file creation mask ({exc})".format(exc=exc))
+ raise error
+
+
+def change_process_owner(uid, gid):
+ """ Change the owning UID and GID of this process.
+
+ :param uid: The target UID for the daemon process.
+ :param gid: The target GID for the daemon process.
+ :return: ``None``.
+
+ Set the GID then the UID of the process (in that order, to avoid
+ permission errors) to the specified `gid` and `uid` values.
+ Requires appropriate OS privileges for this process.
+
+ """
+ try:
+ os.setgid(gid)
+ os.setuid(uid)
+ except Exception as exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change process owner ({exc})".format(exc=exc))
+ raise error
+
+
+def prevent_core_dump():
+ """ Prevent this process from generating a core dump.
+
+ :return: ``None``.
+
+ Set the soft and hard limits for core dump size to zero. On Unix,
+    this entirely prevents the process from creating a core dump.
+
+ """
+ core_resource = resource.RLIMIT_CORE
+
+ try:
+ # Ensure the resource limit exists on this platform, by requesting
+ # its current value.
+ core_limit_prev = resource.getrlimit(core_resource)
+ except ValueError as exc:
+ error = DaemonOSEnvironmentError(
+ "System does not support RLIMIT_CORE resource limit"
+ " ({exc})".format(exc=exc))
+ raise error
+
+ # Set hard and soft limits to zero, i.e. no core dump at all.
+ core_limit = (0, 0)
+ resource.setrlimit(core_resource, core_limit)
+
+
+def detach_process_context():
+ """ Detach the process context from parent and session.
+
+ :return: ``None``.
+
+ Detach from the parent process and session group, allowing the
+ parent to exit while this process continues running.
+
+ Reference: “Advanced Programming in the Unix Environment”,
+ section 13.3, by W. Richard Stevens, published 1993 by
+ Addison-Wesley.
+
+ """
+
+ def fork_then_exit_parent(error_message):
+ """ Fork a child process, then exit the parent process.
+
+ :param error_message: Message for the exception in case of a
+ detach failure.
+ :return: ``None``.
+ :raise DaemonProcessDetachError: If the fork fails.
+
+ """
+ try:
+ pid = os.fork()
+ if pid > 0:
+ os._exit(0)
+ except OSError as exc:
+ error = DaemonProcessDetachError(
+ "{message}: [{exc.errno:d}] {exc.strerror}".format(
+ message=error_message, exc=exc))
+ raise error
+
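+    # The first fork lets the parent exit, so the child is re-parented to
+    # `init`; `setsid` then makes the child a session leader with no
+    # controlling terminal; the second fork ensures the daemon is no longer
+    # a session leader, so it can never reacquire a controlling terminal.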
+ fork_then_exit_parent(error_message="Failed first fork")
+ os.setsid()
+ fork_then_exit_parent(error_message="Failed second fork")
+
+
+def is_process_started_by_init():
+ """ Determine whether the current process is started by `init`.
+
+ :return: ``True`` iff the parent process is `init`; otherwise
+ ``False``.
+
+    The `init` process is the one with process ID 1.
+
+ """
+ result = False
+
+ init_pid = 1
+ if os.getppid() == init_pid:
+ result = True
+
+ return result
+
+
+def is_socket(fd):
+ """ Determine whether the file descriptor is a socket.
+
+ :param fd: The file descriptor to interrogate.
+ :return: ``True`` iff the file descriptor is a socket; otherwise
+ ``False``.
+
+ Query the socket type of `fd`. If there is no error, the file is a
+ socket.
+
+ """
+ result = False
+
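+    # ``socket.fromfd`` duplicates the descriptor, so the query below is
+    # made on a copy and `fd` itself is left untouched.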
+ file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)
+
+ try:
+ socket_type = file_socket.getsockopt(
+ socket.SOL_SOCKET, socket.SO_TYPE)
+ except socket.error as exc:
+ exc_errno = exc.args[0]
+ if exc_errno == errno.ENOTSOCK:
+ # Socket operation on non-socket.
+ pass
+ else:
+ # Some other socket error.
+ result = True
+ else:
+ # No error getting socket type.
+ result = True
+
+ return result
+
+
+def is_process_started_by_superserver():
+ """ Determine whether the current process is started by the superserver.
+
+ :return: ``True`` if this process was started by the internet
+ superserver; otherwise ``False``.
+
+ The internet superserver creates a network socket, and
+ attaches it to the standard streams of the child process. If
+ that is the case for this process, return ``True``, otherwise
+ ``False``.
+
+ """
+ result = False
+
+ stdin_fd = sys.__stdin__.fileno()
+ if is_socket(stdin_fd):
+ result = True
+
+ return result
+
+
+def is_detach_process_context_required():
+ """ Determine whether detaching the process context is required.
+
+    :return: ``True`` iff detaching the process context is required;
+        otherwise ``False``.
+
+ The process environment is interrogated for the following:
+
+ * Process was started by `init`; or
+
+ * Process was started by `inetd`.
+
+ If any of the above are true, the process is deemed to be already
+ detached.
+
+ """
+ result = True
+ if is_process_started_by_init() or is_process_started_by_superserver():
+ result = False
+
+ return result
+
+
+def close_file_descriptor_if_open(fd):
+ """ Close a file descriptor if already open.
+
+ :param fd: The file descriptor to close.
+ :return: ``None``.
+
+    Close the file descriptor `fd`, suppressing an error if the
+    descriptor was not open.
+
+ """
+ try:
+ os.close(fd)
+ except EnvironmentError as exc:
+ if exc.errno == errno.EBADF:
+ # File descriptor was not open.
+ pass
+ else:
+ error = DaemonOSEnvironmentError(
+ "Failed to close file descriptor {fd:d} ({exc})".format(
+ fd=fd, exc=exc))
+ raise error
+
+
+MAXFD = 2048
+
+def get_maximum_file_descriptors():
+ """ Get the maximum number of open file descriptors for this process.
+
+ :return: The number (integer) to use as the maximum number of open
+ files for this process.
+
+        The maximum is the process's hard resource limit on the number of
+ open file descriptors. If the limit is “infinity”, a default value
+ of ``MAXFD`` is returned.
+
+ """
+ limits = resource.getrlimit(resource.RLIMIT_NOFILE)
+ result = limits[1]
+ if result == resource.RLIM_INFINITY:
+ result = MAXFD
+ return result
+
+
+def close_all_open_files(exclude=set()):
+ """ Close all open file descriptors.
+
+ :param exclude: Collection of file descriptors to skip when closing
+ files.
+ :return: ``None``.
+
+ Closes every file descriptor (if open) of this process. If
+ specified, `exclude` is a set of file descriptors to *not*
+ close.
+
+ """
+ maxfd = get_maximum_file_descriptors()
+ for fd in reversed(range(maxfd)):
+ if fd not in exclude:
+ close_file_descriptor_if_open(fd)
+
+
+def redirect_stream(system_stream, target_stream):
+ """ Redirect a system stream to a specified file.
+
+    :param system_stream: A file object representing a standard I/O
+ stream.
+ :param target_stream: The target file object for the redirected
+ stream, or ``None`` to specify the null device.
+ :return: ``None``.
+
+ `system_stream` is a standard system stream such as
+ ``sys.stdout``. `target_stream` is an open file object that
+ should replace the corresponding system stream object.
+
+ If `target_stream` is ``None``, defaults to opening the
+ operating system's null device and using its file descriptor.
+
+ """
+ if target_stream is None:
+ target_fd = os.open(os.devnull, os.O_RDWR)
+ else:
+ target_fd = target_stream.fileno()
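+    # `dup2` re-points the stream's existing descriptor at the target file,
+    # closing whatever it previously referred to; code holding e.g.
+    # `sys.stdout` keeps working unchanged.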
+ os.dup2(target_fd, system_stream.fileno())
+
+
+def make_default_signal_map():
+ """ Make the default signal map for this system.
+
+ :return: A mapping from signal number to handler object.
+
+ The signals available differ by system. The map will not contain
+ any signals not defined on the running system.
+
+ """
+ name_map = {
+ 'SIGTSTP': None,
+ 'SIGTTIN': None,
+ 'SIGTTOU': None,
+ 'SIGTERM': 'terminate',
+ }
+ signal_map = dict(
+ (getattr(signal, name), target)
+ for (name, target) in name_map.items()
+ if hasattr(signal, name))
+
+ return signal_map
+
+
+def set_signal_handlers(signal_handler_map):
+ """ Set the signal handlers as specified.
+
+ :param signal_handler_map: A map from signal number to handler
+ object.
+ :return: ``None``.
+
+ See the `signal` module for details on signal numbers and signal
+ handlers.
+
+ """
+ for (signal_number, handler) in signal_handler_map.items():
+ signal.signal(signal_number, handler)
+
+
+def register_atexit_function(func):
+ """ Register a function for processing at program exit.
+
+ :param func: A callable function expecting no arguments.
+ :return: ``None``.
+
+ The function `func` is registered for a call with no arguments
+ at program exit.
+
+ """
+ atexit.register(func)
+
+
+def _chain_exception_from_existing_exception_context(exc, as_cause=False):
+ """ Decorate the specified exception with the existing exception context.
+
+ :param exc: The exception instance to decorate.
+ :param as_cause: If true, the existing context is declared to be
+ the cause of the exception.
+ :return: ``None``.
+
+ :PEP:`344` describes syntax and attributes (`__traceback__`,
+ `__context__`, `__cause__`) for use in exception chaining.
+
+ Python 2 does not have that syntax, so this function decorates
+ the exception with values from the current exception context.
+
+ """
+ (existing_exc_type, existing_exc, existing_traceback) = sys.exc_info()
+ if as_cause:
+ exc.__cause__ = existing_exc
+ else:
+ exc.__context__ = existing_exc
+ exc.__traceback__ = existing_traceback
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/pidfile.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/pidfile.py
new file mode 100755
index 00000000..4517ee0e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/pidfile.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+
+# daemon/pidfile.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Lockfile behaviour implemented via Unix PID files.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+from lockfile.pidlockfile import PIDLockFile
+
+
+class TimeoutPIDLockFile(PIDLockFile, object):
+ """ Lockfile with default timeout, implemented as a Unix PID file.
+
+ This uses the ``PIDLockFile`` implementation, with the
+ following changes:
+
+ * The `acquire_timeout` parameter to the initialiser will be
+ used as the default `timeout` parameter for the `acquire`
+ method.
+
+ """
+
+ def __init__(self, path, acquire_timeout=None, *args, **kwargs):
+ """ Set up the parameters of a TimeoutPIDLockFile.
+
+ :param path: Filesystem path to the PID file.
+ :param acquire_timeout: Value to use by default for the
+ `acquire` call.
+ :return: ``None``.
+
+ """
+ self.acquire_timeout = acquire_timeout
+ super(TimeoutPIDLockFile, self).__init__(path, *args, **kwargs)
+
+ def acquire(self, timeout=None, *args, **kwargs):
+ """ Acquire the lock.
+
+ :param timeout: Specifies the timeout; see below for valid
+ values.
+ :return: ``None``.
+
+ The `timeout` defaults to the value set during
+ initialisation with the `acquire_timeout` parameter. It is
+ passed to `PIDLockFile.acquire`; see that method for
+ details.
+
+ """
+ if timeout is None:
+ timeout = self.acquire_timeout
+ super(TimeoutPIDLockFile, self).acquire(timeout, *args, **kwargs)
+
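+# A minimal usage sketch (the path is hypothetical): the timeout given at
+# construction becomes the default for every later `acquire` call.
+#
+#     pidfile = TimeoutPIDLockFile('/var/run/example.pid', acquire_timeout=5)
+#     pidfile.acquire()       # waits at most 5 seconds for the lock
+#     pidfile.release()
+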
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/runner.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/runner.py
new file mode 100755
index 00000000..6973cf1c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/runner.py
@@ -0,0 +1,324 @@
+# -*- coding: utf-8 -*-
+
+# daemon/runner.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2009–2015 Ben Finney <ben+python@benfinney.id.au>
+# Copyright © 2007–2008 Robert Niederreiter, Jens Klein
+# Copyright © 2003 Clark Evans
+# Copyright © 2002 Noah Spurrier
+# Copyright © 2001 Jürgen Hermann
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Daemon runner library.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import sys
+import os
+import signal
+import errno
+try:
+ # Python 3 standard library.
+ ProcessLookupError
+except NameError:
+ # No such class in Python 2.
+ ProcessLookupError = NotImplemented
+
+import lockfile
+
+from . import pidfile
+from .daemon import (basestring, unicode)
+from .daemon import DaemonContext
+from .daemon import _chain_exception_from_existing_exception_context
+
+
+class DaemonRunnerError(Exception):
+ """ Abstract base class for errors from DaemonRunner. """
+
+ def __init__(self, *args, **kwargs):
+ self._chain_from_context()
+
+ super(DaemonRunnerError, self).__init__(*args, **kwargs)
+
+ def _chain_from_context(self):
+ _chain_exception_from_existing_exception_context(self, as_cause=True)
+
+
+class DaemonRunnerInvalidActionError(DaemonRunnerError, ValueError):
+ """ Raised when specified action for DaemonRunner is invalid. """
+
+ def _chain_from_context(self):
+ # This exception is normally not caused by another.
+ _chain_exception_from_existing_exception_context(self, as_cause=False)
+
+
+class DaemonRunnerStartFailureError(DaemonRunnerError, RuntimeError):
+ """ Raised when failure starting DaemonRunner. """
+
+
+class DaemonRunnerStopFailureError(DaemonRunnerError, RuntimeError):
+ """ Raised when failure stopping DaemonRunner. """
+
+
+class DaemonRunner:
+ """ Controller for a callable running in a separate background process.
+
+ The first command-line argument is the action to take:
+
+ * 'start': Become a daemon and call `app.run()`.
+ * 'stop': Exit the daemon process specified in the PID file.
+ * 'restart': Stop, then start.
+
+ """
+
+ __metaclass__ = type
+
+ start_message = "started with pid {pid:d}"
+
+ def __init__(self, app):
+ """ Set up the parameters of a new runner.
+
+ :param app: The application instance; see below.
+ :return: ``None``.
+
+ The `app` argument must have the following attributes:
+
+ * `stdin_path`, `stdout_path`, `stderr_path`: Filesystem paths
+ to open and replace the existing `sys.stdin`, `sys.stdout`,
+ `sys.stderr`.
+
+ * `pidfile_path`: Absolute filesystem path to a file that will
+ be used as the PID file for the daemon. If ``None``, no PID
+ file will be used.
+
+ * `pidfile_timeout`: Used as the default acquisition timeout
+ value supplied to the runner's PID lock file.
+
+ * `run`: Callable that will be invoked when the daemon is
+ started.
+
+ """
+ self.parse_args()
+ self.app = app
+ self.daemon_context = DaemonContext()
+ self.daemon_context.stdin = open(app.stdin_path, 'rt')
+ self.daemon_context.stdout = open(app.stdout_path, 'w+t')
+        # Line-buffered: Python 3 rejects unbuffered (buffering=0) text mode.
+        self.daemon_context.stderr = open(
+                app.stderr_path, 'w+t', buffering=1)
+
+ self.pidfile = None
+ if app.pidfile_path is not None:
+ self.pidfile = make_pidlockfile(
+ app.pidfile_path, app.pidfile_timeout)
+ self.daemon_context.pidfile = self.pidfile
+
+ def _usage_exit(self, argv):
+ """ Emit a usage message, then exit.
+
+ :param argv: The command-line arguments used to invoke the
+ program, as a sequence of strings.
+ :return: ``None``.
+
+ """
+ progname = os.path.basename(argv[0])
+ usage_exit_code = 2
+ action_usage = "|".join(self.action_funcs.keys())
+ message = "usage: {progname} {usage}".format(
+ progname=progname, usage=action_usage)
+ emit_message(message)
+ sys.exit(usage_exit_code)
+
+ def parse_args(self, argv=None):
+ """ Parse command-line arguments.
+
+ :param argv: The command-line arguments used to invoke the
+ program, as a sequence of strings.
+
+ :return: ``None``.
+
+ The parser expects the first argument as the program name, the
+ second argument as the action to perform.
+
+ If the parser fails to parse the arguments, emit a usage
+ message and exit the program.
+
+ """
+ if argv is None:
+ argv = sys.argv
+
+ min_args = 2
+ if len(argv) < min_args:
+ self._usage_exit(argv)
+
+ self.action = unicode(argv[1])
+ if self.action not in self.action_funcs:
+ self._usage_exit(argv)
+
+ def _start(self):
+ """ Open the daemon context and run the application.
+
+ :return: ``None``.
+ :raises DaemonRunnerStartFailureError: If the PID file cannot
+ be locked by this process.
+
+ """
+ if is_pidfile_stale(self.pidfile):
+ self.pidfile.break_lock()
+
+ try:
+ self.daemon_context.open()
+ except lockfile.AlreadyLocked:
+ error = DaemonRunnerStartFailureError(
+ "PID file {pidfile.path!r} already locked".format(
+ pidfile=self.pidfile))
+ raise error
+
+ pid = os.getpid()
+ message = self.start_message.format(pid=pid)
+ emit_message(message)
+
+ self.app.run()
+
+ def _terminate_daemon_process(self):
+ """ Terminate the daemon process specified in the current PID file.
+
+ :return: ``None``.
+ :raises DaemonRunnerStopFailureError: If terminating the daemon
+ fails with an OS error.
+
+ """
+ pid = self.pidfile.read_pid()
+ try:
+ os.kill(pid, signal.SIGTERM)
+ except OSError as exc:
+ error = DaemonRunnerStopFailureError(
+ "Failed to terminate {pid:d}: {exc}".format(
+ pid=pid, exc=exc))
+ raise error
+
+ def _stop(self):
+ """ Exit the daemon process specified in the current PID file.
+
+ :return: ``None``.
+ :raises DaemonRunnerStopFailureError: If the PID file is not
+ already locked.
+
+ """
+ if not self.pidfile.is_locked():
+ error = DaemonRunnerStopFailureError(
+ "PID file {pidfile.path!r} not locked".format(
+ pidfile=self.pidfile))
+ raise error
+
+ if is_pidfile_stale(self.pidfile):
+ self.pidfile.break_lock()
+ else:
+ self._terminate_daemon_process()
+
+ def _restart(self):
+ """ Stop, then start.
+ """
+ self._stop()
+ self._start()
+
+ action_funcs = {
+ 'start': _start,
+ 'stop': _stop,
+ 'restart': _restart,
+ }
+
+ def _get_action_func(self):
+ """ Get the function for the specified action.
+
+ :return: The function object corresponding to the specified
+ action.
+ :raises DaemonRunnerInvalidActionError: if the action is
+ unknown.
+
+ The action is specified by the `action` attribute, which is set
+ during `parse_args`.
+
+ """
+ try:
+ func = self.action_funcs[self.action]
+ except KeyError:
+ error = DaemonRunnerInvalidActionError(
+ "Unknown action: {action!r}".format(
+ action=self.action))
+ raise error
+ return func
+
+ def do_action(self):
+ """ Perform the requested action.
+
+ :return: ``None``.
+
+ The action is specified by the `action` attribute, which is set
+ during `parse_args`.
+
+ """
+ func = self._get_action_func()
+ func(self)
+
+
+def emit_message(message, stream=None):
+ """ Emit a message to the specified stream (default `sys.stderr`). """
+ if stream is None:
+ stream = sys.stderr
+ stream.write("{message}\n".format(message=message))
+ stream.flush()
+
+
+def make_pidlockfile(path, acquire_timeout):
+ """ Make a PIDLockFile instance with the given filesystem path. """
+ if not isinstance(path, basestring):
+ error = ValueError("Not a filesystem path: {path!r}".format(
+ path=path))
+ raise error
+ if not os.path.isabs(path):
+ error = ValueError("Not an absolute path: {path!r}".format(
+ path=path))
+ raise error
+ lockfile = pidfile.TimeoutPIDLockFile(path, acquire_timeout)
+
+ return lockfile
+
+
+def is_pidfile_stale(pidfile):
+ """ Determine whether a PID file is stale.
+
+ :return: ``True`` iff the PID file is stale; otherwise ``False``.
+
+ The PID file is “stale” if its contents are valid but do not
+ match the PID of a currently-running process.
+
+ """
+ result = False
+
+ pidfile_pid = pidfile.read_pid()
+ if pidfile_pid is not None:
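+        # ``signal.SIG_DFL`` has the numeric value 0, so this is the classic
+        # “signal 0” probe: no signal is delivered, but `kill` fails if no
+        # process with that PID exists.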
+ try:
+ os.kill(pidfile_pid, signal.SIG_DFL)
+ except ProcessLookupError:
+ # The specified PID does not exist.
+ result = True
+ except OSError as exc:
+ if exc.errno == errno.ESRCH:
+ # Under Python 2, process lookup error is an OSError.
+ # The specified PID does not exist.
+ result = True
+
+ return result
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/CREDITS b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/CREDITS
new file mode 100755
index 00000000..feb65d5e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/CREDITS
@@ -0,0 +1,53 @@
+Credits for contributors to ‘python-daemon’
+###########################################
+
+:Updated: 2014-12-23
+
+The ‘python-daemon’ library is the work of many contributors.
+
+
+Primary developers
+==================
+
+The library has been maintained over the years by:
+
+* Ben Finney <ben+python@benfinney.id.au>
+* Robert Niederreiter
+* Jens Klein
+
+
+Precursors
+==========
+
+The library code base is inherited from prior work by:
+
+* Chad J. Schroeder
+* Clark Evans
+* Noah Spurrier
+* Jürgen Hermann
+
+
+Additional contributors
+=======================
+
+People who have also contributed substantial improvements:
+
+
+
+..
+ This is free software: you may copy, modify, and/or distribute this work
+ under the terms of the Apache License version 2.0 as published by the
+ Apache Software Foundation.
+ No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+..
+ Local variables:
+ coding: utf-8
+ mode: text
+ mode: rst
+ time-stamp-format: "%:y-%02m-%02d"
+ time-stamp-start: "^:Updated:[ ]+"
+ time-stamp-end: "$"
+ time-stamp-line-limit: 20
+ End:
+ vim: fileencoding=utf-8 filetype=rst :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/FAQ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/FAQ
new file mode 100755
index 00000000..1fcc4658
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/FAQ
@@ -0,0 +1,156 @@
+‘python-daemon’ Frequently Asked Questions
+##########################################
+
+:Author: Ben Finney <ben+python@benfinney.id.au>
+:Updated: 2015-01-10
+
+.. contents::
+..
+ 1 General
+ 1.1 What is the purpose of the ‘python-daemon’ library?
+ 1.2 How can I run a service communicating with a separate daemon process?
+ 2 Security
+ 2.1 Why is the umask set to 0 by default?
+ 3 File descriptors
+ 3.1 Why does the output stop after opening the daemon context?
+ 3.2 How can I preserve a ‘logging’ handler's file descriptor?
+
+General
+=======
+
+What is the purpose of the ‘python-daemon’ library?
+---------------------------------------------------
+
+The ‘python-daemon’ library has a deliberately narrow focus: that of
+being a reference implementation for `PEP 3143`_, “Standard daemon
+process library”.
+
+.. _`PEP 3143`: http://www.python.org/dev/peps/pep-3143
+
+How can I run a service communicating with a separate daemon process?
+---------------------------------------------------------------------
+
+As specified in `PEP 3143`_, the ‘python-daemon’ library is
+specifically focussed on the goal of having the *current running
+program* become a well-behaved Unix daemon process. This leaves open
+the question of how this program is started, or about multiple
+programs interacting. As detailed in PEP 3143:
+
+ A daemon is not a service
+
+ There is a related concept in many systems, called a “service”. A
+ service differs from the model in this PEP, in that rather than
+ having the *current* program continue to run as a daemon process,
+ a service starts an *additional* process to run in the background,
+ and the current process communicates with that additional process
+ via some defined channels.
+
+ The Unix-style daemon model in this PEP can be used, among other
+ things, to implement the background-process part of a service; but
+ this PEP does not address the other aspects of setting up and
+ managing a service.
+
+A possible starting point for such a “service” model of execution is
+in a `message from 2009-01-30`_ to the ``python-ideas`` forum.
+
+.. _`message from 2009-01-30`: http://mail.python.org/pipermail/python-ideas/2009-January/002606.html
+
+
+Security
+========
+
+Why is the umask set to 0 by default?
+-------------------------------------
+
+A daemon should not rely on the parent process's umask value, which is
+beyond its control and may prevent creating a file with the required
+access mode. So when the daemon context opens, the umask is set to an
+explicit known value.
+
+If the conventional value of 0 is too open, consider setting a value
+such as 0o022, 0o027, 0o077, or another specific value. Otherwise,
+ensure the daemon creates every file with an explicit access mode for
+the purpose.
+
+
+File descriptors
+================
+
+Why does the output stop after opening the daemon context?
+----------------------------------------------------------
+
+The specified behaviour in `PEP 3143`_ includes the requirement to
+detach the process from the controlling terminal (to allow the process
+to continue to run as a daemon), and to close all file descriptors not
+known to be safe once detached (to ensure any files that continue to
+be used are under the control of the daemon process).
+
+If you want the process to generate output via the system streams
+‘sys.stdout’ and ‘sys.stderr’, set the ‘DaemonContext’'s ‘stdout’
+and/or ‘stderr’ options to a file-like object (e.g. the ‘stream’
+attribute of a ‘logging.Handler’ instance). If these objects have file
+descriptors, they will be preserved when the daemon context opens.
+
+How can I preserve a ‘logging’ handler's file descriptor?
+---------------------------------------------------------
+
+The ‘DaemonContext.open’ method conforms to `PEP 3143`_ by closing all
+open file descriptors, but excluding those files specified in the
+‘files_preserve’ option. This option is a list of files or file
+descriptors.
+
+The Python standard library ‘logging’ module provides log handlers
+that write to streams, including to files via the ‘StreamHandler’
+class and its sub-classes. The documentation (both the online `logging
+module documentation`_ and the docstrings for the code) makes no
+mention of a way to get at the stream associated with a handler
+object.
+
+However, looking at the source code for ‘StreamHandler’, in Python 2.5
+as ``/usr/lib/python2.5/logging/__init__.py``, shows a ‘stream’
+attribute that is bound to the stream object. The attribute is not
+marked private (i.e. it is not named with a leading underscore), so we
+can presume it is part of the public API.
+
+That attribute can then be used to specify that a logging handler's
+file descriptor should, when the ‘DaemonContext’ opens, be excluded
+from closure::
+
+ import logging
+ import daemon
+
+ # any subclass of StreamHandler should provide the ‘stream’ attribute.
+ lh = logging.handlers.TimedRotatingFileHandler(
+ "/var/log/foo.log",
+ # …
+ )
+
+ # … do some logging and other activity …
+
+ daemon_context = daemon.DaemonContext()
+ daemon_context.files_preserve = [lh.stream]
+
+ daemon_context.open()
+
+ # … continue as a daemon process …
+
+.. _`logging module documentation`: http://docs.python.org/library/logging
+
+
+..
+ This is free software: you may copy, modify, and/or distribute this work
+ under the terms of the Apache License version 2.0 as published by the
+ Apache Software Foundation.
+ No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+..
+ Local variables:
+ coding: utf-8
+ mode: text
+ mode: rst
+ time-stamp-format: "%:y-%02m-%02d"
+ time-stamp-start: "^:Updated:[ ]+"
+ time-stamp-end: "$"
+ time-stamp-line-limit: 20
+ End:
+ vim: fileencoding=utf-8 filetype=rst :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/TODO b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/TODO
new file mode 100755
index 00000000..81b41481
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/TODO
@@ -0,0 +1,95 @@
+TODO for ‘python-daemon’ library
+################################
+
+:Updated: 2015-01-10
+
+=======
+PENDING
+=======
+
+Tests
+=====
+
+Libraries
+=========
+
+* Evaluate switching to ‘flufl.lock’ library for PID lockfile behaviour
+ <http://pypi.python.org/pypi/flufl.lock>_.
+
+Features
+========
+
+Important
+---------
+
+Wishlist
+--------
+
+* Allow specification of a syslog service name to log as (default:
+ output to stdout and stderr, not syslog).
+
+Documentation
+=============
+
+Standard library inclusion
+==========================
+
+
+====
+DONE
+====
+
+* Convert to Python 2 and Python 3 compatible code base.
+
+* Work correctly with current ‘lockfile’ library (0.10 or later).
+
+* Write full unit tests for every new or changed behaviour at time of
+ commit.
+
+* Detect whether started by another process that handles
+ daemonisation, such as ‘inetd’, and behave appropriately.
+
+* Detach to new process and session group.
+
+* Allow specification of working directory (default: '/').
+
+* Allow specification of umask (default: 0o000).
+
+* Drop ‘suid’ and ‘sgid’ privileges if set.
+
+* Close all open file handles.
+
+* Re-open stdin, stdout, stderr to user-specified files.
+
+* Default re-open stdin, stdout, stderr to ‘/dev/null’.
+
+* Allow specification of a non-root user and group to drop to, if
+ started as ‘root’ (default: no change of user or group).
+
+* Implement context manager protocol for daemon context.
+
+* Allow specification of PID file with its own context manager
+ (default: no PID file).
+
+* Full docstrings for functions, classes, and modules.
+
+* PEP 3143 for adding this library to the Python standard library.
+
+
+..
+ This is free software: you may copy, modify, and/or distribute this work
+ under the terms of the Apache License version 2.0 as published by the
+ Apache Software Foundation.
+ No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+..
+ Local variables:
+ coding: utf-8
+ mode: text
+ mode: rst
+ time-stamp-format: "%:y-%02m-%02d"
+ time-stamp-start: "^:Updated:[ ]+"
+ time-stamp-end: "$"
+ time-stamp-line-limit: 20
+ End:
+ vim: fileencoding=utf-8 filetype=rst :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/hacking.txt b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/hacking.txt
new file mode 100755
index 00000000..9484dbd0
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/hacking.txt
@@ -0,0 +1,180 @@
+Developer's guide
+#################
+
+:Author: Ben Finney <ben+python@benfinney.id.au>
+:Updated: 2014-11-28
+
+
+Project layout
+==============
+
+::
+
+ ./ Top level of source tree
+ doc/ Project documentation
+ bin/ Executable programs
+ daemon/ Main ‘daemon’ library
+ test/ Unit tests
+
+
+Code style
+==========
+
+Python
+------
+
+All Python code should conform to the guidelines in PEP8_. In
+particular:
+
+* Indent each level using 4 spaces (``U+0020 SPACE``), and no TABs
+  (``U+0009 CHARACTER TABULATION``).
+
+* Name modules in lower case, ``multiplewordslikethis``.
+
+* Name classes in title case, ``MultipleWordsLikeThis``.
+
+* Name functions, instances and other variables in lower case,
+ ``multiple_words_like_this``.
+
+* Every module, class, and function has a Python doc string explaining
+ its purpose and API.
+
+ *Exception*: Functions whose purpose and API are mandated by Python
+ itself (dunder-named methods) do not need a doc string.
+
+* Doc strings are written as triple-quoted strings.
+
+ * The text of the doc string is marked up with reStructuredText.
+
+ * The first line is a one-line synopsis of the object. This summary
+ line appears on the same line as the opening triple-quote,
+ separated by a single space.
+
+ * Further lines, if needed, are separated from the first by one
+ blank line.
+
+ * The synopsis is separated by one space from the opening
+ triple-quote; this causes it to appear four columns past the
+ beginning of the line. All subsequent lines are indented at least
+ four columns also.
+
+ * The synopsis is followed by a reStructuredText field list. The
+ field names are: “param foo” for each parameter (where “foo” is
+ the parameter name), and “return” for the return value. The field
+ values describe the purpose of each.
+
+ * The closing triple-quote appears on a separate line.
+
+ Example::
+
+ def frobnicate(spam, algorithm="dv"):
+ """ Perform frobnication on ``spam``.
+
+ :param spam: A travortionate (as a sequence of strings).
+ :param algorithm: The name of the algorithm to use for
+ frobnicating the travortionate.
+ :return: The frobnicated travortionate, if it is
+ non-empty; otherwise None.
+
+ The frobnication is done by the Dietzel-Venkman algorithm,
+ and optimises for the case where ``spam`` is freebled and
+ agglutinative.
+
+ """
+ spagnify(spam)
+ # …
+
+* All ``import`` statements appear at the top of the module.
+
+* Each ``import`` statement imports a single module, or multiple names
+ from a single module.
+
+ Example::
+
+ import sys
+ import os
+ from spam import foo, bar, baz
+
+.. _PEP8: http://www.python.org/dev/peps/pep-0008/
+
+Additional style guidelines:
+
+* All text files (including program code) are encoded in UTF-8.
+
+* A page break (``U+000C FORM FEED``) whitespace character is used
+ within a module to break up semantically separate areas of the
+ module.
+
+* Editor hints for Emacs and Vim appear in a comment block at the
+ file's end::
+
+
+ # Local variables:
+ # coding: utf-8
+ # mode: python
+ # End:
+ # vim: fileencoding=utf-8 filetype=python :
+
+
+Unit tests
+==========
+
+All code should aim for 100% coverage by unit tests. New code, or
+changes to existing code, will only be considered for inclusion in the
+development tree when accompanied by corresponding additions or
+changes to the unit tests.
+
+Test-driven development
+-----------------------
+
+Where possible, practice test-driven development to implement program
+code.
+
+* During a development session, maintain a separate window or terminal
+  with the project's unit test suite running continuously, or
+  re-running automatically every few seconds.
+
+* Any time a test is failing, the only valid change to the program
+  code is one that makes all the tests pass.
+
+* Develop new interface features (changes to the program unit's
+ behaviour) only when all current tests pass.
+
+* Refactor as needed, but only when all tests pass.
+
+  * Refactoring is any change to the code which does not alter its
+    interface or expected behaviour, such as performance
+    optimisations, readability improvements, or modularisation
+    improvements.
+
+* Develop new or changed program behaviour by:
+
+ * *First* write a single, specific test case for that new behaviour,
+ then watch the test fail in the absence of the desired behaviour.
+
+ * Implement the minimum necessary change to satisfy the failing
+ test. Continue until all tests pass again, then stop making
+ functional changes.
+
+ * Once all tests (including the new test) pass, consider refactoring
+ the code and the tests immediately, then ensure all the tests pass
+ again after any changes.
+
+ * Iterate for each incremental change in interface or behaviour.
+
+Test-driven development is not absolutely necessary, but it is the
+simplest, most direct way to produce program changes accompanied by
+the unit tests required for their inclusion in the project; a sketch
+of the first step of one such cycle follows.
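+
+For example (the ‘frobnicate’ function and its ``spam`` module are
+the hypothetical ones from the docstring example above)::
+
+    import unittest
+
+    from spam import frobnicate
+
+    class frobnicate_TestCase(unittest.TestCase):
+        """ Test cases for ‘frobnicate’ function. """
+
+        def test_returns_none_for_empty_input(self):
+            """ Should return None when ``spam`` is empty. """
+            result = frobnicate([])
+            self.assertIs(result, None)
+
+Run the suite, watch this new test fail, then make the minimum change
+to ``frobnicate`` that lets every test pass.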
+
+
+..
+ Local variables:
+ coding: utf-8
+ mode: rst
+ time-stamp-format: "%:y-%02m-%02d"
+ time-stamp-start: "^:Updated:[ ]+"
+ time-stamp-end: "$"
+ time-stamp-line-limit: 20
+ End:
+ vim: fileencoding=utf-8 filetype=rst :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/PKG-INFO b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/PKG-INFO
new file mode 100755
index 00000000..fd81f509
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/PKG-INFO
@@ -0,0 +1,38 @@
+Metadata-Version: 1.1
+Name: python-daemon
+Version: 2.0.5
+Summary: Library to implement a well-behaved Unix daemon process.
+Home-page: https://alioth.debian.org/projects/python-daemon/
+Author: Ben Finney
+Author-email: ben+python@benfinney.id.au
+License: Apache-2
+Description: This library implements the well-behaved daemon specification of
+ :pep:`3143`, “Standard daemon process library”.
+
+ A well-behaved Unix daemon process is tricky to get right, but the
+ required steps are much the same for every daemon program. A
+ `DaemonContext` instance holds the behaviour and configured
+ process environment for the program; use the instance as a context
+ manager to enter a daemon state.
+
+ Simple example of usage::
+
+ import daemon
+
+ from spam import do_main_program
+
+ with daemon.DaemonContext():
+ do_main_program()
+
+ Customisation of the steps to become a daemon is available by
+ setting options on the `DaemonContext` instance; see the
+ documentation for that class for each option.
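+
+        A sketch of such customisation (the option values shown
+        are illustrative only)::
+
+            import daemon
+
+            context = daemon.DaemonContext(
+                working_directory="/var/lib/spam",
+                umask=0o002,
+                )
+
+            with context:
+                do_main_program()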
+Keywords: daemon,fork,unix
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/SOURCES.txt b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/SOURCES.txt
new file mode 100755
index 00000000..6e176719
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/SOURCES.txt
@@ -0,0 +1,30 @@
+ChangeLog
+LICENSE.ASF-2
+LICENSE.GPL-3
+MANIFEST.in
+setup.cfg
+setup.py
+test_version.py
+version.py
+daemon/__init__.py
+daemon/_metadata.py
+daemon/daemon.py
+daemon/pidfile.py
+daemon/runner.py
+doc/CREDITS
+doc/FAQ
+doc/TODO
+doc/hacking.txt
+python_daemon.egg-info/PKG-INFO
+python_daemon.egg-info/SOURCES.txt
+python_daemon.egg-info/dependency_links.txt
+python_daemon.egg-info/not-zip-safe
+python_daemon.egg-info/requires.txt
+python_daemon.egg-info/top_level.txt
+python_daemon.egg-info/version_info.json
+test/__init__.py
+test/scaffold.py
+test/test_daemon.py
+test/test_metadata.py
+test/test_pidfile.py
+test/test_runner.py \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/dependency_links.txt b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/dependency_links.txt
new file mode 100755
index 00000000..8b137891
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/not-zip-safe b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/not-zip-safe
new file mode 100755
index 00000000..8b137891
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/requires.txt b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/requires.txt
new file mode 100755
index 00000000..d1496b02
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/requires.txt
@@ -0,0 +1,3 @@
+setuptools
+docutils
+lockfile >=0.10
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/top_level.txt b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/top_level.txt
new file mode 100755
index 00000000..28e3ee0c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/top_level.txt
@@ -0,0 +1 @@
+daemon
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/version_info.json b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/version_info.json
new file mode 100755
index 00000000..bac1b84f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/version_info.json
@@ -0,0 +1,6 @@
+{
+ "release_date": "2015-02-02",
+ "version": "2.0.5",
+ "maintainer": "Ben Finney <ben+python@benfinney.id.au>",
+ "body": "* Refine compatibility of exceptions for file operations.\n* Specify the text encoding when opening the changelog file.\n"
+} \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.cfg b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.cfg
new file mode 100755
index 00000000..9d3d2c02
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.cfg
@@ -0,0 +1,11 @@
+[aliases]
+distribute = register sdist bdist_wheel upload
+
+[bdist_wheel]
+universal = true
+
+[egg_info]
+tag_svn_revision = 0
+tag_date = 0
+tag_build =
+
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.py
new file mode 100755
index 00000000..16a6a6a6
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+
+# setup.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+# Copyright © 2008 Robert Niederreiter, Jens Klein
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; version 3 of that license or any later version.
+# No warranty expressed or implied. See the file ‘LICENSE.GPL-3’ for details.
+
+""" Distribution setup for ‘python-daemon’ library. """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import sys
+import os
+import os.path
+import pydoc
+import distutils.util
+
+from setuptools import (setup, find_packages)
+
+import version
+
+
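+# The ‘fromlist’ argument to ‘__import__’ must hold native strings:
+# ‘str’ on Python 3, but ‘bytes’ on Python 2, where this module's
+# ‘unicode_literals’ import would otherwise make bare literals text.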
+fromlist_expects_type = str
+if sys.version_info < (3, 0):
+ fromlist_expects_type = bytes
+
+
+main_module_name = 'daemon'
+main_module_fromlist = list(map(fromlist_expects_type, [
+ '_metadata']))
+main_module = __import__(
+ main_module_name,
+ level=0, fromlist=main_module_fromlist)
+metadata = main_module._metadata
+
+(synopsis, long_description) = pydoc.splitdoc(pydoc.getdoc(main_module))
+
+version_info = metadata.get_distribution_version_info()
+version_string = version_info['version']
+
+(maintainer_name, maintainer_email) = metadata.parse_person_field(
+ version_info['maintainer'])
+
+
+setup(
+ name=metadata.distribution_name,
+ version=version_string,
+ packages=find_packages(exclude=["test"]),
+ cmdclass={
+ "write_version_info": version.WriteVersionInfoCommand,
+ "egg_info": version.EggInfoCommand,
+ },
+
+ # Setuptools metadata.
+ maintainer=maintainer_name,
+ maintainer_email=maintainer_email,
+ zip_safe=False,
+ setup_requires=[
+ "docutils",
+ ],
+ test_suite="unittest2.collector",
+ tests_require=[
+ "unittest2 >=0.6",
+ "testtools",
+ "testscenarios >=0.4",
+ "mock >=1.0",
+ "docutils",
+ ],
+ install_requires=[
+ "setuptools",
+ "docutils",
+ "lockfile >=0.10",
+ ],
+
+ # PyPI metadata.
+ author=metadata.author_name,
+ author_email=metadata.author_email,
+ description=synopsis,
+ license=metadata.license,
+ keywords="daemon fork unix".split(),
+ url=metadata.url,
+ long_description=long_description,
+ classifiers=[
+ # Reference: http://pypi.python.org/pypi?%3Aaction=list_classifiers
+ "Development Status :: 4 - Beta",
+ "License :: OSI Approved :: Apache Software License",
+ "Operating System :: POSIX",
+ "Programming Language :: Python :: 2.7",
+ "Programming Language :: Python :: 3",
+ "Intended Audience :: Developers",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ ],
+ )
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/__init__.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/__init__.py
new file mode 100755
index 00000000..398519f1
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/__init__.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+#
+# test/__init__.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Unit test suite for ‘daemon’ package.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/scaffold.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/scaffold.py
new file mode 100755
index 00000000..9a4f1150
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/scaffold.py
@@ -0,0 +1,322 @@
+# -*- coding: utf-8 -*-
+
+# test/scaffold.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2007–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Scaffolding for unit test modules.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import unittest
+import doctest
+import logging
+import os
+import sys
+import operator
+import textwrap
+from copy import deepcopy
+import functools
+
+try:
+ # Python 2 has both ‘str’ (bytes) and ‘unicode’ (text).
+ basestring = basestring
+ unicode = unicode
+except NameError:
+ # Python 3 names the Unicode data type ‘str’.
+ basestring = str
+ unicode = str
+
+import testscenarios
+import testtools.testcase
+
+
+test_dir = os.path.dirname(os.path.abspath(__file__))
+parent_dir = os.path.dirname(test_dir)
+if test_dir not in sys.path:
+    sys.path.insert(1, test_dir)
+if parent_dir not in sys.path:
+    sys.path.insert(1, parent_dir)
+
+# Disable all but the most critical logging messages.
+logging.disable(logging.CRITICAL)
+
+
+def get_function_signature(func):
+ """ Get the function signature as a mapping of attributes.
+
+ :param func: The function object to interrogate.
+ :return: A mapping of the components of a function signature.
+
+ The signature is constructed as a mapping:
+
+ * 'name': The function's defined name.
+ * 'arg_count': The number of arguments expected by the function.
+ * 'arg_names': A sequence of the argument names, as strings.
+ * 'arg_defaults': A sequence of the default values for the arguments.
+    * 'var_args': The name bound to remaining positional arguments.
+    * 'var_kw_args': The name bound to remaining keyword arguments.
+
+ """
+ try:
+ # Python 3 function attributes.
+ func_code = func.__code__
+ func_defaults = func.__defaults__
+ except AttributeError:
+ # Python 2 function attributes.
+ func_code = func.func_code
+ func_defaults = func.func_defaults
+
+ arg_count = func_code.co_argcount
+ arg_names = func_code.co_varnames[:arg_count]
+
+ arg_defaults = {}
+ if func_defaults is not None:
+ arg_defaults = dict(
+ (name, value)
+ for (name, value) in
+ zip(arg_names[::-1], func_defaults[::-1]))
+
+ signature = {
+ 'name': func.__name__,
+ 'arg_count': arg_count,
+ 'arg_names': arg_names,
+ 'arg_defaults': arg_defaults,
+ }
+
+ non_pos_names = list(func_code.co_varnames[arg_count:])
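+    # These bit masks match ‘inspect.CO_VARARGS’ and
+    # ‘inspect.CO_VARKEYWORDS’ in the code object's ‘co_flags’ field.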
+ COLLECTS_ARBITRARY_POSITIONAL_ARGS = 0x04
+ if func_code.co_flags & COLLECTS_ARBITRARY_POSITIONAL_ARGS:
+ signature['var_args'] = non_pos_names.pop(0)
+ COLLECTS_ARBITRARY_KEYWORD_ARGS = 0x08
+ if func_code.co_flags & COLLECTS_ARBITRARY_KEYWORD_ARGS:
+ signature['var_kw_args'] = non_pos_names.pop(0)
+
+ return signature
+
+
+def format_function_signature(func):
+ """ Format the function signature as printable text.
+
+ :param func: The function object to interrogate.
+ :return: A formatted text representation of the function signature.
+
+    The signature is rendered as text; for example::
+
+ foo(spam, eggs, ham=True, beans=None, *args, **kwargs)
+
+ """
+ signature = get_function_signature(func)
+
+ args_text = []
+ for arg_name in signature['arg_names']:
+ if arg_name in signature['arg_defaults']:
+ arg_text = "{name}={value!r}".format(
+ name=arg_name, value=signature['arg_defaults'][arg_name])
+ else:
+ arg_text = "{name}".format(
+ name=arg_name)
+ args_text.append(arg_text)
+    if 'var_args' in signature:
+        args_text.append("*{var_args}".format(**signature))
+    if 'var_kw_args' in signature:
+        args_text.append("**{var_kw_args}".format(**signature))
+ signature_args_text = ", ".join(args_text)
+
+ func_name = signature['name']
+ signature_text = "{name}({args})".format(
+ name=func_name, args=signature_args_text)
+
+ return signature_text
+
+
+class TestCase(testtools.testcase.TestCase):
+ """ Test case behaviour. """
+
+ def failUnlessOutputCheckerMatch(self, want, got, msg=None):
+ """ Fail unless the specified string matches the expected.
+
+ :param want: The desired output pattern.
+ :param got: The actual text to match.
+ :param msg: A message to prefix on the failure message.
+ :return: ``None``.
+ :raises self.failureException: If the text does not match.
+
+ Fail the test unless ``want`` matches ``got``, as determined by
+ a ``doctest.OutputChecker`` instance. This is not an equality
+ check, but a pattern match according to the ``OutputChecker``
+ rules.
+
+ """
+ checker = doctest.OutputChecker()
+ want = textwrap.dedent(want)
+ source = ""
+ example = doctest.Example(source, want)
+ got = textwrap.dedent(got)
+ checker_optionflags = functools.reduce(operator.or_, [
+ doctest.ELLIPSIS,
+ ])
+ if not checker.check_output(want, got, checker_optionflags):
+ if msg is None:
+ diff = checker.output_difference(
+ example, got, checker_optionflags)
+ msg = "\n".join([
+ "Output received did not match expected output",
+ "{diff}",
+ ]).format(
+ diff=diff)
+ raise self.failureException(msg)
+
+ assertOutputCheckerMatch = failUnlessOutputCheckerMatch
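+
+    # Example use, as a sketch: with ‘doctest.ELLIPSIS’ enabled in the
+    # method above, the “...” marker matches arbitrary text, so this
+    # assertion passes:
+    #
+    #     self.failUnlessOutputCheckerMatch(
+    #             "spam ... eggs", "spam beans sausage eggs")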
+
+ def failUnlessFunctionInTraceback(self, traceback, function, msg=None):
+ """ Fail if the function is not in the traceback.
+
+ :param traceback: The traceback object to interrogate.
+ :param function: The function object to match.
+ :param msg: A message to prefix on the failure message.
+ :return: ``None``.
+
+ :raises self.failureException: If the function is not in the
+ traceback.
+
+ Fail the test if the function ``function`` is not at any of the
+ levels in the traceback object ``traceback``.
+
+ """
+ func_in_traceback = False
+        # Python 3 exposes the code object as ‘__code__’; Python 2 as
+        # ‘func_code’.
+        expected_code = getattr(function, '__code__', None)
+        if expected_code is None:
+            expected_code = function.func_code
+ current_traceback = traceback
+ while current_traceback is not None:
+ if expected_code is current_traceback.tb_frame.f_code:
+ func_in_traceback = True
+ break
+ current_traceback = current_traceback.tb_next
+
+ if not func_in_traceback:
+ if msg is None:
+ msg = (
+ "Traceback did not lead to original function"
+ " {function}"
+ ).format(
+ function=function)
+ raise self.failureException(msg)
+
+ assertFunctionInTraceback = failUnlessFunctionInTraceback
+
+ def failUnlessFunctionSignatureMatch(self, first, second, msg=None):
+ """ Fail if the function signatures do not match.
+
+ :param first: The first function to compare.
+ :param second: The second function to compare.
+ :param msg: A message to prefix to the failure message.
+ :return: ``None``.
+
+ :raises self.failureException: If the function signatures do
+ not match.
+
+ Fail the test if the function signature does not match between
+ the ``first`` function and the ``second`` function.
+
+ The function signature includes:
+
+ * function name,
+
+ * count of named parameters,
+
+ * sequence of named parameters,
+
+ * default values of named parameters,
+
+ * collector for arbitrary positional arguments,
+
+ * collector for arbitrary keyword arguments.
+
+ """
+ first_signature = get_function_signature(first)
+ second_signature = get_function_signature(second)
+
+ if first_signature != second_signature:
+ if msg is None:
+ first_signature_text = format_function_signature(first)
+ second_signature_text = format_function_signature(second)
+ msg = (textwrap.dedent("""\
+ Function signatures do not match:
+ {first!r} != {second!r}
+ Expected:
+ {first_text}
+ Got:
+ {second_text}""")
+ ).format(
+ first=first_signature,
+ first_text=first_signature_text,
+ second=second_signature,
+ second_text=second_signature_text,
+ )
+ raise self.failureException(msg)
+
+ assertFunctionSignatureMatch = failUnlessFunctionSignatureMatch
+
+
+class TestCaseWithScenarios(testscenarios.WithScenarios, TestCase):
+ """ Test cases run per scenario. """
+
+
+class Exception_TestCase(TestCaseWithScenarios):
+ """ Test cases for exception classes. """
+
+ def test_exception_instance(self):
+ """ Exception instance should be created. """
+ self.assertIsNot(self.instance, None)
+
+ def test_exception_types(self):
+ """ Exception instance should match expected types. """
+ for match_type in self.types:
+ self.assertIsInstance(self.instance, match_type)
+
+
+def make_exception_scenarios(scenarios):
+ """ Make test scenarios for exception classes.
+
+ :param scenarios: Sequence of scenarios.
+ :return: List of scenarios with additional mapping entries.
+
+ Use this with `testscenarios` to adapt `Exception_TestCase`_ for
+ any exceptions that need testing.
+
+    Each scenario is a tuple (`name`, `map`) where `map` is a mapping
+    of attributes to be applied to each test case. The attributes
+    mapping must contain items for:
+
+ :key exc_type:
+ The exception type to be tested.
+ :key min_args:
+ The minimum argument count for the exception instance
+ initialiser.
+ :key types:
+ Sequence of types that should be superclasses of each
+ instance of the exception type.
+
+ """
+ updated_scenarios = deepcopy(scenarios)
+ for (name, scenario) in updated_scenarios:
+ args = (None,) * scenario['min_args']
+ scenario['args'] = args
+ instance = scenario['exc_type'](*args)
+ scenario['instance'] = instance
+
+ return updated_scenarios
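+
+# Example use, as a sketch (‘SpamError’ is hypothetical; see
+# ‘test_daemon.ModuleExceptions_TestCase’ for real scenarios):
+#
+#     scenarios = make_exception_scenarios([
+#             ('SpamError', dict(
+#                 exc_type=SpamError,
+#                 min_args=1,
+#                 types=[Exception],
+#                 )),
+#             ])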
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_daemon.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_daemon.py
new file mode 100755
index 00000000..a911858a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_daemon.py
@@ -0,0 +1,1744 @@
+# -*- coding: utf-8 -*-
+#
+# test/test_daemon.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Unit test for ‘daemon’ module.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import os
+import sys
+import tempfile
+import resource
+import errno
+import signal
+import socket
+from types import ModuleType
+import collections
+import functools
+try:
+ # Standard library of Python 2.7 and later.
+ from io import StringIO
+except ImportError:
+ # Standard library of Python 2.6 and earlier.
+ from StringIO import StringIO
+
+import mock
+
+from . import scaffold
+from .scaffold import (basestring, unicode)
+from .test_pidfile import (
+ FakeFileDescriptorStringIO,
+ setup_pidfile_fixtures,
+ )
+
+import daemon
+
+
+class ModuleExceptions_TestCase(scaffold.Exception_TestCase):
+ """ Test cases for module exception classes. """
+
+ scenarios = scaffold.make_exception_scenarios([
+ ('daemon.daemon.DaemonError', dict(
+ exc_type = daemon.daemon.DaemonError,
+ min_args = 1,
+ types = [Exception],
+ )),
+ ('daemon.daemon.DaemonOSEnvironmentError', dict(
+ exc_type = daemon.daemon.DaemonOSEnvironmentError,
+ min_args = 1,
+ types = [daemon.daemon.DaemonError, OSError],
+ )),
+ ('daemon.daemon.DaemonProcessDetachError', dict(
+ exc_type = daemon.daemon.DaemonProcessDetachError,
+ min_args = 1,
+ types = [daemon.daemon.DaemonError, OSError],
+ )),
+ ])
+
+
+def setup_daemon_context_fixtures(testcase):
+ """ Set up common test fixtures for DaemonContext test case.
+
+ :param testcase: A ``TestCase`` instance to decorate.
+ :return: ``None``.
+
+ Decorate the `testcase` with fixtures for tests involving
+ `DaemonContext`.
+
+ """
+ setup_streams_fixtures(testcase)
+
+ setup_pidfile_fixtures(testcase)
+
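+    # ‘tempfile.mktemp’ only generates a candidate name; no file is
+    # created, which suits this fixture: the lock file itself is a mock.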
+ testcase.fake_pidfile_path = tempfile.mktemp()
+ testcase.mock_pidlockfile = mock.MagicMock()
+ testcase.mock_pidlockfile.path = testcase.fake_pidfile_path
+
+ testcase.daemon_context_args = dict(
+ stdin=testcase.stream_files_by_name['stdin'],
+ stdout=testcase.stream_files_by_name['stdout'],
+ stderr=testcase.stream_files_by_name['stderr'],
+ )
+ testcase.test_instance = daemon.DaemonContext(
+ **testcase.daemon_context_args)
+
+fake_default_signal_map = object()
+
+@mock.patch.object(
+ daemon.daemon, "is_detach_process_context_required",
+ new=(lambda: True))
+@mock.patch.object(
+ daemon.daemon, "make_default_signal_map",
+ new=(lambda: fake_default_signal_map))
+@mock.patch.object(os, "setgid", new=(lambda x: object()))
+@mock.patch.object(os, "setuid", new=(lambda x: object()))
+class DaemonContext_BaseTestCase(scaffold.TestCase):
+ """ Base class for DaemonContext test case classes. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonContext_BaseTestCase, self).setUp()
+
+ setup_daemon_context_fixtures(self)
+
+
+class DaemonContext_TestCase(DaemonContext_BaseTestCase):
+ """ Test cases for DaemonContext class. """
+
+ def test_instantiate(self):
+ """ New instance of DaemonContext should be created. """
+ self.assertIsInstance(
+ self.test_instance, daemon.daemon.DaemonContext)
+
+ def test_minimum_zero_arguments(self):
+ """ Initialiser should not require any arguments. """
+ instance = daemon.daemon.DaemonContext()
+ self.assertIsNot(instance, None)
+
+ def test_has_specified_chroot_directory(self):
+ """ Should have specified chroot_directory option. """
+ args = dict(
+ chroot_directory=object(),
+ )
+ expected_directory = args['chroot_directory']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_directory, instance.chroot_directory)
+
+ def test_has_specified_working_directory(self):
+ """ Should have specified working_directory option. """
+ args = dict(
+ working_directory=object(),
+ )
+ expected_directory = args['working_directory']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_directory, instance.working_directory)
+
+ def test_has_default_working_directory(self):
+ """ Should have default working_directory option. """
+ args = dict()
+ expected_directory = "/"
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_directory, instance.working_directory)
+
+ def test_has_specified_creation_mask(self):
+ """ Should have specified umask option. """
+ args = dict(
+ umask=object(),
+ )
+ expected_mask = args['umask']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_mask, instance.umask)
+
+ def test_has_default_creation_mask(self):
+ """ Should have default umask option. """
+ args = dict()
+ expected_mask = 0
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_mask, instance.umask)
+
+ def test_has_specified_uid(self):
+ """ Should have specified uid option. """
+ args = dict(
+ uid=object(),
+ )
+ expected_id = args['uid']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_id, instance.uid)
+
+ def test_has_derived_uid(self):
+ """ Should have uid option derived from process. """
+ args = dict()
+ expected_id = os.getuid()
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_id, instance.uid)
+
+ def test_has_specified_gid(self):
+ """ Should have specified gid option. """
+ args = dict(
+ gid=object(),
+ )
+ expected_id = args['gid']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_id, instance.gid)
+
+ def test_has_derived_gid(self):
+ """ Should have gid option derived from process. """
+ args = dict()
+ expected_id = os.getgid()
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_id, instance.gid)
+
+ def test_has_specified_detach_process(self):
+ """ Should have specified detach_process option. """
+ args = dict(
+ detach_process=object(),
+ )
+ expected_value = args['detach_process']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_value, instance.detach_process)
+
+ def test_has_derived_detach_process(self):
+ """ Should have detach_process option derived from environment. """
+ args = dict()
+ func = daemon.daemon.is_detach_process_context_required
+ expected_value = func()
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_value, instance.detach_process)
+
+ def test_has_specified_files_preserve(self):
+ """ Should have specified files_preserve option. """
+ args = dict(
+ files_preserve=object(),
+ )
+ expected_files_preserve = args['files_preserve']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_files_preserve, instance.files_preserve)
+
+ def test_has_specified_pidfile(self):
+ """ Should have the specified pidfile. """
+ args = dict(
+ pidfile=object(),
+ )
+ expected_pidfile = args['pidfile']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_pidfile, instance.pidfile)
+
+ def test_has_specified_stdin(self):
+ """ Should have specified stdin option. """
+ args = dict(
+ stdin=object(),
+ )
+ expected_file = args['stdin']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_file, instance.stdin)
+
+ def test_has_specified_stdout(self):
+ """ Should have specified stdout option. """
+ args = dict(
+ stdout=object(),
+ )
+ expected_file = args['stdout']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_file, instance.stdout)
+
+ def test_has_specified_stderr(self):
+ """ Should have specified stderr option. """
+ args = dict(
+ stderr=object(),
+ )
+ expected_file = args['stderr']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_file, instance.stderr)
+
+ def test_has_specified_signal_map(self):
+ """ Should have specified signal_map option. """
+ args = dict(
+ signal_map=object(),
+ )
+ expected_signal_map = args['signal_map']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_signal_map, instance.signal_map)
+
+ def test_has_derived_signal_map(self):
+ """ Should have signal_map option derived from system. """
+ args = dict()
+ expected_signal_map = daemon.daemon.make_default_signal_map()
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_signal_map, instance.signal_map)
+
+
+class DaemonContext_is_open_TestCase(DaemonContext_BaseTestCase):
+ """ Test cases for DaemonContext.is_open property. """
+
+ def test_begin_false(self):
+ """ Initial value of is_open should be False. """
+ instance = self.test_instance
+ self.assertEqual(False, instance.is_open)
+
+ def test_write_fails(self):
+ """ Writing to is_open should fail. """
+ instance = self.test_instance
+ self.assertRaises(
+ AttributeError,
+ setattr, instance, 'is_open', object())
+
+
+class DaemonContext_open_TestCase(DaemonContext_BaseTestCase):
+ """ Test cases for DaemonContext.open method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonContext_open_TestCase, self).setUp()
+
+ self.test_instance._is_open = False
+
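+        # Patch each daemonisation helper and attach it to a single
+        # parent mock, so the tests below can assert on the relative
+        # order of the daemonisation steps.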
+ self.mock_module_daemon = mock.MagicMock()
+ daemon_func_patchers = dict(
+ (func_name, mock.patch.object(
+ daemon.daemon, func_name))
+ for func_name in [
+ "detach_process_context",
+ "change_working_directory",
+ "change_root_directory",
+ "change_file_creation_mask",
+ "change_process_owner",
+ "prevent_core_dump",
+ "close_all_open_files",
+ "redirect_stream",
+ "set_signal_handlers",
+ "register_atexit_function",
+ ])
+ for (func_name, patcher) in daemon_func_patchers.items():
+ mock_func = patcher.start()
+ self.addCleanup(patcher.stop)
+ self.mock_module_daemon.attach_mock(mock_func, func_name)
+
+ self.mock_module_daemon.attach_mock(mock.Mock(), 'DaemonContext')
+
+ self.test_files_preserve_fds = object()
+ self.test_signal_handler_map = object()
+ daemoncontext_method_return_values = {
+ '_get_exclude_file_descriptors':
+ self.test_files_preserve_fds,
+ '_make_signal_handler_map':
+ self.test_signal_handler_map,
+ }
+ daemoncontext_func_patchers = dict(
+ (func_name, mock.patch.object(
+ daemon.daemon.DaemonContext,
+ func_name,
+ return_value=return_value))
+ for (func_name, return_value) in
+ daemoncontext_method_return_values.items())
+ for (func_name, patcher) in daemoncontext_func_patchers.items():
+ mock_func = patcher.start()
+ self.addCleanup(patcher.stop)
+ self.mock_module_daemon.DaemonContext.attach_mock(
+ mock_func, func_name)
+
+ def test_performs_steps_in_expected_sequence(self):
+ """ Should perform daemonisation steps in expected sequence. """
+ instance = self.test_instance
+ instance.chroot_directory = object()
+ instance.detach_process = True
+ instance.pidfile = self.mock_pidlockfile
+ self.mock_module_daemon.attach_mock(
+ self.mock_pidlockfile, 'pidlockfile')
+ expected_calls = [
+ mock.call.change_root_directory(mock.ANY),
+ mock.call.prevent_core_dump(),
+ mock.call.change_file_creation_mask(mock.ANY),
+ mock.call.change_working_directory(mock.ANY),
+ mock.call.change_process_owner(mock.ANY, mock.ANY),
+ mock.call.detach_process_context(),
+ mock.call.DaemonContext._make_signal_handler_map(),
+ mock.call.set_signal_handlers(mock.ANY),
+ mock.call.DaemonContext._get_exclude_file_descriptors(),
+ mock.call.close_all_open_files(exclude=mock.ANY),
+ mock.call.redirect_stream(mock.ANY, mock.ANY),
+ mock.call.redirect_stream(mock.ANY, mock.ANY),
+ mock.call.redirect_stream(mock.ANY, mock.ANY),
+ mock.call.pidlockfile.__enter__(),
+ mock.call.register_atexit_function(mock.ANY),
+ ]
+ instance.open()
+ self.mock_module_daemon.assert_has_calls(expected_calls)
+
+ def test_returns_immediately_if_is_open(self):
+ """ Should return immediately if is_open property is true. """
+ instance = self.test_instance
+ instance._is_open = True
+ instance.open()
+ self.assertEqual(0, len(self.mock_module_daemon.mock_calls))
+
+ def test_changes_root_directory_to_chroot_directory(self):
+ """ Should change root directory to `chroot_directory` option. """
+ instance = self.test_instance
+ chroot_directory = object()
+ instance.chroot_directory = chroot_directory
+ instance.open()
+ self.mock_module_daemon.change_root_directory.assert_called_with(
+ chroot_directory)
+
+ def test_omits_chroot_if_no_chroot_directory(self):
+ """ Should omit changing root directory if no `chroot_directory`. """
+ instance = self.test_instance
+ instance.chroot_directory = None
+ instance.open()
+ self.assertFalse(self.mock_module_daemon.change_root_directory.called)
+
+ def test_prevents_core_dump(self):
+ """ Should request prevention of core dumps. """
+ instance = self.test_instance
+ instance.open()
+ self.mock_module_daemon.prevent_core_dump.assert_called_with()
+
+ def test_omits_prevent_core_dump_if_prevent_core_false(self):
+ """ Should omit preventing core dumps if `prevent_core` is false. """
+ instance = self.test_instance
+ instance.prevent_core = False
+ instance.open()
+ self.assertFalse(self.mock_module_daemon.prevent_core_dump.called)
+
+ def test_closes_open_files(self):
+ """ Should close all open files, excluding `files_preserve`. """
+ instance = self.test_instance
+ expected_exclude = self.test_files_preserve_fds
+ instance.open()
+ self.mock_module_daemon.close_all_open_files.assert_called_with(
+ exclude=expected_exclude)
+
+ def test_changes_directory_to_working_directory(self):
+ """ Should change current directory to `working_directory` option. """
+ instance = self.test_instance
+ working_directory = object()
+ instance.working_directory = working_directory
+ instance.open()
+ self.mock_module_daemon.change_working_directory.assert_called_with(
+ working_directory)
+
+ def test_changes_creation_mask_to_umask(self):
+ """ Should change file creation mask to `umask` option. """
+ instance = self.test_instance
+ umask = object()
+ instance.umask = umask
+ instance.open()
+ self.mock_module_daemon.change_file_creation_mask.assert_called_with(
+ umask)
+
+ def test_changes_owner_to_specified_uid_and_gid(self):
+ """ Should change process UID and GID to `uid` and `gid` options. """
+ instance = self.test_instance
+ uid = object()
+ gid = object()
+ instance.uid = uid
+ instance.gid = gid
+ instance.open()
+ self.mock_module_daemon.change_process_owner.assert_called_with(
+ uid, gid)
+
+ def test_detaches_process_context(self):
+ """ Should request detach of process context. """
+ instance = self.test_instance
+ instance.open()
+ self.mock_module_daemon.detach_process_context.assert_called_with()
+
+ def test_omits_process_detach_if_not_required(self):
+ """ Should omit detach of process context if not required. """
+ instance = self.test_instance
+ instance.detach_process = False
+ instance.open()
+ self.assertFalse(self.mock_module_daemon.detach_process_context.called)
+
+ def test_sets_signal_handlers_from_signal_map(self):
+ """ Should set signal handlers according to `signal_map`. """
+ instance = self.test_instance
+ instance.signal_map = object()
+ expected_signal_handler_map = self.test_signal_handler_map
+ instance.open()
+ self.mock_module_daemon.set_signal_handlers.assert_called_with(
+ expected_signal_handler_map)
+
+ def test_redirects_standard_streams(self):
+ """ Should request redirection of standard stream files. """
+ instance = self.test_instance
+ (system_stdin, system_stdout, system_stderr) = (
+ sys.stdin, sys.stdout, sys.stderr)
+ (target_stdin, target_stdout, target_stderr) = (
+ self.stream_files_by_name[name]
+ for name in ['stdin', 'stdout', 'stderr'])
+ expected_calls = [
+ mock.call(system_stdin, target_stdin),
+ mock.call(system_stdout, target_stdout),
+ mock.call(system_stderr, target_stderr),
+ ]
+ instance.open()
+ self.mock_module_daemon.redirect_stream.assert_has_calls(
+ expected_calls, any_order=True)
+
+ def test_enters_pidfile_context(self):
+ """ Should enter the PID file context manager. """
+ instance = self.test_instance
+ instance.pidfile = self.mock_pidlockfile
+ instance.open()
+ self.mock_pidlockfile.__enter__.assert_called_with()
+
+ def test_sets_is_open_true(self):
+ """ Should set the `is_open` property to True. """
+ instance = self.test_instance
+ instance.open()
+ self.assertEqual(True, instance.is_open)
+
+ def test_registers_close_method_for_atexit(self):
+ """ Should register the `close` method for atexit processing. """
+ instance = self.test_instance
+ close_method = instance.close
+ instance.open()
+ self.mock_module_daemon.register_atexit_function.assert_called_with(
+ close_method)
+
+
+class DaemonContext_close_TestCase(DaemonContext_BaseTestCase):
+ """ Test cases for DaemonContext.close method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonContext_close_TestCase, self).setUp()
+
+ self.test_instance._is_open = True
+
+ def test_returns_immediately_if_not_is_open(self):
+ """ Should return immediately if is_open property is false. """
+ instance = self.test_instance
+ instance._is_open = False
+ instance.pidfile = object()
+ instance.close()
+ self.assertFalse(self.mock_pidlockfile.__exit__.called)
+
+ def test_exits_pidfile_context(self):
+ """ Should exit the PID file context manager. """
+ instance = self.test_instance
+ instance.pidfile = self.mock_pidlockfile
+ instance.close()
+ self.mock_pidlockfile.__exit__.assert_called_with(None, None, None)
+
+ def test_returns_none(self):
+ """ Should return None. """
+ instance = self.test_instance
+ expected_result = None
+ result = instance.close()
+ self.assertIs(result, expected_result)
+
+ def test_sets_is_open_false(self):
+ """ Should set the `is_open` property to False. """
+ instance = self.test_instance
+ instance.close()
+ self.assertEqual(False, instance.is_open)
+
+
+@mock.patch.object(daemon.daemon.DaemonContext, "open")
+class DaemonContext_context_manager_enter_TestCase(DaemonContext_BaseTestCase):
+ """ Test cases for DaemonContext.__enter__ method. """
+
+ def test_opens_daemon_context(self, mock_func_daemoncontext_open):
+ """ Should open the DaemonContext. """
+ instance = self.test_instance
+ instance.__enter__()
+ mock_func_daemoncontext_open.assert_called_with()
+
+ def test_returns_self_instance(self, mock_func_daemoncontext_open):
+ """ Should return DaemonContext instance. """
+ instance = self.test_instance
+ expected_result = instance
+ result = instance.__enter__()
+ self.assertIs(result, expected_result)
+
+
+@mock.patch.object(daemon.daemon.DaemonContext, "close")
+class DaemonContext_context_manager_exit_TestCase(DaemonContext_BaseTestCase):
+ """ Test cases for DaemonContext.__exit__ method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonContext_context_manager_exit_TestCase, self).setUp()
+
+ self.test_args = dict(
+ exc_type=object(),
+ exc_value=object(),
+ traceback=object(),
+ )
+
+ def test_closes_daemon_context(self, mock_func_daemoncontext_close):
+ """ Should close the DaemonContext. """
+ instance = self.test_instance
+ args = self.test_args
+ instance.__exit__(**args)
+ mock_func_daemoncontext_close.assert_called_with()
+
+ def test_returns_none(self, mock_func_daemoncontext_close):
+ """ Should return None, indicating exception was not handled. """
+ instance = self.test_instance
+ args = self.test_args
+ expected_result = None
+ result = instance.__exit__(**args)
+ self.assertIs(result, expected_result)
+
+
+class DaemonContext_terminate_TestCase(DaemonContext_BaseTestCase):
+ """ Test cases for DaemonContext.terminate method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonContext_terminate_TestCase, self).setUp()
+
+ self.test_signal = signal.SIGTERM
+ self.test_frame = None
+ self.test_args = (self.test_signal, self.test_frame)
+
+ def test_raises_system_exit(self):
+ """ Should raise SystemExit. """
+ instance = self.test_instance
+ args = self.test_args
+ expected_exception = SystemExit
+ self.assertRaises(
+ expected_exception,
+ instance.terminate, *args)
+
+ def test_exception_message_contains_signal_number(self):
+ """ Should raise exception with a message containing signal number. """
+ instance = self.test_instance
+ args = self.test_args
+ signal_number = self.test_signal
+ expected_exception = SystemExit
+ exc = self.assertRaises(
+ expected_exception,
+ instance.terminate, *args)
+ self.assertIn(unicode(signal_number), unicode(exc))
+
+
+class DaemonContext_get_exclude_file_descriptors_TestCase(
+ DaemonContext_BaseTestCase):
+    """ Test cases for DaemonContext._get_exclude_file_descriptors method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(
+ DaemonContext_get_exclude_file_descriptors_TestCase,
+ self).setUp()
+
+ self.test_files = {
+ 2: FakeFileDescriptorStringIO(),
+ 5: 5,
+ 11: FakeFileDescriptorStringIO(),
+ 17: None,
+ 23: FakeFileDescriptorStringIO(),
+ 37: 37,
+ 42: FakeFileDescriptorStringIO(),
+ }
+ for (fileno, item) in self.test_files.items():
+ if hasattr(item, '_fileno'):
+ item._fileno = fileno
+ self.test_file_descriptors = set(
+ fd for (fd, item) in self.test_files.items()
+ if item is not None)
+ self.test_file_descriptors.update(
+ self.stream_files_by_name[name].fileno()
+ for name in ['stdin', 'stdout', 'stderr']
+ )
+
+ def test_returns_expected_file_descriptors(self):
+ """ Should return expected set of file descriptors. """
+ instance = self.test_instance
+ instance.files_preserve = list(self.test_files.values())
+ expected_result = self.test_file_descriptors
+ result = instance._get_exclude_file_descriptors()
+ self.assertEqual(expected_result, result)
+
+ def test_returns_stream_redirects_if_no_files_preserve(self):
+ """ Should return only stream redirects if no files_preserve. """
+ instance = self.test_instance
+ instance.files_preserve = None
+ expected_result = set(
+ stream.fileno()
+ for stream in self.stream_files_by_name.values())
+ result = instance._get_exclude_file_descriptors()
+ self.assertEqual(expected_result, result)
+
+ def test_returns_empty_set_if_no_files(self):
+ """ Should return empty set if no file options. """
+ instance = self.test_instance
+ for name in ['files_preserve', 'stdin', 'stdout', 'stderr']:
+ setattr(instance, name, None)
+ expected_result = set()
+ result = instance._get_exclude_file_descriptors()
+ self.assertEqual(expected_result, result)
+
+ def test_omits_non_file_streams(self):
+ """ Should omit non-file stream attributes. """
+ instance = self.test_instance
+ instance.files_preserve = list(self.test_files.values())
+ stream_files = self.stream_files_by_name
+ expected_result = self.test_file_descriptors.copy()
+ for (pseudo_stream_name, pseudo_stream) in stream_files.items():
+ test_non_file_object = object()
+ setattr(instance, pseudo_stream_name, test_non_file_object)
+ stream_fd = pseudo_stream.fileno()
+ expected_result.discard(stream_fd)
+ result = instance._get_exclude_file_descriptors()
+ self.assertEqual(expected_result, result)
+
+ def test_includes_verbatim_streams_without_file_descriptor(self):
+ """ Should include verbatim any stream without a file descriptor. """
+ instance = self.test_instance
+ instance.files_preserve = list(self.test_files.values())
+ stream_files = self.stream_files_by_name
+ mock_fileno_method = mock.MagicMock(
+ spec=sys.__stdin__.fileno,
+ side_effect=ValueError)
+ expected_result = self.test_file_descriptors.copy()
+ for (pseudo_stream_name, pseudo_stream) in stream_files.items():
+ test_non_fd_stream = StringIO()
+ if not hasattr(test_non_fd_stream, 'fileno'):
+ # Python < 3 StringIO doesn't have ‘fileno’ at all.
+ # Add a method which raises an exception.
+ test_non_fd_stream.fileno = mock_fileno_method
+ setattr(instance, pseudo_stream_name, test_non_fd_stream)
+ stream_fd = pseudo_stream.fileno()
+ expected_result.discard(stream_fd)
+ expected_result.add(test_non_fd_stream)
+ result = instance._get_exclude_file_descriptors()
+ self.assertEqual(expected_result, result)
+
+ def test_omits_none_streams(self):
+ """ Should omit any stream attribute which is None. """
+ instance = self.test_instance
+ instance.files_preserve = list(self.test_files.values())
+ stream_files = self.stream_files_by_name
+ expected_result = self.test_file_descriptors.copy()
+ for (pseudo_stream_name, pseudo_stream) in stream_files.items():
+ setattr(instance, pseudo_stream_name, None)
+ stream_fd = pseudo_stream.fileno()
+ expected_result.discard(stream_fd)
+ result = instance._get_exclude_file_descriptors()
+ self.assertEqual(expected_result, result)
+
+
+class DaemonContext_make_signal_handler_TestCase(DaemonContext_BaseTestCase):
+    """ Test cases for DaemonContext._make_signal_handler method. """
+
+ def test_returns_ignore_for_none(self):
+ """ Should return SIG_IGN when None handler specified. """
+ instance = self.test_instance
+ target = None
+ expected_result = signal.SIG_IGN
+ result = instance._make_signal_handler(target)
+ self.assertEqual(expected_result, result)
+
+ def test_returns_method_for_name(self):
+ """ Should return method of DaemonContext when name specified. """
+ instance = self.test_instance
+ target = 'terminate'
+ expected_result = instance.terminate
+ result = instance._make_signal_handler(target)
+ self.assertEqual(expected_result, result)
+
+ def test_raises_error_for_unknown_name(self):
+ """ Should raise AttributeError for unknown method name. """
+ instance = self.test_instance
+ target = 'b0gUs'
+ expected_error = AttributeError
+ self.assertRaises(
+ expected_error,
+ instance._make_signal_handler, target)
+
+ def test_returns_object_for_object(self):
+ """ Should return same object for any other object. """
+ instance = self.test_instance
+ target = object()
+ expected_result = target
+ result = instance._make_signal_handler(target)
+ self.assertEqual(expected_result, result)
+
+
+class DaemonContext_make_signal_handler_map_TestCase(
+ DaemonContext_BaseTestCase):
+    """ Test cases for DaemonContext._make_signal_handler_map method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonContext_make_signal_handler_map_TestCase, self).setUp()
+
+ self.test_instance.signal_map = {
+ object(): object(),
+ object(): object(),
+ object(): object(),
+ }
+
+ self.test_signal_handlers = dict(
+ (key, object())
+ for key in self.test_instance.signal_map.values())
+ self.test_signal_handler_map = dict(
+ (key, self.test_signal_handlers[target])
+ for (key, target) in self.test_instance.signal_map.items())
+
+ def fake_make_signal_handler(target):
+ return self.test_signal_handlers[target]
+
+ func_patcher_make_signal_handler = mock.patch.object(
+ daemon.daemon.DaemonContext, "_make_signal_handler",
+ side_effect=fake_make_signal_handler)
+ self.mock_func_make_signal_handler = (
+ func_patcher_make_signal_handler.start())
+ self.addCleanup(func_patcher_make_signal_handler.stop)
+
+ def test_returns_constructed_signal_handler_items(self):
+ """ Should return items as constructed via make_signal_handler. """
+ instance = self.test_instance
+ expected_result = self.test_signal_handler_map
+ result = instance._make_signal_handler_map()
+ self.assertEqual(expected_result, result)
+
+
+try:
+ FileNotFoundError
+except NameError:
+ # Python 2 uses IOError.
+ FileNotFoundError = functools.partial(IOError, errno.ENOENT)
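+    # Note: ‘functools.partial’ gives a factory, not an exception
+    # class, so this shim is only suitable for raising instances,
+    # not for use in ‘except’ clauses.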
+
+
+@mock.patch.object(os, "chdir")
+class change_working_directory_TestCase(scaffold.TestCase):
+ """ Test cases for change_working_directory function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(change_working_directory_TestCase, self).setUp()
+
+ self.test_directory = object()
+ self.test_args = dict(
+ directory=self.test_directory,
+ )
+
+ def test_changes_working_directory_to_specified_directory(
+ self,
+ mock_func_os_chdir):
+ """ Should change working directory to specified directory. """
+ args = self.test_args
+ directory = self.test_directory
+ daemon.daemon.change_working_directory(**args)
+ mock_func_os_chdir.assert_called_with(directory)
+
+ def test_raises_daemon_error_on_os_error(
+ self,
+ mock_func_os_chdir):
+ """ Should raise a DaemonError on receiving an IOError. """
+ args = self.test_args
+ test_error = FileNotFoundError("No such directory")
+ mock_func_os_chdir.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_working_directory, **args)
+ self.assertEqual(test_error, exc.__cause__)
+
+ def test_error_message_contains_original_error_message(
+ self,
+ mock_func_os_chdir):
+ """ Should raise a DaemonError with original message. """
+ args = self.test_args
+ test_error = FileNotFoundError("No such directory")
+ mock_func_os_chdir.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_working_directory, **args)
+ self.assertIn(unicode(test_error), unicode(exc))
+
+
+@mock.patch.object(os, "chroot")
+@mock.patch.object(os, "chdir")
+class change_root_directory_TestCase(scaffold.TestCase):
+ """ Test cases for change_root_directory function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(change_root_directory_TestCase, self).setUp()
+
+ self.test_directory = object()
+ self.test_args = dict(
+ directory=self.test_directory,
+ )
+
+ def test_changes_working_directory_to_specified_directory(
+ self,
+ mock_func_os_chdir, mock_func_os_chroot):
+ """ Should change working directory to specified directory. """
+ args = self.test_args
+ directory = self.test_directory
+ daemon.daemon.change_root_directory(**args)
+ mock_func_os_chdir.assert_called_with(directory)
+
+ def test_changes_root_directory_to_specified_directory(
+ self,
+ mock_func_os_chdir, mock_func_os_chroot):
+ """ Should change root directory to specified directory. """
+ args = self.test_args
+ directory = self.test_directory
+ daemon.daemon.change_root_directory(**args)
+ mock_func_os_chroot.assert_called_with(directory)
+
+ def test_raises_daemon_error_on_os_error_from_chdir(
+ self,
+ mock_func_os_chdir, mock_func_os_chroot):
+ """ Should raise a DaemonError on receiving an IOError from chdir. """
+ args = self.test_args
+ test_error = FileNotFoundError("No such directory")
+ mock_func_os_chdir.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_root_directory, **args)
+ self.assertEqual(test_error, exc.__cause__)
+
+ def test_raises_daemon_error_on_os_error_from_chroot(
+ self,
+ mock_func_os_chdir, mock_func_os_chroot):
+ """ Should raise a DaemonError on receiving an OSError from chroot. """
+ args = self.test_args
+ test_error = OSError(errno.EPERM, "No chroot for you!")
+ mock_func_os_chroot.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_root_directory, **args)
+ self.assertEqual(test_error, exc.__cause__)
+
+ def test_error_message_contains_original_error_message(
+ self,
+ mock_func_os_chdir, mock_func_os_chroot):
+ """ Should raise a DaemonError with original message. """
+ args = self.test_args
+ test_error = FileNotFoundError("No such directory")
+ mock_func_os_chdir.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_root_directory, **args)
+ self.assertIn(unicode(test_error), unicode(exc))
+
+
+@mock.patch.object(os, "umask")
+class change_file_creation_mask_TestCase(scaffold.TestCase):
+ """ Test cases for change_file_creation_mask function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(change_file_creation_mask_TestCase, self).setUp()
+
+ self.test_mask = object()
+ self.test_args = dict(
+ mask=self.test_mask,
+ )
+
+    def test_changes_umask_to_specified_mask(self, mock_func_os_umask):
+        """ Should change file creation mask to the specified mask. """
+ args = self.test_args
+ mask = self.test_mask
+ daemon.daemon.change_file_creation_mask(**args)
+ mock_func_os_umask.assert_called_with(mask)
+
+    def test_raises_daemon_error_on_os_error_from_umask(
+            self,
+            mock_func_os_umask):
+        """ Should raise a DaemonError on receiving an OSError from umask. """
+ args = self.test_args
+ test_error = OSError(errno.EINVAL, "Whatchoo talkin' 'bout?")
+ mock_func_os_umask.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_file_creation_mask, **args)
+ self.assertEqual(test_error, exc.__cause__)
+
+ def test_error_message_contains_original_error_message(
+ self,
+ mock_func_os_umask):
+ """ Should raise a DaemonError with original message. """
+ args = self.test_args
+ test_error = FileNotFoundError("No such directory")
+ mock_func_os_umask.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_file_creation_mask, **args)
+ self.assertIn(unicode(test_error), unicode(exc))
+
+
+@mock.patch.object(os, "setgid")
+@mock.patch.object(os, "setuid")
+class change_process_owner_TestCase(scaffold.TestCase):
+ """ Test cases for change_process_owner function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(change_process_owner_TestCase, self).setUp()
+
+ self.test_uid = object()
+ self.test_gid = object()
+ self.test_args = dict(
+ uid=self.test_uid,
+ gid=self.test_gid,
+ )
+
+    def test_changes_gid_and_uid_in_order(
+            self,
+            mock_func_os_setuid, mock_func_os_setgid):
+        """ Should change process GID and UID in correct order.
+
+            Since the process requires appropriate privilege to use
+            either of `setuid` or `setgid`, changing the UID must be
+            done last.
+
+            """
+        args = self.test_args
+        # Attach both patched functions to one parent mock so their
+        # relative call order can be asserted.
+        mock_os_module = mock.MagicMock()
+        mock_os_module.attach_mock(mock_func_os_setgid, "setgid")
+        mock_os_module.attach_mock(mock_func_os_setuid, "setuid")
+        daemon.daemon.change_process_owner(**args)
+        expected_calls = [
+                mock.call.setgid(self.test_gid),
+                mock.call.setuid(self.test_uid),
+                ]
+        mock_os_module.assert_has_calls(expected_calls)
+
+ def test_changes_group_id_to_gid(
+ self,
+ mock_func_os_setuid, mock_func_os_setgid):
+ """ Should change process GID to specified value. """
+ args = self.test_args
+ gid = self.test_gid
+ daemon.daemon.change_process_owner(**args)
+        mock_func_os_setgid.assert_called_with(gid)
+
+ def test_changes_user_id_to_uid(
+ self,
+ mock_func_os_setuid, mock_func_os_setgid):
+ """ Should change process UID to specified value. """
+ args = self.test_args
+ uid = self.test_uid
+ daemon.daemon.change_process_owner(**args)
+        mock_func_os_setuid.assert_called_with(uid)
+
+ def test_raises_daemon_error_on_os_error_from_setgid(
+ self,
+ mock_func_os_setuid, mock_func_os_setgid):
+ """ Should raise a DaemonError on receiving an OSError from setgid. """
+ args = self.test_args
+ test_error = OSError(errno.EPERM, "No switching for you!")
+ mock_func_os_setgid.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_process_owner, **args)
+ self.assertEqual(test_error, exc.__cause__)
+
+ def test_raises_daemon_error_on_os_error_from_setuid(
+ self,
+ mock_func_os_setuid, mock_func_os_setgid):
+ """ Should raise a DaemonError on receiving an OSError from setuid. """
+ args = self.test_args
+ test_error = OSError(errno.EPERM, "No switching for you!")
+ mock_func_os_setuid.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_process_owner, **args)
+ self.assertEqual(test_error, exc.__cause__)
+
+ def test_error_message_contains_original_error_message(
+ self,
+ mock_func_os_setuid, mock_func_os_setgid):
+ """ Should raise a DaemonError with original message. """
+ args = self.test_args
+ test_error = OSError(errno.EINVAL, "Whatchoo talkin' 'bout?")
+ mock_func_os_setuid.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_process_owner, **args)
+ self.assertIn(unicode(test_error), unicode(exc))
+
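+# A sketch of the privilege-dropping order the tests above pin down (not
+# part of the library; `uid` and `gid` are illustrative values). Calling
+# os.setuid() first would forfeit the privilege needed for os.setgid(),
+# so the group must change first:
+#
+#     os.setgid(gid)    # while the process still has privilege
+#     os.setuid(uid)    # drops privilege; must come last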
+
+RLimitResult = collections.namedtuple('RLimitResult', ['soft', 'hard'])
+
+fake_RLIMIT_CORE = object()
+
+@mock.patch.object(resource, "RLIMIT_CORE", new=fake_RLIMIT_CORE)
+@mock.patch.object(resource, "setrlimit", side_effect=(lambda x, y: None))
+@mock.patch.object(resource, "getrlimit", side_effect=(lambda x: None))
+class prevent_core_dump_TestCase(scaffold.TestCase):
+ """ Test cases for prevent_core_dump function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(prevent_core_dump_TestCase, self).setUp()
+
+ def test_sets_core_limit_to_zero(
+ self,
+ mock_func_resource_getrlimit, mock_func_resource_setrlimit):
+ """ Should set the RLIMIT_CORE resource to zero. """
+ expected_resource = fake_RLIMIT_CORE
+ expected_limit = tuple(RLimitResult(soft=0, hard=0))
+ daemon.daemon.prevent_core_dump()
+ mock_func_resource_getrlimit.assert_called_with(expected_resource)
+ mock_func_resource_setrlimit.assert_called_with(
+ expected_resource, expected_limit)
+
+ def test_raises_error_when_no_core_resource(
+ self,
+ mock_func_resource_getrlimit, mock_func_resource_setrlimit):
+ """ Should raise DaemonError if no RLIMIT_CORE resource. """
+ test_error = ValueError("Bogus platform doesn't have RLIMIT_CORE")
+ def fake_getrlimit(res):
+ if res == resource.RLIMIT_CORE:
+ raise test_error
+ else:
+ return None
+ mock_func_resource_getrlimit.side_effect = fake_getrlimit
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.prevent_core_dump)
+ self.assertEqual(test_error, exc.__cause__)
+
+
+@mock.patch.object(os, "close")
+class close_file_descriptor_if_open_TestCase(scaffold.TestCase):
+ """ Test cases for close_file_descriptor_if_open function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(close_file_descriptor_if_open_TestCase, self).setUp()
+
+ self.fake_fd = 274
+
+ def test_requests_file_descriptor_close(self, mock_func_os_close):
+ """ Should request close of file descriptor. """
+ fd = self.fake_fd
+ daemon.daemon.close_file_descriptor_if_open(fd)
+ mock_func_os_close.assert_called_with(fd)
+
+ def test_ignores_badfd_error_on_close(self, mock_func_os_close):
+ """ Should ignore OSError EBADF when closing. """
+ fd = self.fake_fd
+ test_error = OSError(errno.EBADF, "Bad file descriptor")
+ def fake_os_close(fd):
+ raise test_error
+ mock_func_os_close.side_effect = fake_os_close
+ daemon.daemon.close_file_descriptor_if_open(fd)
+ mock_func_os_close.assert_called_with(fd)
+
+ def test_raises_error_if_oserror_on_close(self, mock_func_os_close):
+ """ Should raise DaemonError if an OSError occurs when closing. """
+ fd = self.fake_fd
+ test_error = OSError(object(), "Unexpected error")
+ def fake_os_close(fd):
+ raise test_error
+ mock_func_os_close.side_effect = fake_os_close
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.close_file_descriptor_if_open, fd)
+ self.assertEqual(test_error, exc.__cause__)
+
+ def test_raises_error_if_ioerror_on_close(self, mock_func_os_close):
+ """ Should raise DaemonError if an IOError occurs when closing. """
+ fd = self.fake_fd
+ test_error = IOError(object(), "Unexpected error")
+ def fake_os_close(fd):
+ raise test_error
+ mock_func_os_close.side_effect = fake_os_close
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.close_file_descriptor_if_open, fd)
+ self.assertEqual(test_error, exc.__cause__)
+
+
+class maxfd_TestCase(scaffold.TestCase):
+ """ Test cases for module MAXFD constant. """
+
+ def test_positive(self):
+ """ Should be a positive number. """
+ maxfd = daemon.daemon.MAXFD
+ self.assertTrue(maxfd > 0)
+
+ def test_integer(self):
+ """ Should be an integer. """
+ maxfd = daemon.daemon.MAXFD
+ self.assertEqual(int(maxfd), maxfd)
+
+ def test_reasonably_high(self):
+ """ Should be reasonably high for default open files limit.
+
+ If the system reports a limit of “infinity” on maximum
+ file descriptors, we still need a finite number in order
+ to close “all” of them. Ensure this is reasonably high
+ to catch most use cases.
+
+ """
+ expected_minimum = 2048
+ maxfd = daemon.daemon.MAXFD
+ self.assertTrue(
+ expected_minimum <= maxfd,
+ msg=(
+ "MAXFD should be at least {minimum!r}"
+ " (got {maxfd!r})".format(
+ minimum=expected_minimum, maxfd=maxfd)))
+
+
+fake_default_maxfd = 8
+fake_RLIMIT_NOFILE = object()
+fake_RLIM_INFINITY = object()
+fake_rlimit_nofile_large = 2468
+
+def fake_getrlimit_nofile_soft_infinity(resource):
+ result = RLimitResult(soft=fake_RLIM_INFINITY, hard=object())
+ if resource != fake_RLIMIT_NOFILE:
+ result = NotImplemented
+ return result
+
+def fake_getrlimit_nofile_hard_infinity(resource):
+ result = RLimitResult(soft=object(), hard=fake_RLIM_INFINITY)
+ if resource != fake_RLIMIT_NOFILE:
+ result = NotImplemented
+ return result
+
+def fake_getrlimit_nofile_hard_large(resource):
+ result = RLimitResult(soft=object(), hard=fake_rlimit_nofile_large)
+ if resource != fake_RLIMIT_NOFILE:
+ result = NotImplemented
+ return result
+
+@mock.patch.object(daemon.daemon, "MAXFD", new=fake_default_maxfd)
+@mock.patch.object(resource, "RLIMIT_NOFILE", new=fake_RLIMIT_NOFILE)
+@mock.patch.object(resource, "RLIM_INFINITY", new=fake_RLIM_INFINITY)
+@mock.patch.object(
+ resource, "getrlimit",
+ side_effect=fake_getrlimit_nofile_hard_large)
+class get_maximum_file_descriptors_TestCase(scaffold.TestCase):
+ """ Test cases for get_maximum_file_descriptors function. """
+
+ def test_returns_system_hard_limit(self, mock_func_resource_getrlimit):
+ """ Should return process hard limit on number of files. """
+ expected_result = fake_rlimit_nofile_large
+ result = daemon.daemon.get_maximum_file_descriptors()
+ self.assertEqual(expected_result, result)
+
+ def test_returns_module_default_if_hard_limit_infinity(
+ self, mock_func_resource_getrlimit):
+ """ Should return module MAXFD if hard limit is infinity. """
+ mock_func_resource_getrlimit.side_effect = (
+ fake_getrlimit_nofile_hard_infinity)
+ expected_result = fake_default_maxfd
+ result = daemon.daemon.get_maximum_file_descriptors()
+ self.assertEqual(expected_result, result)
+
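+# Per the two tests above, the expected logic is roughly (a sketch, not
+# the library source):
+#
+#     (soft, hard) = resource.getrlimit(resource.RLIMIT_NOFILE)
+#     return MAXFD if hard == resource.RLIM_INFINITY else hard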
+
+def fake_get_maximum_file_descriptors():
+ return fake_default_maxfd
+
+@mock.patch.object(resource, "RLIMIT_NOFILE", new=fake_RLIMIT_NOFILE)
+@mock.patch.object(resource, "RLIM_INFINITY", new=fake_RLIM_INFINITY)
+@mock.patch.object(
+ resource, "getrlimit",
+ new=fake_getrlimit_nofile_soft_infinity)
+@mock.patch.object(
+ daemon.daemon, "get_maximum_file_descriptors",
+ new=fake_get_maximum_file_descriptors)
+@mock.patch.object(daemon.daemon, "close_file_descriptor_if_open")
+class close_all_open_files_TestCase(scaffold.TestCase):
+ """ Test cases for close_all_open_files function. """
+
+ def test_requests_all_open_files_to_close(
+ self, mock_func_close_file_descriptor_if_open):
+ """ Should request close of all open files. """
+ expected_file_descriptors = range(fake_default_maxfd)
+ expected_calls = [
+ mock.call(fd) for fd in expected_file_descriptors]
+ daemon.daemon.close_all_open_files()
+ mock_func_close_file_descriptor_if_open.assert_has_calls(
+ expected_calls, any_order=True)
+
+ def test_requests_all_but_excluded_files_to_close(
+ self, mock_func_close_file_descriptor_if_open):
+ """ Should request close of all open files but those excluded. """
+ test_exclude = set([3, 7])
+ args = dict(
+ exclude=test_exclude,
+ )
+ expected_file_descriptors = set(
+ fd for fd in range(fake_default_maxfd)
+ if fd not in test_exclude)
+ expected_calls = [
+ mock.call(fd) for fd in expected_file_descriptors]
+ daemon.daemon.close_all_open_files(**args)
+ mock_func_close_file_descriptor_if_open.assert_has_calls(
+ expected_calls, any_order=True)
+
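+# Typical call as exercised above, keeping a hypothetical log file handle
+# open while closing every other inherited descriptor:
+#
+#     daemon.daemon.close_all_open_files(exclude={logfile.fileno()})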
+
+class detach_process_context_TestCase(scaffold.TestCase):
+ """ Test cases for detach_process_context function. """
+
+ class FakeOSExit(SystemExit):
+ """ Fake exception raised for os._exit(). """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(detach_process_context_TestCase, self).setUp()
+
+ self.mock_module_os = mock.MagicMock(wraps=os)
+
+ fake_pids = [0, 0]
+ func_patcher_os_fork = mock.patch.object(
+ os, "fork",
+ side_effect=iter(fake_pids))
+ self.mock_func_os_fork = func_patcher_os_fork.start()
+ self.addCleanup(func_patcher_os_fork.stop)
+ self.mock_module_os.attach_mock(self.mock_func_os_fork, "fork")
+
+ func_patcher_os_setsid = mock.patch.object(os, "setsid")
+ self.mock_func_os_setsid = func_patcher_os_setsid.start()
+ self.addCleanup(func_patcher_os_setsid.stop)
+ self.mock_module_os.attach_mock(self.mock_func_os_setsid, "setsid")
+
+ def raise_os_exit(status=None):
+ raise self.FakeOSExit(status)
+
+ func_patcher_os_force_exit = mock.patch.object(
+ os, "_exit",
+ side_effect=raise_os_exit)
+ self.mock_func_os_force_exit = func_patcher_os_force_exit.start()
+ self.addCleanup(func_patcher_os_force_exit.stop)
+ self.mock_module_os.attach_mock(self.mock_func_os_force_exit, "_exit")
+
+ def test_parent_exits(self):
+ """ Parent process should exit. """
+ parent_pid = 23
+ self.mock_func_os_fork.side_effect = iter([parent_pid])
+ self.assertRaises(
+ self.FakeOSExit,
+ daemon.daemon.detach_process_context)
+ self.mock_module_os.assert_has_calls([
+ mock.call.fork(),
+ mock.call._exit(0),
+ ])
+
+ def test_first_fork_error_raises_error(self):
+ """ Error on first fork should raise DaemonProcessDetachError. """
+ fork_errno = 13
+ fork_strerror = "Bad stuff happened"
+ test_error = OSError(fork_errno, fork_strerror)
+ test_pids_iter = iter([test_error])
+
+ def fake_fork():
+ next_item = next(test_pids_iter)
+ if isinstance(next_item, Exception):
+ raise next_item
+ else:
+ return next_item
+
+ self.mock_func_os_fork.side_effect = fake_fork
+ exc = self.assertRaises(
+ daemon.daemon.DaemonProcessDetachError,
+ daemon.daemon.detach_process_context)
+ self.assertEqual(test_error, exc.__cause__)
+ self.mock_module_os.assert_has_calls([
+ mock.call.fork(),
+ ])
+
+ def test_child_starts_new_process_group(self):
+ """ Child should start new process group. """
+ daemon.daemon.detach_process_context()
+ self.mock_module_os.assert_has_calls([
+ mock.call.fork(),
+ mock.call.setsid(),
+ ])
+
+ def test_child_forks_next_parent_exits(self):
+ """ Child should fork, then exit if parent. """
+ fake_pids = [0, 42]
+ self.mock_func_os_fork.side_effect = iter(fake_pids)
+ self.assertRaises(
+ self.FakeOSExit,
+ daemon.daemon.detach_process_context)
+ self.mock_module_os.assert_has_calls([
+ mock.call.fork(),
+ mock.call.setsid(),
+ mock.call.fork(),
+ mock.call._exit(0),
+ ])
+
+    def test_second_fork_error_raises_error(self):
+        """ Error on second fork should raise DaemonProcessDetachError. """
+ fork_errno = 17
+ fork_strerror = "Nasty stuff happened"
+ test_error = OSError(fork_errno, fork_strerror)
+ test_pids_iter = iter([0, test_error])
+
+ def fake_fork():
+ next_item = next(test_pids_iter)
+ if isinstance(next_item, Exception):
+ raise next_item
+ else:
+ return next_item
+
+ self.mock_func_os_fork.side_effect = fake_fork
+ exc = self.assertRaises(
+ daemon.daemon.DaemonProcessDetachError,
+ daemon.daemon.detach_process_context)
+ self.assertEqual(test_error, exc.__cause__)
+ self.mock_module_os.assert_has_calls([
+ mock.call.fork(),
+ mock.call.setsid(),
+ mock.call.fork(),
+ ])
+
+ def test_child_forks_next_child_continues(self):
+ """ Child should fork, then continue if child. """
+ daemon.daemon.detach_process_context()
+ self.mock_module_os.assert_has_calls([
+ mock.call.fork(),
+ mock.call.setsid(),
+ mock.call.fork(),
+ ])
+
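+# The call sequence asserted above is the classic double-fork
+# daemonisation idiom (shown schematically; error handling omitted):
+#
+#     if os.fork() > 0:        # first fork; original parent exits
+#         os._exit(0)
+#     os.setsid()              # child becomes a session leader
+#     if os.fork() > 0:        # second fork; session leader exits
+#         os._exit(0)
+#     # the surviving grandchild can never reacquire a controlling terminal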
+
+@mock.patch("os.getppid", return_value=765)
+class is_process_started_by_init_TestCase(scaffold.TestCase):
+ """ Test cases for is_process_started_by_init function. """
+
+ def test_returns_false_by_default(self, mock_func_os_getppid):
+ """ Should return False under normal circumstances. """
+ expected_result = False
+ result = daemon.daemon.is_process_started_by_init()
+ self.assertIs(result, expected_result)
+
+ def test_returns_true_if_parent_process_is_init(
+ self, mock_func_os_getppid):
+ """ Should return True if parent process is `init`. """
+ init_pid = 1
+ mock_func_os_getppid.return_value = init_pid
+ expected_result = True
+ result = daemon.daemon.is_process_started_by_init()
+ self.assertIs(result, expected_result)
+
+
+class is_socket_TestCase(scaffold.TestCase):
+ """ Test cases for is_socket function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(is_socket_TestCase, self).setUp()
+
+ def fake_getsockopt(level, optname, buflen=None):
+ result = object()
+ if optname is socket.SO_TYPE:
+ result = socket.SOCK_RAW
+ return result
+
+ self.fake_socket_getsockopt_func = fake_getsockopt
+
+ self.fake_socket_error = socket.error(
+ errno.ENOTSOCK,
+ "Socket operation on non-socket")
+
+ self.mock_socket = mock.MagicMock(spec=socket.socket)
+ self.mock_socket.getsockopt.side_effect = self.fake_socket_error
+
+ def fake_socket_fromfd(fd, family, type, proto=None):
+ return self.mock_socket
+
+ func_patcher_socket_fromfd = mock.patch.object(
+ socket, "fromfd",
+ side_effect=fake_socket_fromfd)
+ func_patcher_socket_fromfd.start()
+ self.addCleanup(func_patcher_socket_fromfd.stop)
+
+ def test_returns_false_by_default(self):
+ """ Should return False under normal circumstances. """
+ test_fd = 23
+ expected_result = False
+ result = daemon.daemon.is_socket(test_fd)
+ self.assertIs(result, expected_result)
+
+ def test_returns_true_if_stdin_is_socket(self):
+ """ Should return True if `stdin` is a socket. """
+ test_fd = 23
+ getsockopt = self.mock_socket.getsockopt
+ getsockopt.side_effect = self.fake_socket_getsockopt_func
+ expected_result = True
+ result = daemon.daemon.is_socket(test_fd)
+ self.assertIs(result, expected_result)
+
+    def test_returns_true_if_stdin_socket_raises_error(self):
+        """ Should return True if `stdin` is a socket that raises an error. """
+ test_fd = 23
+ getsockopt = self.mock_socket.getsockopt
+ getsockopt.side_effect = socket.error(
+ object(), "Weird socket stuff")
+ expected_result = True
+ result = daemon.daemon.is_socket(test_fd)
+ self.assertIs(result, expected_result)
+
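+# The fixtures above assume is_socket() probes the descriptor roughly
+# like this (an illustration, not the library source):
+#
+#     sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)
+#     try:
+#         sock.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)
+#     except socket.error as exc:
+#         return exc.errno != errno.ENOTSOCK   # only ENOTSOCK means "not a socket"
+#     return True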
+
+class is_process_started_by_superserver_TestCase(scaffold.TestCase):
+ """ Test cases for is_process_started_by_superserver function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(is_process_started_by_superserver_TestCase, self).setUp()
+
+ def fake_is_socket(fd):
+ if sys.__stdin__.fileno() == fd:
+ result = self.fake_stdin_is_socket_func()
+ else:
+ result = False
+ return result
+
+ self.fake_stdin_is_socket_func = (lambda: False)
+
+ func_patcher_is_socket = mock.patch.object(
+ daemon.daemon, "is_socket",
+ side_effect=fake_is_socket)
+ func_patcher_is_socket.start()
+ self.addCleanup(func_patcher_is_socket.stop)
+
+ def test_returns_false_by_default(self):
+ """ Should return False under normal circumstances. """
+ expected_result = False
+ result = daemon.daemon.is_process_started_by_superserver()
+ self.assertIs(result, expected_result)
+
+ def test_returns_true_if_stdin_is_socket(self):
+ """ Should return True if `stdin` is a socket. """
+ self.fake_stdin_is_socket_func = (lambda: True)
+ expected_result = True
+ result = daemon.daemon.is_process_started_by_superserver()
+ self.assertIs(result, expected_result)
+
+
+@mock.patch.object(
+ daemon.daemon, "is_process_started_by_superserver",
+ return_value=False)
+@mock.patch.object(
+ daemon.daemon, "is_process_started_by_init",
+ return_value=False)
+class is_detach_process_context_required_TestCase(scaffold.TestCase):
+ """ Test cases for is_detach_process_context_required function. """
+
+ def test_returns_true_by_default(
+ self,
+ mock_func_is_process_started_by_init,
+ mock_func_is_process_started_by_superserver):
+ """ Should return True under normal circumstances. """
+ expected_result = True
+ result = daemon.daemon.is_detach_process_context_required()
+ self.assertIs(result, expected_result)
+
+ def test_returns_false_if_started_by_init(
+ self,
+ mock_func_is_process_started_by_init,
+ mock_func_is_process_started_by_superserver):
+ """ Should return False if current process started by init. """
+ mock_func_is_process_started_by_init.return_value = True
+ expected_result = False
+ result = daemon.daemon.is_detach_process_context_required()
+ self.assertIs(result, expected_result)
+
+    def test_returns_false_if_started_by_superserver(
+ self,
+ mock_func_is_process_started_by_init,
+ mock_func_is_process_started_by_superserver):
+ """ Should return False if current process started by superserver. """
+ mock_func_is_process_started_by_superserver.return_value = True
+ expected_result = False
+ result = daemon.daemon.is_detach_process_context_required()
+ self.assertIs(result, expected_result)
+
+
+def setup_streams_fixtures(testcase):
+ """ Set up common test fixtures for standard streams. """
+ testcase.stream_file_paths = dict(
+ stdin=tempfile.mktemp(),
+ stdout=tempfile.mktemp(),
+ stderr=tempfile.mktemp(),
+ )
+
+ testcase.stream_files_by_name = dict(
+ (name, FakeFileDescriptorStringIO())
+ for name in ['stdin', 'stdout', 'stderr']
+ )
+
+ testcase.stream_files_by_path = dict(
+ (testcase.stream_file_paths[name],
+ testcase.stream_files_by_name[name])
+ for name in ['stdin', 'stdout', 'stderr']
+ )
+
+
+@mock.patch.object(os, "dup2")
+class redirect_stream_TestCase(scaffold.TestCase):
+ """ Test cases for redirect_stream function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(redirect_stream_TestCase, self).setUp()
+
+ self.test_system_stream = FakeFileDescriptorStringIO()
+ self.test_target_stream = FakeFileDescriptorStringIO()
+ self.test_null_file = FakeFileDescriptorStringIO()
+
+ def fake_os_open(path, flag, mode=None):
+ if path == os.devnull:
+ result = self.test_null_file.fileno()
+ else:
+ raise FileNotFoundError("No such file", path)
+ return result
+
+ func_patcher_os_open = mock.patch.object(
+ os, "open",
+ side_effect=fake_os_open)
+ self.mock_func_os_open = func_patcher_os_open.start()
+ self.addCleanup(func_patcher_os_open.stop)
+
+ def test_duplicates_target_file_descriptor(
+ self, mock_func_os_dup2):
+ """ Should duplicate file descriptor from target to system stream. """
+ system_stream = self.test_system_stream
+ system_fileno = system_stream.fileno()
+ target_stream = self.test_target_stream
+ target_fileno = target_stream.fileno()
+ daemon.daemon.redirect_stream(system_stream, target_stream)
+ mock_func_os_dup2.assert_called_with(target_fileno, system_fileno)
+
+ def test_duplicates_null_file_descriptor_by_default(
+ self, mock_func_os_dup2):
+ """ Should by default duplicate the null file to the system stream. """
+ system_stream = self.test_system_stream
+ system_fileno = system_stream.fileno()
+ target_stream = None
+ null_path = os.devnull
+ null_flag = os.O_RDWR
+ null_file = self.test_null_file
+ null_fileno = null_file.fileno()
+ daemon.daemon.redirect_stream(system_stream, target_stream)
+ self.mock_func_os_open.assert_called_with(null_path, null_flag)
+ mock_func_os_dup2.assert_called_with(null_fileno, system_fileno)
+
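+# Behaviour pinned down by the two tests above, in outline (illustrative
+# pseudo-code, not the library source):
+#
+#     if target_stream is None:
+#         target_fd = os.open(os.devnull, os.O_RDWR)
+#     else:
+#         target_fd = target_stream.fileno()
+#     os.dup2(target_fd, system_stream.fileno())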
+
+class make_default_signal_map_TestCase(scaffold.TestCase):
+ """ Test cases for make_default_signal_map function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(make_default_signal_map_TestCase, self).setUp()
+
+ # Use whatever default string type this Python version needs.
+ signal_module_name = str('signal')
+ self.fake_signal_module = ModuleType(signal_module_name)
+
+ fake_signal_names = [
+ 'SIGHUP',
+ 'SIGCLD',
+ 'SIGSEGV',
+ 'SIGTSTP',
+ 'SIGTTIN',
+ 'SIGTTOU',
+ 'SIGTERM',
+ ]
+ for name in fake_signal_names:
+ setattr(self.fake_signal_module, name, object())
+
+ module_patcher_signal = mock.patch.object(
+ daemon.daemon, "signal", new=self.fake_signal_module)
+ module_patcher_signal.start()
+ self.addCleanup(module_patcher_signal.stop)
+
+ default_signal_map_by_name = {
+ 'SIGTSTP': None,
+ 'SIGTTIN': None,
+ 'SIGTTOU': None,
+ 'SIGTERM': 'terminate',
+ }
+ self.default_signal_map = dict(
+ (getattr(self.fake_signal_module, name), target)
+ for (name, target) in default_signal_map_by_name.items())
+
+ def test_returns_constructed_signal_map(self):
+ """ Should return map per default. """
+ expected_result = self.default_signal_map
+ result = daemon.daemon.make_default_signal_map()
+ self.assertEqual(expected_result, result)
+
+ def test_returns_signal_map_with_only_ids_in_signal_module(self):
+ """ Should return map with only signals in the `signal` module.
+
+ The `signal` module is documented to only define those
+ signals which exist on the running system. Therefore the
+ default map should not contain any signals which are not
+ defined in the `signal` module.
+
+ """
+ del(self.default_signal_map[self.fake_signal_module.SIGTTOU])
+ del(self.fake_signal_module.SIGTTOU)
+ expected_result = self.default_signal_map
+ result = daemon.daemon.make_default_signal_map()
+ self.assertEqual(expected_result, result)
+
+
+@mock.patch.object(daemon.daemon.signal, "signal")
+class set_signal_handlers_TestCase(scaffold.TestCase):
+ """ Test cases for set_signal_handlers function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(set_signal_handlers_TestCase, self).setUp()
+
+ self.signal_handler_map = {
+ signal.SIGQUIT: object(),
+ signal.SIGSEGV: object(),
+ signal.SIGINT: object(),
+ }
+
+ def test_sets_signal_handler_for_each_item(self, mock_func_signal_signal):
+ """ Should set signal handler for each item in map. """
+ signal_handler_map = self.signal_handler_map
+ expected_calls = [
+ mock.call(signal_number, handler)
+ for (signal_number, handler) in signal_handler_map.items()]
+ daemon.daemon.set_signal_handlers(signal_handler_map)
+        self.assertEqual(expected_calls, mock_func_signal_signal.mock_calls)
+
+
+@mock.patch.object(daemon.daemon.atexit, "register")
+class register_atexit_function_TestCase(scaffold.TestCase):
+ """ Test cases for register_atexit_function function. """
+
+ def test_registers_function_for_atexit_processing(
+ self, mock_func_atexit_register):
+ """ Should register specified function for atexit processing. """
+ func = object()
+ daemon.daemon.register_atexit_function(func)
+ mock_func_atexit_register.assert_called_with(func)
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_metadata.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_metadata.py
new file mode 100755
index 00000000..692753f4
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_metadata.py
@@ -0,0 +1,380 @@
+# -*- coding: utf-8 -*-
+#
+# test/test_metadata.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Unit test for ‘_metadata’ private module.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import sys
+import errno
+import re
+try:
+ # Python 3 standard library.
+ import urllib.parse as urlparse
+except ImportError:
+ # Python 2 standard library.
+ import urlparse
+import functools
+import collections
+import json
+
+import pkg_resources
+import mock
+import testtools.helpers
+import testtools.matchers
+import testscenarios
+
+from . import scaffold
+from .scaffold import (basestring, unicode)
+
+import daemon._metadata as metadata
+
+
+class HasAttribute(testtools.matchers.Matcher):
+ """ A matcher to assert an object has a named attribute. """
+
+ def __init__(self, name):
+ self.attribute_name = name
+
+ def match(self, instance):
+ """ Assert the object `instance` has an attribute named `name`. """
+ result = None
+ if not testtools.helpers.safe_hasattr(instance, self.attribute_name):
+ result = AttributeNotFoundMismatch(instance, self.attribute_name)
+ return result
+
+
+class AttributeNotFoundMismatch(testtools.matchers.Mismatch):
+ """ The specified instance does not have the named attribute. """
+
+ def __init__(self, instance, name):
+ self.instance = instance
+ self.attribute_name = name
+
+ def describe(self):
+ """ Emit a text description of this mismatch. """
+ text = (
+ "{instance!r}"
+ " has no attribute named {name!r}").format(
+ instance=self.instance, name=self.attribute_name)
+ return text
+
+
+class metadata_value_TestCase(scaffold.TestCaseWithScenarios):
+ """ Test cases for metadata module values. """
+
+ expected_str_attributes = set([
+ 'version_installed',
+ 'author',
+ 'copyright',
+ 'license',
+ 'url',
+ ])
+
+ scenarios = [
+ (name, {'attribute_name': name})
+ for name in expected_str_attributes]
+ for (name, params) in scenarios:
+ if name == 'version_installed':
+ # No duck typing, this attribute might be None.
+ params['ducktype_attribute_name'] = NotImplemented
+ continue
+ # Expect an attribute of ‘str’ to test this value.
+ params['ducktype_attribute_name'] = 'isdigit'
+
+ def test_module_has_attribute(self):
+ """ Metadata should have expected value as a module attribute. """
+ self.assertThat(
+ metadata, HasAttribute(self.attribute_name))
+
+ def test_module_attribute_has_duck_type(self):
+ """ Metadata value should have expected duck-typing attribute. """
+ if self.ducktype_attribute_name == NotImplemented:
+ self.skipTest("Can't assert this attribute's type")
+ instance = getattr(metadata, self.attribute_name)
+ self.assertThat(
+ instance, HasAttribute(self.ducktype_attribute_name))
+
+
+class parse_person_field_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘get_latest_version’ function. """
+
+ scenarios = [
+ ('simple', {
+ 'test_person': "Foo Bar <foo.bar@example.com>",
+ 'expected_result': ("Foo Bar", "foo.bar@example.com"),
+ }),
+ ('empty', {
+ 'test_person': "",
+ 'expected_result': (None, None),
+ }),
+ ('none', {
+ 'test_person': None,
+ 'expected_error': TypeError,
+ }),
+ ('no email', {
+ 'test_person': "Foo Bar",
+ 'expected_result': ("Foo Bar", None),
+ }),
+ ]
+
+ def test_returns_expected_result(self):
+ """ Should return expected result. """
+ if hasattr(self, 'expected_error'):
+ self.assertRaises(
+ self.expected_error,
+ metadata.parse_person_field, self.test_person)
+ else:
+ result = metadata.parse_person_field(self.test_person)
+ self.assertEqual(self.expected_result, result)
+
+
+class YearRange_TestCase(scaffold.TestCaseWithScenarios):
+ """ Test cases for ‘YearRange’ class. """
+
+ scenarios = [
+ ('simple', {
+ 'begin_year': 1970,
+ 'end_year': 1979,
+ 'expected_text': "1970–1979",
+ }),
+ ('same year', {
+ 'begin_year': 1970,
+ 'end_year': 1970,
+ 'expected_text': "1970",
+ }),
+ ('no end year', {
+ 'begin_year': 1970,
+ 'end_year': None,
+ 'expected_text': "1970",
+ }),
+ ]
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(YearRange_TestCase, self).setUp()
+
+ self.test_instance = metadata.YearRange(
+ self.begin_year, self.end_year)
+
+ def test_text_representation_as_expected(self):
+ """ Text representation should be as expected. """
+ result = unicode(self.test_instance)
+ self.assertEqual(result, self.expected_text)
+
+
+FakeYearRange = collections.namedtuple('FakeYearRange', ['begin', 'end'])
+
+@mock.patch.object(metadata, 'YearRange', new=FakeYearRange)
+class make_year_range_TestCase(scaffold.TestCaseWithScenarios):
+ """ Test cases for ‘make_year_range’ function. """
+
+ scenarios = [
+ ('simple', {
+ 'begin_year': "1970",
+ 'end_date': "1979-01-01",
+ 'expected_range': FakeYearRange(begin=1970, end=1979),
+ }),
+ ('same year', {
+ 'begin_year': "1970",
+ 'end_date': "1970-01-01",
+ 'expected_range': FakeYearRange(begin=1970, end=1970),
+ }),
+ ('no end year', {
+ 'begin_year': "1970",
+ 'end_date': None,
+ 'expected_range': FakeYearRange(begin=1970, end=None),
+ }),
+ ('end date UNKNOWN token', {
+ 'begin_year': "1970",
+ 'end_date': "UNKNOWN",
+ 'expected_range': FakeYearRange(begin=1970, end=None),
+ }),
+ ('end date FUTURE token', {
+ 'begin_year': "1970",
+ 'end_date': "FUTURE",
+ 'expected_range': FakeYearRange(begin=1970, end=None),
+ }),
+ ]
+
+ def test_result_matches_expected_range(self):
+ """ Result should match expected YearRange. """
+ result = metadata.make_year_range(self.begin_year, self.end_date)
+ self.assertEqual(result, self.expected_range)
+
+
+class metadata_content_TestCase(scaffold.TestCase):
+ """ Test cases for content of metadata. """
+
+ def test_copyright_formatted_correctly(self):
+ """ Copyright statement should be formatted correctly. """
+ regex_pattern = (
+ "Copyright © "
+ "\d{4}" # four-digit year
+ "(?:–\d{4})?" # optional range dash and ending four-digit year
+ )
+ regex_flags = re.UNICODE
+ self.assertThat(
+ metadata.copyright,
+ testtools.matchers.MatchesRegex(regex_pattern, regex_flags))
+
+ def test_author_formatted_correctly(self):
+ """ Author information should be formatted correctly. """
+ regex_pattern = (
+ ".+ " # name
+ "<[^>]+>" # email address, in angle brackets
+ )
+ regex_flags = re.UNICODE
+ self.assertThat(
+ metadata.author,
+ testtools.matchers.MatchesRegex(regex_pattern, regex_flags))
+
+ def test_copyright_contains_author(self):
+ """ Copyright information should contain author information. """
+ self.assertThat(
+ metadata.copyright,
+ testtools.matchers.Contains(metadata.author))
+
+ def test_url_parses_correctly(self):
+ """ Homepage URL should parse correctly. """
+ result = urlparse.urlparse(metadata.url)
+ self.assertIsInstance(
+ result, urlparse.ParseResult,
+ "URL value {url!r} did not parse correctly".format(
+ url=metadata.url))
+
+
+try:
+ FileNotFoundError
+except NameError:
+ # Python 2 uses IOError.
+ FileNotFoundError = functools.partial(IOError, errno.ENOENT)
+
+version_info_filename = "version_info.json"
+
+def fake_func_has_metadata(testcase, resource_name):
+ """ Fake the behaviour of ‘pkg_resources.Distribution.has_metadata’. """
+ if (
+ resource_name != testcase.expected_resource_name
+ or not hasattr(testcase, 'test_version_info')):
+ return False
+ return True
+
+
+def fake_func_get_metadata(testcase, resource_name):
+ """ Fake the behaviour of ‘pkg_resources.Distribution.get_metadata’. """
+ if not fake_func_has_metadata(testcase, resource_name):
+ error = FileNotFoundError(resource_name)
+ raise error
+ content = testcase.test_version_info
+ return content
+
+
+def fake_func_get_distribution(testcase, distribution_name):
+ """ Fake the behaviour of ‘pkg_resources.get_distribution’. """
+ if distribution_name != metadata.distribution_name:
+ raise pkg_resources.DistributionNotFound
+ if hasattr(testcase, 'get_distribution_error'):
+ raise testcase.get_distribution_error
+ mock_distribution = testcase.mock_distribution
+ mock_distribution.has_metadata.side_effect = functools.partial(
+ fake_func_has_metadata, testcase)
+ mock_distribution.get_metadata.side_effect = functools.partial(
+ fake_func_get_metadata, testcase)
+ return mock_distribution
+
+
+@mock.patch.object(metadata, 'distribution_name', new="mock-dist")
+class get_distribution_version_info_TestCase(scaffold.TestCaseWithScenarios):
+ """ Test cases for ‘get_distribution_version_info’ function. """
+
+ default_version_info = {
+ 'release_date': "UNKNOWN",
+ 'version': "UNKNOWN",
+ 'maintainer': "UNKNOWN",
+ }
+
+ scenarios = [
+ ('version 0.0', {
+ 'test_version_info': json.dumps({
+ 'version': "0.0",
+ }),
+ 'expected_version_info': {'version': "0.0"},
+ }),
+ ('version 1.0', {
+ 'test_version_info': json.dumps({
+ 'version': "1.0",
+ }),
+ 'expected_version_info': {'version': "1.0"},
+ }),
+ ('file lorem_ipsum.json', {
+ 'version_info_filename': "lorem_ipsum.json",
+ 'test_version_info': json.dumps({
+ 'version': "1.0",
+ }),
+ 'expected_version_info': {'version': "1.0"},
+ }),
+ ('not installed', {
+ 'get_distribution_error': pkg_resources.DistributionNotFound(),
+ 'expected_version_info': default_version_info,
+ }),
+ ('no version_info', {
+ 'expected_version_info': default_version_info,
+ }),
+ ]
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(get_distribution_version_info_TestCase, self).setUp()
+
+ if hasattr(self, 'expected_resource_name'):
+ self.test_args = {'filename': self.expected_resource_name}
+ else:
+ self.test_args = {}
+ self.expected_resource_name = version_info_filename
+
+ self.mock_distribution = mock.MagicMock()
+ func_patcher_get_distribution = mock.patch.object(
+ pkg_resources, 'get_distribution')
+ func_patcher_get_distribution.start()
+ self.addCleanup(func_patcher_get_distribution.stop)
+ pkg_resources.get_distribution.side_effect = functools.partial(
+ fake_func_get_distribution, self)
+
+ def test_requests_installed_distribution(self):
+ """ The package distribution should be retrieved. """
+ expected_distribution_name = metadata.distribution_name
+ version_info = metadata.get_distribution_version_info(**self.test_args)
+ pkg_resources.get_distribution.assert_called_with(
+ expected_distribution_name)
+
+ def test_requests_specified_filename(self):
+ """ The specified metadata resource name should be requested. """
+ if hasattr(self, 'get_distribution_error'):
+ self.skipTest("No access to distribution")
+ version_info = metadata.get_distribution_version_info(**self.test_args)
+ self.mock_distribution.has_metadata.assert_called_with(
+ self.expected_resource_name)
+
+ def test_result_matches_expected_items(self):
+ """ The result should match the expected items. """
+ version_info = metadata.get_distribution_version_info(**self.test_args)
+ self.assertEqual(self.expected_version_info, version_info)
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_pidfile.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_pidfile.py
new file mode 100755
index 00000000..9b636ec8
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_pidfile.py
@@ -0,0 +1,472 @@
+# -*- coding: utf-8 -*-
+#
+# test/test_pidfile.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Unit test for ‘pidfile’ module.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+try:
+ # Python 3 standard library.
+ import builtins
+except ImportError:
+ # Python 2 standard library.
+ import __builtin__ as builtins
+import os
+import itertools
+import tempfile
+import errno
+import functools
+try:
+ # Standard library of Python 2.7 and later.
+ from io import StringIO
+except ImportError:
+ # Standard library of Python 2.6 and earlier.
+ from StringIO import StringIO
+
+import mock
+import lockfile
+
+from . import scaffold
+
+import daemon.pidfile
+
+
+class FakeFileDescriptorStringIO(StringIO, object):
+ """ A StringIO class that fakes a file descriptor. """
+
+ _fileno_generator = itertools.count()
+
+ def __init__(self, *args, **kwargs):
+ self._fileno = next(self._fileno_generator)
+ super(FakeFileDescriptorStringIO, self).__init__(*args, **kwargs)
+
+ def fileno(self):
+ return self._fileno
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ pass
+
+
+try:
+ FileNotFoundError
+ PermissionError
+except NameError:
+ # Python 2 uses IOError.
+ FileNotFoundError = functools.partial(IOError, errno.ENOENT)
+ PermissionError = functools.partial(IOError, errno.EPERM)
+
+
+def make_pidlockfile_scenarios():
+ """ Make a collection of scenarios for testing `PIDLockFile` instances.
+
+ :return: A collection of scenarios for tests involving
+        `PIDLockFile` instances.
+
+ The collection is a mapping from scenario name to a dictionary of
+ scenario attributes.
+
+ """
+
+ fake_current_pid = 235
+ fake_other_pid = 8642
+ fake_pidfile_path = tempfile.mktemp()
+
+ fake_pidfile_empty = FakeFileDescriptorStringIO()
+ fake_pidfile_current_pid = FakeFileDescriptorStringIO(
+ "{pid:d}\n".format(pid=fake_current_pid))
+ fake_pidfile_other_pid = FakeFileDescriptorStringIO(
+ "{pid:d}\n".format(pid=fake_other_pid))
+ fake_pidfile_bogus = FakeFileDescriptorStringIO(
+ "b0gUs")
+
+ scenarios = {
+ 'simple': {},
+ 'not-exist': {
+ 'open_func_name': 'fake_open_nonexist',
+ 'os_open_func_name': 'fake_os_open_nonexist',
+ },
+ 'not-exist-write-denied': {
+ 'open_func_name': 'fake_open_nonexist',
+ 'os_open_func_name': 'fake_os_open_nonexist',
+ },
+ 'not-exist-write-busy': {
+ 'open_func_name': 'fake_open_nonexist',
+ 'os_open_func_name': 'fake_os_open_nonexist',
+ },
+ 'exist-read-denied': {
+ 'open_func_name': 'fake_open_read_denied',
+ 'os_open_func_name': 'fake_os_open_read_denied',
+ },
+ 'exist-locked-read-denied': {
+ 'locking_pid': fake_other_pid,
+ 'open_func_name': 'fake_open_read_denied',
+ 'os_open_func_name': 'fake_os_open_read_denied',
+ },
+ 'exist-empty': {},
+ 'exist-invalid': {
+ 'pidfile': fake_pidfile_bogus,
+ },
+ 'exist-current-pid': {
+ 'pidfile': fake_pidfile_current_pid,
+ 'pidfile_pid': fake_current_pid,
+ },
+ 'exist-current-pid-locked': {
+ 'pidfile': fake_pidfile_current_pid,
+ 'pidfile_pid': fake_current_pid,
+ 'locking_pid': fake_current_pid,
+ },
+ 'exist-other-pid': {
+ 'pidfile': fake_pidfile_other_pid,
+ 'pidfile_pid': fake_other_pid,
+ },
+ 'exist-other-pid-locked': {
+ 'pidfile': fake_pidfile_other_pid,
+ 'pidfile_pid': fake_other_pid,
+ 'locking_pid': fake_other_pid,
+ },
+ }
+
+ for scenario in scenarios.values():
+ scenario['pid'] = fake_current_pid
+ scenario['pidfile_path'] = fake_pidfile_path
+ if 'pidfile' not in scenario:
+ scenario['pidfile'] = fake_pidfile_empty
+ if 'pidfile_pid' not in scenario:
+ scenario['pidfile_pid'] = None
+ if 'locking_pid' not in scenario:
+ scenario['locking_pid'] = None
+ if 'open_func_name' not in scenario:
+ scenario['open_func_name'] = 'fake_open_okay'
+ if 'os_open_func_name' not in scenario:
+ scenario['os_open_func_name'] = 'fake_os_open_okay'
+
+ return scenarios
+
+
+def setup_pidfile_fixtures(testcase):
+ """ Set up common fixtures for PID file test cases.
+
+ :param testcase: A `TestCase` instance to decorate.
+
+ Decorate the `testcase` with attributes to be fixtures for tests
+ involving `PIDLockFile` instances.
+
+ """
+ scenarios = make_pidlockfile_scenarios()
+ testcase.pidlockfile_scenarios = scenarios
+
+ def get_scenario_option(testcase, key, default=None):
+ value = default
+ try:
+ value = testcase.scenario[key]
+ except (NameError, TypeError, AttributeError, KeyError):
+ pass
+ return value
+
+ func_patcher_os_getpid = mock.patch.object(
+ os, "getpid",
+ return_value=scenarios['simple']['pid'])
+ func_patcher_os_getpid.start()
+ testcase.addCleanup(func_patcher_os_getpid.stop)
+
+ def make_fake_open_funcs(testcase):
+
+ def fake_open_nonexist(filename, mode, buffering):
+ if mode.startswith('r'):
+ error = FileNotFoundError(
+ "No such file {filename!r}".format(
+ filename=filename))
+ raise error
+ else:
+ result = testcase.scenario['pidfile']
+ return result
+
+ def fake_open_read_denied(filename, mode, buffering):
+ if mode.startswith('r'):
+ error = PermissionError(
+ "Read denied on {filename!r}".format(
+ filename=filename))
+ raise error
+ else:
+ result = testcase.scenario['pidfile']
+ return result
+
+ def fake_open_okay(filename, mode, buffering):
+ result = testcase.scenario['pidfile']
+ return result
+
+ def fake_os_open_nonexist(filename, flags, mode):
+ if (flags & os.O_CREAT):
+ result = testcase.scenario['pidfile'].fileno()
+ else:
+ error = FileNotFoundError(
+ "No such file {filename!r}".format(
+ filename=filename))
+ raise error
+ return result
+
+ def fake_os_open_read_denied(filename, flags, mode):
+ if (flags & os.O_CREAT):
+ result = testcase.scenario['pidfile'].fileno()
+ else:
+ error = PermissionError(
+ "Read denied on {filename!r}".format(
+ filename=filename))
+ raise error
+ return result
+
+ def fake_os_open_okay(filename, flags, mode):
+ result = testcase.scenario['pidfile'].fileno()
+ return result
+
+ funcs = dict(
+ (name, obj) for (name, obj) in vars().items()
+ if callable(obj))
+
+ return funcs
+
+ testcase.fake_pidfile_open_funcs = make_fake_open_funcs(testcase)
+
+ def fake_open(filename, mode='rt', buffering=None):
+ scenario_path = get_scenario_option(testcase, 'pidfile_path')
+ if filename == scenario_path:
+ func_name = testcase.scenario['open_func_name']
+ fake_open_func = testcase.fake_pidfile_open_funcs[func_name]
+ result = fake_open_func(filename, mode, buffering)
+ else:
+ result = FakeFileDescriptorStringIO()
+ return result
+
+ mock_open = mock.mock_open()
+ mock_open.side_effect = fake_open
+
+ func_patcher_builtin_open = mock.patch.object(
+ builtins, "open",
+ new=mock_open)
+ func_patcher_builtin_open.start()
+ testcase.addCleanup(func_patcher_builtin_open.stop)
+
+ def fake_os_open(filename, flags, mode=None):
+ scenario_path = get_scenario_option(testcase, 'pidfile_path')
+ if filename == scenario_path:
+ func_name = testcase.scenario['os_open_func_name']
+ fake_os_open_func = testcase.fake_pidfile_open_funcs[func_name]
+ result = fake_os_open_func(filename, flags, mode)
+ else:
+ result = FakeFileDescriptorStringIO().fileno()
+ return result
+
+ mock_os_open = mock.MagicMock(side_effect=fake_os_open)
+
+ func_patcher_os_open = mock.patch.object(
+ os, "open",
+ new=mock_os_open)
+ func_patcher_os_open.start()
+ testcase.addCleanup(func_patcher_os_open.stop)
+
+ def fake_os_fdopen(fd, mode='rt', buffering=None):
+ scenario_pidfile = get_scenario_option(
+ testcase, 'pidfile', FakeFileDescriptorStringIO())
+ if fd == testcase.scenario['pidfile'].fileno():
+ result = testcase.scenario['pidfile']
+ else:
+ raise OSError(errno.EBADF, "Bad file descriptor")
+ return result
+
+ mock_os_fdopen = mock.MagicMock(side_effect=fake_os_fdopen)
+
+ func_patcher_os_fdopen = mock.patch.object(
+ os, "fdopen",
+ new=mock_os_fdopen)
+ func_patcher_os_fdopen.start()
+ testcase.addCleanup(func_patcher_os_fdopen.stop)
+
+
+def make_lockfile_method_fakes(scenario):
+ """ Make common fake methods for lockfile class.
+
+ :param scenario: A scenario for testing with PIDLockFile.
+ :return: A mapping from normal function name to the corresponding
+ fake function.
+
+ Each fake function behaves appropriately for the specified `scenario`.
+
+ """
+
+ def fake_func_read_pid():
+ return scenario['pidfile_pid']
+ def fake_func_is_locked():
+ return (scenario['locking_pid'] is not None)
+ def fake_func_i_am_locking():
+ return (
+ scenario['locking_pid'] == scenario['pid'])
+ def fake_func_acquire(timeout=None):
+ if scenario['locking_pid'] is not None:
+ raise lockfile.AlreadyLocked()
+ scenario['locking_pid'] = scenario['pid']
+ def fake_func_release():
+ if scenario['locking_pid'] is None:
+ raise lockfile.NotLocked()
+ if scenario['locking_pid'] != scenario['pid']:
+ raise lockfile.NotMyLock()
+ scenario['locking_pid'] = None
+ def fake_func_break_lock():
+ scenario['locking_pid'] = None
+
+ fake_methods = dict(
+ (
+ func_name.replace('fake_func_', ''),
+ mock.MagicMock(side_effect=fake_func))
+ for (func_name, fake_func) in vars().items()
+ if func_name.startswith('fake_func_'))
+
+ return fake_methods
+
+
+def apply_lockfile_method_mocks(mock_lockfile, testcase, scenario):
+ """ Apply common fake methods to mock lockfile class.
+
+ :param mock_lockfile: An object providing the `LockFile` interface.
+ :param testcase: The `TestCase` instance providing the context for
+ the patch.
+ :param scenario: The `PIDLockFile` test scenario to use.
+
+ Mock the `LockFile` methods of `mock_lockfile`, by applying fake
+    methods customised for `scenario`. The mocking is done by a patch
+ within the context of `testcase`.
+
+ """
+ fake_methods = dict(
+ (func_name, fake_func)
+ for (func_name, fake_func) in
+ make_lockfile_method_fakes(scenario).items()
+ if func_name not in ['read_pid'])
+
+ for (func_name, fake_func) in fake_methods.items():
+ func_patcher = mock.patch.object(
+ mock_lockfile, func_name,
+ new=fake_func)
+ func_patcher.start()
+ testcase.addCleanup(func_patcher.stop)
+
+
+def setup_pidlockfile_fixtures(testcase, scenario_name=None):
+ """ Set up common fixtures for PIDLockFile test cases.
+
+ :param testcase: The `TestCase` instance to decorate.
+ :param scenario_name: The name of the `PIDLockFile` scenario to use.
+
+ Decorate the `testcase` with attributes that are fixtures for test
+    cases involving `PIDLockFile` instances.
+
+ """
+
+ setup_pidfile_fixtures(testcase)
+
+ for func_name in [
+ 'write_pid_to_pidfile',
+ 'remove_existing_pidfile',
+ ]:
+ func_patcher = mock.patch.object(lockfile.pidlockfile, func_name)
+ func_patcher.start()
+ testcase.addCleanup(func_patcher.stop)
+
+
+class TimeoutPIDLockFile_TestCase(scaffold.TestCase):
+ """ Test cases for ‘TimeoutPIDLockFile’ class. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(TimeoutPIDLockFile_TestCase, self).setUp()
+
+ pidlockfile_scenarios = make_pidlockfile_scenarios()
+ self.pidlockfile_scenario = pidlockfile_scenarios['simple']
+ pidfile_path = self.pidlockfile_scenario['pidfile_path']
+
+ for func_name in ['__init__', 'acquire']:
+ func_patcher = mock.patch.object(
+ lockfile.pidlockfile.PIDLockFile, func_name)
+ func_patcher.start()
+ self.addCleanup(func_patcher.stop)
+
+ self.scenario = {
+ 'pidfile_path': self.pidlockfile_scenario['pidfile_path'],
+ 'acquire_timeout': self.getUniqueInteger(),
+ }
+
+ self.test_kwargs = dict(
+ path=self.scenario['pidfile_path'],
+ acquire_timeout=self.scenario['acquire_timeout'],
+ )
+ self.test_instance = daemon.pidfile.TimeoutPIDLockFile(
+ **self.test_kwargs)
+
+ def test_inherits_from_pidlockfile(self):
+ """ Should inherit from PIDLockFile. """
+ instance = self.test_instance
+ self.assertIsInstance(instance, lockfile.pidlockfile.PIDLockFile)
+
+ def test_init_has_expected_signature(self):
+ """ Should have expected signature for ‘__init__’. """
+ def test_func(self, path, acquire_timeout=None, *args, **kwargs): pass
+ test_func.__name__ = str('__init__')
+ self.assertFunctionSignatureMatch(
+ test_func,
+ daemon.pidfile.TimeoutPIDLockFile.__init__)
+
+ def test_has_specified_acquire_timeout(self):
+ """ Should have specified ‘acquire_timeout’ value. """
+ instance = self.test_instance
+ expected_timeout = self.test_kwargs['acquire_timeout']
+ self.assertEqual(expected_timeout, instance.acquire_timeout)
+
+ @mock.patch.object(
+ lockfile.pidlockfile.PIDLockFile, "__init__",
+ autospec=True)
+ def test_calls_superclass_init(self, mock_init):
+ """ Should call the superclass ‘__init__’. """
+ expected_path = self.test_kwargs['path']
+ instance = daemon.pidfile.TimeoutPIDLockFile(**self.test_kwargs)
+ mock_init.assert_called_with(instance, expected_path)
+
+ @mock.patch.object(
+ lockfile.pidlockfile.PIDLockFile, "acquire",
+ autospec=True)
+ def test_acquire_uses_specified_timeout(self, mock_func_acquire):
+ """ Should call the superclass ‘acquire’ with specified timeout. """
+ instance = self.test_instance
+ test_timeout = self.getUniqueInteger()
+ expected_timeout = test_timeout
+ instance.acquire(test_timeout)
+ mock_func_acquire.assert_called_with(instance, expected_timeout)
+
+ @mock.patch.object(
+ lockfile.pidlockfile.PIDLockFile, "acquire",
+ autospec=True)
+ def test_acquire_uses_stored_timeout_by_default(self, mock_func_acquire):
+ """ Should call superclass ‘acquire’ with stored timeout by default. """
+ instance = self.test_instance
+ test_timeout = self.test_kwargs['acquire_timeout']
+ expected_timeout = test_timeout
+ instance.acquire()
+ mock_func_acquire.assert_called_with(instance, expected_timeout)
+
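+# Typical use mirroring these fixtures (the path and timeouts are
+# illustrative only):
+#
+#     pidfile = daemon.pidfile.TimeoutPIDLockFile(
+#         "/var/run/example.pid", acquire_timeout=5)
+#     pidfile.acquire()       # waits up to the stored 5-second timeout
+#     pidfile.acquire(30)     # an explicit timeout overrides the stored one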
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_runner.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_runner.py
new file mode 100755
index 00000000..4c0c714a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_runner.py
@@ -0,0 +1,675 @@
+# -*- coding: utf-8 -*-
+#
+# test/test_runner.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2009–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Unit test for ‘runner’ module.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+try:
+ # Python 3 standard library.
+ import builtins
+except ImportError:
+ # Python 2 standard library.
+ import __builtin__ as builtins
+import os
+import os.path
+import sys
+import tempfile
+import errno
+import signal
+import functools
+
+import lockfile
+import mock
+import testtools
+
+from . import scaffold
+from .scaffold import (basestring, unicode)
+from .test_pidfile import (
+ FakeFileDescriptorStringIO,
+ setup_pidfile_fixtures,
+ make_pidlockfile_scenarios,
+ apply_lockfile_method_mocks,
+ )
+from .test_daemon import (
+ setup_streams_fixtures,
+ )
+
+import daemon.daemon
+import daemon.runner
+import daemon.pidfile
+
+
+class ModuleExceptions_TestCase(scaffold.Exception_TestCase):
+ """ Test cases for module exception classes. """
+
+ scenarios = scaffold.make_exception_scenarios([
+ ('daemon.runner.DaemonRunnerError', dict(
+ exc_type = daemon.runner.DaemonRunnerError,
+ min_args = 1,
+ types = [Exception],
+ )),
+ ('daemon.runner.DaemonRunnerInvalidActionError', dict(
+ exc_type = daemon.runner.DaemonRunnerInvalidActionError,
+ min_args = 1,
+ types = [daemon.runner.DaemonRunnerError, ValueError],
+ )),
+ ('daemon.runner.DaemonRunnerStartFailureError', dict(
+ exc_type = daemon.runner.DaemonRunnerStartFailureError,
+ min_args = 1,
+ types = [daemon.runner.DaemonRunnerError, RuntimeError],
+ )),
+ ('daemon.runner.DaemonRunnerStopFailureError', dict(
+ exc_type = daemon.runner.DaemonRunnerStopFailureError,
+ min_args = 1,
+ types = [daemon.runner.DaemonRunnerError, RuntimeError],
+ )),
+ ])
+
+
+def make_runner_scenarios():
+ """ Make a collection of scenarios for testing `DaemonRunner` instances.
+
+ :return: A collection of scenarios for tests involving
+ `DaemonRunner` instances.
+
+ The collection is a mapping from scenario name to a dictionary of
+ scenario attributes.
+
+ """
+
+ pidlockfile_scenarios = make_pidlockfile_scenarios()
+
+ scenarios = {
+ 'simple': {
+ 'pidlockfile_scenario_name': 'simple',
+ },
+ 'pidfile-locked': {
+ 'pidlockfile_scenario_name': 'exist-other-pid-locked',
+ },
+ }
+
+ for scenario in scenarios.values():
+ if 'pidlockfile_scenario_name' in scenario:
+ pidlockfile_scenario = pidlockfile_scenarios.pop(
+ scenario['pidlockfile_scenario_name'])
+ scenario['pid'] = pidlockfile_scenario['pid']
+ scenario['pidfile_path'] = pidlockfile_scenario['pidfile_path']
+ scenario['pidfile_timeout'] = 23
+ scenario['pidlockfile_scenario'] = pidlockfile_scenario
+
+ return scenarios
+
+
+def set_runner_scenario(testcase, scenario_name):
+ """ Set the DaemonRunner test scenario for the test case.
+
+ :param testcase: The `TestCase` instance to decorate.
+ :param scenario_name: The name of the scenario to use.
+
+ Set the `DaemonRunner` test scenario name and decorate the
+ `testcase` with the corresponding scenario fixtures.
+
+ """
+ scenarios = testcase.runner_scenarios
+ testcase.scenario = scenarios[scenario_name]
+ apply_lockfile_method_mocks(
+ testcase.mock_runner_lockfile,
+ testcase,
+ testcase.scenario['pidlockfile_scenario'])
+
+
+def setup_runner_fixtures(testcase):
+ """ Set up common fixtures for `DaemonRunner` test cases.
+
+ :param testcase: A `TestCase` instance to decorate.
+
+ Decorate the `testcase` with attributes to be fixtures for tests
+ involving `DaemonRunner` instances.
+
+ """
+ setup_pidfile_fixtures(testcase)
+ setup_streams_fixtures(testcase)
+
+ testcase.runner_scenarios = make_runner_scenarios()
+
+ patcher_stderr = mock.patch.object(
+ sys, "stderr",
+ new=FakeFileDescriptorStringIO())
+ testcase.fake_stderr = patcher_stderr.start()
+ testcase.addCleanup(patcher_stderr.stop)
+
+ simple_scenario = testcase.runner_scenarios['simple']
+
+ testcase.mock_runner_lockfile = mock.MagicMock(
+ spec=daemon.pidfile.TimeoutPIDLockFile)
+ apply_lockfile_method_mocks(
+ testcase.mock_runner_lockfile,
+ testcase,
+ simple_scenario['pidlockfile_scenario'])
+ testcase.mock_runner_lockfile.path = simple_scenario['pidfile_path']
+
+ patcher_lockfile_class = mock.patch.object(
+ daemon.pidfile, "TimeoutPIDLockFile",
+ return_value=testcase.mock_runner_lockfile)
+ patcher_lockfile_class.start()
+ testcase.addCleanup(patcher_lockfile_class.stop)
+
+ class TestApp(object):
+
+ def __init__(self):
+ self.stdin_path = testcase.stream_file_paths['stdin']
+ self.stdout_path = testcase.stream_file_paths['stdout']
+ self.stderr_path = testcase.stream_file_paths['stderr']
+ self.pidfile_path = simple_scenario['pidfile_path']
+ self.pidfile_timeout = simple_scenario['pidfile_timeout']
+
+ run = mock.MagicMock(name="TestApp.run")
+
+ testcase.TestApp = TestApp
+
+ patcher_runner_daemoncontext = mock.patch.object(
+ daemon.runner, "DaemonContext", autospec=True)
+ patcher_runner_daemoncontext.start()
+ testcase.addCleanup(patcher_runner_daemoncontext.stop)
+
+ testcase.test_app = testcase.TestApp()
+
+ testcase.test_program_name = "bazprog"
+ testcase.test_program_path = os.path.join(
+ "/foo/bar", testcase.test_program_name)
+ testcase.valid_argv_params = {
+ 'start': [testcase.test_program_path, 'start'],
+ 'stop': [testcase.test_program_path, 'stop'],
+ 'restart': [testcase.test_program_path, 'restart'],
+ }
+
+ def fake_open(filename, mode=None, buffering=None):
+ if filename in testcase.stream_files_by_path:
+ result = testcase.stream_files_by_path[filename]
+ else:
+ result = FakeFileDescriptorStringIO()
+ result.mode = mode
+ result.buffering = buffering
+ return result
+
+ mock_open = mock.mock_open()
+ mock_open.side_effect = fake_open
+
+ func_patcher_builtin_open = mock.patch.object(
+ builtins, "open",
+ new=mock_open)
+ func_patcher_builtin_open.start()
+ testcase.addCleanup(func_patcher_builtin_open.stop)
+
+ func_patcher_os_kill = mock.patch.object(os, "kill")
+ func_patcher_os_kill.start()
+ testcase.addCleanup(func_patcher_os_kill.stop)
+
+ patcher_sys_argv = mock.patch.object(
+ sys, "argv",
+ new=testcase.valid_argv_params['start'])
+ patcher_sys_argv.start()
+ testcase.addCleanup(patcher_sys_argv.stop)
+
+ testcase.test_instance = daemon.runner.DaemonRunner(testcase.test_app)
+
+ testcase.scenario = NotImplemented
+
+
+class DaemonRunner_BaseTestCase(scaffold.TestCase):
+ """ Base class for DaemonRunner test case classes. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonRunner_BaseTestCase, self).setUp()
+
+ setup_runner_fixtures(self)
+ set_runner_scenario(self, 'simple')
+
+
+class DaemonRunner_TestCase(DaemonRunner_BaseTestCase):
+ """ Test cases for DaemonRunner class. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonRunner_TestCase, self).setUp()
+
+ func_patcher_parse_args = mock.patch.object(
+ daemon.runner.DaemonRunner, "parse_args")
+ func_patcher_parse_args.start()
+ self.addCleanup(func_patcher_parse_args.stop)
+
+ # Create a new instance now with our custom patches.
+ self.test_instance = daemon.runner.DaemonRunner(self.test_app)
+
+ def test_instantiate(self):
+ """ New instance of DaemonRunner should be created. """
+ self.assertIsInstance(self.test_instance, daemon.runner.DaemonRunner)
+
+ def test_parses_commandline_args(self):
+ """ Should parse commandline arguments. """
+ self.test_instance.parse_args.assert_called_with()
+
+ def test_has_specified_app(self):
+ """ Should have specified application object. """
+ self.assertIs(self.test_app, self.test_instance.app)
+
+ def test_sets_pidfile_none_when_pidfile_path_is_none(self):
+ """ Should set ‘pidfile’ to ‘None’ when ‘pidfile_path’ is ‘None’. """
+ pidfile_path = None
+ self.test_app.pidfile_path = pidfile_path
+ expected_pidfile = None
+ instance = daemon.runner.DaemonRunner(self.test_app)
+ self.assertIs(expected_pidfile, instance.pidfile)
+
+ def test_error_when_pidfile_path_not_string(self):
+ """ Should raise ValueError when PID file path not a string. """
+ pidfile_path = object()
+ self.test_app.pidfile_path = pidfile_path
+ expected_error = ValueError
+ self.assertRaises(
+ expected_error,
+ daemon.runner.DaemonRunner, self.test_app)
+
+ def test_error_when_pidfile_path_not_absolute(self):
+ """ Should raise ValueError when PID file path not absolute. """
+ pidfile_path = "foo/bar.pid"
+ self.test_app.pidfile_path = pidfile_path
+ expected_error = ValueError
+ self.assertRaises(
+ expected_error,
+ daemon.runner.DaemonRunner, self.test_app)
+
+ def test_creates_lock_with_specified_parameters(self):
+ """ Should create a TimeoutPIDLockFile with specified params. """
+ pidfile_path = self.scenario['pidfile_path']
+ pidfile_timeout = self.scenario['pidfile_timeout']
+ daemon.pidfile.TimeoutPIDLockFile.assert_called_with(
+ pidfile_path, pidfile_timeout)
+
+ def test_has_created_pidfile(self):
+ """ Should have new PID lock file as `pidfile` attribute. """
+ expected_pidfile = self.mock_runner_lockfile
+ instance = self.test_instance
+ self.assertIs(
+ expected_pidfile, instance.pidfile)
+
+ def test_daemon_context_has_created_pidfile(self):
+ """ DaemonContext component should have new PID lock file. """
+ expected_pidfile = self.mock_runner_lockfile
+ daemon_context = self.test_instance.daemon_context
+ self.assertIs(
+ expected_pidfile, daemon_context.pidfile)
+
+ def test_daemon_context_has_specified_stdin_stream(self):
+ """ DaemonContext component should have specified stdin file. """
+ expected_file = self.stream_files_by_name['stdin']
+ daemon_context = self.test_instance.daemon_context
+ self.assertEqual(expected_file, daemon_context.stdin)
+
+ def test_daemon_context_has_stdin_in_read_mode(self):
+ """ DaemonContext component should open stdin file for read. """
+ expected_mode = 'rt'
+ daemon_context = self.test_instance.daemon_context
+ self.assertIn(expected_mode, daemon_context.stdin.mode)
+
+ def test_daemon_context_has_specified_stdout_stream(self):
+ """ DaemonContext component should have specified stdout file. """
+ expected_file = self.stream_files_by_name['stdout']
+ daemon_context = self.test_instance.daemon_context
+ self.assertEqual(expected_file, daemon_context.stdout)
+
+ def test_daemon_context_has_stdout_in_update_mode(self):
+ """ DaemonContext component should open stdout file in write/update ('w+t') mode. """
+ expected_mode = 'w+t'
+ daemon_context = self.test_instance.daemon_context
+ self.assertIn(expected_mode, daemon_context.stdout.mode)
+
+ def test_daemon_context_has_specified_stderr_stream(self):
+ """ DaemonContext component should have specified stderr file. """
+ expected_file = self.stream_files_by_name['stderr']
+ daemon_context = self.test_instance.daemon_context
+ self.assertEqual(expected_file, daemon_context.stderr)
+
+ def test_daemon_context_has_stderr_in_update_mode(self):
+ """ DaemonContext component should open stderr file in write/update ('w+t') mode. """
+ expected_mode = 'w+t'
+ daemon_context = self.test_instance.daemon_context
+ self.assertIn(expected_mode, daemon_context.stderr.mode)
+
+ def test_daemon_context_has_stderr_with_no_buffering(self):
+ """ DaemonContext component should open stderr file unbuffered. """
+ expected_buffering = 0
+ daemon_context = self.test_instance.daemon_context
+ self.assertEqual(
+ expected_buffering, daemon_context.stderr.buffering)
+
+
+class DaemonRunner_usage_exit_TestCase(DaemonRunner_BaseTestCase):
+ """ Test cases for DaemonRunner.usage_exit method. """
+
+ def test_raises_system_exit(self):
+ """ Should raise SystemExit exception. """
+ instance = self.test_instance
+ argv = [self.test_program_path]
+ self.assertRaises(
+ SystemExit,
+ instance._usage_exit, argv)
+
+ def test_message_follows_conventional_format(self):
+ """ Should emit a conventional usage message. """
+ instance = self.test_instance
+ argv = [self.test_program_path]
+ expected_stderr_output = """\
+ usage: {progname} ...
+ """.format(
+ progname=self.test_program_name)
+ self.assertRaises(
+ SystemExit,
+ instance._usage_exit, argv)
+ self.assertOutputCheckerMatch(
+ expected_stderr_output, self.fake_stderr.getvalue())
+
+
+class DaemonRunner_parse_args_TestCase(DaemonRunner_BaseTestCase):
+ """ Test cases for DaemonRunner.parse_args method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonRunner_parse_args_TestCase, self).setUp()
+
+ func_patcher_usage_exit = mock.patch.object(
+ daemon.runner.DaemonRunner, "_usage_exit",
+ side_effect=NotImplementedError)
+ func_patcher_usage_exit.start()
+ self.addCleanup(func_patcher_usage_exit.stop)
+
+ def test_emits_usage_message_if_insufficient_args(self):
+ """ Should emit a usage message and exit if too few arguments. """
+ instance = self.test_instance
+ argv = [self.test_program_path]
+ self.assertRaises(
+ NotImplementedError,
+ instance.parse_args, argv)
+ daemon.runner.DaemonRunner._usage_exit.assert_called_with(argv)
+
+ def test_emits_usage_message_if_unknown_action_arg(self):
+ """ Should emit a usage message and exit if unknown action. """
+ instance = self.test_instance
+ argv = [self.test_program_path, 'bogus']
+ self.assertRaises(
+ NotImplementedError,
+ instance.parse_args, argv)
+ daemon.runner.DaemonRunner._usage_exit.assert_called_with(argv)
+
+ def test_should_parse_system_argv_by_default(self):
+ """ Should parse sys.argv by default. """
+ instance = self.test_instance
+ expected_action = 'start'
+ argv = self.valid_argv_params['start']
+ with mock.patch.object(sys, "argv", new=argv):
+ instance.parse_args()
+ self.assertEqual(expected_action, instance.action)
+
+ def test_sets_action_from_first_argument(self):
+ """ Should set action from first commandline argument. """
+ instance = self.test_instance
+ for name, argv in self.valid_argv_params.items():
+ expected_action = name
+ instance.parse_args(argv)
+ self.assertEqual(expected_action, instance.action)
+
+
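+# Compatibility shim: Python 2 has no ProcessLookupError, so the tests fall
+# back to ``functools.partial(OSError, errno.ESRCH)``; calling
+# ``ProcessLookupError("msg")`` then yields ``OSError(errno.ESRCH, "msg")``,
+# matching what Python 3 raises for a missing process.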
+try:
+ ProcessLookupError
+except NameError:
+ # Python 2 uses OSError.
+ ProcessLookupError = functools.partial(OSError, errno.ESRCH)
+
+class DaemonRunner_do_action_TestCase(DaemonRunner_BaseTestCase):
+ """ Test cases for DaemonRunner.do_action method. """
+
+ def test_raises_error_if_unknown_action(self):
+ """ Should emit a usage message and exit if action is unknown. """
+ instance = self.test_instance
+ instance.action = 'bogus'
+ expected_error = daemon.runner.DaemonRunnerInvalidActionError
+ self.assertRaises(
+ expected_error,
+ instance.do_action)
+
+
+class DaemonRunner_do_action_start_TestCase(DaemonRunner_BaseTestCase):
+ """ Test cases for DaemonRunner.do_action method, action 'start'. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonRunner_do_action_start_TestCase, self).setUp()
+
+ self.test_instance.action = 'start'
+
+ def test_raises_error_if_pidfile_locked(self):
+ """ Should raise error if PID file is locked. """
+
+ instance = self.test_instance
+ instance.daemon_context.open.side_effect = lockfile.AlreadyLocked
+ pidfile_path = self.scenario['pidfile_path']
+ expected_error = daemon.runner.DaemonRunnerStartFailureError
+ expected_message_content = pidfile_path
+ exc = self.assertRaises(
+ expected_error,
+ instance.do_action)
+ self.assertIn(expected_message_content, unicode(exc))
+
+ def test_breaks_lock_if_no_such_process(self):
+ """ Should request breaking lock if PID file process is not running. """
+ set_runner_scenario(self, 'pidfile-locked')
+ instance = self.test_instance
+ self.mock_runner_lockfile.read_pid.return_value = (
+ self.scenario['pidlockfile_scenario']['pidfile_pid'])
+ pidfile_path = self.scenario['pidfile_path']
+ test_pid = self.scenario['pidlockfile_scenario']['pidfile_pid']
+ expected_signal = signal.SIG_DFL
+ test_error = ProcessLookupError("Not running")
+ os.kill.side_effect = test_error
+ instance.do_action()
+ os.kill.assert_called_with(test_pid, expected_signal)
+ self.mock_runner_lockfile.break_lock.assert_called_with()
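+ # The runner appears to probe for the process with
+ # ``os.kill(pid, signal.SIG_DFL)``; on CPython ``signal.SIG_DFL`` compares
+ # equal to 0, and signal 0 only checks for existence without delivering
+ # anything, so an ESRCH error here marks the PID file as stale and
+ # triggers ``break_lock``.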
+
+ def test_requests_daemon_context_open(self):
+ """ Should request the daemon context to open. """
+ instance = self.test_instance
+ instance.do_action()
+ instance.daemon_context.open.assert_called_with()
+
+ def test_emits_start_message_to_stderr(self):
+ """ Should emit start message to stderr. """
+ instance = self.test_instance
+ expected_stderr = """\
+ started with pid {pid:d}
+ """.format(
+ pid=self.scenario['pid'])
+ instance.do_action()
+ self.assertOutputCheckerMatch(
+ expected_stderr, self.fake_stderr.getvalue())
+
+ def test_requests_app_run(self):
+ """ Should request the application to run. """
+ instance = self.test_instance
+ instance.do_action()
+ self.test_app.run.assert_called_with()
+
+
+class DaemonRunner_do_action_stop_TestCase(DaemonRunner_BaseTestCase):
+ """ Test cases for DaemonRunner.do_action method, action 'stop'. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonRunner_do_action_stop_TestCase, self).setUp()
+
+ set_runner_scenario(self, 'pidfile-locked')
+
+ self.test_instance.action = 'stop'
+
+ self.mock_runner_lockfile.is_locked.return_value = True
+ self.mock_runner_lockfile.i_am_locking.return_value = False
+ self.mock_runner_lockfile.read_pid.return_value = (
+ self.scenario['pidlockfile_scenario']['pidfile_pid'])
+
+ def test_raises_error_if_pidfile_not_locked(self):
+ """ Should raise error if PID file is not locked. """
+ set_runner_scenario(self, 'simple')
+ instance = self.test_instance
+ self.mock_runner_lockfile.is_locked.return_value = False
+ self.mock_runner_lockfile.i_am_locking.return_value = False
+ self.mock_runner_lockfile.read_pid.return_value = (
+ self.scenario['pidlockfile_scenario']['pidfile_pid'])
+ pidfile_path = self.scenario['pidfile_path']
+ expected_error = daemon.runner.DaemonRunnerStopFailureError
+ expected_message_content = pidfile_path
+ exc = self.assertRaises(
+ expected_error,
+ instance.do_action)
+ self.assertIn(expected_message_content, unicode(exc))
+
+ def test_breaks_lock_if_pidfile_stale(self):
+ """ Should break lock if PID file is stale. """
+ instance = self.test_instance
+ pidfile_path = self.scenario['pidfile_path']
+ test_pid = self.scenario['pidlockfile_scenario']['pidfile_pid']
+ expected_signal = signal.SIG_DFL
+ test_error = OSError(errno.ESRCH, "Not running")
+ os.kill.side_effect = test_error
+ instance.do_action()
+ self.mock_runner_lockfile.break_lock.assert_called_with()
+
+ def test_sends_terminate_signal_to_process_from_pidfile(self):
+ """ Should send SIGTERM to the daemon process. """
+ instance = self.test_instance
+ test_pid = self.scenario['pidlockfile_scenario']['pidfile_pid']
+ expected_signal = signal.SIGTERM
+ instance.do_action()
+ os.kill.assert_called_with(test_pid, expected_signal)
+
+ def test_raises_error_if_cannot_send_signal_to_process(self):
+ """ Should raise error if cannot send signal to daemon process. """
+ instance = self.test_instance
+ test_pid = self.scenario['pidlockfile_scenario']['pidfile_pid']
+ pidfile_path = self.scenario['pidfile_path']
+ test_error = OSError(errno.EPERM, "Nice try")
+ os.kill.side_effect = test_error
+ expected_error = daemon.runner.DaemonRunnerStopFailureError
+ expected_message_content = unicode(test_pid)
+ exc = self.assertRaises(
+ expected_error,
+ instance.do_action)
+ self.assertIn(expected_message_content, unicode(exc))
+
+
+@mock.patch.object(daemon.runner.DaemonRunner, "_start")
+@mock.patch.object(daemon.runner.DaemonRunner, "_stop")
+class DaemonRunner_do_action_restart_TestCase(DaemonRunner_BaseTestCase):
+ """ Test cases for DaemonRunner.do_action method, action 'restart'. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonRunner_do_action_restart_TestCase, self).setUp()
+
+ set_runner_scenario(self, 'pidfile-locked')
+
+ self.test_instance.action = 'restart'
+
+ def test_requests_stop_then_start(
+ self,
+ mock_func_daemonrunner_start, mock_func_daemonrunner_stop):
+ """ Should request stop, then start. """
+ instance = self.test_instance
+ instance.do_action()
+ mock_func_daemonrunner_start.assert_called_with()
+ mock_func_daemonrunner_stop.assert_called_with()
+
+
+@mock.patch.object(sys, "stderr")
+class emit_message_TestCase(scaffold.TestCase):
+ """ Test cases for ‘emit_message’ function. """
+
+ def test_writes_specified_message_to_stream(self, mock_stderr):
+ """ Should write specified message to stream. """
+ test_message = self.getUniqueString()
+ expected_content = "{message}\n".format(message=test_message)
+ daemon.runner.emit_message(test_message, stream=mock_stderr)
+ mock_stderr.write.assert_called_with(expected_content)
+
+ def test_writes_to_specified_stream(self, mock_stderr):
+ """ Should write message to specified stream. """
+ test_message = self.getUniqueString()
+ mock_stream = mock.MagicMock()
+ daemon.runner.emit_message(test_message, stream=mock_stream)
+ mock_stream.write.assert_called_with(mock.ANY)
+
+ def test_writes_to_stderr_by_default(self, mock_stderr):
+ """ Should write message to ‘sys.stderr’ by default. """
+ test_message = self.getUniqueString()
+ daemon.runner.emit_message(test_message)
+ mock_stderr.write.assert_called_with(mock.ANY)
+
+
+class is_pidfile_stale_TestCase(scaffold.TestCase):
+ """ Test cases for ‘is_pidfile_stale’ function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(is_pidfile_stale_TestCase, self).setUp()
+
+ func_patcher_os_kill = mock.patch.object(os, "kill")
+ func_patcher_os_kill.start()
+ self.addCleanup(func_patcher_os_kill.stop)
+ os.kill.return_value = None
+
+ self.test_pid = self.getUniqueInteger()
+ self.test_pidfile = mock.MagicMock(daemon.pidfile.TimeoutPIDLockFile)
+ self.test_pidfile.read_pid.return_value = self.test_pid
+
+ def test_returns_false_if_no_pid_in_file(self):
+ """ Should return False if the pidfile contains no PID. """
+ self.test_pidfile.read_pid.return_value = None
+ expected_result = False
+ result = daemon.runner.is_pidfile_stale(self.test_pidfile)
+ self.assertEqual(expected_result, result)
+
+ def test_returns_false_if_process_exists(self):
+ """ Should return False if the process with its PID exists. """
+ expected_result = False
+ result = daemon.runner.is_pidfile_stale(self.test_pidfile)
+ self.assertEqual(expected_result, result)
+
+ def test_returns_true_if_process_does_not_exist(self):
+ """ Should return True if the process does not exist. """
+ test_error = ProcessLookupError("No such process")
+ del os.kill.return_value
+ os.kill.side_effect = test_error
+ expected_result = True
+ result = daemon.runner.is_pidfile_stale(self.test_pidfile)
+ self.assertEqual(expected_result, result)
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test_version.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test_version.py
new file mode 100755
index 00000000..b52f521d
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test_version.py
@@ -0,0 +1,1373 @@
+# -*- coding: utf-8 -*-
+#
+# test_version.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; version 3 of that license or any later version.
+# No warranty expressed or implied. See the file ‘LICENSE.GPL-3’ for details.
+
+""" Unit test for ‘version’ packaging module. """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import os
+import os.path
+import io
+import errno
+import functools
+import collections
+import textwrap
+import json
+import tempfile
+import distutils.dist
+import distutils.cmd
+import distutils.errors
+import distutils.fancy_getopt
+try:
+ # Standard library of Python 2.7 and later.
+ from io import StringIO
+except ImportError:
+ # Standard library of Python 2.6 and earlier.
+ from StringIO import StringIO
+
+import mock
+import testtools
+import testscenarios
+import docutils
+import docutils.writers
+import docutils.nodes
+import setuptools
+import setuptools.command
+import setuptools.command.egg_info
+
+import version
+
+version.ensure_class_bases_begin_with(
+ version.__dict__, str('VersionInfoWriter'), docutils.writers.Writer)
+version.ensure_class_bases_begin_with(
+ version.__dict__, str('VersionInfoTranslator'),
+ docutils.nodes.SparseNodeVisitor)
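+# These module-level calls rewrite ``VersionInfoWriter`` and
+# ``VersionInfoTranslator`` inside ``version`` so that their bases begin with
+# the real docutils types; ``version.py`` appears to defer this to runtime to
+# cope with docutils's old-style classes on Python 2 (``str(...)`` keeps the
+# class names byte strings under ``unicode_literals``).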
+
+
+def make_test_classes_for_ensure_class_bases_begin_with():
+ """ Make test classes for use with ‘ensure_class_bases_begin_with’.
+
+ :return: Mapping {`name`: `type`} of the custom types created.
+
+ """
+
+ class quux_metaclass(type):
+ def __new__(metaclass, name, bases, namespace):
+ return super(quux_metaclass, metaclass).__new__(
+ metaclass, name, bases, namespace)
+
+ class Foo(object):
+ __metaclass__ = type
+
+ class Bar(object):
+ pass
+
+ class FooInheritingBar(Bar):
+ __metaclass__ = type
+
+ class FooWithCustomMetaclass(object):
+ __metaclass__ = quux_metaclass
+
+ result = dict(
+ (name, value) for (name, value) in locals().items()
+ if isinstance(value, type))
+
+ return result
+
+class ensure_class_bases_begin_with_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘ensure_class_bases_begin_with’ function. """
+
+ test_classes = make_test_classes_for_ensure_class_bases_begin_with()
+
+ scenarios = [
+ ('simple', {
+ 'test_class': test_classes['Foo'],
+ 'base_class': test_classes['Bar'],
+ }),
+ ('custom metaclass', {
+ 'test_class': test_classes['FooWithCustomMetaclass'],
+ 'base_class': test_classes['Bar'],
+ 'expected_metaclass': test_classes['quux_metaclass'],
+ }),
+ ]
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(ensure_class_bases_begin_with_TestCase, self).setUp()
+
+ self.class_name = self.test_class.__name__
+ self.test_module_namespace = {self.class_name: self.test_class}
+
+ if not hasattr(self, 'expected_metaclass'):
+ self.expected_metaclass = type
+
+ patcher_metaclass = mock.patch.object(
+ self.test_class, '__metaclass__')
+ patcher_metaclass.start()
+ self.addCleanup(patcher_metaclass.stop)
+
+ self.fake_new_class = type(object)
+ self.test_class.__metaclass__.return_value = (
+ self.fake_new_class)
+
+ def test_module_namespace_contains_new_class(self):
+ """ Specified module namespace should have new class. """
+ version.ensure_class_bases_begin_with(
+ self.test_module_namespace, self.class_name, self.base_class)
+ self.assertIn(self.fake_new_class, self.test_module_namespace.values())
+
+ def test_calls_metaclass_with_expected_class_name(self):
+ """ Should call the metaclass with the expected class name. """
+ version.ensure_class_bases_begin_with(
+ self.test_module_namespace, self.class_name, self.base_class)
+ expected_class_name = self.class_name
+ self.test_class.__metaclass__.assert_called_with(
+ expected_class_name, mock.ANY, mock.ANY)
+
+ def test_calls_metaclass_with_expected_bases(self):
+ """ Should call the metaclass with the expected bases. """
+ version.ensure_class_bases_begin_with(
+ self.test_module_namespace, self.class_name, self.base_class)
+ expected_bases = tuple(
+ [self.base_class]
+ + list(self.test_class.__bases__))
+ self.test_class.__metaclass__.assert_called_with(
+ mock.ANY, expected_bases, mock.ANY)
+
+ def test_calls_metaclass_with_expected_namespace(self):
+ """ Should call the metaclass with the expected class namespace. """
+ version.ensure_class_bases_begin_with(
+ self.test_module_namespace, self.class_name, self.base_class)
+ expected_namespace = self.test_class.__dict__.copy()
+ del expected_namespace['__dict__']
+ self.test_class.__metaclass__.assert_called_with(
+ mock.ANY, mock.ANY, expected_namespace)
+
+
+class ensure_class_bases_begin_with_AlreadyHasBase_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘ensure_class_bases_begin_with’ function.
+
+ These test cases test the conditions where the class's base is
+ already the specified base class.
+
+ """
+
+ test_classes = make_test_classes_for_ensure_class_bases_begin_with()
+
+ scenarios = [
+ ('already Bar subclass', {
+ 'test_class': test_classes['FooInheritingBar'],
+ 'base_class': test_classes['Bar'],
+ }),
+ ]
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(
+ ensure_class_bases_begin_with_AlreadyHasBase_TestCase,
+ self).setUp()
+
+ self.class_name = self.test_class.__name__
+ self.test_module_namespace = {self.class_name: self.test_class}
+
+ patcher_metaclass = mock.patch.object(
+ self.test_class, '__metaclass__')
+ patcher_metaclass.start()
+ self.addCleanup(patcher_metaclass.stop)
+
+ def test_metaclass_not_called(self):
+ """ Should not call metaclass to create a new type. """
+ version.ensure_class_bases_begin_with(
+ self.test_module_namespace, self.class_name, self.base_class)
+ self.assertFalse(self.test_class.__metaclass__.called)
+
+
+class VersionInfoWriter_TestCase(testtools.TestCase):
+ """ Test cases for ‘VersionInfoWriter’ class. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(VersionInfoWriter_TestCase, self).setUp()
+
+ self.test_instance = version.VersionInfoWriter()
+
+ def test_declares_version_info_support(self):
+ """ Should declare support for ‘version_info’. """
+ instance = self.test_instance
+ expected_support = "version_info"
+ result = instance.supports(expected_support)
+ self.assertTrue(result)
+
+
+class VersionInfoWriter_translate_TestCase(testtools.TestCase):
+ """ Test cases for ‘VersionInfoWriter.translate’ method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(VersionInfoWriter_translate_TestCase, self).setUp()
+
+ patcher_translator = mock.patch.object(
+ version, 'VersionInfoTranslator')
+ self.mock_class_translator = patcher_translator.start()
+ self.addCleanup(patcher_translator.stop)
+ self.mock_translator = self.mock_class_translator.return_value
+
+ self.test_instance = version.VersionInfoWriter()
+ patcher_document = mock.patch.object(
+ self.test_instance, 'document')
+ patcher_document.start()
+ self.addCleanup(patcher_document.stop)
+
+ def test_creates_translator_with_document(self):
+ """ Should create a translator with the writer's document. """
+ instance = self.test_instance
+ expected_document = self.test_instance.document
+ instance.translate()
+ self.mock_class_translator.assert_called_with(expected_document)
+
+ def test_calls_document_walkabout_with_translator(self):
+ """ Should call document.walkabout with the translator. """
+ instance = self.test_instance
+ instance.translate()
+ instance.document.walkabout.assert_called_with(self.mock_translator)
+
+ def test_output_from_translator_astext(self):
+ """ Should have output from translator.astext(). """
+ instance = self.test_instance
+ instance.translate()
+ expected_output = self.mock_translator.astext.return_value
+ self.assertEqual(expected_output, instance.output)
+
+
+class ChangeLogEntry_TestCase(testtools.TestCase):
+ """ Test cases for ‘ChangeLogEntry’ class. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(ChangeLogEntry_TestCase, self).setUp()
+
+ self.test_instance = version.ChangeLogEntry()
+
+ def test_instantiate(self):
+ """ New instance of ‘ChangeLogEntry’ should be created. """
+ self.assertIsInstance(
+ self.test_instance, version.ChangeLogEntry)
+
+ def test_minimum_zero_arguments(self):
+ """ Initialiser should not require any arguments. """
+ instance = version.ChangeLogEntry()
+ self.assertIsNot(instance, None)
+
+
+class ChangeLogEntry_release_date_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘ChangeLogEntry.release_date’ attribute. """
+
+ scenarios = [
+ ('default', {
+ 'test_args': {},
+ 'expected_release_date':
+ version.ChangeLogEntry.default_release_date,
+ }),
+ ('unknown token', {
+ 'test_args': {'release_date': "UNKNOWN"},
+ 'expected_release_date': "UNKNOWN",
+ }),
+ ('future token', {
+ 'test_args': {'release_date': "FUTURE"},
+ 'expected_release_date': "FUTURE",
+ }),
+ ('2001-01-01', {
+ 'test_args': {'release_date': "2001-01-01"},
+ 'expected_release_date': "2001-01-01",
+ }),
+ ('bogus', {
+ 'test_args': {'release_date': "b0gUs"},
+ 'expected_error': ValueError,
+ }),
+ ]
+
+ def test_has_expected_release_date(self):
+ """ Should have default `release_date` attribute. """
+ if hasattr(self, 'expected_error'):
+ self.assertRaises(
+ self.expected_error,
+ version.ChangeLogEntry, **self.test_args)
+ else:
+ instance = version.ChangeLogEntry(**self.test_args)
+ self.assertEqual(self.expected_release_date, instance.release_date)
+
+
+class ChangeLogEntry_version_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘ChangeLogEntry.version’ attribute. """
+
+ scenarios = [
+ ('default', {
+ 'test_args': {},
+ 'expected_version':
+ version.ChangeLogEntry.default_version,
+ }),
+ ('unknown token', {
+ 'test_args': {'version': "UNKNOWN"},
+ 'expected_version': "UNKNOWN",
+ }),
+ ('0.0', {
+ 'test_args': {'version': "0.0"},
+ 'expected_version': "0.0",
+ }),
+ ]
+
+ def test_has_expected_version(self):
+ """ Should have default `version` attribute. """
+ instance = version.ChangeLogEntry(**self.test_args)
+ self.assertEqual(self.expected_version, instance.version)
+
+
+class ChangeLogEntry_maintainer_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘ChangeLogEntry.maintainer’ attribute. """
+
+ scenarios = [
+ ('default', {
+ 'test_args': {},
+ 'expected_maintainer': None,
+ }),
+ ('person', {
+ 'test_args': {'maintainer': "Foo Bar <foo.bar@example.org>"},
+ 'expected_maintainer': "Foo Bar <foo.bar@example.org>",
+ }),
+ ('bogus', {
+ 'test_args': {'maintainer': "b0gUs"},
+ 'expected_error': ValueError,
+ }),
+ ]
+
+ def test_has_expected_maintainer(self):
+ """ Should have default `maintainer` attribute. """
+ if hasattr(self, 'expected_error'):
+ self.assertRaises(
+ self.expected_error,
+ version.ChangeLogEntry, **self.test_args)
+ else:
+ instance = version.ChangeLogEntry(**self.test_args)
+ self.assertEqual(self.expected_maintainer, instance.maintainer)
+
+
+class ChangeLogEntry_body_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘ChangeLogEntry.body’ attribute. """
+
+ scenarios = [
+ ('default', {
+ 'test_args': {},
+ 'expected_body': None,
+ }),
+ ('simple', {
+ 'test_args': {'body': "Foo bar baz."},
+ 'expected_body': "Foo bar baz.",
+ }),
+ ]
+
+ def test_has_expected_body(self):
+ """ Should have default `body` attribute. """
+ instance = version.ChangeLogEntry(**self.test_args)
+ self.assertEqual(self.expected_body, instance.body)
+
+
+class ChangeLogEntry_as_version_info_entry_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘ChangeLogEntry.as_version_info_entry’ attribute. """
+
+ scenarios = [
+ ('default', {
+ 'test_args': {},
+ 'expected_result': collections.OrderedDict([
+ ('release_date', version.ChangeLogEntry.default_release_date),
+ ('version', version.ChangeLogEntry.default_version),
+ ('maintainer', None),
+ ('body', None),
+ ]),
+ }),
+ ]
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(ChangeLogEntry_as_version_info_entry_TestCase, self).setUp()
+
+ self.test_instance = version.ChangeLogEntry(**self.test_args)
+
+ def test_returns_result(self):
+ """ Should return expected result. """
+ result = self.test_instance.as_version_info_entry()
+ self.assertEqual(self.expected_result, result)
+
+
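+# The helper below builds a mock docutils ``field`` node with name and body
+# children; its faked ``first_child_matching_class`` looks children up by the
+# mock's ``name`` so the translator code under test can navigate it like a
+# real node tree.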
+def make_mock_field_node(field_name, field_body):
+ """ Make a mock Docutils field node for tests. """
+
+ mock_field_node = mock.MagicMock(
+ name='field', spec=docutils.nodes.field)
+
+ mock_field_name_node = mock.MagicMock(
+ name='field_name', spec=docutils.nodes.field_name)
+ mock_field_name_node.parent = mock_field_node
+ mock_field_name_node.children = [field_name]
+
+ mock_field_body_node = mock.MagicMock(
+ name='field_body', spec=docutils.nodes.field_body)
+ mock_field_body_node.parent = mock_field_node
+ mock_field_body_node.children = [field_body]
+
+ mock_field_node.children = [mock_field_name_node, mock_field_body_node]
+
+ def fake_func_first_child_matching_class(node_class):
+ result = None
+ node_class_name = node_class.__name__
+ for (index, node) in enumerate(mock_field_node.children):
+ if node._mock_name == node_class_name:
+ result = index
+ break
+ return result
+
+ mock_field_node.first_child_matching_class.side_effect = (
+ fake_func_first_child_matching_class)
+
+ return mock_field_node
+
+
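+# ``JsonEqual`` follows the testtools matcher protocol: ``match`` returns
+# ``None`` on success and a ``Mismatch`` object on failure, which is what
+# lets ``assertThat`` produce the readable report implemented in
+# ``JsonValueMismatch.describe`` below.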
+class JsonEqual(testtools.matchers.Matcher):
+ """ A matcher to compare the value of JSON streams. """
+
+ def __init__(self, expected):
+ self.expected_value = expected
+
+ def match(self, content):
+ """ Assert the JSON `content` matches the `expected_content`. """
+ result = None
+ actual_value = json.loads(content.decode('utf-8'))
+ if actual_value != self.expected_value:
+ result = JsonValueMismatch(self.expected_value, actual_value)
+ return result
+
+
+class JsonValueMismatch(testtools.matchers.Mismatch):
+ """ The specified JSON stream does not evaluate to the expected value. """
+
+ def __init__(self, expected, actual):
+ self.expected_value = expected
+ self.actual_value = actual
+
+ def describe(self):
+ """ Emit a text description of this mismatch. """
+ expected_json_text = json.dumps(self.expected_value, indent=4)
+ actual_json_text = json.dumps(self.actual_value, indent=4)
+ text = (
+ "\n"
+ "reference: {expected}\n"
+ "actual: {actual}\n").format(
+ expected=expected_json_text, actual=actual_json_text)
+ return text
+
+
+class changelog_to_version_info_collection_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘changelog_to_version_info_collection’ function. """
+
+ scenarios = [
+ ('single entry', {
+ 'test_input': textwrap.dedent("""\
+ Version 1.0
+ ===========
+
+ :Released: 2009-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Lorem ipsum dolor sit amet.
+ """),
+ 'expected_version_info': [
+ {
+ 'release_date': "2009-01-01",
+ 'version': "1.0",
+ 'maintainer': "Foo Bar <foo.bar@example.org>",
+ 'body': "* Lorem ipsum dolor sit amet.\n",
+ },
+ ],
+ }),
+ ('multiple entries', {
+ 'test_input': textwrap.dedent("""\
+ Version 1.0
+ ===========
+
+ :Released: 2009-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Lorem ipsum dolor sit amet.
+
+
+ Version 0.8
+ ===========
+
+ :Released: 2004-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Donec venenatis nisl aliquam ipsum.
+
+
+ Version 0.7.2
+ =============
+
+ :Released: 2001-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Pellentesque elementum mollis finibus.
+ """),
+ 'expected_version_info': [
+ {
+ 'release_date': "2009-01-01",
+ 'version': "1.0",
+ 'maintainer': "Foo Bar <foo.bar@example.org>",
+ 'body': "* Lorem ipsum dolor sit amet.\n",
+ },
+ {
+ 'release_date': "2004-01-01",
+ 'version': "0.8",
+ 'maintainer': "Foo Bar <foo.bar@example.org>",
+ 'body': "* Donec venenatis nisl aliquam ipsum.\n",
+ },
+ {
+ 'release_date': "2001-01-01",
+ 'version': "0.7.2",
+ 'maintainer': "Foo Bar <foo.bar@example.org>",
+ 'body': "* Pellentesque elementum mollis finibus.\n",
+ },
+ ],
+ }),
+ ('trailing comment', {
+ 'test_input': textwrap.dedent("""\
+ Version NEXT
+ ============
+
+ :Released: FUTURE
+ :Maintainer:
+
+ * Lorem ipsum dolor sit amet.
+
+ ..
+ Vivamus aliquam felis rutrum rutrum dictum.
+ """),
+ 'expected_version_info': [
+ {
+ 'release_date': "FUTURE",
+ 'version': "NEXT",
+ 'maintainer': "",
+ 'body': "* Lorem ipsum dolor sit amet.\n",
+ },
+ ],
+ }),
+ ('inline comment', {
+ 'test_input': textwrap.dedent("""\
+ Version NEXT
+ ============
+
+ :Released: FUTURE
+ :Maintainer:
+
+ ..
+ Vivamus aliquam felis rutrum rutrum dictum.
+
+ * Lorem ipsum dolor sit amet.
+ """),
+ 'expected_version_info': [
+ {
+ 'release_date': "FUTURE",
+ 'version': "NEXT",
+ 'maintainer': "",
+ 'body': "* Lorem ipsum dolor sit amet.\n",
+ },
+ ],
+ }),
+ ('unreleased entry', {
+ 'test_input': textwrap.dedent("""\
+ Version NEXT
+ ============
+
+ :Released: FUTURE
+ :Maintainer:
+
+ * Lorem ipsum dolor sit amet.
+
+
+ Version 0.8
+ ===========
+
+ :Released: 2001-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Donec venenatis nisl aliquam ipsum.
+ """),
+ 'expected_version_info': [
+ {
+ 'release_date': "FUTURE",
+ 'version': "NEXT",
+ 'maintainer': "",
+ 'body': "* Lorem ipsum dolor sit amet.\n",
+ },
+ {
+ 'release_date': "2001-01-01",
+ 'version': "0.8",
+ 'maintainer': "Foo Bar <foo.bar@example.org>",
+ 'body': "* Donec venenatis nisl aliquam ipsum.\n",
+ },
+ ],
+ }),
+ ('no section', {
+ 'test_input': textwrap.dedent("""\
+ :Released: 2009-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Lorem ipsum dolor sit amet.
+ """),
+ 'expected_error': version.InvalidFormatError,
+ }),
+ ('subsection', {
+ 'test_input': textwrap.dedent("""\
+ Version 1.0
+ ===========
+
+ :Released: 2009-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Lorem ipsum dolor sit amet.
+
+ Ut ultricies fermentum quam
+ ---------------------------
+
+ * In commodo magna facilisis in.
+ """),
+ 'expected_error': version.InvalidFormatError,
+ 'subsection': True,
+ }),
+ ('unknown field', {
+ 'test_input': textwrap.dedent("""\
+ Version 1.0
+ ===========
+
+ :Released: 2009-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+ :Favourite: Spam
+
+ * Lorem ipsum dolor sit amet.
+ """),
+ 'expected_error': version.InvalidFormatError,
+ }),
+ ('invalid version word', {
+ 'test_input': textwrap.dedent("""\
+ BoGuS 1.0
+ =========
+
+ :Released: 2009-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Lorem ipsum dolor sit amet.
+ """),
+ 'expected_error': version.InvalidFormatError,
+ }),
+ ('invalid section title', {
+ 'test_input': textwrap.dedent("""\
+ Lorem Ipsum 1.0
+ ===============
+
+ :Released: 2009-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Lorem ipsum dolor sit amet.
+ """),
+ 'expected_error': version.InvalidFormatError,
+ }),
+ ]
+
+ def test_returns_expected_version_info(self):
+ """ Should return expected version info mapping. """
+ infile = StringIO(self.test_input)
+ if hasattr(self, 'expected_error'):
+ self.assertRaises(
+ self.expected_error,
+ version.changelog_to_version_info_collection, infile)
+ else:
+ result = version.changelog_to_version_info_collection(infile)
+ self.assertThat(result, JsonEqual(self.expected_version_info))
+
+
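+# As with the runner tests, the shims below make ``FileNotFoundError("msg")``
+# and ``PermissionError("msg")`` on Python 2 produce ``IOError`` instances
+# carrying the matching errno, so one spelling works on both interpreters.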
+try:
+ FileNotFoundError
+ PermissionError
+except NameError:
+ # Python 2 uses OSError.
+ FileNotFoundError = functools.partial(IOError, errno.ENOENT)
+ PermissionError = functools.partial(IOError, errno.EPERM)
+
+fake_version_info = {
+ 'release_date': "2001-01-01", 'version': "2.0",
+ 'maintainer': None, 'body': None,
+ }
+
+@mock.patch.object(
+ version, "get_latest_version", return_value=fake_version_info)
+class generate_version_info_from_changelog_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘generate_version_info_from_changelog’ function. """
+
+ fake_open_side_effects = {
+ 'success': (
+ lambda *args, **kwargs: StringIO()),
+ 'file not found': FileNotFoundError(),
+ 'permission denied': PermissionError(),
+ }
+
+ scenarios = [
+ ('simple', {
+ 'open_scenario': 'success',
+ 'fake_versions_json': json.dumps([fake_version_info]),
+ 'expected_result': fake_version_info,
+ }),
+ ('file not found', {
+ 'open_scenario': 'file not found',
+ 'expected_result': {},
+ }),
+ ('permission denied', {
+ 'open_scenario': 'permission denied',
+ 'expected_result': {},
+ }),
+ ]
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(generate_version_info_from_changelog_TestCase, self).setUp()
+
+ self.fake_changelog_file_path = tempfile.mktemp()
+
+ def fake_open(filespec, *args, **kwargs):
+ if filespec == self.fake_changelog_file_path:
+ side_effect = self.fake_open_side_effects[self.open_scenario]
+ if callable(side_effect):
+ result = side_effect()
+ else:
+ raise side_effect
+ else:
+ result = StringIO()
+ return result
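+ # The scenario table drives ``fake_open``: a callable side effect is
+ # invoked to produce a fake file for the 'success' case, while an
+ # exception instance is raised to simulate the 'file not found' and
+ # 'permission denied' read failures.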
+
+ func_patcher_io_open = mock.patch.object(
+ io, "open")
+ func_patcher_io_open.start()
+ self.addCleanup(func_patcher_io_open.stop)
+ io.open.side_effect = fake_open
+
+ self.file_encoding = "utf-8"
+
+ func_patcher_changelog_to_version_info_collection = mock.patch.object(
+ version, "changelog_to_version_info_collection")
+ func_patcher_changelog_to_version_info_collection.start()
+ self.addCleanup(func_patcher_changelog_to_version_info_collection.stop)
+ if hasattr(self, 'fake_versions_json'):
+ version.changelog_to_version_info_collection.return_value = (
+ self.fake_versions_json.encode(self.file_encoding))
+
+ def test_returns_empty_collection_on_read_error(
+ self,
+ mock_func_get_latest_version):
+ """ Should return empty collection on error reading changelog. """
+ test_error = PermissionError("Not for you")
+ version.changelog_to_version_info_collection.side_effect = test_error
+ result = version.generate_version_info_from_changelog(
+ self.fake_changelog_file_path)
+ expected_result = {}
+ self.assertDictEqual(expected_result, result)
+
+ def test_opens_file_with_expected_encoding(
+ self,
+ mock_func_get_latest_version):
+ """ Should open changelog file in text mode with expected encoding. """
+ result = version.generate_version_info_from_changelog(
+ self.fake_changelog_file_path)
+ expected_file_path = self.fake_changelog_file_path
+ expected_open_mode = 'rt'
+ expected_encoding = self.file_encoding
+ (open_args_positional, open_args_kwargs) = io.open.call_args
+ (open_args_filespec, open_args_mode) = open_args_positional[:2]
+ open_args_encoding = open_args_kwargs['encoding']
+ self.assertEqual(expected_file_path, open_args_filespec)
+ self.assertEqual(expected_open_mode, open_args_mode)
+ self.assertEqual(expected_encoding, open_args_encoding)
+
+ def test_returns_expected_result(
+ self,
+ mock_func_get_latest_version):
+ """ Should return expected result. """
+ result = version.generate_version_info_from_changelog(
+ self.fake_changelog_file_path)
+ self.assertEqual(self.expected_result, result)
+
+
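+# ``DefaultNoneDict`` builds mappings that return ``None`` for any missing
+# key, so the scenarios below can specify only ``release_date`` and still be
+# consumed as full version-info entries.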
+DefaultNoneDict = functools.partial(collections.defaultdict, lambda: None)
+
+class get_latest_version_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘get_latest_version’ function. """
+
+ scenarios = [
+ ('simple', {
+ 'test_versions': [
+ DefaultNoneDict({'release_date': "LATEST"}),
+ ],
+ 'expected_result': version.ChangeLogEntry.make_ordered_dict(
+ DefaultNoneDict({'release_date': "LATEST"})),
+ }),
+ ('no versions', {
+ 'test_versions': [],
+ 'expected_result': collections.OrderedDict(),
+ }),
+ ('ordered versions', {
+ 'test_versions': [
+ DefaultNoneDict({'release_date': "1"}),
+ DefaultNoneDict({'release_date': "2"}),
+ DefaultNoneDict({'release_date': "LATEST"}),
+ ],
+ 'expected_result': version.ChangeLogEntry.make_ordered_dict(
+ DefaultNoneDict({'release_date': "LATEST"})),
+ }),
+ ('un-ordered versions', {
+ 'test_versions': [
+ DefaultNoneDict({'release_date': "2"}),
+ DefaultNoneDict({'release_date': "LATEST"}),
+ DefaultNoneDict({'release_date': "1"}),
+ ],
+ 'expected_result': version.ChangeLogEntry.make_ordered_dict(
+ DefaultNoneDict({'release_date': "LATEST"})),
+ }),
+ ]
+
+ def test_returns_expected_result(self):
+ """ Should return expected result. """
+ result = version.get_latest_version(self.test_versions)
+ self.assertDictEqual(self.expected_result, result)
+
+
+@mock.patch.object(json, "dumps", side_effect=json.dumps)
+class serialise_version_info_from_mapping_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘get_latest_version’ function. """
+
+ scenarios = [
+ ('simple', {
+ 'test_version_info': {'foo': "spam"},
+ }),
+ ]
+
+ for (name, scenario) in scenarios:
+ scenario['fake_json_dump'] = json.dumps(scenario['test_version_info'])
+ scenario['expected_value'] = scenario['test_version_info']
+
+ def test_passes_specified_object(self, mock_func_json_dumps):
+ """ Should pass the specified object to `json.dumps`. """
+ result = version.serialise_version_info_from_mapping(
+ self.test_version_info)
+ mock_func_json_dumps.assert_called_with(
+ self.test_version_info, indent=mock.ANY)
+
+ def test_returns_expected_result(self, mock_func_json_dumps):
+ """ Should return expected result. """
+ mock_func_json_dumps.return_value = self.fake_json_dump
+ result = version.serialise_version_info_from_mapping(
+ self.test_version_info)
+ value = json.loads(result)
+ self.assertEqual(self.expected_value, value)
+
+
+DistributionMetadata_defaults = {
+ name: None
+ for name in list(collections.OrderedDict.fromkeys(
+ distutils.dist.DistributionMetadata._METHOD_BASENAMES))}
+FakeDistributionMetadata = collections.namedtuple(
+ 'FakeDistributionMetadata', DistributionMetadata_defaults.keys())
+
+Distribution_defaults = {
+ 'metadata': None,
+ 'version': None,
+ 'release_date': None,
+ 'maintainer': None,
+ 'maintainer_email': None,
+ }
+FakeDistribution = collections.namedtuple(
+ 'FakeDistribution', Distribution_defaults.keys())
+
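+# The factory below assembles read-only namedtuple fakes mirroring
+# ``distutils.dist.Distribution`` and its metadata object; the metadata
+# fields are derived from ``DistributionMetadata._METHOD_BASENAMES`` so the
+# fake accepts the same attribute names as the real class.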
+def make_fake_distribution(
+ fields_override=None, metadata_fields_override=None):
+ metadata_fields = DistributionMetadata_defaults.copy()
+ if metadata_fields_override is not None:
+ metadata_fields.update(metadata_fields_override)
+ metadata = FakeDistributionMetadata(**metadata_fields)
+
+ fields = Distribution_defaults.copy()
+ fields['metadata'] = metadata
+ if fields_override is not None:
+ fields.update(fields_override)
+ distribution = FakeDistribution(**fields)
+
+ return distribution
+
+
+class get_changelog_path_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘get_changelog_path’ function. """
+
+ default_path = "."
+ default_script_filename = "setup.py"
+
+ scenarios = [
+ ('simple', {}),
+ ('unusual script name', {
+ 'script_filename': "lorem_ipsum",
+ }),
+ ('relative script path', {
+ 'script_directory': "dolor/sit/amet",
+ }),
+ ('absolute script path', {
+ 'script_directory': "/dolor/sit/amet",
+ }),
+ ('specify filename', {
+ 'changelog_filename': "adipiscing",
+ }),
+ ]
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(get_changelog_path_TestCase, self).setUp()
+
+ self.test_distribution = mock.MagicMock(distutils.dist.Distribution)
+
+ if not hasattr(self, 'script_directory'):
+ self.script_directory = self.default_path
+ if not hasattr(self, 'script_filename'):
+ self.script_filename = self.default_script_filename
+ self.test_distribution.script_name = os.path.join(
+ self.script_directory, self.script_filename)
+
+ changelog_filename = version.changelog_filename
+ if hasattr(self, 'changelog_filename'):
+ changelog_filename = self.changelog_filename
+
+ self.expected_result = os.path.join(
+ self.script_directory, changelog_filename)
+
+ def test_returns_expected_result(self):
+ """ Should return expected result. """
+ args = {
+ 'distribution': self.test_distribution,
+ }
+ if hasattr(self, 'changelog_filename'):
+ args.update({'filename': self.changelog_filename})
+ result = version.get_changelog_path(**args)
+ self.assertEqual(self.expected_result, result)
+
+
+class WriteVersionInfoCommand_BaseTestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Base class for ‘WriteVersionInfoCommand’ test case classes. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(WriteVersionInfoCommand_BaseTestCase, self).setUp()
+
+ fake_distribution_name = self.getUniqueString()
+
+ self.test_distribution = distutils.dist.Distribution()
+ self.test_distribution.metadata.name = fake_distribution_name
+
+
+class WriteVersionInfoCommand_TestCase(WriteVersionInfoCommand_BaseTestCase):
+ """ Test cases for ‘WriteVersionInfoCommand’ class. """
+
+ def test_subclass_of_distutils_command(self):
+ """ Should be a subclass of ‘distutils.cmd.Command’. """
+ instance = version.WriteVersionInfoCommand(self.test_distribution)
+ self.assertIsInstance(instance, distutils.cmd.Command)
+
+
+class WriteVersionInfoCommand_user_options_TestCase(
+ WriteVersionInfoCommand_BaseTestCase):
+ """ Test cases for ‘WriteVersionInfoCommand.user_options’ attribute. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(WriteVersionInfoCommand_user_options_TestCase, self).setUp()
+
+ self.test_instance = version.WriteVersionInfoCommand(
+ self.test_distribution)
+ self.commandline_parser = distutils.fancy_getopt.FancyGetopt(
+ self.test_instance.user_options)
+
+ def test_parses_correctly_as_fancy_getopt(self):
+ """ Should parse correctly in ‘FancyGetopt’. """
+ self.assertIsInstance(
+ self.commandline_parser, distutils.fancy_getopt.FancyGetopt)
+
+ def test_includes_base_class_user_options(self):
+ """ Should include base class's user_options. """
+ base_command = setuptools.command.egg_info.egg_info
+ expected_user_options = base_command.user_options
+ self.assertThat(
+ set(expected_user_options),
+ IsSubset(set(self.test_instance.user_options)))
+
+ def test_has_option_changelog_path(self):
+ """ Should have a ‘changelog-path’ option. """
+ expected_option_name = "changelog-path="
+ result = self.commandline_parser.has_option(expected_option_name)
+ self.assertTrue(result)
+
+ def test_has_option_outfile_path(self):
+ """ Should have a ‘outfile-path’ option. """
+ expected_option_name = "outfile-path="
+ result = self.commandline_parser.has_option(expected_option_name)
+ self.assertTrue(result)
+
+
+class WriteVersionInfoCommand_initialize_options_TestCase(
+ WriteVersionInfoCommand_BaseTestCase):
+ """ Test cases for ‘WriteVersionInfoCommand.initialize_options’ method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(
+ WriteVersionInfoCommand_initialize_options_TestCase, self
+ ).setUp()
+
+ patcher_func_egg_info_initialize_options = mock.patch.object(
+ setuptools.command.egg_info.egg_info, "initialize_options")
+ patcher_func_egg_info_initialize_options.start()
+ self.addCleanup(patcher_func_egg_info_initialize_options.stop)
+
+ def test_calls_base_class_method(self):
+ """ Should call base class's ‘initialize_options’ method. """
+ instance = version.WriteVersionInfoCommand(self.test_distribution)
+ base_command_class = setuptools.command.egg_info.egg_info
+ base_command_class.initialize_options.assert_called_with()
+
+ def test_sets_changelog_path_to_none(self):
+ """ Should set ‘changelog_path’ attribute to ``None``. """
+ instance = version.WriteVersionInfoCommand(self.test_distribution)
+ self.assertIs(instance.changelog_path, None)
+
+ def test_sets_outfile_path_to_none(self):
+ """ Should set ‘outfile_path’ attribute to ``None``. """
+ instance = version.WriteVersionInfoCommand(self.test_distribution)
+ self.assertIs(instance.outfile_path, None)
+
+
+class WriteVersionInfoCommand_finalize_options_TestCase(
+ WriteVersionInfoCommand_BaseTestCase):
+ """ Test cases for ‘WriteVersionInfoCommand.finalize_options’ method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(WriteVersionInfoCommand_finalize_options_TestCase, self).setUp()
+
+ self.test_instance = version.WriteVersionInfoCommand(self.test_distribution)
+
+ patcher_func_egg_info_finalize_options = mock.patch.object(
+ setuptools.command.egg_info.egg_info, "finalize_options")
+ patcher_func_egg_info_finalize_options.start()
+ self.addCleanup(patcher_func_egg_info_finalize_options.stop)
+
+ self.fake_script_dir = self.getUniqueString()
+ self.test_distribution.script_name = os.path.join(
+ self.fake_script_dir, self.getUniqueString())
+
+ self.fake_egg_dir = self.getUniqueString()
+ self.test_instance.egg_info = self.fake_egg_dir
+
+ patcher_func_get_changelog_path = mock.patch.object(
+ version, "get_changelog_path")
+ patcher_func_get_changelog_path.start()
+ self.addCleanup(patcher_func_get_changelog_path.stop)
+
+ self.fake_changelog_path = self.getUniqueString()
+ version.get_changelog_path.return_value = self.fake_changelog_path
+
+ def test_calls_base_class_method(self):
+ """ Should call base class's ‘finalize_options’ method. """
+ base_command_class = setuptools.command.egg_info.egg_info
+ self.test_instance.finalize_options()
+ base_command_class.finalize_options.assert_called_with()
+
+ def test_sets_force_to_none(self):
+ """ Should set ‘force’ attribute to ``None``. """
+ self.test_instance.finalize_options()
+ self.assertIs(self.test_instance.force, None)
+
+ def test_sets_changelog_path_using_get_changelog_path(self):
+ """ Should set ‘changelog_path’ attribute if it was ``None``. """
+ self.test_instance.changelog_path = None
+ self.test_instance.finalize_options()
+ expected_changelog_path = self.fake_changelog_path
+ self.assertEqual(expected_changelog_path, self.test_instance.changelog_path)
+
+ def test_leaves_changelog_path_if_already_set(self):
+ """ Should leave ‘changelog_path’ attribute set. """
+ prior_changelog_path = self.getUniqueString()
+ self.test_instance.changelog_path = prior_changelog_path
+ self.test_instance.finalize_options()
+ expected_changelog_path = prior_changelog_path
+ self.assertEqual(expected_changelog_path, self.test_instance.changelog_path)
+
+ def test_sets_outfile_path_to_default(self):
+ """ Should set ‘outfile_path’ attribute to default value. """
+ fake_version_info_filename = self.getUniqueString()
+ with mock.patch.object(
+ version, "version_info_filename",
+ new=fake_version_info_filename):
+ self.test_instance.finalize_options()
+ expected_outfile_path = os.path.join(
+ self.fake_egg_dir, fake_version_info_filename)
+ self.assertEqual(expected_outfile_path, self.test_instance.outfile_path)
+
+ def test_leaves_outfile_path_if_already_set(self):
+ """ Should leave ‘outfile_path’ attribute set. """
+ prior_outfile_path = self.getUniqueString()
+ self.test_instance.outfile_path = prior_outfile_path
+ self.test_instance.finalize_options()
+ expected_outfile_path = prior_outfile_path
+ self.assertEqual(expected_outfile_path, self.test_instance.outfile_path)
+
+
+class has_changelog_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘has_changelog’ function. """
+
+ fake_os_path_exists_side_effects = {
+ 'true': (lambda path: True),
+ 'false': (lambda path: False),
+ }
+
+ scenarios = [
+ ('no changelog path', {
+ 'changelog_path': None,
+ 'expected_result': False,
+ }),
+ ('changelog exists', {
+ 'os_path_exists_scenario': 'true',
+ 'expected_result': True,
+ }),
+ ('changelog not found', {
+ 'os_path_exists_scenario': 'false',
+ 'expected_result': False,
+ }),
+ ]
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(has_changelog_TestCase, self).setUp()
+
+ self.test_distribution = distutils.dist.Distribution()
+ self.test_command = version.EggInfoCommand(
+ self.test_distribution)
+
+ patcher_func_get_changelog_path = mock.patch.object(
+ version, "get_changelog_path")
+ patcher_func_get_changelog_path.start()
+ self.addCleanup(patcher_func_get_changelog_path.stop)
+
+ self.fake_changelog_file_path = self.getUniqueString()
+ if hasattr(self, 'changelog_path'):
+ self.fake_changelog_file_path = self.changelog_path
+ version.get_changelog_path.return_value = self.fake_changelog_file_path
+ self.fake_changelog_file = StringIO()
+
+ def fake_os_path_exists(path):
+ if path == self.fake_changelog_file_path:
+ side_effect = self.fake_os_path_exists_side_effects[
+ self.os_path_exists_scenario]
+ if callable(side_effect):
+ result = side_effect(path)
+ else:
+ raise side_effect
+ else:
+ result = False
+ return result
+
+ func_patcher_os_path_exists = mock.patch.object(
+ os.path, "exists")
+ func_patcher_os_path_exists.start()
+ self.addCleanup(func_patcher_os_path_exists.stop)
+ os.path.exists.side_effect = fake_os_path_exists
+
+ def test_gets_changelog_path_from_distribution(self):
+ """ Should call ‘get_changelog_path’ with distribution. """
+ result = version.has_changelog(self.test_command)
+ version.get_changelog_path.assert_called_with(
+ self.test_distribution)
+
+ def test_returns_expected_result(self):
+ """ Should be a subclass of ‘distutils.cmd.Command’. """
+ result = version.has_changelog(self.test_command)
+ self.assertEqual(self.expected_result, result)
+
+
+@mock.patch.object(version, 'generate_version_info_from_changelog')
+@mock.patch.object(version, 'serialise_version_info_from_mapping')
+@mock.patch.object(version.EggInfoCommand, "write_file")
+class WriteVersionInfoCommand_run_TestCase(
+ WriteVersionInfoCommand_BaseTestCase):
+ """ Test cases for ‘WriteVersionInfoCommand.run’ method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(WriteVersionInfoCommand_run_TestCase, self).setUp()
+
+ self.test_instance = version.WriteVersionInfoCommand(
+ self.test_distribution)
+
+ self.fake_changelog_path = self.getUniqueString()
+ self.test_instance.changelog_path = self.fake_changelog_path
+
+ self.fake_outfile_path = self.getUniqueString()
+ self.test_instance.outfile_path = self.fake_outfile_path
+
+ def test_returns_none(
+ self,
+ mock_func_egg_info_write_file,
+ mock_func_serialise_version_info,
+ mock_func_generate_version_info):
+ """ Should return ``None``. """
+ result = self.test_instance.run()
+ self.assertIs(result, None)
+
+ def test_generates_version_info_from_changelog(
+ self,
+ mock_func_egg_info_write_file,
+ mock_func_serialise_version_info,
+ mock_func_generate_version_info):
+ """ Should generate version info from specified changelog. """
+ self.test_instance.run()
+ expected_changelog_path = self.test_instance.changelog_path
+ mock_func_generate_version_info.assert_called_with(
+ expected_changelog_path)
+
+ def test_serialises_version_info_from_mapping(
+ self,
+ mock_func_egg_info_write_file,
+ mock_func_serialise_version_info,
+ mock_func_generate_version_info):
+ """ Should serialise version info from specified mapping. """
+ self.test_instance.run()
+ expected_version_info = mock_func_generate_version_info.return_value
+ mock_func_serialise_version_info.assert_called_with(
+ expected_version_info)
+
+ def test_writes_file_using_command_context(
+ self,
+ mock_func_egg_info_write_file,
+ mock_func_serialise_version_info,
+ mock_func_generate_version_info):
+ """ Should write the metadata file using the command context. """
+ self.test_instance.run()
+ expected_content = mock_func_serialise_version_info.return_value
+ mock_func_egg_info_write_file.assert_called_with(
+ "version info", self.fake_outfile_path, expected_content)
+
+
+IsSubset = testtools.matchers.MatchesPredicateWithParams(
+ set.issubset, "{0} should be a subset of {1}")
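+# ``IsSubset`` is a matcher factory built from
+# ``testtools.matchers.MatchesPredicateWithParams``: ``assertThat(x,
+# IsSubset(y))`` appears to pass exactly when ``x.issubset(y)`` holds, with
+# the template string used for the failure message.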
+
+class EggInfoCommand_TestCase(testtools.TestCase):
+ """ Test cases for ‘EggInfoCommand’ class. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(EggInfoCommand_TestCase, self).setUp()
+
+ self.test_distribution = distutils.dist.Distribution()
+ self.test_instance = version.EggInfoCommand(self.test_distribution)
+
+ def test_subclass_of_setuptools_egg_info(self):
+ """ Should be a subclass of Setuptools ‘egg_info’. """
+ self.assertIsInstance(
+ self.test_instance, setuptools.command.egg_info.egg_info)
+
+ def test_sub_commands_include_base_class_sub_commands(self):
+ """ Should include base class's sub-commands in this sub_commands. """
+ base_command = setuptools.command.egg_info.egg_info
+ expected_sub_commands = base_command.sub_commands
+ self.assertThat(
+ set(expected_sub_commands),
+ IsSubset(set(self.test_instance.sub_commands)))
+
+ def test_sub_commands_includes_write_version_info_command(self):
+ """ Should include sub-command named ‘write_version_info’. """
+ commands_by_name = dict(self.test_instance.sub_commands)
+ expected_predicate = version.has_changelog
+ expected_item = ('write_version_info', expected_predicate)
+ self.assertIn(expected_item, commands_by_name.items())
+
+
+@mock.patch.object(setuptools.command.egg_info.egg_info, "run")
+class EggInfoCommand_run_TestCase(testtools.TestCase):
+ """ Test cases for ‘EggInfoCommand.run’ method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(EggInfoCommand_run_TestCase, self).setUp()
+
+ self.test_distribution = distutils.dist.Distribution()
+ self.test_instance = version.EggInfoCommand(self.test_distribution)
+
+ base_command = setuptools.command.egg_info.egg_info
+ patcher_func_egg_info_get_sub_commands = mock.patch.object(
+ base_command, "get_sub_commands")
+ patcher_func_egg_info_get_sub_commands.start()
+ self.addCleanup(patcher_func_egg_info_get_sub_commands.stop)
+
+ patcher_func_egg_info_run_command = mock.patch.object(
+ base_command, "run_command")
+ patcher_func_egg_info_run_command.start()
+ self.addCleanup(patcher_func_egg_info_run_command.stop)
+
+ self.fake_sub_commands = ["spam", "eggs", "beans"]
+ base_command.get_sub_commands.return_value = self.fake_sub_commands
+
+ def test_returns_none(self, mock_func_egg_info_run):
+ """ Should return ``None``. """
+ result = self.test_instance.run()
+ self.assertIs(result, None)
+
+ def test_runs_each_command_in_sub_commands(
+ self, mock_func_egg_info_run):
+ """ Should run each command in ‘self.get_sub_commands()’. """
+ base_command = setuptools.command.egg_info.egg_info
+ self.test_instance.run()
+ expected_calls = [mock.call(name) for name in self.fake_sub_commands]
+ base_command.run_command.assert_has_calls(expected_calls)
+
+ def test_calls_base_class_run(self, mock_func_egg_info_run):
+ """ Should call base class's ‘run’ method. """
+ result = self.test_instance.run()
+ mock_func_egg_info_run.assert_called_with()
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/version.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/version.py
new file mode 100755
index 00000000..7e4c4202
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/version.py
@@ -0,0 +1,547 @@
+# -*- coding: utf-8 -*-
+
+# version.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; version 3 of that license or any later version.
+# No warranty expressed or implied. See the file ‘LICENSE.GPL-3’ for details.
+
+""" Version information unified for human- and machine-readable formats.
+
+ The project ‘ChangeLog’ file is a reStructuredText document, with
+ each section describing a version of the project. The document is
+ intended to be readable as-is by end users.
+
+ This module handles transformation from the ‘ChangeLog’ to a
+ mapping of version information, serialised as JSON. It also
+ provides functionality for Distutils to use this information.
+
+ Requires:
+
+ * Docutils <http://docutils.sourceforge.net/>
+    * JSON <https://docs.python.org/3/library/json.html>
+
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import sys
+import os
+import io
+import errno
+import json
+import datetime
+import textwrap
+import re
+import functools
+import collections
+import distutils
+import distutils.errors
+import distutils.cmd
+try:
+ # Python 2 has both ‘str’ (bytes) and ‘unicode’ (text).
+ basestring = basestring
+ unicode = unicode
+except NameError:
+ # Python 3 names the Unicode data type ‘str’.
+ basestring = str
+ unicode = str
+
+import setuptools
+import setuptools.command.egg_info
+
+
+def ensure_class_bases_begin_with(namespace, class_name, base_class):
+ """ Ensure the named class's bases start with the base class.
+
+ :param namespace: The namespace containing the class name.
+ :param class_name: The name of the class to alter.
+ :param base_class: The type to be the first base class for the
+ newly created type.
+ :return: ``None``.
+
+ This function is a hack to circumvent a circular dependency:
+ using classes from a module which is not installed at the time
+ this module is imported.
+
+ Call this function after ensuring `base_class` is available,
+ before using the class named by `class_name`.
+
+ """
+ existing_class = namespace[class_name]
+ assert isinstance(existing_class, type)
+
+ bases = list(existing_class.__bases__)
+ if base_class is bases[0]:
+ # Already bound to a type with the right bases.
+ return
+ bases.insert(0, base_class)
+
+ new_class_namespace = existing_class.__dict__.copy()
+ # Type creation will assign the correct ‘__dict__’ attribute.
+ del new_class_namespace['__dict__']
+
+ metaclass = existing_class.__metaclass__
+ new_class = metaclass(class_name, tuple(bases), new_class_namespace)
+
+ namespace[class_name] = new_class
+
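+# Illustrative sketch only (not part of the upstream module): once the real
+# base class can be imported, this helper re-binds the placeholder name,
+# exactly as ‘changelog_to_version_info_collection’ does further below:
+#
+#     import docutils.writers
+#     ensure_class_bases_begin_with(
+#             globals(), str('VersionInfoWriter'), docutils.writers.Writer)
+#     # ‘VersionInfoWriter’ now inherits from ‘docutils.writers.Writer’.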
+
+class VersionInfoWriter(object):
+ """ Docutils writer to produce a version info JSON data stream. """
+
+ # This class needs its base class to be a class from `docutils`.
+ # But that would create a circular dependency: Setuptools cannot
+ # ensure `docutils` is available before importing this module.
+ #
+ # Use `ensure_class_bases_begin_with` after importing `docutils`, to
+ # re-bind the `VersionInfoWriter` name to a new type that inherits
+ # from `docutils.writers.Writer`.
+
+ __metaclass__ = type
+
+ supported = ['version_info']
+ """ Formats this writer supports. """
+
+ def __init__(self):
+ super(VersionInfoWriter, self).__init__()
+ self.translator_class = VersionInfoTranslator
+
+ def translate(self):
+ visitor = self.translator_class(self.document)
+ self.document.walkabout(visitor)
+ self.output = visitor.astext()
+
+
+rfc822_person_regex = re.compile(
+ "^(?P<name>[^<]+) <(?P<email>[^>]+)>$")
+
+class ChangeLogEntry:
+ """ An individual entry from the ‘ChangeLog’ document. """
+
+ __metaclass__ = type
+
+ field_names = [
+ 'release_date',
+ 'version',
+ 'maintainer',
+ 'body',
+ ]
+
+ date_format = "%Y-%m-%d"
+ default_version = "UNKNOWN"
+ default_release_date = "UNKNOWN"
+
+ def __init__(
+ self,
+ release_date=default_release_date, version=default_version,
+ maintainer=None, body=None):
+ self.validate_release_date(release_date)
+ self.release_date = release_date
+
+ self.version = version
+
+ self.validate_maintainer(maintainer)
+ self.maintainer = maintainer
+ self.body = body
+
+ @classmethod
+ def validate_release_date(cls, value):
+ """ Validate the `release_date` value.
+
+ :param value: The prospective `release_date` value.
+ :return: ``None`` if the value is valid.
+ :raises ValueError: If the value is invalid.
+
+ """
+ if value in ["UNKNOWN", "FUTURE"]:
+ # A valid non-date value.
+ return None
+
+ # Raises `ValueError` if parse fails.
+ datetime.datetime.strptime(value, ChangeLogEntry.date_format)
+
+ @classmethod
+ def validate_maintainer(cls, value):
+ """ Validate the `maintainer` value.
+
+ :param value: The prospective `maintainer` value.
+ :return: ``None`` if the value is valid.
+ :raises ValueError: If the value is invalid.
+
+ """
+ valid = False
+
+ if value is None:
+ valid = True
+ elif rfc822_person_regex.search(value):
+ valid = True
+
+ if not valid:
+ raise ValueError("Not a valid person specification {value!r}")
+ else:
+ return None
+
+ @classmethod
+ def make_ordered_dict(cls, fields):
+ """ Make an ordered dict of the fields. """
+ result = collections.OrderedDict(
+ (name, fields[name])
+ for name in cls.field_names)
+ return result
+
+ def as_version_info_entry(self):
+ """ Format the changelog entry as a version info entry. """
+ fields = vars(self)
+ entry = self.make_ordered_dict(fields)
+
+ return entry
+
+
+class InvalidFormatError(ValueError):
+ """ Raised when the document is not a valid ‘ChangeLog’ document. """
+
+
+class VersionInfoTranslator(object):
+ """ Translator from document nodes to a version info stream. """
+
+ # This class needs its base class to be a class from `docutils`.
+ # But that would create a circular dependency: Setuptools cannot
+ # ensure `docutils` is available before importing this module.
+ #
+ # Use `ensure_class_bases_begin_with` after importing `docutils`,
+ # to re-bind the `VersionInfoTranslator` name to a new type that
+ # inherits from `docutils.nodes.SparseNodeVisitor`.
+
+ __metaclass__ = type
+
+ wrap_width = 78
+ bullet_text = "* "
+
+ attr_convert_funcs_by_attr_name = {
+ 'released': ('release_date', unicode),
+ 'version': ('version', unicode),
+ 'maintainer': ('maintainer', unicode),
+ }
+
+ def __init__(self, document):
+ super(VersionInfoTranslator, self).__init__(document)
+ self.settings = document.settings
+ self.current_section_level = 0
+ self.current_field_name = None
+ self.content = []
+ self.indent_width = 0
+ self.initial_indent = ""
+ self.subsequent_indent = ""
+ self.current_entry = None
+
+ # Docutils is not available when this class is defined.
+ # Get the `docutils` module dynamically.
+ self._docutils = sys.modules['docutils']
+
+ def astext(self):
+ """ Return the translated document as text. """
+ text = json.dumps(self.content, indent=4)
+ return text
+
+ def append_to_current_entry(self, text):
+ if self.current_entry is not None:
+ if self.current_entry.body is not None:
+ self.current_entry.body += text
+
+ def visit_Text(self, node):
+ raw_text = node.astext()
+ text = textwrap.fill(
+ raw_text,
+ width=self.wrap_width,
+ initial_indent=self.initial_indent,
+ subsequent_indent=self.subsequent_indent)
+ self.append_to_current_entry(text)
+
+ def depart_Text(self, node):
+ pass
+
+ def visit_comment(self, node):
+ raise self._docutils.nodes.SkipNode
+
+ def visit_field_body(self, node):
+ field_list_node = node.parent.parent
+ if not isinstance(field_list_node, self._docutils.nodes.field_list):
+ raise InvalidFormatError(
+ "Unexpected field within {node!r}".format(
+ node=field_list_node))
+ (attr_name, convert_func) = self.attr_convert_funcs_by_attr_name[
+ self.current_field_name]
+ attr_value = convert_func(node.astext())
+ setattr(self.current_entry, attr_name, attr_value)
+
+ def depart_field_body(self, node):
+ pass
+
+ def visit_field_list(self, node):
+ pass
+
+ def depart_field_list(self, node):
+ self.current_field_name = None
+ self.current_entry.body = ""
+
+ def visit_field_name(self, node):
+ field_name = node.astext()
+ if self.current_section_level == 1:
+ # At a top-level section.
+ if field_name.lower() not in ["released", "maintainer"]:
+ raise InvalidFormatError(
+ "Unexpected field name {name!r}".format(name=field_name))
+ self.current_field_name = field_name.lower()
+
+ def depart_field_name(self, node):
+ pass
+
+ def visit_bullet_list(self, node):
+ self.current_context = []
+
+ def depart_bullet_list(self, node):
+ self.current_entry.changes = self.current_context
+ self.current_context = None
+
+ def adjust_indent_width(self, delta):
+ self.indent_width += delta
+ self.subsequent_indent = " " * self.indent_width
+ self.initial_indent = self.subsequent_indent
+
+ def visit_list_item(self, node):
+ indent_delta = +len(self.bullet_text)
+ self.adjust_indent_width(indent_delta)
+ self.initial_indent = self.subsequent_indent[:-indent_delta]
+ self.append_to_current_entry(self.initial_indent + self.bullet_text)
+
+ def depart_list_item(self, node):
+ indent_delta = +len(self.bullet_text)
+ self.adjust_indent_width(-indent_delta)
+ self.append_to_current_entry("\n")
+
+ def visit_section(self, node):
+ self.current_section_level += 1
+ if self.current_section_level == 1:
+ # At a top-level section.
+ self.current_entry = ChangeLogEntry()
+ else:
+ raise InvalidFormatError(
+ "Subsections not implemented for this writer")
+
+ def depart_section(self, node):
+ self.current_section_level -= 1
+ self.content.append(
+ self.current_entry.as_version_info_entry())
+ self.current_entry = None
+
+ _expected_title_word_length = len("Version FOO".split(" "))
+
+ def depart_title(self, node):
+ title_text = node.astext()
+ # At a top-level section.
+ words = title_text.split(" ")
+ version = None
+ if len(words) != self._expected_title_word_length:
+ raise InvalidFormatError(
+ "Unexpected title text {text!r}".format(text=title_text))
+ if words[0].lower() not in ["version"]:
+ raise InvalidFormatError(
+ "Unexpected title text {text!r}".format(text=title_text))
+ version = words[-1]
+ self.current_entry.version = version
+
+
+def changelog_to_version_info_collection(infile):
+ """ Render the ‘ChangeLog’ document to a version info collection.
+
+ :param infile: A file-like object containing the changelog.
+ :return: The serialised JSON data of the version info collection.
+
+ """
+
+ # Docutils is not available when Setuptools needs this module, so
+ # delay the imports to this function instead.
+ import docutils.core
+ import docutils.nodes
+ import docutils.writers
+
+ ensure_class_bases_begin_with(
+ globals(), str('VersionInfoWriter'), docutils.writers.Writer)
+ ensure_class_bases_begin_with(
+ globals(), str('VersionInfoTranslator'),
+ docutils.nodes.SparseNodeVisitor)
+
+ writer = VersionInfoWriter()
+ settings_overrides = {
+ 'doctitle_xform': False,
+ }
+ version_info_json = docutils.core.publish_string(
+ infile.read(), writer=writer,
+ settings_overrides=settings_overrides)
+
+ return version_info_json
+
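+# Illustrative sketch only (not part of the upstream module): the shape of
+# ‘ChangeLog’ section the translator above accepts is a top-level section
+# titled “Version <version>”, a field list with ‘Released’ and ‘Maintainer’
+# fields, then a bullet list forming the body (the entry below is
+# hypothetical):
+#
+#     Version 2.0.5
+#     =============
+#
+#     :Released: 2015-02-02
+#     :Maintainer: Ben Finney <ben+python@benfinney.id.au>
+#
+#     * Refine compatibility of exceptions for file operations.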
+
+try:
+ lru_cache = functools.lru_cache
+except AttributeError:
+ # Python < 3.2 does not have the `functools.lru_cache` function.
+ # Not essential, so replace it with a no-op.
+ lru_cache = lambda maxsize=None, typed=False: lambda func: func
+
+
+@lru_cache(maxsize=128)
+def generate_version_info_from_changelog(infile_path):
+ """ Get the version info for the latest version in the changelog.
+
+ :param infile_path: Filesystem path to the input changelog file.
+ :return: The generated version info mapping; or ``None`` if the
+ file cannot be read.
+
+ The document is explicitly opened as UTF-8 encoded text.
+
+ """
+ version_info = collections.OrderedDict()
+
+ versions_all_json = None
+ try:
+ with io.open(infile_path, 'rt', encoding="utf-8") as infile:
+ versions_all_json = changelog_to_version_info_collection(infile)
+ except EnvironmentError:
+ # If we can't read the input file, leave the collection empty.
+ pass
+
+ if versions_all_json is not None:
+ versions_all = json.loads(versions_all_json.decode('utf-8'))
+ version_info = get_latest_version(versions_all)
+
+ return version_info
+
+
+def get_latest_version(versions):
+ """ Get the latest version from a collection of changelog entries.
+
+ :param versions: A collection of mappings for changelog entries.
+ :return: An ordered mapping of fields for the latest version,
+ if `versions` is non-empty; otherwise, an empty mapping.
+
+ """
+ version_info = collections.OrderedDict()
+
+ versions_by_release_date = {
+ item['release_date']: item
+ for item in versions}
+ if versions_by_release_date:
+ latest_release_date = max(versions_by_release_date.keys())
+ version_info = ChangeLogEntry.make_ordered_dict(
+ versions_by_release_date[latest_release_date])
+
+ return version_info
+
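+# Note (illustration, not part of the upstream module): ISO ‘YYYY-MM-DD’
+# release dates compare chronologically as plain strings, and the sentinel
+# values ‘FUTURE’ and ‘UNKNOWN’ sort after any digit-led date, so an entry
+# that is not yet released is reported as the latest. A hypothetical call:
+#
+#     get_latest_version([
+#             {'release_date': '2014-01-01', 'version': '1.0', ...},
+#             {'release_date': '2015-02-02', 'version': '2.0.5', ...},
+#             ])
+#     # returns the ordered mapping of fields for version 2.0.5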
+
+def serialise_version_info_from_mapping(version_info):
+ """ Generate the version info serialised data.
+
+ :param version_info: Mapping of version info items.
+ :return: The version info serialised to JSON.
+
+ """
+ content = json.dumps(version_info, indent=4)
+
+ return content
+
+
+changelog_filename = "ChangeLog"
+
+def get_changelog_path(distribution, filename=changelog_filename):
+ """ Get the changelog file path for the distribution.
+
+ :param distribution: The distutils.dist.Distribution instance.
+ :param filename: The base filename of the changelog document.
+ :return: Filesystem path of the changelog document, or ``None``
+ if not discoverable.
+
+ """
+ setup_dirname = os.path.dirname(distribution.script_name)
+ filepath = os.path.join(setup_dirname, filename)
+
+ return filepath
+
+
+def has_changelog(command):
+ """ Return ``True`` iff the distribution's changelog file exists. """
+ result = False
+
+ changelog_path = get_changelog_path(command.distribution)
+ if changelog_path is not None:
+ if os.path.exists(changelog_path):
+ result = True
+
+ return result
+
+
+class EggInfoCommand(setuptools.command.egg_info.egg_info, object):
+ """ Custom ‘egg_info’ command for this distribution. """
+
+ sub_commands = ([
+ ('write_version_info', has_changelog),
+ ] + setuptools.command.egg_info.egg_info.sub_commands)
+
+ def run(self):
+ """ Execute this command. """
+ super(EggInfoCommand, self).run()
+
+ for command_name in self.get_sub_commands():
+ self.run_command(command_name)
+
+
+version_info_filename = "version_info.json"
+
+class WriteVersionInfoCommand(EggInfoCommand, object):
+ """ Setuptools command to serialise version info metadata. """
+
+ user_options = ([
+ ("changelog-path=", None,
+ "Filesystem path to the changelog document."),
+ ("outfile-path=", None,
+ "Filesystem path to the version info file."),
+ ] + EggInfoCommand.user_options)
+
+ def initialize_options(self):
+ """ Initialise command options to defaults. """
+ super(WriteVersionInfoCommand, self).initialize_options()
+ self.changelog_path = None
+ self.outfile_path = None
+
+ def finalize_options(self):
+ """ Finalise command options before execution. """
+ self.set_undefined_options(
+ 'build',
+ ('force', 'force'))
+
+ super(WriteVersionInfoCommand, self).finalize_options()
+
+ if self.changelog_path is None:
+ self.changelog_path = get_changelog_path(self.distribution)
+
+ if self.outfile_path is None:
+ egg_dir = self.egg_info
+ self.outfile_path = os.path.join(egg_dir, version_info_filename)
+
+ def run(self):
+ """ Execute this command. """
+ version_info = generate_version_info_from_changelog(self.changelog_path)
+ content = serialise_version_info_from_mapping(version_info)
+ self.write_file("version info", self.outfile_path, content)
+
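+# Usage sketch (illustration only, hypothetical ‘setup.py’ wiring rather than
+# part of this module): hooking the custom commands into Setuptools so the
+# ‘version_info.json’ metadata file is written during ‘egg_info’:
+#
+#     import version
+#     setup(
+#             ...,
+#             cmdclass={
+#                 'egg_info': version.EggInfoCommand,
+#                 'write_version_info': version.WriteVersionInfoCommand,
+#                 },
+#             )
+#
+#     # then, for example:  python setup.py write_version_info --changelog-path=ChangeLog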
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/rednose.py b/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/rednose.py
new file mode 100755
index 00000000..1ff892ad
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/rednose.py
@@ -0,0 +1,387 @@
+# Copyright (c) 2009, Tim Cuthbertson # All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the organisation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
+# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import print_function
+import os
+import sys
+import linecache
+import re
+import time
+
+import nose
+
+import termstyle
+
+failure = 'FAILED'
+error = 'ERROR'
+success = 'passed'
+skip = 'skipped'
+line_length = 77
+
+PY3 = sys.version_info[0] >= 3
+if PY3:
+ to_unicode = str
+else:
+ def to_unicode(s):
+ try:
+ return unicode(s)
+ except UnicodeDecodeError:
+ return unicode(repr(str(s)))
+
+BLACKLISTED_WRITERS = [
+ 'nose[\\/]result\\.pyc?$',
+ 'unittest[\\/]runner\\.pyc?$'
+]
+REDNOSE_DEBUG = False
+
+
+class RedNose(nose.plugins.Plugin):
+ env_opt = 'NOSE_REDNOSE'
+ env_opt_color = 'NOSE_REDNOSE_COLOR'
+ score = 199 # just under the `coverage` module
+
+ def __init__(self, *args):
+ super(RedNose, self).__init__(*args)
+ self.reports = []
+ self.error = self.success = self.failure = self.skip = 0
+ self.total = 0
+ self.stream = None
+ self.verbose = False
+ self.enabled = False
+ self.tree = False
+
+ def options(self, parser, env=os.environ):
+ global REDNOSE_DEBUG
+ rednose_on = bool(env.get(self.env_opt, False))
+ rednose_color = env.get(self.env_opt_color, 'auto')
+ REDNOSE_DEBUG = bool(env.get('REDNOSE_DEBUG', False))
+
+ parser.add_option(
+ "--rednose",
+ action="store_true",
+ default=rednose_on,
+ dest="rednose",
+ help="enable colour output (alternatively, set $%s=1)" % (self.env_opt,)
+ )
+ parser.add_option(
+ "--no-color",
+ action="store_false",
+ dest="rednose",
+ help="disable colour output"
+ )
+ parser.add_option(
+ "--force-color",
+ action="store_const",
+ dest='rednose_color',
+ default=rednose_color,
+ const='force',
+ help="force colour output when not using a TTY (alternatively, set $%s=force)" % (self.env_opt_color,)
+ )
+ parser.add_option(
+ "--immediate",
+ action="store_true",
+ default=False,
+ help="print errors and failures as they happen, as well as at the end"
+ )
+
+ def configure(self, options, conf):
+ if options.rednose:
+ self.enabled = True
+ termstyle_init = {
+ 'force': termstyle.enable,
+ 'off': termstyle.disable
+ }.get(options.rednose_color, termstyle.auto)
+ termstyle_init()
+
+ self.immediate = options.immediate
+ self.verbose = options.verbosity >= 2
+
+ def begin(self):
+ self.start_time = time.time()
+ self._in_test = False
+
+ def _format_test_name(self, test):
+ return test.shortDescription() or to_unicode(test)
+
+ def prepareTestResult(self, result):
+ result.stream = FilteringStream(self.stream, BLACKLISTED_WRITERS)
+
+ def beforeTest(self, test):
+ self._in_test = True
+ if self.verbose:
+ self._out(self._format_test_name(test) + ' ... ')
+
+ def afterTest(self, test):
+ if self._in_test:
+ self.addSkip()
+
+ def _print_test(self, type_, color):
+ self.total += 1
+ if self.verbose:
+ self._outln(color(type_))
+ else:
+ if type_ == failure:
+ short_ = 'F'
+ elif type_ == error:
+ short_ = 'X'
+ elif type_ == skip:
+ short_ = '-'
+ else:
+ short_ = '.'
+ self._out(color(short_))
+ if self.total % line_length == 0:
+ self._outln()
+ self._in_test = False
+
+ def _add_report(self, report):
+ failure_type, test, err = report
+ self.reports.append(report)
+ if self.immediate:
+ self._outln()
+ self._report_test(len(self.reports), *report)
+
+ def addFailure(self, test, err):
+ self.failure += 1
+ self._add_report((failure, test, err))
+ self._print_test(failure, termstyle.red)
+
+ def addError(self, test, err):
+ if err[0].__name__ == 'SkipTest':
+ self.addSkip(test, err)
+ return
+ self.error += 1
+ self._add_report((error, test, err))
+ self._print_test(error, termstyle.yellow)
+
+ def addSuccess(self, test):
+ self.success += 1
+ self._print_test(success, termstyle.green)
+
+ def addSkip(self, test=None, err=None):
+ self.skip += 1
+ self._print_test(skip, termstyle.blue)
+
+ def setOutputStream(self, stream):
+ self.stream = stream
+
+ def report(self, stream):
+ """report on all registered failures and errors"""
+ self._outln()
+ if self.immediate:
+ for x in range(0, 5):
+ self._outln()
+ report_num = 0
+ if len(self.reports) > 0:
+ for report_num, report in enumerate(self.reports):
+ self._report_test(report_num + 1, *report)
+ self._outln()
+
+ self._summarize()
+
+ def _summarize(self):
+ """summarize all tests - the number of failures, errors and successes"""
+ self._line(termstyle.black)
+ self._out("%s test%s run in %0.1f seconds" % (
+ self.total,
+ self._plural(self.total),
+ time.time() - self.start_time))
+ if self.total > self.success:
+ self._outln(". ")
+ additionals = []
+ if self.failure > 0:
+ additionals.append(termstyle.red("%s FAILED" % (
+ self.failure,)))
+ if self.error > 0:
+ additionals.append(termstyle.yellow("%s error%s" % (
+ self.error,
+ self._plural(self.error) )))
+ if self.skip > 0:
+ additionals.append(termstyle.blue("%s skipped" % (
+ self.skip)))
+ self._out(', '.join(additionals))
+
+ self._out(termstyle.green(" (%s test%s passed)" % (
+ self.success,
+ self._plural(self.success) )))
+ self._outln()
+
+ def _report_test(self, report_num, type_, test, err):
+ """report the results of a single (failing or errored) test"""
+ self._line(termstyle.black)
+ self._out("%s) " % (report_num))
+ if type_ == failure:
+ color = termstyle.red
+ self._outln(color('FAIL: %s' % (self._format_test_name(test),)))
+ else:
+ color = termstyle.yellow
+ self._outln(color('ERROR: %s' % (self._format_test_name(test),)))
+
+ exc_type, exc_instance, exc_trace = err
+
+ self._outln()
+ self._outln(self._fmt_traceback(exc_trace))
+ self._out(color(' ', termstyle.bold(color(exc_type.__name__)), ": "))
+ self._outln(self._fmt_message(exc_instance, color))
+ self._outln()
+
+ def _relative_path(self, path):
+ """
+ If path is a child of the current working directory, the relative
+ path is returned surrounded by bold xterm escape sequences.
+ If path is not a child of the working directory, path is returned
+ """
+ try:
+ here = os.path.abspath(os.path.realpath(os.getcwd()))
+ fullpath = os.path.abspath(os.path.realpath(path))
+ except OSError:
+ return path
+ if fullpath.startswith(here):
+ return termstyle.bold(fullpath[len(here)+1:])
+ return path
+
+ def _file_line(self, tb):
+ """formats the file / lineno / function line of a traceback element"""
+ prefix = "file://"
+ prefix = ""
+
+ f = tb.tb_frame
+ if '__unittest' in f.f_globals:
+ # this is the magical flag that prevents unittest internal
+ # code from junking up the stacktrace
+ return None
+
+ filename = f.f_code.co_filename
+ lineno = tb.tb_lineno
+ linecache.checkcache(filename)
+ function_name = f.f_code.co_name
+
+ line_contents = linecache.getline(filename, lineno, f.f_globals).strip()
+
+ return " %s line %s in %s\n %s" % (
+ termstyle.blue(prefix, self._relative_path(filename)),
+ lineno,
+ termstyle.cyan(function_name),
+ line_contents)
+
+ def _fmt_traceback(self, trace):
+ """format a traceback"""
+ ret = []
+ ret.append(termstyle.default(" Traceback (most recent call last):"))
+ current_trace = trace
+ while current_trace is not None:
+ line = self._file_line(current_trace)
+ if line is not None:
+ ret.append(line)
+ current_trace = current_trace.tb_next
+ return '\n'.join(ret)
+
+ def _fmt_message(self, exception, color):
+ orig_message_lines = to_unicode(exception).splitlines()
+
+ if len(orig_message_lines) == 0:
+ return ''
+ message_lines = [color(orig_message_lines[0])]
+ for line in orig_message_lines[1:]:
+ match = re.match('^---.* begin captured stdout.*----$', line)
+ if match:
+ color = None
+ message_lines.append('')
+ line = ' ' + line
+ message_lines.append(color(line) if color is not None else line)
+ return '\n'.join(message_lines)
+
+ def _out(self, msg='', newline=False):
+ self.stream.write(msg)
+ if newline:
+ self.stream.write('\n')
+
+ def _outln(self, msg=''):
+ self._out(msg, True)
+
+ def _plural(self, num):
+ return '' if num == 1 else 's'
+
+ def _line(self, color=termstyle.reset, char='-'):
+ """
+ print a line of separator characters (default '-')
+ in the given colour (default black)
+ """
+ self._outln(color(char * line_length))
+
+
+import traceback
+import sys
+
+
+class FilteringStream(object):
+ """
+ A wrapper for a stream that will filter
+ calls to `write` and `writeln` to ignore calls
+ from blacklisted callers
+ (implemented as a regex on their filename, according
+ to traceback.extract_stack())
+
+ It's super hacky, but there seems to be no other way
+ to suppress nose's default output
+ """
+ def __init__(self, stream, excludes):
+ self.__stream = stream
+ self.__excludes = list(map(re.compile, excludes))
+
+ def __should_filter(self):
+ try:
+ stack = traceback.extract_stack(limit=3)[0]
+ filename = stack[0]
+ pattern_matches_filename = lambda pattern: pattern.search(filename)
+ should_filter = any(map(pattern_matches_filename, self.__excludes))
+ if REDNOSE_DEBUG:
+				print("REDNOSE_DEBUG: got write call from %s, should_filter = %s" % (
+					filename, should_filter), file=sys.stderr)
+			return should_filter
+		except Exception as e:
+			if REDNOSE_DEBUG:
+				print("\nError in rednose filtering: %s" % (e,), file=sys.stderr)
+				traceback.print_exc(file=sys.stderr)
+ return False
+
+ def write(self, *a):
+ if self.__should_filter():
+ return
+ return self.__stream.write(*a)
+
+ def writeln(self, *a):
+ if self.__should_filter():
+ return
+ return self.__stream.writeln(*a)
+
+ # pass non-known methods through to self.__stream
+ def __getattr__(self, name):
+ if REDNOSE_DEBUG:
+ print("REDNOSE_DEBUG: getting attr %s" % (name,), file=sys.stderr)
+ return getattr(self.__stream, name)
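+
+
+# Usage sketch (illustration only, not part of the upstream module): besides
+# passing '--rednose' on the nosetests command line, the plugin can be
+# registered programmatically:
+#
+#     import nose
+#     nose.main(addplugins=[RedNose()], argv=['nosetests', '--rednose'])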
diff --git a/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/setup.py b/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/setup.py
new file mode 100755
index 00000000..34cded4b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/setup.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+## NOTE: ##
+## this setup.py was generated by zero2pypi:
+## http://gfxmonk.net/dist/0install/zero2pypi.xml
+
+from setuptools import *
+setup(
+ packages = find_packages(exclude=['test', 'test.*']),
+ description='coloured output for nosetests',
+ entry_points={'nose.plugins.0.10': ['NOSETESTS_PLUGINS = rednose:RedNose']},
+ install_requires=['setuptools', 'python-termstyle >=0.1.7'],
+ long_description="\n**Note**: This package has been built automatically by\n`zero2pypi <http://gfxmonk.net/dist/0install/zero2pypi.xml>`_.\nIf possible, you should use the zero-install feed instead:\nhttp://gfxmonk.net/dist/0install/rednose.xml\n\n----------------\n\n=========\nrednose\n=========\n\nrednose is a `nosetests`_\nplugin for adding colour (and readability) to nosetest console results.\n\nInstallation:\n-------------\n::\n\n\teasy_install rednose\n\t\nor from the source::\n\n\t./setup.py develop\n\nUsage:\n------\n::\n\n\tnosetests --rednose\n\nor::\n\n\texport NOSE_REDNOSE=1\n\tnosetests\n\nRednose by default uses auto-colouring, which will only use\ncolour if you're running it on a terminal (i.e not piping it\nto a file). To control colouring, use one of::\n\n\tnosetests --rednose --force-color\n\tnosetests --no-color\n\n(you can also control this by setting the environment variable NOSE_REDNOSE_COLOR to 'force' or 'no')\n\n.. _nosetests: http://somethingaboutorange.com/mrl/projects/nose/\n",
+ name='rednose',
+ py_modules=['rednose'],
+ url='http://gfxmonk.net/dist/0install/rednose.xml',
+ version='0.4.1',
+classifiers=[
+ "License :: OSI Approved :: BSD License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "Topic :: Software Development :: Testing",
+ ],
+ keywords='test nosetests nose nosetest output colour console',
+ license='BSD',
+)
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/MANIFEST.in b/scripts/automation/trex_control_plane/python_lib/termstyle/MANIFEST.in
new file mode 100755
index 00000000..14dafaf3
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/MANIFEST.in
@@ -0,0 +1 @@
+include test*
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/Makefile b/scripts/automation/trex_control_plane/python_lib/termstyle/Makefile
new file mode 100755
index 00000000..02151dca
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/Makefile
@@ -0,0 +1,9 @@
+0:
+ mkzero-gfxmonk \
+ -v `cat VERSION` \
+ -p termstyle.py \
+ -p setup.py \
+ python-termstyle.xml
+
+pypi:
+ ./setup.py register sdist upload
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/README.rst b/scripts/automation/trex_control_plane/python_lib/termstyle/README.rst
new file mode 100755
index 00000000..f3dfa0ab
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/README.rst
@@ -0,0 +1,82 @@
+=========
+termstyle
+=========
+
+termstyle is a simple python library for adding coloured output to
+terminal (console) programs. The definitions come from ECMA-048_, the
+"Control Functions for Coded Character Sets" standard.
+
+Installation:
+-------------
+
+I thoroughly recommend using the zero-install feed (see the project homepage) to manage your dependencies if at all possible. zero-install_ provides a much better system than pip or easy_install, and works with absolutely any language and allows decentralised package management that requires no special privileges to install.
+
+Example Usage:
+--------------
+::
+
+ from termstyle import *
+ print "%s:%s" % (red('Hey'), green('how are you?'))
+ print blue('How ', bold('you'), ' doin?')
+
+or, you can use a colour just as a string::
+
+ print "%sBlue!%s" % (blue, reset)
+
+Styles:
+-------
+::
+
+ reset or default (no colour / style)
+
+colour::
+
+ black
+ red
+ green
+ yellow
+ blue
+ magenta
+ cyan
+ white
+
+background colour::
+
+ bg_black
+ bg_red
+ bg_green
+ bg_yellow
+ bg_blue
+ bg_magenta
+ bg_cyan
+ bg_white
+ bg_default
+
+In terminals supporting transparency ``bg_default`` is often used to set
+the background to transparent [#]_.
+
+weight::
+
+ bold
+ inverted
+
+style::
+
+ italic
+ underscore
+
+Controls:
+---------
+::
+
+ auto() - sets colouring on only if sys.stdout is a terminal
+	disable() - disable colours
+ enable() - enable colours
+
+.. [#] Supporting terminals include rxvt-unicode_, and Eterm_.
+
+.. _ECMA-048: http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-048.pdf
+.. _rxvt-unicode: http://software.schmorp.de/
+.. _Eterm: http://www.eterm.org/
+.. _zero-install: http://0install.net/
+
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/VERSION b/scripts/automation/trex_control_plane/python_lib/termstyle/VERSION
new file mode 100755
index 00000000..345f8cc0
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/VERSION
@@ -0,0 +1 @@
+0.1.10
\ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/python-termstyle.xml b/scripts/automation/trex_control_plane/python_lib/termstyle/python-termstyle.xml
new file mode 100755
index 00000000..b6b08bd7
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/python-termstyle.xml
@@ -0,0 +1,183 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type='text/xsl' href='interface.xsl'?>
+<interface xmlns="http://zero-install.sourceforge.net/2004/injector/interface" uri="http://gfxmonk.net/dist/0install/python-termstyle.xml">
+ <name>termstyle</name>
+ <summary>console colouring for python</summary>
+ <homepage>http://github.com/gfxmonk/termstyle</homepage>
+ <description>
+=========
+termstyle
+=========
+
+termstyle is a simple python library for adding coloured output to
+terminal (console) programs. The definitions come from ECMA-048_, the
+"Control Functions for Coded Character Sets" standard.
+
+Installation:
+-------------
+
+I thoroughly recommend using the zero-install feed (see the project homepage) to manage your dependencies if at all possible. zero-install_ provides a much better system than pip or easy_install, and works with absolutely any language and allows decentralised package management that requires no special privileges to install.
+
+Example Usage:
+--------------
+::
+
+ from termstyle import *
+ print "%s:%s" % (red('Hey'), green('how are you?'))
+ print blue('How ', bold('you'), ' doin?')
+
+or, you can use a colour just as a string::
+
+ print "%sBlue!%s" % (blue, reset)
+
+Styles:
+-------
+::
+
+ reset or default (no colour / style)
+
+colour::
+
+ black
+ red
+ green
+ yellow
+ blue
+ magenta
+ cyan
+ white
+
+background colour::
+
+ bg_black
+ bg_red
+ bg_green
+ bg_yellow
+ bg_blue
+ bg_magenta
+ bg_cyan
+ bg_white
+ bg_default
+
+In terminals supporting transparency ``bg_default`` is often used to set
+the background to transparent [#]_.
+
+weight::
+
+ bold
+ inverted
+
+style::
+
+ italic
+ underscore
+
+Controls:
+---------
+::
+
+ auto() - sets colouring on only if sys.stdout is a terminal
+	disable() - disable colours
+ enable() - enable colours
+
+.. [#] Supporting terminals include rxvt-unicode_, and Eterm_.
+
+.. _ECMA-048: http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-048.pdf
+.. _rxvt-unicode: http://software.schmorp.de/
+.. _Eterm: http://www.eterm.org/
+.. _zero-install: http://0install.net/
+
+ </description>
+ <pypi-extra xmlns="http://gfxmonk.net/dist/0install"><![CDATA[
+ classifiers=[
+ "License :: OSI Approved :: BSD License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Intended Audience :: Developers",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ ],
+ keywords='output colour console ansi',
+ license='BSD',
+ ]]></pypi-extra>
+ <rich-description xmlns="http://gfxmonk.net/dist/0install">
+ <div xmlns="http://www.w3.org/1999/xhtml">
+ <h1 id="termstyle">termstyle</h1>
+ <p>termstyle is a simple python library for adding coloured output to terminal (console) programs. The definitions come from <a href="http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-048.pdf">ECMA-048</a>, the "Control Functions for Coded Character Sets" standard.</p>
+ <h2 id="installation">Installation:</h2>
+ <p>I thoroughly recommend using the zero-install feed (see the project homepage) to manage your dependencies if at all possible. <a href="http://0install.net/">zero-install</a> provides a much better system than pip or easy_install, and works with absolutely any language and allows decentralised package management that requires no special privileges to install.</p>
+ <h2 id="example-usage">Example Usage:</h2>
+ <pre><code>from termstyle import *
+print "%s:%s" % (red('Hey'), green('how are you?'))
+print blue('How ', bold('you'), ' doin?')
+</code></pre>
+ <p>or, you can use a colour just as a string:</p>
+ <pre><code>print "%sBlue!%s" % (blue, reset)
+</code></pre>
+ <h2 id="styles">Styles:</h2>
+ <pre><code>reset or default (no colour / style)
+</code></pre>
+ <p>colour:</p>
+ <pre><code>black
+red
+green
+yellow
+blue
+magenta
+cyan
+white
+</code></pre>
+ <p>background colour:</p>
+ <pre><code>bg_black
+bg_red
+bg_green
+bg_yellow
+bg_blue
+bg_magenta
+bg_cyan
+bg_white
+bg_default
+</code></pre>
+ <p>In terminals supporting transparency <code>bg_default</code> is often used to set the background to transparent [#]_.</p>
+ <p>weight:</p>
+ <pre><code>bold
+inverted
+</code></pre>
+ <p>style:</p>
+ <pre><code>italic
+underscore
+</code></pre>
+ <h2 id="controls">Controls:</h2>
+ <pre><code>auto() - sets colouring on only if sys.stdout is a terminal
+disable() - disable colours
+enable() - enable colours
+</code></pre>
+ </div>
+ </rich-description>
+ <group>
+ <environment insert="" mode="prepend" name="PYTHONPATH"/>
+ <implementation id="sha1new=1f5b66dcd48fa38740aa98bb2796f0adb7eeab04" released="2010-07-18" version="0.1.5">
+ <manifest-digest sha256="9b0ff9b07c1d424484b5aa20fc2db7827a372d84515e8caf46b005f54a034e07"/>
+ <archive href="http://gfxmonk.net/dist/0install/python-termstyle/python-termstyle-0.1.2.tgz" size="3114"/>
+ </implementation>
+ <implementation id="sha1new=9f728d8ba0eb5379c907b85a9662c54935f1e0ca" released="2010-11-05" version="0.1.6">
+ <manifest-digest sha256="a7283c6fe262bc88ed270a91304da243c474d32353c2c9d6c6f4003bacfce27c"/>
+ <archive href="http://gfxmonk.net/dist/0install/python-termstyle/python-termstyle-0.1.6.tgz" size="3607"/>
+ </implementation>
+ <implementation id="sha1new=93d781ad4b723d154462f990254f78d3bab7456e" released="2011-01-29" version="0.1.7">
+ <manifest-digest sha256="31488cd005a3f9182e6738068a5e9ddfd6d28db370d8b0a0b16d5bbc53048665"/>
+ <archive href="http://gfxmonk.net/dist/0install/python-termstyle/python-termstyle-0.1.7.tgz" size="4265"/>
+ </implementation>
+ <implementation id="sha1new=ada58698e40a6f11b40dbebaa11f70061b47172d" released="2011-03-15" version="0.1.8">
+ <manifest-digest sha256="cbcee0509b194eec74a5fe923999b505293c5d0a20b3d79203fa6f47a942b24e"/>
+ <archive href="http://gfxmonk.net/dist/0install/python-termstyle/python-termstyle-0.1.8.tgz" size="4312"/>
+ </implementation>
+ <implementation id="sha1new=e718d149b78028f3ac39ee4de42e0b7644ad48fb" released="2011-03-16" version="0.1.9">
+ <manifest-digest sha256="ee3dc9b097ac0b7682fab8753a5dab45f0a9c189544c13a369ada8b9e1f4fde8"/>
+ <archive href="http://gfxmonk.net/dist/0install/python-termstyle/python-termstyle-0.1.9.tgz" size="2901"/>
+ </implementation>
+ <implementation id="sha1new=0c4adef2eccc7ddcb48264c4bc2d84fcfa1072a4" released="2011-04-30" version="0.1.10">
+ <manifest-digest sha256="df3b9e11077995f515239eb8382acdfa1741c35e867916917cf70ce53e49bd59"/>
+ <archive href="http://gfxmonk.net/dist/0install/python-termstyle/python-termstyle-0.1.10.tgz" size="2903"/>
+ </implementation>
+ </group>
+</interface>
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/setup.py b/scripts/automation/trex_control_plane/python_lib/termstyle/setup.py
new file mode 100755
index 00000000..69b11cbb
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/setup.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+
+## NOTE: ##
+## this setup.py was generated by zero2pypi:
+## http://gfxmonk.net/dist/0install/zero2pypi.xml
+
+from setuptools import *
+setup(
+ packages = find_packages(exclude=['test', 'test.*']),
+ install_requires=['setuptools'],
+ version='0.1.10',
+ url='http://gfxmonk.net/dist/0install/python-termstyle.xml',
+ description='console colouring for python',
+ long_description='\n**Note**: This package has been built automatically by\n`zero2pypi <http://gfxmonk.net/dist/0install/zero2pypi.xml>`_.\nIf possible, you should use the zero-install feed instead:\nhttp://gfxmonk.net/dist/0install/python-termstyle.xml\n\n----------------\n\n=========\ntermstyle\n=========\n\ntermstyle is a simple python library for adding coloured output to\nterminal (console) programs. The definitions come from ECMA-048_, the\n"Control Functions for Coded Character Sets" standard.\n\nInstallation:\n-------------\n\nI thoroughly recommend using the zero-install feed (see the project homepage) to manage your dependencies if at all possible. zero-install_ provides a much better system than pip or easy_install, and works with absolutely any language and allows decentralised package management that requires no special privileges to install.\n\nExample Usage:\n--------------\n::\n\n\tfrom termstyle import *\n\tprint "%s:%s" % (red(\'Hey\'), green(\'how are you?\'))\n\tprint blue(\'How \', bold(\'you\'), \' doin?\')\n\nor, you can use a colour just as a string::\n\n\tprint "%sBlue!%s" % (blue, reset)\n\nStyles:\n-------\n::\n\n\treset or default (no colour / style)\n\ncolour::\n\n\tblack\n\tred\n\tgreen\n\tyellow\n\tblue\n\tmagenta\n\tcyan\n\twhite\n\nbackground colour::\n\n\tbg_black\n\tbg_red\n\tbg_green\n\tbg_yellow\n\tbg_blue\n\tbg_magenta\n\tbg_cyan\n\tbg_white\n\tbg_default\n\nIn terminals supporting transparency ``bg_default`` is often used to set\nthe background to transparent [#]_.\n\nweight::\n\n\tbold\n\tinverted\n\nstyle::\n\n\titalic\n\tunderscore\n\nControls:\n---------\n::\n\n\tauto() - sets colouring on only if sys.stdout is a terminal\n\tdisabe() - disable colours\n\tenable() - enable colours\n\n.. [#] Supporting terminals include rxvt-unicode_, and Eterm_.\n\n.. _ECMA-048: http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-048.pdf\n.. _rxvt-unicode: http://software.schmorp.de/\n.. _Eterm: http://www.eterm.org/\n.. _zero-install: http://0install.net/\n',
+ name='python-termstyle',
+ download_url='http://gfxmonk.net/dist/0install/python-termstyle/python-termstyle-0.1.10.tgz',
+ py_modules=['termstyle'],
+classifiers=[
+ "License :: OSI Approved :: BSD License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Intended Audience :: Developers",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ ],
+ keywords='output colour console ansi',
+ license='BSD',
+)
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/termstyle.py b/scripts/automation/trex_control_plane/python_lib/termstyle/termstyle.py
new file mode 100755
index 00000000..62a3a920
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/termstyle.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Tim Cuthbertson
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the organisation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
+# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+
+class Style(object):
+ prefix='\x1b['
+ suffix='m'
+ enabled = True
+
+ def __init__(self, on_codes, off_codes = 0):
+ self._on = self.sequence(on_codes)
+ self._off = self.sequence(off_codes)
+
+ def _get_on(self): return self._on if self.enabled else ''
+ def _get_off(self): return self._off if self.enabled else ''
+ on = property(_get_on)
+ off = property(_get_off)
+
+ @classmethod
+ def sequence(cls, codes):
+ wrap_single = lambda code: "%s%s%s" % (cls.prefix, code, cls.suffix)
+ try:
+ return ''.join([wrap_single(code) for code in codes])
+ except TypeError:
+ return wrap_single(codes)
+
+ def __str__(self):
+ if not self.enabled:
+ return ''
+ return self.on
+
+ def __call__(self, *args):
+ contents = ''.join(["%s%s" % (self.on, arg) for arg in args])
+ return "%s%s" % (contents, self.off)
+
+
+def auto():
+ """set colouring on if STDOUT is a terminal device, off otherwise"""
+ try:
+ Style.enabled = False
+ Style.enabled = sys.stdout.isatty()
+ except (AttributeError, TypeError):
+ pass
+
+def enable():
+ """force coloured output"""
+ Style.enabled = True
+
+def disable():
+ """disable coloured output"""
+ Style.enabled = False
+
+default = reset = Style(0)
+
+black = Style(30)
+red = Style(31)
+green = Style(32)
+yellow = Style(33)
+blue = Style(34)
+magenta = Style(35)
+cyan = Style(36)
+white = Style(37)
+
+bg_black = Style(40)
+bg_red = Style(41)
+bg_green = Style(42)
+bg_yellow = Style(43)
+bg_blue = Style(44)
+bg_magenta = Style(45)
+bg_cyan = Style(46)
+bg_white = Style(47)
+bg_default = Style(49)
+
+bold = Style(1)
+underscore = Style(4)
+inverted = Style(7)
+italic = Style(3)
+
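+# Minimal usage sketch (illustration only, not part of the upstream module):
+if __name__ == '__main__':
+    auto()  # enable colour only when stdout is a terminal
+    print(red('error') + ' ' + green('ok') + ' ' + bold(blue('emphasised')))
+    print("%sraw blue%s" % (blue, reset))  # a style also works as a plain string
+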
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/test2.py b/scripts/automation/trex_control_plane/python_lib/termstyle/test2.py
new file mode 100755
index 00000000..2d84c375
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/test2.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python2
+
+from termstyle import *
+
+print green(u"unicod\xe9!")
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/test3.py b/scripts/automation/trex_control_plane/python_lib/termstyle/test3.py
new file mode 100755
index 00000000..861c44f9
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/test3.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python3
+
+from termstyle import *
+
+print(green("unicod\xe9!"))
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/test_all.sh b/scripts/automation/trex_control_plane/python_lib/termstyle/test_all.sh
new file mode 100755
index 00000000..d28545a9
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/test_all.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+set -ex
+./test2.py
+./test3.py
diff --git a/scripts/automation/trex_control_plane/python_lib/zmq_fedora.tar.gz b/scripts/automation/trex_control_plane/python_lib/zmq_fedora.tar.gz
new file mode 100755
index 00000000..4f36749b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/zmq_fedora.tar.gz
Binary files differ
diff --git a/scripts/automation/trex_control_plane/server/CCustomLogger.py b/scripts/automation/trex_control_plane/server/CCustomLogger.py
new file mode 100755
index 00000000..ecf7d519
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/CCustomLogger.py
@@ -0,0 +1,100 @@
+
+import sys
+import os
+import logging
+
+
+def setup_custom_logger(name, log_path = None):
+    # first make sure the path is available
+# if log_path is None:
+# log_path = os.getcwd()+'/trex_log.log'
+# else:
+# directory = os.path.dirname(log_path)
+# if not os.path.exists(directory):
+# os.makedirs(directory)
+ logging.basicConfig(level = logging.INFO,
+ format = '%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s',
+ datefmt = '%m-%d %H:%M')
+# filename= log_path,
+# filemode= 'w')
+#
+# # define a Handler which writes INFO messages or higher to the sys.stderr
+# consoleLogger = logging.StreamHandler()
+# consoleLogger.setLevel(logging.ERROR)
+# # set a format which is simpler for console use
+# formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
+# # tell the handler to use this format
+# consoleLogger.setFormatter(formatter)
+#
+# # add the handler to the logger
+# logging.getLogger(name).addHandler(consoleLogger)
+
+def setup_daemon_logger (name, log_path = None):
+    # first make sure the path is available
+ logging.basicConfig(level = logging.INFO,
+ format = '%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s',
+ datefmt = '%m-%d %H:%M',
+ filename= log_path,
+ filemode= 'w')
+
+class CustomLogger(object):
+
+ def __init__(self, log_filename):
+ # Store the original stdout and stderr
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ self.stdout_fd = os.dup(sys.stdout.fileno())
+ self.devnull = os.open('/dev/null', os.O_WRONLY)
+ self.log_file = open(log_filename, 'w')
+ self.silenced = False
+ self.pending_log_file_prints = 0
+
+ # silence all prints from stdout
+ def silence(self):
+ os.dup2(self.devnull, sys.stdout.fileno())
+ self.silenced = True
+
+ # restore stdout status
+ def restore(self):
+ sys.stdout.flush()
+ sys.stderr.flush()
+ # Restore normal stdout
+ os.dup2(self.stdout_fd, sys.stdout.fileno())
+ self.silenced = False
+
+ #print a message to the log (both stdout / log file)
+ def log(self, text, force = False, newline = True):
+ self.log_file.write((text + "\n") if newline else text)
+ self.pending_log_file_prints += 1
+
+ if (self.pending_log_file_prints >= 10):
+ self.log_file.flush()
+ self.pending_log_file_prints = 0
+
+ self.console(text, force, newline)
+
+ # print a message to the console alone
+ def console(self, text, force = False, newline = True):
+ _text = (text + "\n") if newline else text
+ # if we are silenced and not forced - go home
+ if self.silenced and not force:
+ return
+
+ if self.silenced:
+ os.write(self.stdout_fd, _text)
+ else:
+ sys.stdout.write(_text)
+
+ sys.stdout.flush()
+
+ # flush
+ def flush(self):
+ sys.stdout.flush()
+ self.log_file.flush()
+
+ def __exit__(self, type, value, traceback):
+ sys.stdout.flush()
+ self.log_file.flush()
+ os.close(self.devnull)
+        self.log_file.close()
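+
+
+# Minimal usage sketch (illustration only, not part of the original file):
+if __name__ == "__main__":
+    demo = CustomLogger("/tmp/custom_logger_demo.log")  # hypothetical path
+    demo.log("goes to the console and to the log file")
+    demo.silence()
+    demo.log("written to the log file only")
+    demo.log("forced to the console while silenced", force = True)
+    demo.restore()
+    demo.flush()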
diff --git a/scripts/automation/trex_control_plane/server/extended_daemon_runner.py b/scripts/automation/trex_control_plane/server/extended_daemon_runner.py
new file mode 100755
index 00000000..07eedd9f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/extended_daemon_runner.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+
+import outer_packages
+import lockfile
+from daemon import runner,daemon
+from daemon.runner import *
+import os, sys
+from argparse import ArgumentParser
+from trex_server import trex_parser
+try:
+ from python_lib.termstyle import termstyle
+except ImportError:
+ import termstyle
+
+
+
+def daemonize_parser (parser_obj, action_funcs, help_menu):
+ """Update the regular process parser to deal with daemon process options"""
+ parser_obj.description += " (as a daemon process)"
+ parser_obj.usage = None
+ parser_obj.add_argument("action", choices = action_funcs,
+ action="store", help = help_menu )
+ return
+
+
+class ExtendedDaemonRunner(runner.DaemonRunner):
+ """ Controller for a callable running in a separate background process.
+
+ The first command-line argument is the action to take:
+
+ * 'start': Become a daemon and call `app.run()`.
+ * 'stop': Exit the daemon process specified in the PID file.
+ * 'restart': Stop, then start.
+
+ """
+
+ help_menu = """Specify action command to be applied on server.
+ (*) start : start the application in as a daemon process.
+ (*) show : prompt a updated status of daemon process (running/ not running).
+ (*) stop : exit the daemon process.
+ (*) restart : stop, then start again the application as daemon process
+ (*) start-live : start the application in live mode (no daemon process).
+ """
+
+ def __init__ (self, app, parser_obj):
+ """ Set up the parameters of a new runner.
+        THIS METHOD INTENTIONALLY DOES NOT INVOKE THE SUPER __init__() METHOD
+
+ :param app: The application instance; see below.
+ :return: ``None``.
+
+ The `app` argument must have the following attributes:
+
+ * `stdin_path`, `stdout_path`, `stderr_path`: Filesystem paths
+ to open and replace the existing `sys.stdin`, `sys.stdout`,
+ `sys.stderr`.
+
+ * `pidfile_path`: Absolute filesystem path to a file that will
+ be used as the PID file for the daemon. If ``None``, no PID
+ file will be used.
+
+ * `pidfile_timeout`: Used as the default acquisition timeout
+ value supplied to the runner's PID lock file.
+
+ * `run`: Callable that will be invoked when the daemon is
+ started.
+
+ """
+ super(runner.DaemonRunner, self).__init__()
+ # update action_funcs to support more operations
+ self.update_action_funcs()
+
+ daemonize_parser(parser_obj, self.action_funcs, ExtendedDaemonRunner.help_menu)
+ args = parser_obj.parse_args()
+ self.action = unicode(args.action)
+
+ self.app = app
+ self.daemon_context = daemon.DaemonContext()
+ self.daemon_context.stdin = open(app.stdin_path, 'rt')
+ self.daemon_context.stdout = open(app.stdout_path, 'w+t')
+ self.daemon_context.stderr = open(
+ app.stderr_path, 'a+t', buffering=0)
+
+ self.pidfile = None
+ if app.pidfile_path is not None:
+ self.pidfile = make_pidlockfile(app.pidfile_path, app.pidfile_timeout)
+ self.daemon_context.pidfile = self.pidfile
+
+ # mask out all arguments that aren't relevant to main app script
+
+
+ def update_action_funcs (self):
+ self.action_funcs.update({u'start-live': self._start_live, u'show': self._show}) # add key (=action), value (=desired func)
+
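+ # NOTE: the following "static" methods deliberately take `self`: the base
+ # runner looks up self.action_funcs[action] and invokes the entry as
+ # func(self), so a plain (non-bound) callable with an explicit `self`
+ # argument is exactly what the dispatch mechanism expects.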
+ @staticmethod
+ def _start_live (self):
+ self.app.run()
+
+ @staticmethod
+ def _show (self):
+ if self.pidfile.is_locked():
+ print termstyle.red("T-Rex server daemon is running")
+ else:
+ print termstyle.red("T-Rex server daemon is NOT running")
+
+ def do_action (self):
+ self.__prevent_duplicate_runs()
+ self.__prompt_init_msg()
+ try:
+ super(ExtendedDaemonRunner, self).do_action()
+ if self.action == 'stop':
+ self.__verify_termination()
+ except runner.DaemonRunnerStopFailureError:
+ if self.action == 'restart':
+ # error means server wasn't running in the first place- so start it!
+ self.action = 'start'
+ self.do_action()
+
+
+ def __prevent_duplicate_runs (self):
+ if self.action == 'start' and self.pidfile.is_locked():
+ print termstyle.green("Server daemon is already running")
+ exit(1)
+ elif self.action == 'stop' and not self.pidfile.is_locked():
+ print termstyle.green("Server daemon is not running")
+ exit(1)
+
+ def __prompt_init_msg (self):
+ if self.action == 'start':
+ print termstyle.green("Starting daemon server...")
+ elif self.action == 'stop':
+ print termstyle.green("Stopping daemon server...")
+
+ def __verify_termination (self):
+ pass
+# import time
+# while self.pidfile.is_locked():
+# time.sleep(2)
+# self._stop()
+#
+
+
+if __name__ == "__main__":
+ pass
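+# Minimal usage sketch (illustrative; `app` is any object exposing the
+# attributes documented in __init__ above, e.g. TRexServerApp):
+# runner_obj = ExtendedDaemonRunner(app, trex_parser)
+# runner_obj.do_action()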
diff --git a/scripts/automation/trex_control_plane/server/outer_packages.py b/scripts/automation/trex_control_plane/server/outer_packages.py
new file mode 100755
index 00000000..ab25ea68
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/outer_packages.py
@@ -0,0 +1,66 @@
+#!/router/bin/python
+
+import sys,site
+import platform,os
+import tarfile
+import errno
+import pwd
+
+CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory
+PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, 'python_lib'))
+
+SERVER_MODULES = ['enum34-1.0.4',
+ # 'jsonrpclib-0.1.3',
+ 'jsonrpclib-pelix-0.2.5',
+ 'zmq',
+ 'python-daemon-2.0.5',
+ 'lockfile-0.10.2',
+ 'termstyle'
+ ]
+
+def extract_zmq_package ():
+ """make sure zmq package is available"""
+
+ os.chdir(PATH_TO_PYTHON_LIB)
+ if not os.path.exists('zmq'):
+ if os.path.exists('zmq_fedora.tar.gz'): # make sure tar file is available for extraction
+ try:
+ tar = tarfile.open("zmq_fedora.tar.gz")
+ # finally, extract the tarfile locally
+ tar.extractall()
+ except OSError as err:
+ if err.errno == errno.EACCES:
+ # fall back: try extracting as the currently logged-in user
+ stat_info = os.stat(PATH_TO_PYTHON_LIB)
+ uid = stat_info.st_uid
+ logged_user = pwd.getpwuid(uid).pw_name
+ if logged_user != 'root':
+ try:
+ os.system("sudo -u {user} tar -zxvf zmq_fedora.tar.gz".format(user = logged_user))
+ except Exception:
+ raise OSError(13, 'Permission denied: please make sure the logged-in user has sudo access and write privileges to the `python_lib` directory.')
+ else:
+ raise OSError(13, 'Permission denied: please make sure the logged-in user has sudo access and write privileges to the `python_lib` directory.')
+ finally:
+ tar.close()
+ else:
+ raise IOError("File 'zmq_fedora.tar.gz' couldn't be located at python_lib directory.")
+ os.chdir(CURRENT_PATH)
+
+def import_server_modules ():
+ # must be in a higher priority
+ sys.path.insert(0, PATH_TO_PYTHON_LIB)
+ sys.path.append(ROOT_PATH)
+ extract_zmq_package()
+ import_module_list(SERVER_MODULES)
+
+def import_module_list (modules_list):
+ assert(isinstance(modules_list, list))
+ for p in modules_list:
+ full_path = os.path.normcase(os.path.join(PATH_TO_PYTHON_LIB, p))
+ site.addsitedir(full_path)
+
+
+import_server_modules()
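+# Importing this module is enough to gain access to the bundled packages;
+# server scripts simply do (as trex_server.py does):
+# import outer_packages
+# import zmq # now resolved from python_lib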
diff --git a/scripts/automation/trex_control_plane/server/trex_daemon_server b/scripts/automation/trex_control_plane/server/trex_daemon_server
new file mode 100755
index 00000000..3494e303
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/trex_daemon_server
@@ -0,0 +1,25 @@
+#!/usr/bin/python
+
+import os
+import sys
+
+core = 0
+
+if '--core' in sys.argv:
+ try:
+ idx = sys.argv.index('--core')
+ core = int(sys.argv[idx + 1])
+ if core > 31 or core < 0:
+ print "Error: please provide a core argument between 0 and 31"
+ exit(-1)
+ del sys.argv[idx:idx+2]
+ except IndexError:
+ print "Error: please make sure the --core option is provided with an argument"
+ exit(-1)
+ except ValueError:
+ print "Error: please make sure the --core option is provided with an integer argument"
+ exit(-1)
+
+str_argv = ' '.join(sys.argv[1:])
+cmd = "taskset -c {core} python automation/trex_control_plane/server/trex_daemon_server.py {argv}".format(core = core, argv = str_argv)
+os.system(cmd)
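+# Example (illustrative): pin the daemon to core 2 and start it:
+# $ ./trex_daemon_server --core 2 start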
diff --git a/scripts/automation/trex_control_plane/server/trex_daemon_server.py b/scripts/automation/trex_control_plane/server/trex_daemon_server.py
new file mode 100755
index 00000000..5032423a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/trex_daemon_server.py
@@ -0,0 +1,87 @@
+#!/usr/bin/python
+
+import outer_packages
+import daemon
+from trex_server import do_main_program, trex_parser
+import CCustomLogger
+
+import logging
+import time
+import sys
+import os, errno
+import grp
+import signal
+from daemon import runner
+from extended_daemon_runner import ExtendedDaemonRunner
+import lockfile
+import errno
+
+class TRexServerApp(object):
+ def __init__(self):
+ TRexServerApp.create_working_dirs()
+ self.stdin_path = '/dev/null'
+ self.stdout_path = '/dev/tty' # All standard prints will come up from this source.
+ self.stderr_path = "/var/log/trex/trex_daemon_server.log" # All log messages will come up from this source
+ self.pidfile_path = '/var/run/trex/trex_daemon_server.pid'
+ self.pidfile_timeout = 5 # timeout in seconds
+
+ def run(self):
+ do_main_program()
+
+
+ @staticmethod
+ def create_working_dirs():
+ if not os.path.exists('/var/log/trex'):
+ os.mkdir('/var/log/trex')
+ if not os.path.exists('/var/run/trex'):
+ os.mkdir('/var/run/trex')
+
+
+
+def main ():
+
+ trex_app = TRexServerApp()
+
+ # setup the logger
+ default_log_path = '/var/log/trex/trex_daemon_server.log'
+
+ try:
+ CCustomLogger.setup_daemon_logger('TRexServer', default_log_path)
+ logger = logging.getLogger('TRexServer')
+ logger.setLevel(logging.INFO)
+ formatter = logging.Formatter("%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s")
+ handler = logging.FileHandler("/var/log/trex/trex_daemon_server.log")
+ logger.addHandler(handler)
+ except EnvironmentError, e:
+ if e.errno == errno.EACCES: # catching permission denied error
+ print "Launching user must have sudo privileges in order to run T-Rex daemon.\nTerminating daemon process."
+ exit(-1)
+ else:
+ raise # unexpected error; don't continue with an uninitialized logger/handler
+
+ try:
+ daemon_runner = ExtendedDaemonRunner(trex_app, trex_parser)
+ except IOError as err:
+ # catch 'tty' error when launching server from remote location
+ if err.errno == errno.ENXIO:
+ trex_app.stdout_path = "/dev/null"
+ daemon_runner = ExtendedDaemonRunner(trex_app, trex_parser)
+ else:
+ raise
+
+ #This ensures that the logger file handle does not get closed during daemonization
+ daemon_runner.daemon_context.files_preserve=[handler.stream]
+
+ try:
+ if not set(['start', 'stop']).isdisjoint(set(sys.argv)):
+ print "Logs are saved at: {log_path}".format( log_path = default_log_path )
+ daemon_runner.do_action()
+
+ except lockfile.LockTimeout as inst:
+ logger.error(inst)
+ print inst
+ print """
+ Please try again once the timeout has been reached.
+ If this error continues, consider killing the process manually and restart the daemon."""
+
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/automation/trex_control_plane/server/trex_launch_thread.py b/scripts/automation/trex_control_plane/server/trex_launch_thread.py
new file mode 100755
index 00000000..b4be60a9
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/trex_launch_thread.py
@@ -0,0 +1,92 @@
+#!/router/bin/python
+
+
+import os
+import signal
+import socket
+from common.trex_status_e import TRexStatus
+import subprocess
+import time
+import threading
+import logging
+import CCustomLogger
+
+# setup the logger
+CCustomLogger.setup_custom_logger('TRexServer')
+logger = logging.getLogger('TRexServer')
+
+
+class AsynchronousTRexSession(threading.Thread):
+ def __init__(self, trexObj , trex_launch_path, trex_cmd_data):
+ super(AsynchronousTRexSession, self).__init__()
+ self.stoprequest = threading.Event()
+ self.terminateFlag = False
+ self.launch_path = trex_launch_path
+ self.cmd, self.export_path, self.duration = trex_cmd_data
+ self.session = None
+ self.trexObj = trexObj
+ self.time_stamps = {'start' : None, 'run_time' : None}
+ self.trexObj.zmq_dump = {}
+
+ def run (self):
+
+ with open(os.devnull, 'w') as DEVNULL:
+ self.time_stamps['start'] = self.time_stamps['run_time'] = time.time()
+ self.session = subprocess.Popen("exec "+self.cmd, cwd = self.launch_path, shell=True, stdin = DEVNULL, stderr = subprocess.PIPE, preexec_fn=os.setsid)
+ logger.info("T-Rex session initialized successfully, Parent process pid is {pid}.".format( pid = self.session.pid ))
+ while self.session.poll() is None: # subprocess is NOT finished
+ time.sleep(0.5)
+ if self.stoprequest.is_set():
+ logger.debug("Abort request received by handling thread. Terminating T-Rex session." )
+ os.killpg(self.session.pid, signal.SIGUSR1)
+ self.trexObj.set_status(TRexStatus.Idle)
+ self.trexObj.set_verbose_status("T-Rex is Idle")
+ break
+
+ self.time_stamps['run_time'] = time.time() - self.time_stamps['start']
+
+ try:
+ if self.time_stamps['run_time'] < 5:
+ logger.error("T-Rex run failed due to wrong input parameters, or due to reachability issues.")
+ self.trexObj.set_verbose_status("T-Rex run failed due to wrong input parameters, or due to reachability issues.\n\nT-Rex command: {cmd}\n\nRun output:\n{output}".format(
+ cmd = self.cmd, output = self.load_trex_output(self.export_path)))
+ self.trexObj.errcode = -11
+ elif (self.session.returncode is not None and self.session.returncode < 0) or ( (self.time_stamps['run_time'] < self.duration) and (not self.stoprequest.is_set()) ):
+ if (self.session.returncode is not None and self.session.returncode < 0):
+ logger.debug("Failed T-Rex run due to session return code ({ret_code})".format( ret_code = self.session.returncode ) )
+ elif ( (self.time_stamps['run_time'] < self.duration) and not self.stoprequest.is_set()):
+ logger.debug("Failed T-Rex run due to running time ({runtime}) combined with no-stopping request.".format( runtime = self.time_stamps['run_time'] ) )
+
+ logger.warning("T-Rex run was terminated unexpectedly by outer process or by the hosting OS")
+ self.trexObj.set_verbose_status("T-Rex run was terminated unexpectedly by outer process or by the hosting OS.\n\nRun output:\n{output}".format(
+ output = self.load_trex_output(self.export_path)))
+ self.trexObj.errcode = -15
+ else:
+ logger.info("T-Rex run session finished.")
+ self.trexObj.set_verbose_status('T-Rex finished.')
+ self.trexObj.errcode = None
+
+ finally:
+ self.trexObj.set_status(TRexStatus.Idle)
+ logger.info("TRex running state changed to 'Idle'.")
+ self.trexObj.expect_trex.clear()
+ logger.debug("Finished handling a single run of T-Rex.")
+ self.trexObj.zmq_dump = None
+
+ def join (self, timeout = None):
+ self.stoprequest.set()
+ super(AsynchronousTRexSession, self).join(timeout)
+
+ def load_trex_output (self, export_path):
+ output = None
+ with open(export_path, 'r') as f:
+ output = f.read()
+ return output
+
+
+
+
+
+if __name__ == "__main__":
+ pass
+
diff --git a/scripts/automation/trex_control_plane/server/trex_server.py b/scripts/automation/trex_control_plane/server/trex_server.py
new file mode 100755
index 00000000..992a1d5f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/trex_server.py
@@ -0,0 +1,465 @@
+#!/usr/bin/python
+
+
+import os
+import stat
+import sys
+import time
+import outer_packages
+import zmq
+from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+import jsonrpclib
+from jsonrpclib import Fault
+import binascii
+import socket
+import errno
+import signal
+import binascii
+from common.trex_status_e import TRexStatus
+from common.trex_exceptions import *
+import subprocess
+from random import randrange
+#import shlex
+import logging
+import threading
+import CCustomLogger
+from trex_launch_thread import AsynchronousTRexSession
+from zmq_monitor_thread import ZmqMonitorSession
+from argparse import ArgumentParser, RawTextHelpFormatter
+from json import JSONEncoder
+
+
+# setup the logger
+CCustomLogger.setup_custom_logger('TRexServer')
+logger = logging.getLogger('TRexServer')
+
+class CTRexServer(object):
+ """This class defines the server side of the RESTfull interaction with T-Rex"""
+ DEFAULT_TREX_PATH = '/auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.55/' #'/auto/proj-pcube-b/apps/PL-b/tools/nightly/trex_latest'
+ TREX_START_CMD = './t-rex-64'
+ DEFAULT_FILE_PATH = '/tmp/trex_files/'
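+ # Usage sketch (illustrative paths):
+ # server = CTRexServer(trex_path = '/opt/trex', trex_files_path = '/tmp/trex_files')
+ # server.start() # blocks, serving JSON-RPC requests until interrupted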
+
+ def __init__(self, trex_path, trex_files_path, trex_host = socket.gethostname(), trex_daemon_port = 8090, trex_zmq_port = 4500):
+ """
+ Parameters
+ ----------
+ trex_host : str
+ a string of the t-rex ip address or hostname.
+ default value: machine hostname as fetched from socket.gethostname()
+ trex_daemon_port : int
+ the port number on which the trex-daemon server can be reached
+ default value: 8090
+ trex_zmq_port : int
+ the port number on which trex's zmq module will interact with daemon server
+ default value: 4500
+
+ Instantiates a T-Rex server object that serves incoming client requests
+ """
+ self.TREX_PATH = os.path.abspath(os.path.dirname(trex_path+'/'))
+ self.trex_files_path = os.path.abspath(os.path.dirname(trex_files_path+'/'))
+ self.__check_trex_path_validity()
+ self.__check_files_path_validity()
+ self.trex = CTRex()
+ self.trex_host = trex_host
+ self.trex_daemon_port = trex_daemon_port
+ self.trex_zmq_port = trex_zmq_port
+ self.trex_server_path = "http://{hostname}:{port}".format( hostname = trex_host, port = trex_daemon_port )
+ self.start_lock = threading.Lock()
+ self.__reservation = None
+ self.zmq_monitor = ZmqMonitorSession(self.trex, self.trex_zmq_port) # initiate a single ZMQ monitor thread for server usage
+
+ def add(self, x, y):
+ print "server function add ",x,y
+ logger.info("Processing add function. Parameters are: {0}, {1} ".format( x, y ))
+ return x + y
+ # return Fault(-10, "")
+
+ def push_file (self, filename, bin_data):
+ logger.info("Processing push_file() command.")
+ try:
+ filepath = os.path.abspath(os.path.join(self.trex_files_path, filename))
+ with open(filepath, 'wb') as f:
+ f.write(binascii.a2b_base64(bin_data))
+ logger.info("push_file() command finished. `{name}` was saved at {fpath}".format( name = filename, fpath = self.trex_files_path))
+ return True
+ except IOError as inst:
+ logger.error("push_file method failed. " + str(inst))
+ return False
+
+ def connectivity_check (self):
+ logger.info("Processing connectivity_check function.")
+ return True
+
+ def start(self):
+ """This method fires up the daemon server based on initialized parameters of the class"""
+ # initialize the server instance with given resources
+ try:
+ print "Firing up T-Rex REST daemon @ port {trex_port} ...\n".format( trex_port = self.trex_daemon_port )
+ logger.info("Firing up T-Rex REST daemon @ port {trex_port} ...".format( trex_port = self.trex_daemon_port ))
+ logger.info("current working dir is: {0}".format(self.TREX_PATH) )
+ logger.info("current files dir is : {0}".format(self.trex_files_path) )
+ logger.debug("Starting TRex server. Registering methods to process.")
+ self.server = SimpleJSONRPCServer( (self.trex_host, self.trex_daemon_port) )
+ except socket.error as e:
+ if e.errno == errno.EADDRINUSE:
+ logger.error("T-Rex server requested address already in use. Aborting server launching.")
+ print "T-Rex server requested address already in use. Aborting server launching."
+ raise socket.error(errno.EADDRINUSE, "T-Rex daemon requested address already in use. Server launch aborted. Please make sure no other process is using the desired server properties.")
+
+ # set further functionality and peripherals to server instance
+ try:
+ self.server.register_function(self.add)
+ self.server.register_function(self.connectivity_check)
+ self.server.register_function(self.start_trex)
+ self.server.register_function(self.stop_trex)
+ self.server.register_function(self.wait_until_kickoff_finish)
+ self.server.register_function(self.get_running_status)
+ self.server.register_function(self.is_running)
+ self.server.register_function(self.get_running_info)
+ self.server.register_function(self.is_reserved)
+ self.server.register_function(self.get_files_path)
+ self.server.register_function(self.push_file)
+ self.server.register_function(self.reserve_trex)
+ self.server.register_function(self.cancel_reservation)
+ self.server.register_function(self.force_trex_kill)
+ signal.signal(signal.SIGTSTP, self.stop_handler)
+ signal.signal(signal.SIGTERM, self.stop_handler)
+ self.zmq_monitor.start()
+ self.server.serve_forever()
+ except KeyboardInterrupt:
+ logger.info("Daemon shutdown request detected." )
+ finally:
+ self.zmq_monitor.join() # free ZMQ monitor thread resources
+ self.server.shutdown()
+ pass
+
+ def stop_handler (self, signum, frame):
+ logger.info("Daemon STOP request detected.")
+ if self.is_running():
+ # in case T-Rex process is currently running, stop it before terminating server process
+ self.stop_trex(self.trex.get_seq())
+ sys.exit(0)
+
+ def is_running (self):
+ run_status = self.trex.get_status()
+ logger.info("Processing is_running() command. Running status is: {stat}".format(stat = run_status) )
+ if run_status==TRexStatus.Running:
+ return True
+ else:
+ return False
+
+ def is_reserved (self):
+ logger.info("Processing is_reserved() command.")
+ return bool(self.__reservation)
+
+ def get_running_status (self):
+ run_status = self.trex.get_status()
+ logger.info("Processing get_running_status() command. Running status is: {stat}".format(stat = run_status) )
+ return { 'state' : run_status.value, 'verbose' : self.trex.get_verbose_status() }
+
+ def get_files_path (self):
+ logger.info("Processing get_files_path() command." )
+ return self.trex_files_path
+
+ def reserve_trex (self, user):
+ if user == "":
+ logger.info("T-Rex reservation cannot apply to empty string user. Request denied.")
+ return Fault(-33, "T-Rex reservation cannot apply to empty string user. Request denied.")
+
+ with self.start_lock:
+ logger.info("Processing reserve_trex() command.")
+ if self.is_reserved():
+ if user == self.__reservation['user']:
+ # return True if the same user is asking and already holds the reservation
+ logger.info("The same user is asking and already holds the reservation. Re-reserving T-Rex.")
+ return True
+
+ logger.info("T-Rex is already reserved to another user ({res_user}), cannot reserve to another user.".format( res_user = self.__reservation['user'] ))
+ return Fault(-33, "T-Rex is already reserved to another user ({res_user}). Please make sure T-Rex is free before reserving it.".format(
+ res_user = self.__reservation['user']) ) # raise at client TRexInUseError
+ elif self.trex.get_status() != TRexStatus.Idle:
+ logger.info("T-Rex is currently running, cannot reserve T-Rex unless in Idle state.")
+ return Fault(-13, 'T-Rex is currently running, cannot reserve T-Rex unless in Idle state. Please try again when T-Rex run finished.') # raise at client TRexInUseError
+ else:
+ logger.info("T-Rex is now reserved for user ({res_user}).".format( res_user = user ))
+ self.__reservation = {'user' : user, 'since' : time.ctime()}
+ logger.debug("Reservation details: "+ str(self.__reservation))
+ return True
+
+ def cancel_reservation (self, user):
+ with self.start_lock:
+ logger.info("Processing cancel_reservation() command.")
+ if self.is_reserved():
+ if self.__reservation['user'] == user:
+ logger.info("T-Rex reservation to {res_user} has been canceled successfully.".format(res_user = self.__reservation['user']))
+ self.__reservation = None
+ return True
+ else:
+ logger.warning("T-Rex is reserved to different user than the provided one. Reservation wasn't canceled.")
+ return Fault(-33, "Cancel reservation request is available to the user that holds the reservation. Request denied") # raise at client TRexRequestDenied
+
+ else:
+ logger.info("T-Rex is not reserved to anyone. No need to cancel anything")
+ assert(self.__reservation is None)
+ return False
+
+
+ def start_trex(self, trex_cmd_options, user, block_to_success = True, timeout = 30):
+ with self.start_lock:
+ logger.info("Processing start_trex() command.")
+ if self.is_reserved():
+ # check if this is not the user to which T-Rex is reserved
+ if self.__reservation['user'] != user:
+ logger.info("T-Rex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user']))
+ return Fault(-33, "T-Rex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user'])) # raise at client TRexRequestDenied
+ elif self.trex.get_status() != TRexStatus.Idle:
+ logger.info("T-Rex is already taken, cannot create another run until done.")
+ return Fault(-13, '') # raise at client TRexInUseError
+
+ try:
+ server_cmd_data = self.generate_run_cmd(**trex_cmd_options)
+ self.zmq_monitor.first_dump = True
+ self.trex.start_trex(self.TREX_PATH, server_cmd_data)
+ logger.info("T-Rex session has been successfully initiated.")
+ if block_to_success:
+ # delay server response until T-Rex is at 'Running' state.
+ start_time = time.time()
+ trex_state = None
+ while (time.time() - start_time) < timeout :
+ trex_state = self.trex.get_status()
+ if trex_state != TRexStatus.Starting:
+ break
+ else:
+ time.sleep(0.5)
+
+ # check for T-Rex run started normally
+ if trex_state == TRexStatus.Starting: # reached timeout
+ logger.warning("TimeoutError: T-Rex initiation outcome could not be obtained, since T-Rex stays at Starting state beyond defined timeout.")
+ return Fault(-12, 'TimeoutError: T-Rex initiation outcome could not be obtained, since T-Rex stays at Starting state beyond defined timeout.') # raise at client TRexWarning
+ elif trex_state == TRexStatus.Idle:
+ return Fault(-11, self.trex.get_verbose_status()) # raise at client TRexError
+
+ # reach here only if T-Rex is at 'Running' state
+ self.trex.gen_seq()
+ return self.trex.get_seq() # return unique seq number to client
+
+ except TypeError as e:
+ logger.error("T-Rex command generation failed, probably because either -f (traffic generation .yaml file) and -c (num of cores) was not specified correctly.\nReceived params: {params}".format( params = trex_cmd_options) )
+ raise TypeError('T-Rex -f (traffic generation .yaml file) and -c (num of cores) must be specified.')
+
+
+ def stop_trex(self, seq):
+ logger.info("Processing stop_trex() command.")
+ if self.trex.get_seq()== seq:
+ logger.debug("Abort request legit since seq# match")
+ return self.trex.stop_trex()
+ else:
+ if self.trex.get_status() != TRexStatus.Idle:
+ logger.warning("Abort request is only allowed to process initiated the run. Request denied.")
+
+ return Fault(-33, 'Abort request is only allowed to process initiated the run. Request denied.') # raise at client TRexRequestDenied
+ else:
+ return False
+
+ def force_trex_kill (self):
+ logger.info("Processing force_trex_kill() command. --> Killing T-Rex session indiscriminately.")
+ return self.trex.stop_trex()
+
+ def wait_until_kickoff_finish (self, timeout = 40):
+ # block until T-Rex exits Starting state
+ logger.info("Processing wait_until_kickoff_finish() command.")
+ trex_state = None
+ start_time = time.time()
+ while (time.time() - start_time) < timeout :
+ trex_state = self.trex.get_status()
+ if trex_state != TRexStatus.Starting:
+ return
+ time.sleep(0.5) # avoid busy-waiting while T-Rex is still starting
+ return Fault(-12, 'TimeoutError: T-Rex initiation outcome could not be obtained, since T-Rex stays at Starting state beyond defined timeout.') # raise at client TRexWarning
+
+ def get_running_info (self):
+ logger.info("Processing get_running_info() command.")
+ return self.trex.get_running_info()
+
+ def generate_run_cmd (self, f, d, iom = 0, export_path="/tmp/trex.txt", **kwargs):
+ """ generate_run_cmd(self, f, d, iom, export_path, **kwargs) -> tuple
+
+ Generates a custom running command for the kick-off of the T-Rex traffic generator.
+ Returns a tuple of command (string), export path (string) and duration (long) to be issued on the trex server
+
+ Parameters
+ ----------
+ f : str
+ the traffic generation .yaml file to be used, e.g. "cap2/sfr.yaml"
+ d : int
+ the desired duration of the run, in seconds
+ iom : int
+ the I/O mode; 0 disables the periodic log to the screen
+ export_path : str
+ a full system path to which the results of the trex-run will be logged.
+ kwargs : dict
+ additional T-Rex command-line options, e.g. c = 2, m = 0.5, nc = True
+
+ """
+ if 'results_file_path' in kwargs:
+ export_path = kwargs['results_file_path']
+ del kwargs['results_file_path']
+
+
+ # adding additional options to the command
+ trex_cmd_options = ''
+ for key, value in kwargs.iteritems():
+ tmp_key = key.replace('_','-')
+ dash = ' -' if (len(key)==1) else ' --'
+ if (value == True) and (str(value) != '1'): # the `value == True` check alone is not enough, since Python evaluates 1 == True; compare str(value) to tell a boolean flag from the integer 1
+ trex_cmd_options += (dash + tmp_key)
+ else:
+ trex_cmd_options += (dash + '{k} {val}'.format( k = tmp_key, val = value ))
+
+ cmd = "{run_command} -f {gen_file} -d {duration} --iom {io} {cmd_options} --no-key > {export}".format( # -- iom 0 disables the periodic log to the screen (not needed)
+ run_command = self.TREX_START_CMD,
+ gen_file = f,
+ duration = d,
+ cmd_options = trex_cmd_options,
+ io = iom,
+ export = export_path )
+
+ logger.info("T-REX FULL COMMAND: {command}".format(command = cmd) )
+
+ return (cmd, export_path, long(d))
+
+ def __check_trex_path_validity(self):
+ # check for executable existence
+ if not os.path.exists(self.TREX_PATH+'/t-rex-64'):
+ print "The provided T-Rex path does not contain an executable T-Rex file.\nPlease check the path and retry."
+ logger.error("The provided T-Rex path does not contain an executable T-Rex file")
+ exit(-1)
+ # check for executable permissions
+ st = os.stat(self.TREX_PATH+'/t-rex-64')
+ if not bool(st.st_mode & (stat.S_IXUSR ) ):
+ print "The provided T-Rex path does not contain a T-Rex file with execution privileges.\nPlease check the file's permissions and retry."
+ logger.error("The provided T-Rex path does not contain a T-Rex file with execution privileges")
+ exit(-1)
+ else:
+ return
+
+ def __check_files_path_validity(self):
+ # first, check for path existence. otherwise, try creating it with appropriate credentials
+ if not os.path.exists(self.trex_files_path):
+ try:
+ os.makedirs(self.trex_files_path, 0770) # owner/group need the execute bit to traverse the directory; 0660 would create an unusable one
+ return
+ except os.error as inst:
+ print "The provided files path does not exist and cannot be created with needed access credentials using root user.\nPlease check the path's permissions and retry."
+ logger.error("The provided files path does not exist and cannot be created with needed access credentials using root user.")
+ exit(-1)
+ elif os.access(self.trex_files_path, os.W_OK):
+ return
+ else:
+ print "The provided files path has insufficient access credentials for root user.\nPlease check the path's permissions and retry."
+ logger.error("The provided files path has insufficient access credentials for root user")
+ exit(-1)
+
+class CTRex(object):
+ def __init__(self):
+ self.status = TRexStatus.Idle
+ self.verbose_status = 'T-Rex is Idle'
+ self.errcode = None
+ self.session = None
+ self.zmq_monitor = None
+ self.zmq_dump = None
+ self.seq = None
+ self.expect_trex = threading.Event()
+ self.encoder = JSONEncoder()
+
+ def get_status(self):
+ return self.status
+
+ def set_status(self, new_status):
+ self.status = new_status
+
+ def get_verbose_status(self):
+ return self.verbose_status
+
+ def set_verbose_status(self, new_status):
+ self.verbose_status = new_status
+
+ def gen_seq (self):
+ self.seq = randrange(1,1000)
+
+ def get_seq (self):
+ return self.seq
+
+ def get_running_info (self):
+ if self.status == TRexStatus.Running:
+ return self.encoder.encode(self.zmq_dump)
+ else:
+ logger.info("T-Rex isn't running. Running information isn't available.")
+ if self.status == TRexStatus.Idle:
+ if self.errcode is not None: # some error occurred
+ logger.info("T-Rex is in Idle state, with errors. Returning fault")
+ return Fault(self.errcode, self.verbose_status) # raise at client the relevant exception, depending on the reason the error occurred
+ else:
+ logger.info("T-Rex is in Idle state, no errors. returning {}")
+ return u'{}'
+
+ return Fault(-12, self.verbose_status) # raise at client TRexWarning, indicating T-Rex is back to Idle state or still in Starting state
+
+ def stop_trex(self):
+ if self.status == TRexStatus.Idle:
+ # t-rex isn't running, nothing to abort
+ logger.info("T-Rex isn't running. No need to stop anything.")
+ if self.errcode is not None: # some error occurred, notify client despite T-Rex already stopped
+ return Fault(self.errcode, self.verbose_status) # raise at client the relevant exception, depending on the reason the error occurred
+ return False
+ else:
+ # handle stopping t-rex's run
+ self.session.join()
+ logger.info("T-Rex session has been successfully aborted.")
+ return True
+
+ def start_trex(self, trex_launch_path, trex_cmd):
+ self.set_status(TRexStatus.Starting)
+ logger.info("TRex running state changed to 'Starting'.")
+ self.set_verbose_status('T-Rex is starting (data is not available yet)')
+
+ self.errcode = None
+ self.session = AsynchronousTRexSession(self, trex_launch_path, trex_cmd)
+ self.session.start()
+ self.expect_trex.set()
+# self.zmq_monitor= ZmqMonitorSession(self, zmq_port)
+# self.zmq_monitor.start()
+
+
+
+def generate_trex_parser ():
+ default_path = os.path.abspath(os.path.join(outer_packages.CURRENT_PATH, os.pardir, os.pardir, os.pardir))
+ default_files_path = os.path.abspath(CTRexServer.DEFAULT_FILE_PATH)
+
+ parser = ArgumentParser(description = 'Run server application for T-Rex traffic generator',
+ formatter_class = RawTextHelpFormatter,
+ usage = """
+trex_daemon_server [options]
+""" )
+
+ parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0')
+ parser.add_argument("-p", "--daemon-port", type=int, default = 8090, metavar="PORT", dest="daemon_port",
+ help="Select port on which the daemon runs.\nDefault port is 8090.", action="store")
+ parser.add_argument("-z", "--zmq-port", dest="zmq_port", type=int,
+ action="store", help="Select port on which the ZMQ module listens to T-Rex.\nDefault port is 4500.", metavar="PORT",
+ default = 4500)
+ parser.add_argument("-t", "--trex-path", dest="trex_path",
+ action="store", help="Specify the compiled T-Rex directory from which T-Rex would run.\nDefault path is: {def_path}.".format( def_path = default_path ),
+ metavar="PATH", default = default_path )
+ parser.add_argument("-f", "--files-path", dest="files_path",
+ action="store", help="Specify a path to directory on which pushed files will be saved at.\nDefault path is: {def_path}.".format( def_path = default_files_path ),
+ metavar="PATH", default = default_files_path )
+ return parser
+
+trex_parser = generate_trex_parser()
+
+def do_main_program ():
+
+ args = trex_parser.parse_args()
+
+ server = CTRexServer(trex_daemon_port = args.daemon_port, trex_zmq_port = args.zmq_port, trex_path = args.trex_path, trex_files_path = args.files_path)
+ server.start()
+
+
+if __name__ == "__main__":
+ do_main_program()
+
diff --git a/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py b/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py
new file mode 100755
index 00000000..28e154ee
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py
@@ -0,0 +1,80 @@
+#!/router/bin/python
+
+import os
+import outer_packages
+import zmq
+import threading
+import logging
+import CCustomLogger
+from json import JSONDecoder
+from common.trex_status_e import TRexStatus
+
+# setup the logger
+CCustomLogger.setup_custom_logger('TRexServer')
+logger = logging.getLogger('TRexServer')
+
+class ZmqMonitorSession(threading.Thread):
+ def __init__(self, trexObj , zmq_port):
+ super(ZmqMonitorSession, self).__init__()
+ self.stoprequest = threading.Event()
+# self.terminateFlag = False
+ self.first_dump = True
+ self.zmq_port = zmq_port
+ self.zmq_publisher = "tcp://localhost:{port}".format( port = self.zmq_port )
+# self.context = zmq.Context()
+# self.socket = self.context.socket(zmq.SUB)
+ self.trexObj = trexObj
+ self.expect_trex = self.trexObj.expect_trex # used to signal if T-Rex is expected to run and if data should be considered
+ self.decoder = JSONDecoder()
+ logger.info("ZMQ monitor initialization finished")
+
+ def run (self):
+ self.context = zmq.Context()
+ self.socket = self.context.socket(zmq.SUB)
+ logger.info("ZMQ monitor started listening @ {pub}".format( pub = self.zmq_publisher ) )
+ self.socket.connect(self.zmq_publisher)
+ self.socket.setsockopt(zmq.SUBSCRIBE, '')
+
+ while not self.stoprequest.is_set():
+ try:
+ zmq_dump = self.socket.recv() # This call is BLOCKING until data received!
+ if self.expect_trex.is_set():
+ self.parse_and_update_zmq_dump(zmq_dump)
+ logger.debug("ZMQ dump received on socket, and saved to trexObject.")
+ except Exception as e:
+ if self.stoprequest.is_set():
+ # allow this exception since it comes from ZMQ monitor termination
+ pass
+ else:
+ logger.error("ZMQ monitor thrown an exception. Received exception: {ex}".format(ex = e))
+ raise
+
+ def join (self, timeout = None):
+ self.stoprequest.set()
+ logger.debug("Handling termination of ZMQ monitor thread")
+ self.socket.close()
+ self.context.term()
+ logger.info("ZMQ monitor resources has been freed.")
+ super(ZmqMonitorSession, self).join(timeout)
+
+ def parse_and_update_zmq_dump (self, zmq_dump):
+ try:
+ dict_obj = self.decoder.decode(zmq_dump)
+ except ValueError:
+ logger.error("ZMQ dump failed JSON-RPC decode. Ignoring. Bad dump was: {dump}".format(dump = zmq_dump))
+ dict_obj = None
+
+ # add to trex_obj zmq latest dump, based on its 'name' header
+ if dict_obj is not None and dict_obj!={}:
+ self.trexObj.zmq_dump[dict_obj['name']] = dict_obj
+ if self.first_dump:
+ # change TRexStatus from starting to Running once the first ZMQ dump is obtained and parsed successfully
+ self.first_dump = False
+ self.trexObj.set_status(TRexStatus.Running)
+ self.trexObj.set_verbose_status("T-Rex is Running")
+ logger.info("First ZMQ dump received and successfully parsed. TRex running state changed to 'Running'.")
+
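+# Usage sketch (as done by CTRexServer): one monitor thread per server,
+# started alongside the JSON-RPC server and joined on shutdown:
+# monitor = ZmqMonitorSession(trex_obj, 4500)
+# monitor.start()
+# ...
+# monitor.join() # sets stoprequest and frees the ZMQ resources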
+
+if __name__ == "__main__":
+ pass
+
diff --git a/scripts/automation/trex_control_plane/unit_tests/__init__.py b/scripts/automation/trex_control_plane/unit_tests/__init__.py
new file mode 100755
index 00000000..d3f5a12f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/__init__.py
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/unit_tests/client_launching_test.py b/scripts/automation/trex_control_plane/unit_tests/client_launching_test.py
new file mode 100755
index 00000000..42d79af5
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/client_launching_test.py
@@ -0,0 +1,31 @@
+#!/router/bin/python
+from control_plane_general_test import CControlPlaneGeneral_Test
+from Client.trex_client import CTRexClient
+
+import socket
+from nose.tools import assert_raises
+
+
+class CClientLaunching_Test(CControlPlaneGeneral_Test):
+ def __init__(self):
+ super(CClientLaunching_Test, self).__init__()
+ pass
+
+ def setUp(self):
+ pass
+
+ def test_wrong_hostname(self):
+ # self.tmp_server = CTRexClient('some-invalid-hostname')
+ assert_raises (socket.gaierror, CTRexClient, 'some-invalid-hostname' )
+
+ # perform this test only if server is down, but server machine is up
+ def test_refused_connection(self):
+ assert_raises (socket.error, CTRexClient, 'trex-dan') # Assuming 'trex-dan' server is down! otherwise test fails
+
+
+ def test_verbose_mode(self):
+ tmp_client = CTRexClient(self.trex_server_name, verbose = True)
+ pass
+
+ def tearDown(self):
+ pass
diff --git a/scripts/automation/trex_control_plane/unit_tests/control_plane_general_test.py b/scripts/automation/trex_control_plane/unit_tests/control_plane_general_test.py
new file mode 100755
index 00000000..95f259b8
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/control_plane_general_test.py
@@ -0,0 +1,72 @@
+#!/router/bin/python
+
+__copyright__ = "Copyright 2015"
+
+"""
+Name:
+ control_plane_general_test.py
+
+
+Description:
+
+ This script creates the functionality to test the performance of the T-Rex traffic generator control plane.
+ The scenarios assume a WORKING server is listening and processing the requests.
+
+::
+
+ Topology:
+
+ -------- --------
+ | | | |
+ | Client | <-----JSON-RPC------> | Server |
+ | | | |
+ -------- --------
+
+"""
+from nose.plugins import Plugin
+# import misc_methods
+import sys
+import os
+# from CPlatformUnderTest import *
+# from CPlatform import *
+import termstyle
+import threading
+from common.trex_exceptions import *
+from Client.trex_client import CTRexClient
+# import Client.outer_packages
+# import Client.trex_client
+
+TREX_SERVER = None
+
+class CTRexCP():
+ trex_server = None
+
+def setUpModule(module):
+ pass
+
+def tearDownModule(module):
+ pass
+
+
+class CControlPlaneGeneral_Test(object):#(unittest.TestCase):
+ """This class defines the general testcase of the control plane service"""
+ def __init__ (self):
+ self.trex_server_name = 'csi-kiwi-02'
+ self.trex = CTRexClient(self.trex_server_name)
+ pass
+
+ def setUp(self):
+ # initialize server connection for single client
+ # self.server = CTRexClient(self.trex_server)
+ pass
+
+ ########################################################################
+ #### DO NOT ADD TESTS TO THIS FILE ####
+ #### Tests added here will be run once for EVERY test sub-class ####
+ ########################################################################
+
+ def tearDown(self):
+ pass
+
+ def check_for_trex_crash(self):
+ pass
diff --git a/scripts/automation/trex_control_plane/unit_tests/control_plane_unit_test.py b/scripts/automation/trex_control_plane/unit_tests/control_plane_unit_test.py
new file mode 100755
index 00000000..37130ee4
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/control_plane_unit_test.py
@@ -0,0 +1,73 @@
+#!/router/bin/python
+
+__copyright__ = "Copyright 2014"
+
+
+
+import os
+import sys
+import nose_outer_packages
+import nose
+from nose.plugins import Plugin
+from rednose import RedNose
+import termstyle
+import control_plane_general_test
+
+class TRexCPConfiguringPlugin(Plugin):
+ def options(self, parser, env = os.environ):
+ super(TRexCPConfiguringPlugin, self).options(parser, env)
+ parser.add_option('-t', '--trex-server', action='store',
+ dest='trex_server', default='trex-dan',
+ help='Specify T-Rex server hostname. This server will be used to test control-plane functionality.')
+
+ def configure(self, options, conf):
+ if options.trex_server:
+ self.trex_server = options.trex_server
+
+ def begin (self):
+ # initialize CTRexCP global testing class, to be used by and accessible to all tests
+ print "assigned trex_server name"
+ control_plane_general_test.CTRexCP.trex_server = self.trex_server
+
+ def finalize(self, result):
+ pass
+
+
+
+if __name__ == "__main__":
+
+ # setting defaults. By default we run all the test suite
+ specific_tests = False
+ disableLogCapture = False
+ long_test = False
+ report_dir = "reports"
+
+ nose_argv= sys.argv + ['-s', '-v', '--exe', '--rednose', '--detailed-errors']
+
+ try:
+ result = nose.run(argv = nose_argv, addplugins = [RedNose(), TRexCPConfiguringPlugin()])
+ if (result == True):
+ print termstyle.green("""
+ ..::''''::..
+ .;'' ``;.
+ :: :: :: ::
+ :: :: :: ::
+ :: :: :: ::
+ :: .:' :: :: `:. ::
+ :: : : ::
+ :: `:. .:' ::
+ `;..``::::''..;'
+ ``::,,,,::''
+
+ ___ ___ __________
+ / _ \/ _ | / __/ __/ /
+ / ___/ __ |_\ \_\ \/_/
+ /_/ /_/ |_/___/___(_)
+
+ """)
+ sys.exit(0)
+ else:
+ sys.exit(-1)
+
+ finally:
+ pass \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/unit_tests/functional_test.py b/scripts/automation/trex_control_plane/unit_tests/functional_test.py
new file mode 100755
index 00000000..f742403d
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/functional_test.py
@@ -0,0 +1,160 @@
+#!/router/bin/python
+from control_plane_general_test import CControlPlaneGeneral_Test
+from Client.trex_client import CTRexClient
+
+import socket
+from nose.tools import assert_raises, assert_equal, assert_not_equal
+from common.trex_status_e import TRexStatus
+from common.trex_exceptions import *
+from enum import Enum
+import time
+
+
+class CTRexStartStop_Test(CControlPlaneGeneral_Test):
+ def __init__(self):
+ super(CTRexStartStop_Test, self).__init__()
+ self.valid_start_params = dict( c = 4,
+ m = 1.1,
+ d = 100,
+ f = 'avl/sfr_delay_10_1g.yaml',
+ nc = True,
+ p = True,
+ l = 1000)
+
+ def setUp(self):
+ pass
+
+ def test_mandatory_param_error(self):
+ start_params = dict( c = 4,
+ m = 1.1,
+ d = 70,
+ # f = 'avl/sfr_delay_10_1g.yaml', <-- f (mandatory) is not provided on purpose
+ nc = True,
+ p = True,
+ l = 1000)
+
+ assert_raises(TypeError, self.trex.start_trex, **start_params)
+
+ def test_parameter_name_error(self):
+ ret = self.trex.start_trex( c = 4,
+ wrong_key = 1.1, # <----- This key does not exist in the T-Rex API
+ d = 70,
+ f = 'avl/sfr_delay_10_1g.yaml',
+ nc = True,
+ p = True,
+ l = 1000)
+
+ time.sleep(5)
+
+ # check for failure status
+ run_status = self.trex.get_running_status()
+ assert isinstance(run_status, dict)
+ assert_equal (run_status['state'], TRexStatus.Idle )
+ assert_equal (run_status['verbose'], "T-Rex run failed due to wrong input parameters, or due to reachability issues.")
+ assert_raises(TRexError, self.trex.get_running_info)
+
+ def test_too_early_sample(self):
+ ret = self.trex.start_trex(**self.valid_start_params)
+
+ assert ret==True
+ # issue get_running_info() too soon, without any(!) sleep
+ run_status = self.trex.get_running_status()
+ assert isinstance(run_status, dict)
+ assert_equal (run_status['state'], TRexStatus.Starting )
+ assert_raises(TRexWarning, self.trex.get_running_info)
+
+ ret = self.trex.stop_trex()
+ assert ret==True # make sure stop succeeded
+ assert self.trex.is_running() == False
+
+ def test_start_sampling_on_time(self):
+ ret = self.trex.start_trex(**self.valid_start_params)
+ assert ret==True
+ time.sleep(6)
+
+ run_status = self.trex.get_running_status()
+ assert isinstance(run_status, dict)
+ assert_equal (run_status['state'], TRexStatus.Running )
+
+ run_info = self.trex.get_running_info()
+ assert isinstance(run_info, dict)
+ ret = self.trex.stop_trex()
+ assert ret==True # make sure stop succeeded
+ assert self.trex.is_running() == False
+
+ def test_start_more_than_once_same_user(self):
+ assert self.trex.is_running() == False # first, make sure T-Rex is not running
+ ret = self.trex.start_trex(**self.valid_start_params) # start 1st T-Rex run
+ assert ret == True # make sure 1st run was submitted successfully
+ # time.sleep(1)
+ assert_raises(TRexInUseError, self.trex.start_trex, **self.valid_start_params) # try to start T-Rex again
+
+ ret = self.trex.stop_trex()
+ assert ret==True # make sure stop succeeded
+ assert self.trex.is_running() == False
+
+ def test_start_more_than_once_different_users(self):
+ assert self.trex.is_running() == False # first, make sure T-Rex is not running
+ ret = self.trex.start_trex(**self.valid_start_params) # start 1st T-Rex run
+ assert ret == True # make sure 1st run was submitted successfully
+ # time.sleep(1)
+
+ tmp_trex = CTRexClient(self.trex_server_name) # initialize another client connecting same server
+ assert_raises(TRexInUseError, tmp_trex.start_trex, **self.valid_start_params) # try to start T-Rex again
+
+ ret = self.trex.stop_trex()
+ assert ret==True # make sure stop succeeded
+ assert self.trex.is_running() == False
+
+ def test_simultaneous_sampling(self):
+ assert self.trex.is_running() == False # first, make sure T-Rex is not running
+ tmp_trex = CTRexClient(self.trex_server_name) # initialize another client connecting same server
+ ret = self.trex.start_trex(**self.valid_start_params) # start T-Rex run
+ assert ret == True # make sure 1st run was submitted successfully
+
+ time.sleep(6)
+ # now, sample server from both clients
+ while (self.trex.is_running()):
+ info_1 = self.trex.get_running_info()
+ info_2 = tmp_trex.get_running_info()
+
+ # make sure samples are consistent
+ if self.trex.get_result_obj().is_valid_hist():
+ assert tmp_trex.get_result_obj().is_valid_hist() == True
+ if self.trex.get_result_obj().is_done_warmup():
+ assert tmp_trex.get_result_obj().is_done_warmup() == True
+ # except TRexError as inst: # T-Rex might have stopped between is_running result and get_running_info() call
+ # # hence, ignore that case
+ # break
+
+ assert self.trex.is_running() == False
+
+ def test_fast_toggling(self):
+ assert self.trex.is_running() == False
+ for i in range(20):
+ ret = self.trex.start_trex(**self.valid_start_params) # start T-Rex run
+ assert ret == True
+ assert self.trex.is_running() == False # we expect the status to be 'Starting'
+ ret = self.trex.stop_trex()
+ assert ret == True
+ assert self.trex.is_running() == False
+ pass
+
+
+ def tearDown(self):
+ pass
+
+class CBasicQuery_Test(CControlPlaneGeneral_Test):
+ def __init__(self):
+ super(CBasicQuery_Test, self).__init__()
+ pass
+
+ def setUp(self):
+ pass
+
+ def test_is_running(self):
+ assert self.trex.is_running() == False
+
+
+ def tearDown(self):
+ pass
diff --git a/scripts/automation/trex_control_plane/unit_tests/nose_outer_packages.py b/scripts/automation/trex_control_plane/unit_tests/nose_outer_packages.py
new file mode 100755
index 00000000..b5b78db7
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/nose_outer_packages.py
@@ -0,0 +1,27 @@
+#!/router/bin/python
+
+import sys,site
+import platform,os
+
+CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory
+PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, 'python_lib'))
+
+
+TEST_MODULES = ['nose-1.3.4',
+ 'rednose-0.4.1',
+ 'termstyle'
+ ]
+
+def import_test_modules ():
+ sys.path.append(ROOT_PATH)
+ import_module_list(TEST_MODULES)
+
+def import_module_list (modules_list):
+ assert(isinstance(modules_list, list))
+ for p in modules_list:
+ full_path = os.path.normcase(os.path.join(PATH_TO_PYTHON_LIB, p))
+ site.addsitedir(full_path)
+
+import_test_modules()
diff --git a/scripts/automation/trex_control_plane/unit_tests/sock.py b/scripts/automation/trex_control_plane/unit_tests/sock.py
new file mode 100755
index 00000000..29248e3e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/sock.py
@@ -0,0 +1,552 @@
+import os
+import dpkt
+import struct
+import socket
+import sys
+import argparse;
+
+
+H_SCRIPT_VER = "0.1"
+
+class sock_driver(object):
+ args=None;
+
+
+def nl (buf):
+ return ( struct.unpack('>I', buf)[0]);
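+# Example: nl('\x7f\x00\x00\x01') == 0x7f000001, i.e. 127.0.0.1 as a host-order int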
+
+def dump_tuple (t):
+ for obj in t:
+ print hex(obj),",",
+
+class CFlowRec:
+ def __init__ (self):
+ self.is_init_dir=False;
+ self.bytes=0;
+ self.data=None;
+
+ def __str__ (self):
+ if self.is_init_dir :
+ s=" client "
+ else:
+ s=" server "
+ s+= " %d " %(self.bytes)
+ return (s);
+
+
+
+class CPcapFileReader:
+ def __init__ (self,file_name):
+ self.file_name=file_name;
+ self.tuple=None;
+ self.swap=False;
+ self.info=[];
+
+ def dump_info (self):
+ for obj in self.info:
+ print obj
+ #print "'",obj.data,"'"
+
+ def is_client_side (self,swap):
+ if self.swap ==swap:
+ return (True);
+ else:
+ return (False);
+
+ def add_server(self,server,data):
+ r=CFlowRec();
+ r.is_init_dir =False;
+ r.bytes = server
+ r.data=data
+ self.info.append(r);
+
+ def add_client(self,client,data):
+ r=CFlowRec();
+ r.is_init_dir =True;
+ r.bytes = client
+ r.data=data
+ self.info.append(r);
+
+ def check_tcp_flow (self):
+ f = open(self.file_name)
+ pcap = dpkt.pcap.Reader(f)
+ for ts, buf in pcap:
+ eth = dpkt.ethernet.Ethernet(buf)
+ ip = eth.data
+ tcp = ip.data
+ if ip.p != 6 :
+ raise Exception("not a TCP flow ..");
+ if tcp.flags != dpkt.tcp.TH_SYN :
+ raise Exception("first packet should be with SYN");
+ break;
+ f.close();
+
+ def check_one_flow (self):
+ cnt=1
+ client=0;
+ server=0;
+ client_data=''
+ server_data=''
+ is_c=False # the direction
+ is_s=False
+ f = open(self.file_name)
+ pcap = dpkt.pcap.Reader(f)
+ for ts, buf in pcap:
+ eth = dpkt.ethernet.Ethernet(buf)
+ ip = eth.data
+ tcp = ip.data
+ pld = tcp.data;
+
+ pkt_swap=False
+ if nl(ip.src) > nl(ip.dst):
+ pkt_swap=True
+ tuple= (nl(ip.dst),nl(ip.src), tcp.dport ,tcp.sport,ip.p );
+ else:
+ tuple= (nl(ip.src),nl(ip.dst) ,tcp.sport,tcp.dport,ip.p );
+
+ if self.tuple == None:
+ self.swap=pkt_swap
+ self.tuple=tuple
+ else:
+ if self.tuple != tuple:
+ raise Exception("More than one flow - can't process this flow");
+
+
+ print " %5d," % (cnt),
+ if self.is_client_side (pkt_swap):
+ print "client",
+ if len(pld) >0 :
+ if is_c==False:
+ is_c=True
+ if is_s:
+ self.add_server(server,server_data);
+ server=0;
+ server_data=''
+ is_s=False;
+
+ client+=len(pld);
+ client_data=client_data+pld;
+ else:
+ if len(pld) >0 :
+ if is_s==False:
+ is_s=True
+ if is_c:
+ self.add_client(client,client_data);
+ client=0;
+ client_data=''
+ is_c=False;
+
+ server+=len(pld)
+ server_data=server_data+pld;
+
+ print "server",
+ print " %5d" % (len(pld)),
+ dump_tuple (tuple)
+ print
+
+ cnt=cnt+1
+
+ if is_c:
+ self.add_client(client,client_data);
+ if is_s:
+ self.add_server(server,server_data);
+
+ f.close();
+
+
+class CClientServerCommon(object):
+
+ def __init__ (self):
+ pass;
+
+ def send_info (self,data):
+ print "sending %d bytes" % (len(data))
+ self.connection.sendall(data)
+
+ def rcv_info (self,msg_size):
+ print "waiting for %d bytes" % (msg_size)
+
+ bytes_recd = 0
+ while bytes_recd < msg_size:
+ chunk = self.connection.recv(min(msg_size - bytes_recd, 2048))
+ if chunk == '':
+ raise RuntimeError("socket connection broken")
+ bytes_recd = bytes_recd + len(chunk)
+
+
+ def process (self,is_server):
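+ # Replay the recorded flow: for each record, the side that originally sent
+ # the payload transmits it, while the peer blocks until the expected byte
+ # count has been received.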
+ pcapinfo=self.pcapr.info
+ for obj in pcapinfo:
+ if is_server:
+ if obj.is_init_dir:
+ self.rcv_info (obj.bytes);
+ else:
+ self.send_info (obj.data);
+ else:
+ if obj.is_init_dir:
+ self.send_info (obj.data);
+ else:
+ self.rcv_info (obj.bytes);
+
+ self.connection.close();
+ self.connection = None
+
+
+class CServer(CClientServerCommon) :
+ def __init__ (self,pcapr,port):
+ super(CServer, self).__init__()
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+
+ server_address = ('', port)
+ print 'starting up on %s port %s' % server_address
+ sock.bind(server_address)
+ sock.listen(1)
+
+ self.pcapr=pcapr; # save the info
+
+ while True:
+ # Wait for a connection
+ print 'waiting for a connection'
+ connection, client_address = sock.accept()
+
+ try:
+ print 'connection from', client_address
+ self.connection = connection;
+
+ self.process(True);
+ finally:
+ if self.connection :
+ self.connection.close()
+ self.connection = None
+
+
+class CClient(CClientServerCommon):
+ def __init__ (self,pcapr,ip,port):
+ super(CClient, self).__init__()
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ #sock.setsockopt(socket.SOL_SOCKET,socket.TCP_MAXSEG,300)
+ server_address = (ip, port)
+ print 'connecting to %s port %s' % server_address
+
+ sock.connect(server_address)
+ self.connection=sock;
+ self.pcapr=pcapr; # save the info
+
+ try:
+
+ self.process(False);
+ finally:
+ if self.connection :
+ self.connection.close()
+ self.connection = None
+
+
+def test_file_load ():
+ pcapr= CPcapFileReader("delay_10_http_browsing_0.pcap")
+ pcapr.check_tcp_flow ()
+ pcapr.check_one_flow ()
+ pcapr.dump_info();
+
+
+def process_options ():
+ parser = argparse.ArgumentParser(usage="""
+ sock [-s|-c] -f file_name
+
+ """,
+ description="offline process a pcap file",
+ epilog=" written by hhaim");
+
+ parser.add_argument("-f", dest="file_name",
+ help=""" the file name to process """,
+ required=True)
+
+ parser.add_argument('-c', action='store_true',
+ help='client side')
+
+ parser.add_argument('-s', action='store_true',
+ help='server side ')
+
+ parser.add_argument('--fix-time', action='store_true',
+ help='fix_time ')
+
+ parser.add_argument('--port', type=int, default=1000,
+ help='server_port ')
+
+ parser.add_argument('--ip', default='127.0.0.1',
+ help='socket ip ')
+
+ parser.add_argument('--debug', action='store_true',
+ help='debug mode')
+
+ parser.add_argument('--version', action='version',
+ version=H_SCRIPT_VER )
+
+
+
+ sock_driver.args = parser.parse_args();
+
+ if sock_driver.args.fix_time :
+ return ;
+ if (sock_driver.args.c ^ sock_driver.args.s) ==0:
+ raise Exception ("you must set either client or server mode");
+
+def load_pcap_file ():
+ pcapr= CPcapFileReader(sock_driver.args.file_name)
+ pcapr.check_tcp_flow ()
+ pcapr.check_one_flow ()
+ pcapr.dump_info();
+ return pcapr
+
+def run_client_side ():
+ pcapr=load_pcap_file ()
+ socket_client = CClient(pcapr,sock_driver.args.ip,sock_driver.args.port);
+
+
+def run_server_side ():
+ pcapr=load_pcap_file ()
+ socket_server = CServer(pcapr,sock_driver.args.port);
+
+
+class CPktWithTime:
+ def __init__ (self,pkt,ts):
+ self.pkt=pkt;
+ self.ts=ts
+ def __cmp__ (self,other):
+ return cmp(self.ts,other.ts);
+
+ def __repr__ (self):
+ s=" %x:%d" %(self.pkt,self.ts)
+ return s;
+
+
+class CPcapFixTime:
+ def __init__ (self,in_file_name,
+ out_file_name):
+ self.in_file_name = in_file_name;
+ self.out_file_name = out_file_name;
+ self.tuple=None;
+ self.swap=False;
+ self.rtt =0;
+ self.rtt_syn_ack_ack =0; # ack on the syn ack
+ self.pkts=[]
+
+ def calc_rtt (self):
+ f = open(self.in_file_name)
+ pcap = dpkt.pcap.Reader(f)
+ cnt=0;
+ first_time_set=False;
+ first_time=0;
+ last_syn_time=0;
+ rtt=0;
+ rtt_syn_ack_ack=0;
+
+ for ts, buf in pcap:
+ eth = dpkt.ethernet.Ethernet(buf)
+ ip = eth.data
+ tcp = ip.data
+
+ if first_time_set ==False:
+ first_time=ts;
+ first_time_set=True;
+ else:
+ rtt=ts-first_time;
+
+ if ip.p != 6 :
+ raise Exception("not a TCP flow ..");
+
+ if cnt==0 or cnt==1:
+ if (tcp.flags & dpkt.tcp.TH_SYN) != dpkt.tcp.TH_SYN :
+ raise Exception("first packet should be with SYN");
+
+ if cnt==1:
+ last_syn_time=ts;
+
+ if cnt==2:
+ rtt_syn_ack_ack=ts-last_syn_time;
+
+ if cnt > 1 :
+ break;
+ cnt = cnt +1;
+
+ f.close();
+ self.rtt_syn_ack_ack = rtt_syn_ack_ack;
+ return (rtt);
+
+ def is_client_side (self,swap):
+ if self.swap ==swap:
+ return (True);
+ else:
+ return (False);
+
+ def calc_timing (self):
+ self.rtt=self.calc_rtt ();
+
+ def fix_timing (self):
+
+ rtt=self.calc_rtt ();
+ print "RTT is %f msec" % (rtt*1000)
+
+ if (rtt/2)*1000<5:
+ raise Exception ("RTT is less than 5msec, you should replay it");
+
+ time_to_center=rtt/4;
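+ # Shift client packets forward and server packets backward by rtt/4 each,
+ # so the two directions meet "in the middle" of the measured RTT.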
+
+ f = open(self.in_file_name)
+ fo = open(self.out_file_name,"wb")
+ pcap = dpkt.pcap.Reader(f)
+ pcap_out = dpkt.pcap.Writer(fo)
+
+ for ts, buf in pcap:
+ eth = dpkt.ethernet.Ethernet(buf)
+ ip = eth.data
+ tcp = ip.data
+ pld = tcp.data;
+
+ pkt_swap=False
+ if nl(ip.src) > nl(ip.dst):
+ pkt_swap=True
+ tuple= (nl(ip.dst),nl(ip.src), tcp.dport ,tcp.sport,ip.p );
+ else:
+ tuple= (nl(ip.src),nl(ip.dst) ,tcp.sport,tcp.dport,ip.p );
+
+ if self.tuple == None:
+ self.swap=pkt_swap
+ self.tuple=tuple
+ else:
+ if self.tuple != tuple:
+ raise Exception("More than one flow - can't process this flow");
+
+ if self.is_client_side (pkt_swap):
+ self.pkts.append(CPktWithTime( buf,ts+time_to_center));
+ else:
+ self.pkts.append(CPktWithTime( buf,ts-time_to_center));
+
+ self.pkts.sort();
+ for pkt in self.pkts:
+ pcap_out.writepkt(pkt.pkt, pkt.ts)
+
+ f.close()
+ fo.close();
+
+
+
+def main ():
+ process_options ()
+
+ if sock_driver.args.fix_time:
+ pcap = CPcapFixTime(sock_driver.args.file_name ,sock_driver.args.file_name+".fix.pcap")
+ pcap.fix_timing ()
+
+ if sock_driver.args.c:
+ run_client_side ();
+
+ if sock_driver.args.s:
+ run_server_side ();
+
+
+files_to_convert=[
+'citrix_0',
+'exchange_0',
+'http_browsing_0',
+'http_get_0',
+'http_post_0',
+'https_0',
+'mail_pop_0',
+'mail_pop_1',
+'mail_pop_2',
+'oracle_0',
+'rtsp_0',
+'smtp_0',
+'smtp_1',
+'smtp_2'
+];
+
+
+#files_to_convert=[
+#'http_browsing_0',
+#];
+
+def test_pcap_files (): # renamed: test_pcap_file(file_name), defined below, would otherwise shadow this helper
+ for file in files_to_convert:
+ fn='tun_'+file+'.pcap';
+ fno='_tun_'+file+'_fixed.pcap';
+ print "convert ",fn
+ pcap = CPcapFixTime(fn,fno)
+ pcap.fix_timing ()
+
+
+
+
+class CPcapFileState:
+ def __init__ (self,file_name):
+ self.file_name = file_name
+ self.is_one_tcp_flow = False;
+ self.is_rtt_valid = False;
+ self.rtt=0;
+ self.rtt_ack=0;
+
+ def calc_stats (self):
+ file = CPcapFileReader(self.file_name);
+ try:
+ file.check_tcp_flow()
+ file.check_one_flow ()
+ self.is_one_tcp_flow = True;
+ except Exception :
+ self.is_one_tcp_flow = False;
+
+ print self.is_one_tcp_flow
+ if self.is_one_tcp_flow :
+ pcap= CPcapFixTime(self.file_name,"");
+ try:
+ pcap.calc_timing ()
+ print "rtt : %d %d \n" % (pcap.rtt*1000,pcap.rtt_syn_ack_ack*1000);
+ if (pcap.rtt*1000) > 10 and (pcap.rtt_syn_ack_ack*1000) >0.0 and (pcap.rtt_syn_ack_ack*1000) <2.0 :
+ self.is_rtt_valid = True
+ self.rtt = pcap.rtt*1000;
+ self.rtt_ack =pcap.rtt_syn_ack_ack*1000;
+ except Exception :
+ pass;
+
+
+def test_pcap_file (file_name):
+ p= CPcapFileState(file_name)
+ p.calc_stats();
+ if p.is_rtt_valid:
+ return True
+ else:
+ return False
+
+def iterate_tree_files (dirwalk,path_to):
+ fl=open("res.csv","w+");
+ cnt=0;
+ cnt_valid=0
+ for root, _, files in os.walk(dirwalk):
+ for f in files:
+ fullpath = os.path.join(root, f)
+ p= CPcapFileState(fullpath)
+ p.calc_stats();
+
+ valid=test_pcap_file (fullpath)
+ s='%s,%d,%d,%d \n' %(fullpath,p.is_rtt_valid,p.rtt,p.rtt_ack)
+ cnt = cnt +1 ;
+ if p.is_rtt_valid:
+ cnt_valid = cnt_valid +1;
+ diro=path_to+"/"+root;
+ fo = os.path.join(diro, f)
+ os.system("mkdir -p "+ diro);
+ pcap = CPcapFixTime(fullpath,fo)
+ pcap.fix_timing ()
+
+ print s
+ fl.write(s);
+    if cnt:
+        print "%d%% valid out of %d files \n" % (100*cnt_valid/cnt,cnt);
+ fl.close();
+
+path_code="/scratch/tftp/pFidelity/pcap_repository"
+
+iterate_tree_files (path_code,"output")
+#test_pcap_files ()
+#main();
+
diff --git a/scripts/automation/trex_control_plane/unit_tests/test.py b/scripts/automation/trex_control_plane/unit_tests/test.py
new file mode 100755
index 00000000..dac765d6
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/test.py
@@ -0,0 +1,36 @@
+from mininet.topo import Topo
+from mininet.link import TCLink
+from mininet.net import Mininet
+from mininet.node import CPULimitedHost
+from mininet.util import dumpNodeConnections
+from mininet.log import setLogLevel
+
+class MyTopo( Topo ):
+ "Simple topology example."
+
+ def __init__( self ):
+ "Create custom topo."
+
+ # Initialize topology
+ Topo.__init__( self )
+
+ # Add hosts and switches
+ leftHost = self.addHost( 'h1' )
+ rightHost = self.addHost( 'h2' )
+ Switch = self.addSwitch( 's1' )
+
+ # Add links
+ self.addLink( leftHost, Switch ,bw=10, delay='5ms')
+ self.addLink( Switch, rightHost )
+
+
+topos = { 'mytopo': ( lambda: MyTopo() ) }
+
+# 1. http server example
+#
+#mininet> h1 python -m SimpleHTTPServer 80 &
+#mininet> h2 wget -O - h1
+# 2. limit MSS example
+#decrease the MTU: ifconfig eth0 mtu 488
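+# 3. launching the topology (assuming Mininet is installed):
+#$ sudo mn --custom test.py --topo mytopo --link tc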
+
diff --git a/scripts/automation/trex_perf.py b/scripts/automation/trex_perf.py
new file mode 100755
index 00000000..5d11f549
--- /dev/null
+++ b/scripts/automation/trex_perf.py
@@ -0,0 +1,1265 @@
+#!/router/bin/python-2.7.4
+import h_avc
+
+
+from trex_control_plane.client.trex_client import CTRexClient
+import ConfigParser
+import threading
+import time,signal
+import argparse
+import sys
+import os
+import subprocess
+from time import sleep
+import textwrap
+import getpass
+import random
+import datetime
+from datetime import timedelta
+import traceback
+import math
+import re
+import termios
+import errno
+import smtplib
+from email.MIMEMultipart import MIMEMultipart
+from email.MIMEBase import MIMEBase
+from email.MIMEText import MIMEText
+from email.Utils import COMMASPACE, formatdate
+from email import Encoders
+from email.mime.image import MIMEImage
+
+from distutils.version import StrictVersion
+
+class TrexRunException(Exception):
+ def __init__ (self, reason, cmd = None, std_log = None, err_log = None):
+ self.reason = reason
+ self.std_log = std_log
+ self.err_log = err_log
+ # generate the error message
+ self.message = "\nSummary of error:\n\n %s\n" % (reason)
+
+ if std_log:
+ self.message += "\nConsole Log:\n\n %s\n" % (self.std_log)
+
+ if err_log:
+ self.message += "\nStd Error Log:\n\n %s\n" % (self.err_log)
+
+ def __str__(self):
+ return self.message
+
+
+############################# utility functions start #################################
+
+def verify_glibc_version ():
+ x = subprocess.check_output("/usr/bin/ldd --version", shell=True)
+ m = re.match("ldd \(GNU libc\) (.*)", x)
+ if not m:
+ raise Exception("Cannot determine LDD version")
+ current_version = m.group(1)
+
+ if StrictVersion(current_version) < StrictVersion("2.5"):
+        raise Exception("GNU ldd version required for graph plotting is at least 2.5, system is %s - please run the simple 'find' job instead" % current_version)
+
+def get_median(numericValues):
+ theValues = sorted(numericValues)
+ if len(theValues) % 2 == 1:
+ return theValues[(len(theValues)+1)/2-1]
+ else:
+ lower = theValues[len(theValues)/2-1]
+ upper = theValues[len(theValues)/2]
+ return (float(lower + upper)) / 2
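+# e.g. get_median([4, 1, 3, 2]) == 2.5 and get_median([3, 1, 2]) == 2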
+
+def list_to_clusters(l, n):
+ for i in xrange(0, len(l), n):
+ yield l[i:i+n]
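+# e.g. list(list_to_clusters(range(25), 10)) yields clusters of 10, 10 and 5 items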
+
+def cpu_histo_to_str (cpu_histo):
+ s = "\nCPU Samplings:\n\n"
+ period = 0
+
+ clusters = list(list_to_clusters(cpu_histo, 10))
+
+ for cluster in clusters:
+ period += 10
+ line = "%3s Seconds: [" % period
+
+ cluster += (10 - len(cluster)) * [None]
+
+ for x in cluster:
+ if (x != None):
+ line += "%5.1f%%, " % x
+ else:
+ line += " "
+
+ line = line[:-2] # trim the comma and space
+ line += " " # return the space
+
+ line += "]\n"
+
+ s += line
+
+ return s
+
+# terminal manager - a context manager that disables terminal echo while the tool runs
+class TermMng:
+ def __enter__(self):
+ self.fd = sys.stdin.fileno()
+ self.old = termios.tcgetattr(self.fd)
+
+ # copy new and remove echo
+ new = self.old[:]
+ new[3] &= ~termios.ECHO
+
+ self.tcsetattr_flags = termios.TCSAFLUSH
+ if hasattr(termios, 'TCSASOFT'):
+ self.tcsetattr_flags |= termios.TCSASOFT
+
+ termios.tcsetattr(self.fd, self.tcsetattr_flags, new)
+
+ def __exit__ (self ,type, value, traceback):
+ termios.tcsetattr(self.fd, self.tcsetattr_flags, self.old)
+
+############################# utility functions stop #################################
+
+def send_mail(send_from, send_to, subject, html_text, txt_attachments=[], images=[], server="localhost"):
+ assert isinstance(send_to, list)
+ assert isinstance(txt_attachments, list)
+ assert isinstance(images, list)
+
+ # create a multi part message
+ msg = MIMEMultipart()
+ msg['From'] = send_from
+ msg['To'] = COMMASPACE.join(send_to)
+ msg['Date'] = formatdate(localtime=True)
+ msg['Subject'] = subject
+ msg['Cc'] = "imarom@cisco.com"
+
+    # add all images to the text as embedded images
+ for image in images:
+ html_text += '<br><img src="cid:{0}"><br>'.format(image)
+ fp = open(image, 'rb')
+ image_object = MIMEImage(fp.read())
+ fp.close()
+ image_object.add_header('Content-ID', image)
+ msg.attach(image_object)
+
+ # attach the main report as embedded HTML
+ msg.attach( MIMEText(html_text, 'html') )
+
+    # attach regular txt files
+ for f in txt_attachments:
+ part = MIMEBase('application', "octet-stream")
+ part.set_payload( open(f,"rb").read() )
+ Encoders.encode_base64(part)
+ part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
+ msg.attach(part)
+
+ smtp = smtplib.SMTP(server)
+ smtp.sendmail(send_from, send_to, msg.as_string())
+ smtp.close()
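+
+# usage sketch (hypothetical addresses):
+#   send_mail('reporter@localhost', ['user@example.com'], 'TRex report', '<b>done</b>')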
+
+# convert HTML to an image using phantomjs - returns the image filename
+def html2image (html_filename, image_filename):
+ cmd = "./phantom/phantomjs ./phantom/rasterize.js {0} {1}".format(html_filename, image_filename)
+ subprocess.call(cmd, shell=True)
+
+ assert os.path.exists(image_filename)
+
+ return (image_filename)
+
+# convert results of run to a string
+def run_results_to_str (results, cond_type):
+ output = ""
+
+ output += "M: {0:<12.6f}\n".format(results['m'])
+ output += "BW: {0:<12,.2f} [Mbps]\n".format(results['tx'])
+ output += "PPS: {0:<12,} [pkts]\n".format(int(results['total-pps']))
+ output += "CPU: {0:.4f} %\n".format(results['cpu_util'])
+ output += "Maximum Latency: {0:<12,} [usec]\n".format(int(results['maximum-latency']))
+ output += "Average Latency: {0:<12,} [usec]\n".format(int(results['average-latency']))
+ output += "Pkt Drop: {0:<12,} [pkts]\n".format(int(results['total-pkt-drop']))
+ output += "Condition: {0:<12} ({1})\n".format("Passed" if check_condition(cond_type, results) else "Failed", cond_type_to_str(cond_type))
+
+ return (output)
+
+############################# classes #################################
+class ErrorHandler(object):
+ def __init__ (self, exception, traceback):
+
+ if isinstance(exception, TrexRunException):
+ logger.log("\n*** Script Terminated Due To Trex Failure")
+ logger.log("\n********************** TRex Error - Report **************************\n")
+ logger.log(str(exception))
+ logger.flush()
+
+ elif isinstance(exception, IOError):
+ logger.log("\n*** Script Terminated Due To IO Error")
+ logger.log("\nEither Router address or the Trex config is bad or some file is missing - check traceback below")
+ logger.log("\n********************** IO Error - Report **************************\n")
+ logger.log(str(exception))
+ logger.log(str(traceback))
+ logger.flush()
+
+
+ else:
+ logger.log("\n*** Script Terminated Due To Fatal Error")
+ logger.log("\n********************** Internal Error - Report **************************\n")
+ logger.log(str(exception) + "\n")
+ logger.log(str(traceback))
+ logger.flush()
+
+
+ # call the handler
+ g_kill_cause = "error"
+ os.kill(os.getpid(), signal.SIGUSR1)
+
+
+# simple HTML table
+class HTMLTable:
+ def __init__ (self):
+ self.table_rows = []
+
+ def add_row (self, param, value):
+ self.table_rows.append([param, value])
+
+ def generate_table(self):
+ txt = '<table class="myWideTable" style="width:50%">'
+ txt += "<tr><th>Parameter</th><th>Results</th></tr>"
+
+ for row in self.table_rows:
+ txt += "<tr><td>{0}</td><td>{1}</td></tr>".format(row[0], row[1])
+
+ txt += "</table>"
+
+ return txt
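+
+# usage sketch (illustrative values):
+#   table = HTMLTable()
+#   table.add_row("CPU Util.", "74.0%")
+#   html = table.generate_table()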
+
+# process results and dispatch them
+class JobReporter:
+ def __init__ (self, job_summary):
+ self.job_summary = job_summary
+ pass
+
+ def __plot_results_to_str (self, plot_results):
+ output = "\nPlotted Points: \n\n"
+ for p in plot_results:
+ output += "BW : {0:8.2f}, ".format(p['tx'])
+ output += "PPS : {0:8,} ".format(int(p['total-pps']))
+ output += "CPU : {0:8.2f} %, ".format(p['cpu_util'])
+ output += "Max Latency : {0:10,}, ".format(int(p['maximum-latency']))
+ output += "Avg Latency : {0:10,}, ".format(int(p['average-latency']))
+ output += "Pkt Drop : {0:12,}, \n".format(int(p['total-pkt-drop']))
+
+ return (output + "\n")
+
+ def __summary_to_string (self):
+ output = ""
+
+ output += "\n-== Job Completed Successfully ==-\n\n"
+ output += "Job Report:\n\n"
+ output += "Job Name: {0}\n".format(self.job_summary['job_name'])
+ output += "YAML file: {0}\n".format(self.job_summary['yaml'])
+ output += "Job Type: {0}\n".format(self.job_summary['job_type_str'])
+ output += "Condition: {0}\n".format(self.job_summary['cond_name'])
+ output += "Job Dir: {0}\n".format(self.job_summary['job_dir'])
+ output += "Job Log: {0}\n".format(self.job_summary['log_filename'])
+ output += "Email Report: {0}\n".format(self.job_summary['email'])
+ output += "Job Total Time: {0}\n\n".format(self.job_summary['total_run_time'])
+
+ if (self.job_summary.get('find_results') != None):
+ find_results = self.job_summary['find_results']
+ output += ("Maximum BW Point Details:\n\n")
+ output += run_results_to_str(find_results, self.job_summary['cond_type'])
+
+ if (self.job_summary.get('plot_results') != None):
+ plot_results = self.job_summary['plot_results']
+ output += self.__plot_results_to_str(plot_results)
+
+ return output
+
+
+ # simple print to screen of the job summary
+ def print_summary (self):
+ summary = self.__summary_to_string()
+ logger.log(summary)
+
+ def __generate_graph_report (self, plot_results):
+ graph_data = str( [ [x['tx'], x['cpu_util']/100, x['maximum-latency'], x['average-latency']] for x in plot_results ] )
+ table_data = str( [ [x['tx'], x['total-pps'], x['cpu_util']/100, x['norm_cpu'], x['maximum-latency'], x['average-latency'], x['total-pkt-drop']] for x in plot_results ] )
+
+ with open ("graph_template.html", "r") as myfile:
+ data = myfile.read()
+ data = data.replace("!@#$template_fill_head!@#$", self.job_summary['yaml'])
+ data = data.replace("!@#$template_fill_graph!@#$", graph_data[1:(len(graph_data) - 1)])
+ data = data.replace("!@#$template_fill_table!@#$", table_data[1:(len(table_data) - 1)])
+
+ # generate HTML report
+ graph_filename = self.job_summary['graph_filename']
+ text_file = open(graph_filename, "w")
+ text_file.write(str(data))
+ text_file.close()
+
+ return graph_filename
+
+ def __generate_body_report (self):
+ job_setup_table = HTMLTable()
+
+ job_setup_table.add_row("User Name", self.job_summary['user'])
+ job_setup_table.add_row("Job Name", self.job_summary['job_name'])
+ job_setup_table.add_row("Job Type", self.job_summary['job_type_str'])
+ job_setup_table.add_row("Test Condition", self.job_summary['cond_name'])
+ job_setup_table.add_row("YAML File", self.job_summary['yaml'])
+ job_setup_table.add_row("Job Total Time", "{0}".format(self.job_summary['total_run_time']))
+
+ job_summary_table = HTMLTable()
+
+ find_results = self.job_summary['find_results']
+
+ if find_results != None:
+ job_summary_table.add_row("Maximum Bandwidth", "{0:,.2f} [Mbps]".format(find_results['tx']))
+ job_summary_table.add_row("Maximum PPS", "{0:,} [pkts]".format(int(find_results['total-pps'])))
+ job_summary_table.add_row("CPU Util.", "{0:.2f}%".format(find_results['cpu_util']))
+ job_summary_table.add_row("Maximum Latency", "{0:,} [usec]".format(int(find_results['maximum-latency'])))
+ job_summary_table.add_row("Average Latency", "{0:,} [usec]".format(int(find_results['average-latency'])))
+ job_summary_table.add_row("Total Pkt Drop", "{0:,} [pkts]".format(int(find_results['total-pkt-drop'])))
+
+ with open ("report_template.html", "r") as myfile:
+ data = myfile.read()
+ data = data.replace("!@#$template_fill_job_setup_table!@#$", job_setup_table.generate_table())
+ data = data.replace("!@#$template_fill_job_summary_table!@#$", job_summary_table.generate_table())
+
+ return data
+
+ # create an email report and send to the user
+ def send_email_report (self):
+ images = []
+
+ logger.log("\nCreating E-Mail Report...\n")
+
+ # generate main report
+ report_str = self.__generate_body_report()
+
+ # generate graph report (if exists)
+ plot_results = self.job_summary['plot_results']
+ if plot_results:
+ logger.log("Generating Plot Results HTML ...\n")
+ graph_filename = self.__generate_graph_report(plot_results)
+ logger.log("Converting HTML to image ...\n")
+ images.append(html2image(graph_filename, graph_filename + ".png"))
+
+ else:
+ graph_filename = None
+
+ # create email
+ from_addr = 'TrexReporter@cisco.com'
+ to_addr = []
+ to_addr.append(self.job_summary['email'])
+ to_addr.append('imarom@cisco.com')
+
+ attachments = []
+ attachments.append(self.job_summary['log_filename'])
+ logger.log("Attaching log {0}...".format(self.job_summary['log_filename']))
+
+ if graph_filename:
+ attachments.append(graph_filename)
+ logger.log("Attaching plotting report {0}...".format(graph_filename))
+
+ logger.flush()
+
+ send_mail(from_addr, to_addr, "TRex Performance Report", report_str, attachments, images)
+ logger.log("\nE-mail sent successfully to: " + self.job_summary['email'])
+
+# dummy logger in case logger creation failed
+class DummyLogger(object):
+ def __init__(self):
+ pass
+
+ def log(self, text, force = False, newline = True):
+ text_out = (text + "\n") if newline else text
+ sys.stdout.write(text_out)
+
+ def console(self, text, force = False, newline = True):
+ self.log(text, force, newline)
+
+ def flush (self):
+ pass
+
+# logger object
+class MyLogger(object):
+
+ def __init__(self, log_filename):
+ # Store the original stdout and stderr
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ self.stdout_fd = os.dup(sys.stdout.fileno())
+ self.devnull = os.open('/dev/null', os.O_WRONLY)
+ self.log_file = open(log_filename, 'w')
+ self.silenced = False
+ self.pending_log_file_prints = 0
+ self.active = True
+
+ def shutdown (self):
+ self.active = False
+
+    def reactivate (self):
+ self.active = True
+
+ # silence all prints from stdout
+ def silence(self):
+ os.dup2(self.devnull, sys.stdout.fileno())
+ self.silenced = True
+
+ # restore stdout status
+ def restore(self):
+ sys.stdout.flush()
+ sys.stderr.flush()
+ # Restore normal stdout
+ os.dup2(self.stdout_fd, sys.stdout.fileno())
+ self.silenced = False
+
+ #print a message to the log (both stdout / log file)
+ def log(self, text, force = False, newline = True):
+ if not self.active:
+ return
+
+ self.log_file.write((text + "\n") if newline else text)
+ self.pending_log_file_prints += 1
+
+ if (self.pending_log_file_prints >= 10):
+ self.log_file.flush()
+ self.pending_log_file_prints = 0
+
+ self.console(text, force, newline)
+
+ # print a message to the console alone
+ def console(self, text, force = False, newline = True):
+ if not self.active:
+ return
+
+ _text = (text + "\n") if newline else text
+
+ # if we are silenced and not forced - go home
+ if self.silenced and not force:
+ return
+
+ if self.silenced:
+ os.write(self.stdout_fd, _text)
+ else:
+ sys.stdout.write(_text)
+
+ sys.stdout.flush()
+
+ # flush
+ def flush(self):
+ sys.stdout.flush()
+ self.log_file.flush()
+
+ def __del__(self):
+ os.close(self.devnull)
+ if self.log_file:
+ self.log_file.flush()
+ self.log_file.close()
+
+
+# simple progress bar
+class ProgressBar(threading.Thread):
+ def __init__(self, time_sec, router):
+ super(ProgressBar, self).__init__()
+ self.active = True
+ self.time_sec = time_sec + 15
+ self.router = router
+
+ def run (self):
+ global g_stop
+
+ col = 40
+ delta_for_sec = float(col) / self.time_sec
+
+ accu = 0.0
+
+ for i in range(self.time_sec):
+ if (self.active == False):
+ # print 100% - done
+ bar = "\r[" + ('#' * col) + "] {0:.2f} %".format(100)
+ logger.console(bar, force = True, newline = False)
+ break
+
+ if (g_stop == True):
+ break
+
+ sleep(1)
+ accu += delta_for_sec
+ bar = "\r[" + ('#' * int(accu)) + (' ' * (col - int(accu))) + "] {0:.2f} %".format( (accu/col) * 100 )
+ bar += " / Router CPU: {0:.2f} %".format(self.router.get_last_cpu_util())
+ logger.console(bar, force = True, newline = False)
+
+ logger.console("\r\n", force = True, newline = False)
+ logger.flush()
+
+ def stop (self):
+ self.active = False
+ self.join()
+
+# global vars
+
+g_stop = False
+logger = DummyLogger()
+
+# cleanup list is a list of callables to be run when Ctrl+C is caught
+cleanup_list = []
+
+################ threads ########################
+
+# sampler
+class Sample_Thread (threading.Thread):
+ def __init__(self, threadID, router):
+
+ threading.Thread.__init__(self)
+ self.threadID = threadID
+ self.router = router
+ self.stop = False
+
+ def run(self):
+ self.router.clear_sampling_stats()
+
+ try:
+ while (self.stop==False) and (g_stop==False):
+ self.router.sample_stats()
+ time.sleep(1);
+ except Exception as e:
+ ErrorHandler(e, traceback.format_exc())
+
+ def do_stop(self):
+ self.stop = True
+
+
+def general_cleanup_on_error ():
+ global g_stop
+ global cleanup_list
+
+ # mark all the threads to finish
+ g_stop = True;
+
+ # shutdown and flush the logger
+ logger.shutdown()
+ if logger:
+ logger.flush()
+
+ # execute the registered callables
+ for c in cleanup_list:
+ c()
+
+    # dummy wait for threads to finish (TODO: make this smarter)
+ time.sleep(2)
+ exit(-1)
+
+# just a dummy for preventing chain calls
+def signal_handler_dummy (sig_id, frame):
+ pass
+
+def error_signal_handler (sig_id, frame):
+ # make sure no chain of calls
+ signal.signal(signal.SIGUSR1, signal_handler_dummy)
+ signal.signal(signal.SIGINT, signal_handler_dummy)
+
+ general_cleanup_on_error()
+
+def int_signal_handler(sig_id, frame):
+ # make sure no chain of calls
+ signal.signal(signal.SIGINT, signal_handler_dummy)
+ signal.signal(signal.SIGUSR1, signal_handler_dummy)
+
+    logger.log("\n\nCaught Ctrl+C... Cleaning up!\n\n")
+
+ general_cleanup_on_error()
+
+
+# Trex with sampling
+class CTRexWithRouter:
+ def __init__(self, trex, trex_params):
+ self.trex = trex;
+ self.trex_params = trex_params
+
+ if self.trex_params['router_type'] == "ASR":
+ self.router = h_avc.ASR1k(self.trex_params['router_interface'], self.trex_params['router_port'], self.trex_params['router_password'])
+ elif self.trex_params['router_type'] == "ISR":
+ self.router = h_avc.ISR(self.trex_params['router_interface'], self.trex_params['router_port'], self.trex_params['router_password'])
+ else:
+ raise Exception("unknown router type in config file")
+
+ self.router.connect()
+
+ def get_router (self):
+ return self.router
+
+ def run(self, m, duration):
+
+ self.sample_thread = Sample_Thread(1, self.router)
+ self.sample_thread.start();
+
+ # launch trex
+ try:
+# trex_res = self.trex.run(m, duration);
+ self.trex.start_trex(c = self.trex_params['trex_cores'],
+ m = m,
+ d = duration,
+ f = self.trex_params['trex_yaml_file'],
+ nc = True,
+ l = self.trex_params['trex_latency'],
+ limit_ports = self.trex_params['trex_limit_ports'])
+ self.trex.sample_to_run_finish(20) # collect trex-sample every 20 seconds.
+ except Exception:
+ self.sample_thread.do_stop() # signal to stop
+            self.sample_thread.join()            # wait for it to really stop
+ raise
+
+ self.sample_thread.do_stop() # signal to stop
+        self.sample_thread.join()            # wait for it to really stop
+
+ self.res = self.trex.get_result_obj()
+
+ results = {}
+ results['status'] = True
+ results['trex_results'] = self.res
+ results['avc_results'] = self.router.get_stats()
+
+ return (results)
+ #return(trex_res.get_status() == STATUS_OK);
+
+# sanity checks to verify the run went well
+def sanity_test_run (trex_r, avc_r):
+ pass
+ #if (sum(avc_r['cpu_histo']) == 0):
+ #raise h_trex.TrexRunException("CPU utilization from router is zero, check connectivity")
+
+def _trex_run (job_summary, m, duration):
+
+ trex_thread = job_summary['trex_thread']
+
+ p = ProgressBar(duration, trex_thread.get_router())
+ p.start()
+
+ try:
+ results = trex_thread.run(m, duration)
+    except Exception:
+ p.stop()
+ raise
+
+ p.stop()
+
+ if (results == None):
+ raise Exception("Failed to run Trex")
+
+ # fetch values
+ trex_r = results['trex_results']
+ avc_r = results['avc_results']
+
+ sanity_test_run(trex_r, avc_r)
+
+ res_dict = {}
+
+ res_dict['m'] = m
+ total_tx_bps = trex_r.get_last_value("trex-global.data.m_tx_bps")
+    res_dict['tx'] = total_tx_bps / (1000 * 1000)   # convert bps to Mbps
+
+ res_dict['cpu_util'] = avc_r['cpu_util']
+
+ if int(res_dict['cpu_util']) == 0:
+ res_dict['norm_cpu']=1;
+ else:
+ res_dict['norm_cpu'] = (res_dict['tx'] / res_dict['cpu_util']) * 100
+
+ res_dict['maximum-latency'] = max ( trex_r.get_max_latency().values() ) #trex_r.res['maximum-latency']
+ res_dict['average-latency'] = trex_r.get_avg_latency()['all'] #trex_r.res['average-latency']
+
+ logger.log(cpu_histo_to_str(avc_r['cpu_histo']))
+
+ res_dict['total-pkt-drop'] = trex_r.get_total_drops()
+ res_dict['expected-bps'] = trex_r.get_expected_tx_rate()['m_tx_expected_bps']
+ res_dict['total-pps'] = get_median( trex_r.get_value_list("trex-global.data.m_tx_pps") )#trex_r.res['total-pps']
+ res_dict['m_total_pkt'] = trex_r.get_last_value("trex-global.data.m_total_tx_pkts")
+
+ res_dict['latency_condition'] = job_summary['trex_params']['trex_latency_condition']
+
+ return res_dict
+
+def trex_run (job_summary, m, duration):
+ res = _trex_run (job_summary, m, duration)
+ return res
+
+
+def m_to_mbps (job_summary, m):
+ return (m * job_summary['base_m_unit'])
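+
+# note: 'M' is the traffic multiplier - m == 1 transmits the YAML profile at
+# its base rate, so estimated bandwidth is m * base_m_unit, where base_m_unit
+# is the expected tx rate at m == 1 expressed in Mbps (see find_m_range below)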
+
+# find the correct range of M
+def find_m_range (job_summary):
+
+ trex = job_summary['trex']
+ trex_config = job_summary['trex_params']
+
+ # if not provided - guess the correct range of bandwidth
+ if not job_summary['m_range']:
+ m_range = [0.0, 0.0]
+ # 1 Mbps -> 1 Gbps
+ LOW_TX = 1.0 * 1000 * 1000
+ MAX_TX = 1.0 * 1000 * 1000 * 1000
+
+ # for 10g go to 10g
+ if trex_config['trex_machine_type'] == "10G":
+ MAX_TX *= 10
+
+ # dual injection can potentially reach X2 speed
+ if trex_config['trex_is_dual'] == True:
+ MAX_TX *= 2
+
+ else:
+ m_range = job_summary['m_range']
+ LOW_TX = m_range[0] * 1000 * 1000
+ MAX_TX = m_range[1] * 1000 * 1000
+
+
+ logger.log("\nSystem Settings - Min: {0:,} Mbps / Max: {1:,} Mbps".format(LOW_TX / (1000 * 1000), MAX_TX / (1000 * 1000)))
+ logger.log("\nTrying to get system minimum M and maximum M...")
+
+ res_dict = trex_run(job_summary, 1, 30)
+
+ # figure out low / high M
+ m_range[0] = (LOW_TX / res_dict['expected-bps']) * 1
+ m_range[1] = (MAX_TX / res_dict['expected-bps']) * 1
+
+
+ # return both the m_range and the base m unit for future calculation
+ results = {}
+ results['m_range'] = m_range
+ results['base_m_unit'] = res_dict['expected-bps'] /(1000 * 1000)
+
+ return (results)
+
+# calculate points between m_range[0] and m_range[1]
+def calculate_plot_points (job_summary, m_range, plot_count):
+
+ cond_type = job_summary['cond_type']
+ delta_m = (m_range[1] - m_range[0]) / plot_count
+
+ m_current = m_range[0]
+ m_end = m_range[1]
+
+ logger.log("\nStarting Plot Graph Task ...\n")
+ logger.log("Plotting Range Is From: {0:.2f} [Mbps] To: {1:.2f} [Mbps] Over {2} Points".format(m_to_mbps(job_summary, m_range[0]),
+ m_to_mbps(job_summary, m_range[1]),
+ plot_count))
+ logger.log("Delta Between Points is {0:.2f} [Mbps]".format(m_to_mbps(job_summary, delta_m)))
+ plot_points = []
+
+ duration = 180
+
+ iter = 1
+
+ trex = job_summary['trex']
+ while (iter <= plot_count):
+ logger.log("\nPlotting Point [{0}/{1}]:\n".format(iter, plot_count))
+ logger.log("Estimated BW ~= {0:,.2f} [Mbps]\n".format(m_to_mbps(job_summary, m_current)))
+ logger.log("M = {0:.6f}".format(m_current))
+ logger.log("Duration = {0} seconds\n".format(duration))
+
+ res_dict = trex_run(job_summary, m_current, duration)
+ print_trex_results(res_dict, cond_type)
+
+ plot_points.append(dict(res_dict))
+
+ m_current += delta_m
+ iter = iter + 1
+
+        # last point - make sure it's the maximum point
+ if (iter == plot_count):
+ m_current = m_range[1]
+
+ #print "waiting for system to stabilize ..."
+ #time.sleep(30);
+
+ return plot_points
+
+
+def cond_type_to_str (cond_type):
+ return "Max Latency" if cond_type=='latency' else "Pkt Drop"
+
+# success condition (latency or drop)
+def check_condition (cond_type, res_dict):
+ if cond_type == 'latency':
+ if res_dict['maximum-latency'] < res_dict['latency_condition']:
+ return True
+ else:
+ return False
+
+    # the drop condition is stricter - a failure requires both high latency (condition + 2000 usec) and more than 0.2% packet drop
+ elif cond_type == 'drop':
+ if (res_dict['maximum-latency'] > (res_dict['latency_condition']+2000) ) and (res_dict['total-pkt-drop'] > (0.002 * res_dict['m_total_pkt'])):
+ return False
+ else:
+ return True
+
+ assert(0)
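+
+# worked example for the drop condition (illustrative numbers): with
+# latency_condition = 4000 usec and 10M total packets, a run fails only when
+# maximum-latency exceeds 6000 usec AND total-pkt-drop exceeds 20,000 packets (0.2%)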
+
+def print_trex_results (res_dict, cond_type):
+ logger.log("\nRun Results:\n")
+ output = run_results_to_str(res_dict, cond_type)
+ logger.log(output)
+
+
+######################## describe a find job ########################
+class FindJob:
+ # init a job object with min / max
+ def __init__ (self, min, max, job_summary):
+ self.min = float(min)
+ self.max = float(max)
+ self.job_summary = job_summary
+ self.cond_type = job_summary['cond_type']
+ self.success_points = []
+ self.iter_num = 1
+ self.found = False
+ self.iter_duration = 300
+
+ def _distance (self):
+ return ( (self.max - self.min) / min(self.max, self.min) )
+
+ def time_to_end (self):
+ time_in_sec = (self.iters_to_end() * self.iter_duration)
+ return timedelta(seconds = time_in_sec)
+
+ def iters_to_end (self):
+        # estimate how many bisection steps remain until the 2% point
+ ma = self.max
+ mi = self.min
+ iter = 0
+
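+        # the halving below mirrors the bisection in _execute; which side of
+        # the interval moves depends on run results, so a coin flip is used
+        # here and the returned count is only an estimate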
+ while True:
+ dist = (ma - mi) / min(ma , mi)
+ if dist < 0.02:
+ break
+ if random.choice(["up", "down"]) == "down":
+ ma = (ma + mi) / 2
+ else:
+ mi = (ma + mi) / 2
+
+ iter += 1
+
+ return (iter)
+
+ def _cur (self):
+ return ( (self.min + self.max) / 2 )
+
+ def _add_success_point (self, res_dict):
+ self.success_points.append(res_dict.copy())
+
+ def _is_found (self):
+ return (self.found)
+
+ def _next_iter_duration (self):
+ return (self.iter_duration)
+
+ # execute iteration
+ def _execute (self):
+ # reset the found var before running
+ self.found = False
+
+ # run and print results
+ res_dict = trex_run(self.job_summary, self._cur(), self.iter_duration)
+
+ self.iter_num += 1
+ cur = self._cur()
+
+ if (self._distance() < 0.02):
+ if (check_condition(self.cond_type, res_dict)):
+ # distance < 2% and success - we are done
+ self.found = True
+ else:
+ # lower to 90% of current and retry
+ self.min = cur * 0.9
+ self.max = cur
+ else:
+            # still converging - move the bound that matches the result
+ if (check_condition(self.cond_type, res_dict)):
+ self.min = cur
+ else:
+ self.max = cur
+
+ if (check_condition(self.cond_type, res_dict)):
+ self._add_success_point(res_dict)
+
+ return res_dict
+
+    # find the maximum M that still satisfies the success condition
+ def find_max_m (self):
+
+ res_dict = {}
+ while not self._is_found():
+
+ logger.log("\n-> Starting Find Iteration #{0}\n".format(self.iter_num))
+ logger.log("Estimated BW ~= {0:,.2f} [Mbps]".format(m_to_mbps(self.job_summary, self._cur())))
+ logger.log("M = {0:.6f}".format(self._cur()))
+ logger.log("Duration = {0} seconds".format(self._next_iter_duration()))
+ logger.log("Current BW Range = {0:,.2f} [Mbps] / {1:,.2f} [Mbps]".format(m_to_mbps(self.job_summary, self.min), m_to_mbps(self.job_summary, self.max)))
+ logger.log("Est. Iterations Left = {0} Iterations".format(self.iters_to_end()))
+ logger.log("Est. Time Left = {0}\n".format(self.time_to_end()))
+
+ res_dict = self._execute()
+
+ print_trex_results(res_dict, self.cond_type)
+
+ find_results = res_dict.copy()
+ find_results['max_m'] = self._cur()
+ return (find_results)
+
+######################## describe a plot job ########################
+class PlotJob:
+ def __init__(self, findjob):
+ self.job_summary = findjob.job_summary
+
+ self.plot_points = list(findjob.success_points)
+ self.plot_points.sort(key = lambda item:item['tx'])
+
+ def plot (self, duration = 300):
+ return self.plot_points
+
+ # add points if needed
+ #iter = 0
+ #for point in self.success_points:
+ #iter += 1
+ #logger.log("\nPlotting Point [{0}/{1}]:\n".format(iter, self.plot_count))
+ #logger.log("Estimated BW ~= {0:,.2f} [Mbps]\n".format(m_to_mbps(self.job_summary, point['m'])))
+ #logger.log("M = {0:.6f}".format(point['m']))
+ #logger.log("Duration = {0} seconds\n".format(duration))
+
+ #res_dict = trex_run(self.job_summary, point['m'], duration)
+ #print_trex_results(res_dict, self.job_summary['cond_type'])
+
+ #self.plot_points.append(dict(res_dict))
+
+ #self.plot_points = list(self.success_points)
+
+ #print self.plot_points
+ #self.plot_points.sort(key = lambda item:item['m'])
+ #print self.plot_points
+
+ #return self.plot_points
+
+
+def generate_job_id ():
+ return (str(int(random.getrandbits(32))))
+
+def print_header ():
+    logger.log("--== T-Rex Performance Tool v1.0 (2014) ==--")
+
+# print startup summary
+def log_startup_summary (job_summary):
+
+ trex = job_summary['trex']
+ trex_config = job_summary['trex_params']
+
+ logger.log("\nWork Request Details:\n")
+ logger.log("Setup Details:\n")
+ logger.log("T-Rex Config File: {0}".format(job_summary['config_file']))
+ logger.log("Machine Name: {0}".format(trex_config['trex_name']))
+ logger.log("T-Rex Type: {0}".format(trex_config['trex_machine_type']))
+ logger.log("T-Rex Dual Int. Tx: {0}".format(trex_config['trex_is_dual']))
+ logger.log("Router Interface: {0}".format(trex_config['router_interface']))
+
+ logger.log("\nJob Details:\n")
+ logger.log("Job Name: {0}".format(job_summary['job_name']))
+ logger.log("YAML file: {0}".format(job_summary['yaml']))
+ logger.log("Job Type: {0}".format(job_summary['job_type_str']))
+ logger.log("Condition Type: {0}".format(job_summary['cond_name']))
+ logger.log("Job Log: {0}".format(job_summary['log_filename']))
+ logger.log("Email Report: {0}".format(job_summary['email']))
+
+# logger.log("\nTrex Command Used:\n{0}".format(trex.build_cmd(1, 10)))
+
+def load_trex_config_params (filename, yaml_file):
+ config = {}
+
+ parser = ConfigParser.ConfigParser()
+
+ try:
+ parser.read(filename)
+
+ config['trex_name'] = parser.get("trex", "machine_name")
+ config['trex_port'] = parser.get("trex", "machine_port")
+        config['trex_history_size'] = parser.getint("trex", "history_size")
+
+ config['trex_latency_condition'] = parser.getint("trex", "latency_condition")
+ config['trex_yaml_file'] = yaml_file
+
+ # support legacy data
+ config['trex_latency'] = parser.getint("trex", "latency")
+ config['trex_limit_ports'] = parser.getint("trex", "limit_ports")
+ config['trex_cores'] = parser.getint("trex", "cores")
+ config['trex_machine_type'] = parser.get("trex", "machine_type")
+ config['trex_is_dual'] = parser.getboolean("trex", "is_dual")
+
+ # optional Trex parameters
+ if parser.has_option("trex", "config_file"):
+ config['trex_config_file'] = parser.get("trex", "config_file")
+ else:
+ config['trex_config_file'] = None
+
+ if parser.has_option("trex", "misc_params"):
+ config['trex_misc_params'] = parser.get("trex", "misc_params")
+ else:
+ config['trex_misc_params'] = None
+
+ # router section
+
+ if parser.has_option("router", "port"):
+ config['router_port'] = parser.get("router", "port")
+ else:
+            # default telnet port
+ config['router_port'] = 23
+
+ config['router_interface'] = parser.get("router", "interface")
+ config['router_password'] = parser.get("router", "password")
+ config['router_type'] = parser.get("router", "type")
+
+ except Exception as inst:
+ raise TrexRunException("\nBad configuration file: '{0}'\n\n{1}".format(filename, inst))
+
+ return config
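+
+# expected configuration file layout (illustrative values):
+#   [trex]
+#   machine_name = trex-hostname
+#   machine_port = 8090
+#   history_size = 100
+#   latency_condition = 2500
+#   latency = 1000
+#   limit_ports = 4
+#   cores = 2
+#   machine_type = 10G
+#   is_dual = True
+#
+#   [router]
+#   interface = 10.0.0.1
+#   password = password
+#   type = ASR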
+
+def prepare_for_run (job_summary):
+ global logger
+
+ # generate unique id
+ job_summary['job_id'] = generate_job_id()
+ job_summary['job_dir'] = "trex_job_{0}".format(job_summary['job_id'])
+
+ job_summary['start_time'] = datetime.datetime.now()
+
+ if not job_summary['email']:
+ job_summary['user'] = getpass.getuser()
+ job_summary['email'] = "{0}@cisco.com".format(job_summary['user'])
+
+ # create dir for reports
+ try:
+ job_summary['job_dir'] = os.path.abspath( os.path.join(os.getcwd(), 'logs', job_summary['job_dir']) )
+ print job_summary['job_dir']
+ os.makedirs( job_summary['job_dir'] )
+
+ except OSError as err:
+ if err.errno == errno.EACCES:
+            # fall back - try creating the dir under /tmp
+ job_summary['job_dir'] = os.path.join("/tmp/", "trex_job_{0}".format(job_summary['job_id']) )
+ os.makedirs(job_summary['job_dir'])
+
+ job_summary['log_filename'] = os.path.join(job_summary['job_dir'], "trex_log_{0}.txt".format(job_summary['job_id']))
+ job_summary['graph_filename'] = os.path.join(job_summary['job_dir'], "trex_graph_{0}.html".format(job_summary['job_id']))
+
+ # init logger
+ logger = MyLogger(job_summary['log_filename'])
+
+ # mark those as not populated yet
+ job_summary['find_results'] = None
+ job_summary['plot_results'] = None
+
+ # create trex client instance
+ trex_params = load_trex_config_params(job_summary['config_file'],job_summary['yaml'])
+ trex = CTRexClient(trex_host = trex_params['trex_name'],
+                       max_history_size = trex_params['trex_history_size'],
+ trex_daemon_port = trex_params['trex_port'])
+
+ job_summary['trex'] = trex
+ job_summary['trex_params'] = trex_params
+
+ # create trex task thread
+ job_summary['trex_thread'] = CTRexWithRouter(trex, trex_params);
+
+ # in case of an error we need to call the remote cleanup
+ cleanup_list.append(trex.stop_trex)
+
+ # signal handler
+ signal.signal(signal.SIGINT, int_signal_handler)
+ signal.signal(signal.SIGUSR1, error_signal_handler)
+
+
+def after_run (job_summary):
+
+ job_summary['total_run_time'] = datetime.datetime.now() - job_summary['start_time']
+ reporter = JobReporter(job_summary)
+ reporter.print_summary()
+ reporter.send_email_report()
+
+def launch (job_summary):
+
+ prepare_for_run(job_summary)
+
+ print_header()
+
+ log_startup_summary(job_summary)
+
+    # find the M range (this also computes the base M unit)
+ range_results = find_m_range(job_summary)
+
+ job_summary['base_m_unit'] = range_results['base_m_unit']
+
+ if job_summary['m_range']:
+ m_range = job_summary['m_range']
+ else:
+ m_range = range_results['m_range']
+
+ logger.log("\nJob Bandwidth Working Range:\n")
+ logger.log("Min M = {0:.6f} / {1:,.2f} [Mbps] \nMax M = {2:.6f} / {3:,.2f} [Mbps]".format(m_range[0], m_to_mbps(job_summary, m_range[0]), m_range[1], m_to_mbps(job_summary, m_range[1])))
+
+    # run the find job
+ findjob = FindJob(m_range[0], m_range[1], job_summary)
+ job_summary['find_results'] = findjob.find_max_m()
+
+ if job_summary['job_type'] == "all":
+ # plot points to graph
+ plotjob = PlotJob(findjob)
+ job_summary['plot_results'] = plotjob.plot()
+
+ after_run(job_summary)
+
+
+# populate the fields for run
+def populate_fields (job_summary, args):
+ job_summary['config_file'] = args.config_file
+ job_summary['job_type'] = args.job
+ job_summary['cond_type'] = args.cond_type
+ job_summary['yaml'] = args.yaml
+
+ if args.n:
+ job_summary['job_name'] = args.n
+ else:
+ job_summary['job_name'] = "Nameless"
+
+    # did the user provide an email?
+ if args.e:
+ job_summary['email'] = args.e
+ else:
+ job_summary['email'] = None
+
+    # did the user provide a range?
+ if args.m:
+ job_summary['m_range'] = args.m
+ else:
+ job_summary['m_range'] = None
+
+    # pretty names for display
+ job_summary['cond_name'] = 'Drop Pkt' if (args.cond_type == 'drop') else 'High Latency'
+
+ if args.job == "find":
+ job_summary['job_type_str'] = "Find Max BW"
+ elif args.job == "plot":
+ job_summary['job_type_str'] = "Plot Graph"
+ else:
+ job_summary['job_type_str'] = "Find Max BW & Plot Graph"
+
+ if args.job != "find":
+ verify_glibc_version()
+
+
+
+# verify file exists for argparse
+def is_valid_file (parser, err_msg, filename):
+ if not os.path.exists(filename):
+ parser.error("{0}: '{1}'".format(err_msg, filename))
+ else:
+        return (filename) # return the validated filename
+
+def entry ():
+    global g_stop
+
+ job_summary = {}
+
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument("-n", help="Job Name",
+ type = str)
+
+    parser.add_argument("-m", help="M Range [default: auto calculation]",
+ nargs = 2,
+ type = float)
+
+ parser.add_argument("-e", help="E-Mail for report [default: whoami@cisco.com]",
+ type = str)
+
+ parser.add_argument("-c", "--cfg", dest = "config_file", required = True,
+ help = "Configuration File For Trex/Router Pair",
+                        type = lambda x: is_valid_file(parser, "config file does not exist",x))
+
+ parser.add_argument("job", help = "Job type",
+ type = str,
+ choices = ['find', 'plot', 'all'])
+
+ parser.add_argument("cond_type", help="type of failure condition",
+ type = str,
+ choices = ['latency','drop'])
+
+ parser.add_argument("-f", "--yaml", dest = "yaml", required = True,
+ help="YAML file to use", type = str)
+
+ args = parser.parse_args()
+
+ with TermMng():
+ try:
+ populate_fields(job_summary, args)
+ launch(job_summary)
+
+ except Exception as e:
+ ErrorHandler(e, traceback.format_exc())
+
+ logger.log("\nReport bugs to imarom@cisco.com\n")
+ g_stop = True
+
+def dummy_test ():
+ job_summary = {}
+ find_results = {}
+
+ job_summary['config_file'] = 'config/trex01-1g.cfg'
+ job_summary['yaml'] = 'dummy.yaml'
+ job_summary['email'] = 'imarom@cisco.com'
+ job_summary['job_name'] = 'test'
+ job_summary['job_type_str'] = 'test'
+
+ prepare_for_run(job_summary)
+
+ time.sleep(2)
+ job_summary['yaml'] = 'dummy.yaml'
+ job_summary['job_type'] = 'find'
+ job_summary['cond_name'] = 'Drop'
+ job_summary['cond_type'] = 'drop'
+ job_summary['job_id']= 94817231
+
+
+ find_results['tx'] = 210.23
+ find_results['m'] = 1.292812
+ find_results['total-pps'] = 1000
+ find_results['cpu_util'] = 74.0
+ find_results['maximum-latency'] = 4892
+ find_results['average-latency'] = 201
+ find_results['total-pkt-drop'] = 0
+
+
+ findjob = FindJob(1,1,job_summary)
+ plotjob = PlotJob(findjob)
+ job_summary['plot_results'] = plotjob.plot()
+
+ job_summary['find_results'] = find_results
+ job_summary['plot_results'] = [{'cpu_util': 2.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 999980.0, 'average-latency': 85.0, 'tx': 0.00207*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 221.0},
+ {'cpu_util': 8.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 48500000.0, 'average-latency': 87.0, 'tx': 0.05005*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 279.0},
+ {'cpu_util': 14.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 95990000.0, 'average-latency': 92.0, 'tx': 0.09806*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 273.0},
+ {'cpu_util': 20.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 143490000.0, 'average-latency': 95.0, 'tx': 0.14613*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 271.0},
+ {'cpu_util': 25.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 190980000.0, 'average-latency': 97.0, 'tx': 0.1933*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 302.0},
+ {'cpu_util': 31.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 238480000.0, 'average-latency': 98.0, 'tx': 0.24213*1000, 'total-pkt-drop': 1.0, 'maximum-latency': 292.0},
+ {'cpu_util': 37.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 285970000.0, 'average-latency': 99.0, 'tx': 0.29011*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 344.0},
+ {'cpu_util': 43.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 333470000.0, 'average-latency': 100.0, 'tx': 0.3382*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 351.0},
+ {'cpu_util': 48.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 380970000.0, 'average-latency': 100.0, 'tx': 0.38595*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 342.0},
+ {'cpu_util': 54.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 428460000.0, 'average-latency': 19852.0, 'tx': 0.43438*1000, 'total-pkt-drop': 1826229.0, 'maximum-latency': 25344.0}]
+
+
+
+ after_run(job_summary)
+
+if __name__ == "__main__":
+ entry ()
+
diff --git a/scripts/automation/wkhtmltopdf-amd64 b/scripts/automation/wkhtmltopdf-amd64
new file mode 100755
index 00000000..a173d2cf
--- /dev/null
+++ b/scripts/automation/wkhtmltopdf-amd64
Binary files differ
diff --git a/scripts/avl/_tun_citrix_0_fixed.pcap b/scripts/avl/_tun_citrix_0_fixed.pcap
new file mode 100755
index 00000000..be1664f8
--- /dev/null
+++ b/scripts/avl/_tun_citrix_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_exchange_0_fixed.pcap b/scripts/avl/_tun_exchange_0_fixed.pcap
new file mode 100755
index 00000000..e94d88ba
--- /dev/null
+++ b/scripts/avl/_tun_exchange_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_http_browsing_0_fixed.pcap b/scripts/avl/_tun_http_browsing_0_fixed.pcap
new file mode 100755
index 00000000..4cbeba57
--- /dev/null
+++ b/scripts/avl/_tun_http_browsing_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_http_get_0_fixed.pcap b/scripts/avl/_tun_http_get_0_fixed.pcap
new file mode 100755
index 00000000..ca959c5b
--- /dev/null
+++ b/scripts/avl/_tun_http_get_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_http_post_0_fixed.pcap b/scripts/avl/_tun_http_post_0_fixed.pcap
new file mode 100755
index 00000000..aa733b27
--- /dev/null
+++ b/scripts/avl/_tun_http_post_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_https_0_fixed.pcap b/scripts/avl/_tun_https_0_fixed.pcap
new file mode 100755
index 00000000..b3b48876
--- /dev/null
+++ b/scripts/avl/_tun_https_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_mail_pop_0_fixed.pcap b/scripts/avl/_tun_mail_pop_0_fixed.pcap
new file mode 100755
index 00000000..9a0dfef0
--- /dev/null
+++ b/scripts/avl/_tun_mail_pop_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_mail_pop_1_fixed.pcap b/scripts/avl/_tun_mail_pop_1_fixed.pcap
new file mode 100755
index 00000000..80001c6e
--- /dev/null
+++ b/scripts/avl/_tun_mail_pop_1_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_mail_pop_2_fixed.pcap b/scripts/avl/_tun_mail_pop_2_fixed.pcap
new file mode 100755
index 00000000..b0cf054f
--- /dev/null
+++ b/scripts/avl/_tun_mail_pop_2_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_oracle_0_fixed.pcap b/scripts/avl/_tun_oracle_0_fixed.pcap
new file mode 100755
index 00000000..b990a1e8
--- /dev/null
+++ b/scripts/avl/_tun_oracle_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_rtsp_0_fixed.pcap b/scripts/avl/_tun_rtsp_0_fixed.pcap
new file mode 100755
index 00000000..ae4e69bd
--- /dev/null
+++ b/scripts/avl/_tun_rtsp_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_smtp_0_fixed.pcap b/scripts/avl/_tun_smtp_0_fixed.pcap
new file mode 100755
index 00000000..96015bbc
--- /dev/null
+++ b/scripts/avl/_tun_smtp_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_smtp_1_fixed.pcap b/scripts/avl/_tun_smtp_1_fixed.pcap
new file mode 100755
index 00000000..9478b0b5
--- /dev/null
+++ b/scripts/avl/_tun_smtp_1_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_smtp_2_fixed.pcap b/scripts/avl/_tun_smtp_2_fixed.pcap
new file mode 100755
index 00000000..40ea1c6e
--- /dev/null
+++ b/scripts/avl/_tun_smtp_2_fixed.pcap
Binary files differ
diff --git a/scripts/avl/avl.csv b/scripts/avl/avl.csv
new file mode 100755
index 00000000..b09f6f1d
--- /dev/null
+++ b/scripts/avl/avl.csv
@@ -0,0 +1,26 @@
+id, name , cps , f-pkts , f-bytes , Mb/sec , MB/sec, c-flows , PPS , total-Mbytes-duration , errors ,flows
+ 00, avl/http_get.pcap , 102 , 44 , 38182 , 29.71 , 3.71 , 45 , 4488 , 0 , 0 , 1
+ 01, avl/http_post.pcap , 102 , 54 , 48900 , 38.05 , 4.76 , 55 , 5508 , 0 , 0 , 1
+ 02, avl/https.pcap , 33 , 96 , 92387 , 23.26 , 2.91 , 32 , 3168 , 0 , 0 , 1
+ 03, avl/http_browsing.pcap , 179 , 37 , 34721 , 47.42 , 5.93 , 66 , 6623 , 1 , 0 , 1
+ 04, avl/exchange.pcap , 64 , 43 , 10192 , 4.98 , 0.62 , 28 , 2752 , 0 , 0 , 1
+ 05, avl/email_pop1.pcap , 1 , 20 , 5763 , 0.05 , 0.01 , 0 , 24 , 0 , 0 , 1
+ 06, avl/email_pop2.pcap , 1 , 114 , 102429 , 0.94 , 0.12 , 1 , 137 , 0 , 0 , 1
+ 07, avl/email_pop4_29.pcap , 1 , 30 , 15870 , 0.15 , 0.02 , 0 , 36 , 0 , 0 , 1
+ 08, avl/oracle.pcap , 20 , 302 , 58547 , 8.93 , 1.12 , 60 , 6040 , 0 , 0 , 1
+ 09, avl/rtp_160_0.pcap , 1 , 84 , 97484 , 0.52 , 0.07 , 1 , 59 , 0 , 0 , 1
+ 10, avl/rtp_160_1.pcap , 1 , 1244 , 1138480 , 6.08 , 0.76 , 9 , 871 , 0 , 0 , 1
+ 11, avl/rtp_250k_1_0.pcap , 0 , 108 , 147128 , 0.56 , 0.07 , 1 , 54 , 0 , 0 , 1
+ 12, avl/rtp_250k_2_0.pcap , 0 , 1920 , 1767018 , 6.74 , 0.84 , 10 , 960 , 0 , 0 , 1
+ 13, avl/smtp_1.pcap , 2 , 22 , 5794 , 0.08 , 0.01 , 0 , 41 , 0 , 0 , 1
+ 14, avl/smtp_2.pcap , 2 , 35 , 18624 , 0.26 , 0.03 , 1 , 65 , 0 , 0 , 1
+ 15, avl/smtp_3.pcap , 2 , 110 , 97424 , 1.38 , 0.17 , 2 , 204 , 0 , 0 , 1
+ 16, avl/video_call_0.pcap , 3 , 2325 , 2551177 , 58.39 , 7.30 , 70 , 6975 , 1 , 0 , 1
+ 17, avl/video_rtp_1588_0.pcap , 7 , 1558 , 123704 , 6.98 , 0.87 , 115 , 11529 , 0 , 0 , 1
+ 18, avl/citrix_0.pcap , 11 , 272 , 86729 , 7.28 , 0.91 , 30 , 2992 , 0 , 0 , 1
+ 19, avl/dns_0.pcap , 498 , 2 , 178 , 0.68 , 0.08 , 10 , 996 , 0 , 0 , 1
+ 20, avl/sip_0.pcap , 7 , 7 , 2723 , 0.15 , 0.02 , 1 , 52 , 0 , 0 , 1
+ 21, avl/rtsp_0.pcap , 1 , 20 , 4038 , 0.04 , 0.00 , 0 , 24 , 0 , 0 , 1
+
+ 00, sum , 1040 , 8447 , 6447492 , 242.63 , 30.33 , 536 , 53596 , 3 , 0 , 22
+
diff --git a/scripts/avl/avl_delay_10.csv b/scripts/avl/avl_delay_10.csv
new file mode 100755
index 00000000..bd120cd3
--- /dev/null
+++ b/scripts/avl/avl_delay_10.csv
@@ -0,0 +1,27 @@
+ id, name , cps , f-pkts , f-bytes , Mb/sec , MB/sec, c-flows , PPS , total-Mbytes-duration , errors ,flows
+ 00, avl/delay_10_http_get_0.pcap , 102 , 44 , 38006 , 29.58 , 3.70 , 45 , 4488 , 0 , 0 , 1
+ 01, avl/delay_10_http_post_0.pcap , 102 , 54 , 48684 , 37.89 , 4.74 , 55 , 5508 , 0 , 0 , 1
+ 02, avl/delay_10_https_0.pcap , 33 , 96 , 92003 , 23.16 , 2.90 , 32 , 3168 , 0 , 0 , 1
+ 03, avl/delay_10_http_browsing_0.pcap , 179 , 37 , 34573 , 47.22 , 5.90 , 66 , 6623 , 1 , 0 , 1
+ 04, avl/delay_10_exchange_0.pcap , 64 , 43 , 10020 , 4.89 , 0.61 , 28 , 2752 , 0 , 0 , 1
+ 05, avl/delay_10_mail_pop_0.pcap , 1 , 20 , 5683 , 0.05 , 0.01 , 0 , 24 , 0 , 0 , 1
+ 06, avl/delay_10_mail_pop_1.pcap , 1 , 114 , 101973 , 0.93 , 0.12 , 1 , 137 , 0 , 0 , 1
+ 07, avl/delay_10_mail_pop_2.pcap , 1 , 30 , 15750 , 0.14 , 0.02 , 0 , 36 , 0 , 0 , 1
+ 08, avl/delay_10_oracle_0.pcap , 20 , 302 , 57339 , 8.75 , 1.09 , 60 , 6040 , 0 , 0 , 1
+ 09, avl/delay_10_rtp_160k_0.pcap , 1 , 85 , 98406 , 0.53 , 0.07 , 1 , 59 , 0 , 0 , 1
+ 10, avl/delay_10_rtp_160k_1.pcap , 1 , 1246 , 1135325 , 6.06 , 0.76 , 9 , 872 , 0 , 0 , 1
+ 11, avl/delay_10_rtp_250k_0_0.pcap , 0 , 126 , 166496 , 0.64 , 0.08 , 1 , 63 , 0 , 0 , 1
+ 12, avl/delay_10_rtp_250k_1_0.pcap , 0 , 1920 , 1759338 , 6.71 , 0.84 , 10 , 960 , 0 , 0 , 1
+ 13, avl/delay_10_smtp_0.pcap , 2 , 22 , 5706 , 0.08 , 0.01 , 0 , 41 , 0 , 0 , 1
+ 14, avl/delay_10_smtp_1.pcap , 2 , 35 , 18484 , 0.26 , 0.03 , 1 , 65 , 0 , 0 , 1
+ 15, avl/delay_10_smtp_2.pcap , 2 , 110 , 96984 , 1.37 , 0.17 , 2 , 204 , 0 , 0 , 1
+ 16, avl/delay_10_video_call_0.pcap , 3 , 2325 , 2541877 , 58.18 , 7.27 , 70 , 6975 , 1 , 0 , 1
+ 17, avl/delay_10_video_call_rtp_0.pcap , 7 , 1408 , 106412 , 6.01 , 0.75 , 104 , 10419 , 0 , 0 , 1
+ 18, avl/delay_10_citrix_0.pcap , 11 , 272 , 85641 , 7.19 , 0.90 , 30 , 2992 , 0 , 0 , 1
+ 19, avl/delay_10_dns_0.pcap , 498 , 2 , 170 , 0.65 , 0.08 , 10 , 996 , 0 , 0 , 1
+ 20, avl/delay_10_sip_0.pcap , 7 , 7 , 2695 , 0.15 , 0.02 , 1 , 52 , 0 , 0 , 1
+ 21, avl/delay_10_rtsp_0.pcap , 1 , 23 , 4382 , 0.04 , 0.01 , 0 , 28 , 0 , 0 , 1
+
+ 00, sum , 1040 , 8321 , 6425947 , 240.47 , 30.06 , 525 , 52501 , 3 , 0 , 22
+ create thread 0 nodes-0 socket: 0
+
diff --git a/scripts/avl/citrix_0.pcap b/scripts/avl/citrix_0.pcap
new file mode 100755
index 00000000..72ecd43b
--- /dev/null
+++ b/scripts/avl/citrix_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_citrix_0.pcap b/scripts/avl/delay_10_citrix_0.pcap
new file mode 100755
index 00000000..e783c9b8
--- /dev/null
+++ b/scripts/avl/delay_10_citrix_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_dns_0.pcap b/scripts/avl/delay_10_dns_0.pcap
new file mode 100755
index 00000000..bfc2c66c
--- /dev/null
+++ b/scripts/avl/delay_10_dns_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_exchange_0.pcap b/scripts/avl/delay_10_exchange_0.pcap
new file mode 100755
index 00000000..439a65d2
--- /dev/null
+++ b/scripts/avl/delay_10_exchange_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_http_browsing_0.pcap b/scripts/avl/delay_10_http_browsing_0.pcap
new file mode 100755
index 00000000..3e5768f3
--- /dev/null
+++ b/scripts/avl/delay_10_http_browsing_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_http_get_0.pcap b/scripts/avl/delay_10_http_get_0.pcap
new file mode 100755
index 00000000..fd9ed76c
--- /dev/null
+++ b/scripts/avl/delay_10_http_get_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_http_post_0.pcap b/scripts/avl/delay_10_http_post_0.pcap
new file mode 100755
index 00000000..a5511793
--- /dev/null
+++ b/scripts/avl/delay_10_http_post_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_https_0.pcap b/scripts/avl/delay_10_https_0.pcap
new file mode 100755
index 00000000..e56f52ce
--- /dev/null
+++ b/scripts/avl/delay_10_https_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_mail_pop_0.pcap b/scripts/avl/delay_10_mail_pop_0.pcap
new file mode 100755
index 00000000..3616f47b
--- /dev/null
+++ b/scripts/avl/delay_10_mail_pop_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_mail_pop_1.pcap b/scripts/avl/delay_10_mail_pop_1.pcap
new file mode 100755
index 00000000..0f28443b
--- /dev/null
+++ b/scripts/avl/delay_10_mail_pop_1.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_mail_pop_2.pcap b/scripts/avl/delay_10_mail_pop_2.pcap
new file mode 100755
index 00000000..d6f75cb6
--- /dev/null
+++ b/scripts/avl/delay_10_mail_pop_2.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_oracle_0.pcap b/scripts/avl/delay_10_oracle_0.pcap
new file mode 100755
index 00000000..2610fa59
--- /dev/null
+++ b/scripts/avl/delay_10_oracle_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_rtp_160k_0.pcap b/scripts/avl/delay_10_rtp_160k_0.pcap
new file mode 100755
index 00000000..c1043737
--- /dev/null
+++ b/scripts/avl/delay_10_rtp_160k_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_rtp_160k_1.pcap b/scripts/avl/delay_10_rtp_160k_1.pcap
new file mode 100755
index 00000000..8ccb79fe
--- /dev/null
+++ b/scripts/avl/delay_10_rtp_160k_1.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_rtp_160k_full.pcap b/scripts/avl/delay_10_rtp_160k_full.pcap
new file mode 100755
index 00000000..b91f5e6c
--- /dev/null
+++ b/scripts/avl/delay_10_rtp_160k_full.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_rtp_250k_0_0.pcap b/scripts/avl/delay_10_rtp_250k_0_0.pcap
new file mode 100755
index 00000000..7a6f0879
--- /dev/null
+++ b/scripts/avl/delay_10_rtp_250k_0_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_rtp_250k_1_0.pcap b/scripts/avl/delay_10_rtp_250k_1_0.pcap
new file mode 100755
index 00000000..cf0098f1
--- /dev/null
+++ b/scripts/avl/delay_10_rtp_250k_1_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_rtp_250k_full.pcap b/scripts/avl/delay_10_rtp_250k_full.pcap
new file mode 100755
index 00000000..689e2f81
--- /dev/null
+++ b/scripts/avl/delay_10_rtp_250k_full.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_rtsp_0.pcap b/scripts/avl/delay_10_rtsp_0.pcap
new file mode 100755
index 00000000..da7a9d03
--- /dev/null
+++ b/scripts/avl/delay_10_rtsp_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_sip_0.pcap b/scripts/avl/delay_10_sip_0.pcap
new file mode 100755
index 00000000..07043894
--- /dev/null
+++ b/scripts/avl/delay_10_sip_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_sip_video_call_full.pcap b/scripts/avl/delay_10_sip_video_call_full.pcap
new file mode 100755
index 00000000..f3802918
--- /dev/null
+++ b/scripts/avl/delay_10_sip_video_call_full.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_sip_video_call_short.pcap b/scripts/avl/delay_10_sip_video_call_short.pcap
new file mode 100755
index 00000000..f1bbf007
--- /dev/null
+++ b/scripts/avl/delay_10_sip_video_call_short.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_smtp_0.pcap b/scripts/avl/delay_10_smtp_0.pcap
new file mode 100755
index 00000000..37173a1c
--- /dev/null
+++ b/scripts/avl/delay_10_smtp_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_smtp_1.pcap b/scripts/avl/delay_10_smtp_1.pcap
new file mode 100755
index 00000000..620fe9b1
--- /dev/null
+++ b/scripts/avl/delay_10_smtp_1.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_smtp_2.pcap b/scripts/avl/delay_10_smtp_2.pcap
new file mode 100755
index 00000000..3fb3063d
--- /dev/null
+++ b/scripts/avl/delay_10_smtp_2.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_video_call_0.pcap b/scripts/avl/delay_10_video_call_0.pcap
new file mode 100755
index 00000000..bdeb082c
--- /dev/null
+++ b/scripts/avl/delay_10_video_call_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_video_call_rtp_0.pcap b/scripts/avl/delay_10_video_call_rtp_0.pcap
new file mode 100755
index 00000000..03375605
--- /dev/null
+++ b/scripts/avl/delay_10_video_call_rtp_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_citrix_0.pcap b/scripts/avl/delay_citrix_0.pcap
new file mode 100755
index 00000000..a53b0517
--- /dev/null
+++ b/scripts/avl/delay_citrix_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_dns_0.pcap b/scripts/avl/delay_dns_0.pcap
new file mode 100755
index 00000000..f4b96e25
--- /dev/null
+++ b/scripts/avl/delay_dns_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_email_pop_0.pcap b/scripts/avl/delay_email_pop_0.pcap
new file mode 100755
index 00000000..0975dc01
--- /dev/null
+++ b/scripts/avl/delay_email_pop_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_email_pop_1.pcap b/scripts/avl/delay_email_pop_1.pcap
new file mode 100755
index 00000000..4df4afc0
--- /dev/null
+++ b/scripts/avl/delay_email_pop_1.pcap
Binary files differ
diff --git a/scripts/avl/delay_email_pop_2.pcap b/scripts/avl/delay_email_pop_2.pcap
new file mode 100755
index 00000000..616a21f2
--- /dev/null
+++ b/scripts/avl/delay_email_pop_2.pcap
Binary files differ
diff --git a/scripts/avl/delay_exchange_0.pcap b/scripts/avl/delay_exchange_0.pcap
new file mode 100755
index 00000000..a1366ef8
--- /dev/null
+++ b/scripts/avl/delay_exchange_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_http_browsing_0.pcap b/scripts/avl/delay_http_browsing_0.pcap
new file mode 100755
index 00000000..697b64e2
--- /dev/null
+++ b/scripts/avl/delay_http_browsing_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_http_get_0.pcap b/scripts/avl/delay_http_get_0.pcap
new file mode 100755
index 00000000..136d102d
--- /dev/null
+++ b/scripts/avl/delay_http_get_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_http_post_0.pcap b/scripts/avl/delay_http_post_0.pcap
new file mode 100755
index 00000000..46d6f8f5
--- /dev/null
+++ b/scripts/avl/delay_http_post_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_https_0.pcap b/scripts/avl/delay_https_0.pcap
new file mode 100755
index 00000000..a8a902c8
--- /dev/null
+++ b/scripts/avl/delay_https_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_oracle_0.pcap b/scripts/avl/delay_oracle_0.pcap
new file mode 100755
index 00000000..c3882f30
--- /dev/null
+++ b/scripts/avl/delay_oracle_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_rtp_160k_1_1_0.pcap b/scripts/avl/delay_rtp_160k_1_1_0.pcap
new file mode 100755
index 00000000..9efff616
--- /dev/null
+++ b/scripts/avl/delay_rtp_160k_1_1_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_rtp_160k_1_1_1.pcap b/scripts/avl/delay_rtp_160k_1_1_1.pcap
new file mode 100755
index 00000000..29aa8ae8
--- /dev/null
+++ b/scripts/avl/delay_rtp_160k_1_1_1.pcap
Binary files differ
diff --git a/scripts/avl/delay_rtp_250k_0_0.pcap b/scripts/avl/delay_rtp_250k_0_0.pcap
new file mode 100755
index 00000000..a055b76e
--- /dev/null
+++ b/scripts/avl/delay_rtp_250k_0_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_rtp_250k_2_0.pcap b/scripts/avl/delay_rtp_250k_2_0.pcap
new file mode 100755
index 00000000..b57ea074
--- /dev/null
+++ b/scripts/avl/delay_rtp_250k_2_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_rtsp_0.pcap b/scripts/avl/delay_rtsp_0.pcap
new file mode 100755
index 00000000..73968c5a
--- /dev/null
+++ b/scripts/avl/delay_rtsp_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_sip_0.pcap b/scripts/avl/delay_sip_0.pcap
new file mode 100755
index 00000000..9e524eac
--- /dev/null
+++ b/scripts/avl/delay_sip_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_smtp_0.pcap b/scripts/avl/delay_smtp_0.pcap
new file mode 100755
index 00000000..9d004995
--- /dev/null
+++ b/scripts/avl/delay_smtp_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_smtp_1.pcap b/scripts/avl/delay_smtp_1.pcap
new file mode 100755
index 00000000..96f6b121
--- /dev/null
+++ b/scripts/avl/delay_smtp_1.pcap
Binary files differ
diff --git a/scripts/avl/delay_smtp_2.pcap b/scripts/avl/delay_smtp_2.pcap
new file mode 100755
index 00000000..a96ddc03
--- /dev/null
+++ b/scripts/avl/delay_smtp_2.pcap
Binary files differ
diff --git a/scripts/avl/delay_video_call_0.pcap b/scripts/avl/delay_video_call_0.pcap
new file mode 100755
index 00000000..0f365a1e
--- /dev/null
+++ b/scripts/avl/delay_video_call_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_video_call_rtp_0.pcap b/scripts/avl/delay_video_call_rtp_0.pcap
new file mode 100755
index 00000000..1a0c5c70
--- /dev/null
+++ b/scripts/avl/delay_video_call_rtp_0.pcap
Binary files differ
diff --git a/scripts/avl/dns_0.pcap b/scripts/avl/dns_0.pcap
new file mode 100755
index 00000000..b3acca6d
--- /dev/null
+++ b/scripts/avl/dns_0.pcap
Binary files differ
diff --git a/scripts/avl/email_pop1.pcap b/scripts/avl/email_pop1.pcap
new file mode 100755
index 00000000..c4a6e7f9
--- /dev/null
+++ b/scripts/avl/email_pop1.pcap
Binary files differ
diff --git a/scripts/avl/email_pop1_1.pcap b/scripts/avl/email_pop1_1.pcap
new file mode 100755
index 00000000..479a2934
--- /dev/null
+++ b/scripts/avl/email_pop1_1.pcap
Binary files differ
diff --git a/scripts/avl/email_pop2.pcap b/scripts/avl/email_pop2.pcap
new file mode 100755
index 00000000..58e9d491
--- /dev/null
+++ b/scripts/avl/email_pop2.pcap
Binary files differ
diff --git a/scripts/avl/email_pop2_2.pcap b/scripts/avl/email_pop2_2.pcap
new file mode 100755
index 00000000..ff80d253
--- /dev/null
+++ b/scripts/avl/email_pop2_2.pcap
Binary files differ
diff --git a/scripts/avl/email_pop4_29.pcap b/scripts/avl/email_pop4_29.pcap
new file mode 100755
index 00000000..67429564
--- /dev/null
+++ b/scripts/avl/email_pop4_29.pcap
Binary files differ
diff --git a/scripts/avl/exchange.pcap b/scripts/avl/exchange.pcap
new file mode 100755
index 00000000..66d65367
--- /dev/null
+++ b/scripts/avl/exchange.pcap
Binary files differ
diff --git a/scripts/avl/http_browsing.pcap b/scripts/avl/http_browsing.pcap
new file mode 100755
index 00000000..8f051029
--- /dev/null
+++ b/scripts/avl/http_browsing.pcap
Binary files differ
diff --git a/scripts/avl/http_get.pcap b/scripts/avl/http_get.pcap
new file mode 100755
index 00000000..2f152cfb
--- /dev/null
+++ b/scripts/avl/http_get.pcap
Binary files differ
diff --git a/scripts/avl/http_post.pcap b/scripts/avl/http_post.pcap
new file mode 100755
index 00000000..6a9c3f73
--- /dev/null
+++ b/scripts/avl/http_post.pcap
Binary files differ
diff --git a/scripts/avl/https.pcap b/scripts/avl/https.pcap
new file mode 100755
index 00000000..58f872ea
--- /dev/null
+++ b/scripts/avl/https.pcap
Binary files differ
diff --git a/scripts/avl/mac_uit.yaml b/scripts/avl/mac_uit.yaml
new file mode 100755
index 00000000..d980d54d
--- /dev/null
+++ b/scripts/avl/mac_uit.yaml
@@ -0,0 +1,5 @@
+- items :
+ - ip : "16.0.0.1"
+ mac : [0x16,0x1,0x4,0x5,0x6,0x7]
+ - ip : "16.0.0.2"
+ mac : [0x16,0x2,0x0,0x1,0x0,0x0]
diff --git a/scripts/avl/oracle.pcap b/scripts/avl/oracle.pcap
new file mode 100755
index 00000000..63ffe9b2
--- /dev/null
+++ b/scripts/avl/oracle.pcap
Binary files differ
diff --git a/scripts/avl/rtp_160_0.pcap b/scripts/avl/rtp_160_0.pcap
new file mode 100755
index 00000000..fb7eb6fa
--- /dev/null
+++ b/scripts/avl/rtp_160_0.pcap
Binary files differ
diff --git a/scripts/avl/rtp_160_1.pcap b/scripts/avl/rtp_160_1.pcap
new file mode 100755
index 00000000..00cf2a20
--- /dev/null
+++ b/scripts/avl/rtp_160_1.pcap
Binary files differ
diff --git a/scripts/avl/rtp_250k_1_0.pcap b/scripts/avl/rtp_250k_1_0.pcap
new file mode 100755
index 00000000..f369c037
--- /dev/null
+++ b/scripts/avl/rtp_250k_1_0.pcap
Binary files differ
diff --git a/scripts/avl/rtp_250k_2_0.pcap b/scripts/avl/rtp_250k_2_0.pcap
new file mode 100755
index 00000000..c3fd7a5e
--- /dev/null
+++ b/scripts/avl/rtp_250k_2_0.pcap
Binary files differ
diff --git a/scripts/avl/rtsp_0.pcap b/scripts/avl/rtsp_0.pcap
new file mode 100755
index 00000000..ffbf4a7d
--- /dev/null
+++ b/scripts/avl/rtsp_0.pcap
Binary files differ
diff --git a/scripts/avl/sfr_branch_profile_delay_10.yaml b/scripts/avl/sfr_branch_profile_delay_10.yaml
new file mode 100755
index 00000000..71e69212
--- /dev/null
+++ b/scripts/avl/sfr_branch_profile_delay_10.yaml
@@ -0,0 +1,114 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.1.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.62.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ wlength : 100
+ one_app_server : false
+ cap_info :
+ - name: avl/delay_10_http_get_0.pcap
+ cps : 432.576
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_post_0.pcap
+ cps : 432.576
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_https_0.pcap
+ cps : 135.863
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_browsing_0.pcap
+ cps : 361.55
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_exchange_0.pcap
+ cps : 623.781
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ one_app_server : true
+ - name: avl/delay_10_mail_pop_0.pcap
+ cps : 20.253
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_1.pcap
+ cps : 20.253
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_2.pcap
+ cps : 20.253
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_oracle_0.pcap
+ cps : 61.04
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_full.pcap
+ cps : 2.388
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ plugin_id : 1
+ - name: avl/delay_10_rtp_250k_full.pcap
+ cps : 1.705
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ plugin_id : 1
+ - name: avl/delay_10_smtp_0.pcap
+ cps : 20.635
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_1.pcap
+ cps : 20.635
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_2.pcap
+ cps : 20.635
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_0.pcap
+ cps : 9.835
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_sip_video_call_full.pcap
+ cps : 98.482
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ plugin_id : 2
+ - name: avl/delay_10_citrix_0.pcap
+ cps : 43.789
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_dns_0.pcap
+ cps : 1471.196
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
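All of the avl/ and cap2/ profiles in this commit share the same skeleton; a minimal annotated sketch follows, with field meanings inferred from these profiles rather than from a spec:

- duration : 0.1                # test duration in seconds
  generator :
    distribution : "seq"        # allocate client/server IPs sequentially
    clients_start : "16.0.0.1"
    clients_end   : "16.0.1.255"
    servers_start : "48.0.0.1"
    servers_end   : "48.0.62.255"
    dual_port_mask : "1.0.0.0"  # per dual-port IP offset (assumption)
  cap_ipg : true                # take inter-packet gaps from the pcap itself
  cap_info :                    # one entry per pcap template
    - name: avl/delay_10_dns_0.pcap
      cps : 1471.196            # new flows (connections) per second
      ipg : 10000               # inter-packet gap, usec (used when cap_ipg is false)
      rtt : 10000               # assumed round-trip time, usec
      w   : 1                   # scheduler weight of this template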
diff --git a/scripts/avl/sfr_delay_10.yaml b/scripts/avl/sfr_delay_10.yaml
new file mode 100755
index 00000000..1a3f82c3
--- /dev/null
+++ b/scripts/avl/sfr_delay_10.yaml
@@ -0,0 +1,119 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.1.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.20.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ #vlan : { enable : 1 , vlan0 : 100 , vlan1 : 200 }
+ #mac_override_by_ip : true
+ cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ wlength : 107
+ one_app_server : false
+ cap_info :
+ - name: avl/delay_10_http_get_0.pcap
+ cps : 102.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_post_0.pcap
+ cps : 102.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_https_0.pcap
+ cps : 33.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_browsing_0.pcap
+ cps : 179.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_exchange_0.pcap
+ cps : 64.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_0.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_1.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_2.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_oracle_0.pcap
+ cps : 20.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_full.pcap
+ cps : 0.7
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ one_app_server : false
+ plugin_id : 1
+ - name: avl/delay_10_rtp_250k_full.pcap
+ cps : 0.5
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ one_app_server : false
+ plugin_id : 1
+ - name: avl/delay_10_smtp_0.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_1.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_2.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_0.pcap
+ cps : 3.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ one_app_server : false
+ - name: avl/delay_10_sip_video_call_full.pcap
+ cps : 7.4
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ plugin_id : 2
+ one_app_server : false
+ - name: avl/delay_10_citrix_0.pcap
+ cps : 11.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_dns_0.pcap
+ cps : 498.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
diff --git a/scripts/avl/sfr_delay_10_1g.yaml b/scripts/avl/sfr_delay_10_1g.yaml
new file mode 100755
index 00000000..925531fd
--- /dev/null
+++ b/scripts/avl/sfr_delay_10_1g.yaml
@@ -0,0 +1,118 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.1.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.20.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ #wlength : 107
+ #one_app_server : false
+ cap_info :
+ - name: avl/delay_10_http_get_0.pcap
+ cps : 404.52
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_post_0.pcap
+ cps : 404.52
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_https_0.pcap
+ cps : 130.8745
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_browsing_0.pcap
+ cps : 709.89
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_exchange_0.pcap
+ cps : 253.81
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_0.pcap
+ cps : 4.759
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_1.pcap
+ cps : 4.759
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_2.pcap
+ cps : 4.759
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_oracle_0.pcap
+ cps : 79.3178
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_full.pcap
+ cps : 2.776
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ one_app_server : false
+ plugin_id : 1
+ - name: avl/delay_10_rtp_250k_full.pcap
+ cps : 1.982
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ one_app_server : false
+ plugin_id : 1
+ - name: avl/delay_10_smtp_0.pcap
+ cps : 7.3369
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_1.pcap
+ cps : 7.3369
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_2.pcap
+ cps : 7.3369
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_0.pcap
+ cps : 11.8976
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ one_app_server : false
+ - name: avl/delay_10_sip_video_call_full.pcap
+ cps : 29.347
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ plugin_id : 2
+ one_app_server : false
+ - name: avl/delay_10_citrix_0.pcap
+ cps : 43.6248
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_dns_0.pcap
+ cps : 1975.015
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ wlength : 1
+
diff --git a/scripts/avl/sfr_delay_10_1g_no_bundeling.yaml b/scripts/avl/sfr_delay_10_1g_no_bundeling.yaml
new file mode 100755
index 00000000..361d32cc
--- /dev/null
+++ b/scripts/avl/sfr_delay_10_1g_no_bundeling.yaml
@@ -0,0 +1,130 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.20.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ wlength : 107
+ one_app_server : false
+ cap_info :
+ - name: avl/delay_10_http_get_0.pcap
+ cps : 404.52
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_post_0.pcap
+ cps : 404.52
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_https_0.pcap
+ cps : 130.8745
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_browsing_0.pcap
+ cps : 709.89
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_exchange_0.pcap
+ cps : 253.81
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_0.pcap
+ cps : 4.759
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_1.pcap
+ cps : 4.759
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_2.pcap
+ cps : 4.759
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_oracle_0.pcap
+ cps : 79.3178
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_0.pcap
+ cps : 2.776
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_1.pcap
+ cps : 2.776
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_250k_0_0.pcap
+ cps : 1.982
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_250k_1_0.pcap
+ cps : 1.982
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_0.pcap
+ cps : 7.3369
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_1.pcap
+ cps : 7.3369
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_2.pcap
+ cps : 7.3369
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_0.pcap
+ cps : 11.8976
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_rtp_0.pcap
+ cps : 29.347
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_citrix_0.pcap
+ cps : 43.6248
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_dns_0.pcap
+ cps : 1975.015
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_sip_0.pcap
+ cps : 29.34
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtsp_0.pcap
+ cps : 4.759
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
diff --git a/scripts/avl/sfr_delay_10_no_bundeling.yaml b/scripts/avl/sfr_delay_10_no_bundeling.yaml
new file mode 100755
index 00000000..8374eab4
--- /dev/null
+++ b/scripts/avl/sfr_delay_10_no_bundeling.yaml
@@ -0,0 +1,131 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.20.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ wlength : 107
+ one_app_server : false
+ cap_info :
+ - name: avl/delay_10_http_get_0.pcap
+ cps : 102.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_post_0.pcap
+ cps : 102.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_https_0.pcap
+ cps : 33.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_browsing_0.pcap
+ cps : 179.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_exchange_0.pcap
+ cps : 64.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_0.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_1.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_2.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_oracle_0.pcap
+ cps : 20.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_0.pcap
+ cps : 0.7
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_1.pcap
+ cps : 0.7
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_250k_0_0.pcap
+ cps : 0.5
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_250k_1_0.pcap
+ cps : 0.5
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_0.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_1.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_2.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_0.pcap
+ cps : 3.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_rtp_0.pcap
+ cps : 7.4
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_citrix_0.pcap
+ cps : 11.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_dns_0.pcap
+ cps : 498.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ wlength : 1
+ - name: avl/delay_10_sip_0.pcap
+ cps : 7.4
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtsp_0.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
diff --git a/scripts/avl/sfr_delay_50_tunnel_no_bundeling.yaml b/scripts/avl/sfr_delay_50_tunnel_no_bundeling.yaml
new file mode 100755
index 00000000..cd8e3c64
--- /dev/null
+++ b/scripts/avl/sfr_delay_50_tunnel_no_bundeling.yaml
@@ -0,0 +1,131 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.20.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ wlength : 107
+ one_app_server : false
+ cap_info :
+ - name: avl/_tun_http_get_0_fixed.pcap
+ cps : 102.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_http_post_0_fixed.pcap
+ cps : 102.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_https_0_fixed.pcap
+ cps : 33.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_http_browsing_0_fixed.pcap
+ cps : 179.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_exchange_0_fixed.pcap
+ cps : 64.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_mail_pop_0_fixed.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_mail_pop_1_fixed.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_mail_pop_2_fixed.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_oracle_0_fixed.pcap
+ cps : 20.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_0.pcap
+ cps : 0.7
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_1.pcap
+ cps : 0.7
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_250k_0_0.pcap
+ cps : 0.5
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_250k_1_0.pcap
+ cps : 0.5
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_smtp_0_fixed.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_smtp_1_fixed.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_smtp_2_fixed.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_0.pcap
+ cps : 3.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_rtp_0.pcap
+ cps : 7.4
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_citrix_0_fixed.pcap
+ cps : 11.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_dns_0.pcap
+ cps : 498.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ wlength : 1
+ - name: avl/delay_10_sip_0.pcap
+ cps : 7.4
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_rtsp_0_fixed.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
diff --git a/scripts/avl/sip_0.pcap b/scripts/avl/sip_0.pcap
new file mode 100755
index 00000000..9f6d6ac9
--- /dev/null
+++ b/scripts/avl/sip_0.pcap
Binary files differ
diff --git a/scripts/avl/smtp_1.pcap b/scripts/avl/smtp_1.pcap
new file mode 100755
index 00000000..a0638b94
--- /dev/null
+++ b/scripts/avl/smtp_1.pcap
Binary files differ
diff --git a/scripts/avl/smtp_2.pcap b/scripts/avl/smtp_2.pcap
new file mode 100755
index 00000000..cbace86b
--- /dev/null
+++ b/scripts/avl/smtp_2.pcap
Binary files differ
diff --git a/scripts/avl/smtp_3.pcap b/scripts/avl/smtp_3.pcap
new file mode 100755
index 00000000..890db92a
--- /dev/null
+++ b/scripts/avl/smtp_3.pcap
Binary files differ
diff --git a/scripts/avl/test_mac.yaml b/scripts/avl/test_mac.yaml
new file mode 100755
index 00000000..9d0ed1c4
--- /dev/null
+++ b/scripts/avl/test_mac.yaml
@@ -0,0 +1,29 @@
+- items :
+ - ip : "16.0.0.1"
+ mac : [0x16,0x1,0x4,0x5,0x6,0x7]
+ - ip : "16.0.0.2"
+ mac : [0x16,0x2,0x0,0x1,0x0,0x0]
+ - ip : "16.0.0.3"
+ mac : [0x16,0x3,0x0,0x1,0x0,0x0]
+ - ip : "16.0.0.4"
+ mac : [0x16,0x4,0x4,0x5,0x6,0x7]
+ - ip : "16.0.0.5"
+ mac : [0x16,0x5,0x4,0x5,0x6,0x7]
+ - ip : "16.0.0.6"
+ mac : [0x16,0x6,0x4,0x5,0x6,0x7]
+ - ip : "16.0.0.7"
+ mac : [0x16,0x7,0x4,0x5,0x6,0x7]
+ - ip : "17.0.0.2"
+ mac : [0x17,0x2,0x0,0x1,0x0,0x0]
+ - ip : "17.0.0.1"
+ mac : [0x17,0x1,0x0,0x1,0x0,0x0]
+ - ip : "17.0.0.3"
+ mac : [0x17,0x3,0x0,0x1,0x0,0x0]
+ - ip : "17.0.0.4"
+ mac : [0x17,0x4,0x0,0x1,0x0,0x0]
+ - ip : "17.0.0.5"
+ mac : [0x17,0x5,0x0,0x1,0x0,0x0]
+ - ip : "17.0.0.6"
+ mac : [0x17,0x6,0x0,0x1,0x0,0x0]
+ - ip : "17.0.0.8"
+ mac : [0x17,0x8,0x0,0x1,0x0,0x0]
diff --git a/scripts/avl/video_call_0.pcap b/scripts/avl/video_call_0.pcap
new file mode 100755
index 00000000..33949516
--- /dev/null
+++ b/scripts/avl/video_call_0.pcap
Binary files differ
diff --git a/scripts/avl/video_rtp_1588_0.pcap b/scripts/avl/video_rtp_1588_0.pcap
new file mode 100755
index 00000000..464bb1fe
--- /dev/null
+++ b/scripts/avl/video_rtp_1588_0.pcap
Binary files differ
diff --git a/scripts/cap2/Oracle.pcap b/scripts/cap2/Oracle.pcap
new file mode 100755
index 00000000..cc300a53
--- /dev/null
+++ b/scripts/cap2/Oracle.pcap
Binary files differ
diff --git a/scripts/cap2/Video_Calls.pcap b/scripts/cap2/Video_Calls.pcap
new file mode 100755
index 00000000..6fd4d202
--- /dev/null
+++ b/scripts/cap2/Video_Calls.pcap
Binary files differ
diff --git a/scripts/cap2/Voice_calls_rtp_only.pcap b/scripts/cap2/Voice_calls_rtp_only.pcap
new file mode 100755
index 00000000..eb26dbb9
--- /dev/null
+++ b/scripts/cap2/Voice_calls_rtp_only.pcap
Binary files differ
diff --git a/scripts/cap2/citrix.pcap b/scripts/cap2/citrix.pcap
new file mode 100755
index 00000000..f13eec55
--- /dev/null
+++ b/scripts/cap2/citrix.pcap
Binary files differ
diff --git a/scripts/cap2/delay_10_rtp_250k_short.pcap b/scripts/cap2/delay_10_rtp_250k_short.pcap
new file mode 100755
index 00000000..de8b28e7
--- /dev/null
+++ b/scripts/cap2/delay_10_rtp_250k_short.pcap
Binary files differ
diff --git a/scripts/cap2/dns.pcap b/scripts/cap2/dns.pcap
new file mode 100755
index 00000000..2bd7f460
--- /dev/null
+++ b/scripts/cap2/dns.pcap
Binary files differ
diff --git a/scripts/cap2/dns.yaml b/scripts/cap2/dns.yaml
new file mode 100755
index 00000000..dd577894
--- /dev/null
+++ b/scripts/cap2/dns.yaml
@@ -0,0 +1,23 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.1.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ #vlan : { enable : 1 , vlan0 : 100 , vlan1 : 200 }
+ #mac_override_by_ip : true
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
+
diff --git a/scripts/cap2/dns_one_server.yaml b/scripts/cap2/dns_one_server.yaml
new file mode 100755
index 00000000..bac3c1d1
--- /dev/null
+++ b/scripts/cap2/dns_one_server.yaml
@@ -0,0 +1,33 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.2"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ #cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ server_addr : "48.0.0.1"
+ one_app_server : true
+ wlength : 1
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ wlength : 1
+
+
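dns_one_server.yaml above mixes global and per-template knobs; a hedged reading of the override pattern, inferred from the layout rather than confirmed by this commit:

  cap_info :
    - name: cap2/dns.pcap
      one_app_server : true      # pin every flow of this template...
      server_addr : "48.0.0.1"   # ...to this one server address
      wlength : 1                # per-template value, shadowing any global wlength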
diff --git a/scripts/cap2/dns_single_server.yaml b/scripts/cap2/dns_single_server.yaml
new file mode 100755
index 00000000..e2513484
--- /dev/null
+++ b/scripts/cap2/dns_single_server.yaml
@@ -0,0 +1,32 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ #cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ one_app_server : true
+ wlength : 7
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 0.5
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: cap2/dns.pcap
+ one_app_server : false
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+
+
diff --git a/scripts/cap2/dns_wlen.yaml b/scripts/cap2/dns_wlen.yaml
new file mode 100755
index 00000000..fc653d4f
--- /dev/null
+++ b/scripts/cap2/dns_wlen.yaml
@@ -0,0 +1,25 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ #cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ wlength : 2
+
+
diff --git a/scripts/cap2/dns_wlen1.yaml b/scripts/cap2/dns_wlen1.yaml
new file mode 100755
index 00000000..bf07bac3
--- /dev/null
+++ b/scripts/cap2/dns_wlen1.yaml
@@ -0,0 +1,25 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ #cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ wlength : 1
+
+
diff --git a/scripts/cap2/dns_wlen2.yaml b/scripts/cap2/dns_wlen2.yaml
new file mode 100755
index 00000000..5488ae8e
--- /dev/null
+++ b/scripts/cap2/dns_wlen2.yaml
@@ -0,0 +1,32 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ #cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ wlength : 10
+ one_app_server : true
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 10
+ wlength : 1
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 10
+
+
diff --git a/scripts/cap2/dns_wlength.yaml b/scripts/cap2/dns_wlength.yaml
new file mode 100755
index 00000000..fc653d4f
--- /dev/null
+++ b/scripts/cap2/dns_wlength.yaml
@@ -0,0 +1,25 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ #cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ wlength : 2
+
+
diff --git a/scripts/cap2/dyn_pyld1.yaml b/scripts/cap2/dyn_pyld1.yaml
new file mode 100755
index 00000000..1ea09aa7
--- /dev/null
+++ b/scripts/cap2/dyn_pyld1.yaml
@@ -0,0 +1,36 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ #cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ dyn_pyload :
+ - pkt_id : 0 # 0 is the first packet
+ pyld_offset : 0 # offset from the payload start, in bytes
+ type : 0 # 0 - random, 1 - client_ip
+ len : 1 # length in uint32 words
+ mask : 0xffffffff # mask applied to the written value
+ - pkt_id : 1 # 0 is the first packet
+ pyld_offset : 2 # offset from the payload start, in bytes
+ type : 1 # 0 - random, 1 - client_ip
+ len : 2 # length in uint32 words
+ mask : 0xffffffff # mask applied to the written value
+
+
+
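Read together, the dyn_pyload entries in dyn_pyld1.yaml mean roughly the following (an interpretation of the inline comments, not a confirmed spec):

  dyn_pyload :
    - pkt_id : 1          # patch the second packet of each flow
      pyld_offset : 2     # starting 2 bytes into its payload
      type : 1            # with a value derived from the client IP
      len : 2             # overwrite two uint32 words
      mask : 0xffffffff   # keep all bits of the written value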
diff --git a/scripts/cap2/exchange.pcap b/scripts/cap2/exchange.pcap
new file mode 100755
index 00000000..1be671f2
--- /dev/null
+++ b/scripts/cap2/exchange.pcap
Binary files differ
diff --git a/scripts/cap2/http.yaml b/scripts/cap2/http.yaml
new file mode 100755
index 00000000..80d58369
--- /dev/null
+++ b/scripts/cap2/http.yaml
@@ -0,0 +1,22 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.1.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ cap_info :
+ - name: avl/delay_10_rtp_160k_full.pcap
+ cps : 2.776
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ plugin_id : 1
+
diff --git a/scripts/cap2/http_browsing.pcap b/scripts/cap2/http_browsing.pcap
new file mode 100755
index 00000000..065e33ba
--- /dev/null
+++ b/scripts/cap2/http_browsing.pcap
Binary files differ
diff --git a/scripts/cap2/http_get.pcap b/scripts/cap2/http_get.pcap
new file mode 100755
index 00000000..3f1d42e4
--- /dev/null
+++ b/scripts/cap2/http_get.pcap
Binary files differ
diff --git a/scripts/cap2/http_plugin.yaml b/scripts/cap2/http_plugin.yaml
new file mode 100755
index 00000000..032409e4
--- /dev/null
+++ b/scripts/cap2/http_plugin.yaml
@@ -0,0 +1,24 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: avl/delay_10_http_browsing_0.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ plugin_id : 4
+
diff --git a/scripts/cap2/http_post.pcap b/scripts/cap2/http_post.pcap
new file mode 100755
index 00000000..0ccb3b55
--- /dev/null
+++ b/scripts/cap2/http_post.pcap
Binary files differ
diff --git a/scripts/cap2/http_simple.yaml b/scripts/cap2/http_simple.yaml
new file mode 100755
index 00000000..ad4814f5
--- /dev/null
+++ b/scripts/cap2/http_simple.yaml
@@ -0,0 +1,21 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ cap_info :
+ - name: avl/delay_10_http_browsing_0.pcap
+ cps : 2.776
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
diff --git a/scripts/cap2/https.pcap b/scripts/cap2/https.pcap
new file mode 100755
index 00000000..e62fa29c
--- /dev/null
+++ b/scripts/cap2/https.pcap
Binary files differ
diff --git a/scripts/cap2/imix.yaml b/scripts/cap2/imix.yaml
new file mode 100755
index 00000000..a155e38b
--- /dev/null
+++ b/scripts/cap2/imix.yaml
@@ -0,0 +1,35 @@
+#
+# Simple IMIX test (7x64B, 5x594B, 1x1518B)
+#
+- duration : 3
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/udp_64B.pcap
+ cps : 28.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ limit : 7
+ - name: cap2/udp_594B.pcap
+ cps : 20.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ limit : 5
+ - name: cap2/udp_1518B.pcap
+ cps : 4.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ limit : 1
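As a sanity check on the mix above, the 7:5:1 ratio encoded by both the cps values (28:20:4) and the limit fields works out to the classic IMIX average:

  # 7 x 64B + 5 x 594B + 1 x 1518B = 448 + 2970 + 1518 = 4936 bytes over 13 packets
  # average packet size = 4936 / 13 ~ 379.7 bytes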
diff --git a/scripts/cap2/imix_1518.yaml b/scripts/cap2/imix_1518.yaml
new file mode 100755
index 00000000..ddccc987
--- /dev/null
+++ b/scripts/cap2/imix_1518.yaml
@@ -0,0 +1,70 @@
+#
+# Simple IMIX test 1518B
+#
+- duration : 3
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+#
+# the templates are duplicated on purpose, to utilize all of the DRAM bandwidth and get better performance; we should do this automatically,
+# but for now it is done manually, so you should have at least 8 duplicates
+#
+ cap_info :
+ - name: cap2/udp_1518B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_1518B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_1518B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_1518B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_1518B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_1518B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_1518B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_1518B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+
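The duplication above multiplies out as follows (simple arithmetic on the fields, not a measured result):

  # 8 identical udp_1518B templates x 1000.0 cps = 8000 flows/sec aggregate
  # 8 templates x limit 200 = at most 1600 flows resident at once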
diff --git a/scripts/cap2/imix_64.yaml b/scripts/cap2/imix_64.yaml
new file mode 100755
index 00000000..d3c92296
--- /dev/null
+++ b/scripts/cap2/imix_64.yaml
@@ -0,0 +1,70 @@
+#
+# Simple IMIX test 64B
+#
+- duration : 3
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+#
+# the templates are duplicated on purpose, to utilize all of the DRAM bandwidth and get better performance; we should do this automatically,
+# but for now it is done manually, so you should have at least 8 duplicates
+#
+ cap_info :
+ - name: cap2/udp_64B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_64B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_64B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_64B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_64B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_64B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_64B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_64B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+
diff --git a/scripts/cap2/imix_fast_1g.yaml b/scripts/cap2/imix_fast_1g.yaml
new file mode 100755
index 00000000..12ce7c4d
--- /dev/null
+++ b/scripts/cap2/imix_fast_1g.yaml
@@ -0,0 +1,53 @@
+#
+# Simple, faster IMIX test (7x64B, 5x594B, 1x1518B)
+# we duplicate the templates to utilize memory bandwidth better
+- duration : 3
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/udp_64B.pcap
+ cps : 90615
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 199
+ - name: cap2/udp_594B.pcap
+ cps : 64725
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 199
+ - name: cap2/udp_1518B.pcap
+ cps : 12945
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 199
+ - name: cap2/udp_64B.pcap
+ cps : 90615
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 199
+ - name: cap2/udp_594B.pcap
+ cps : 64725
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 199
+ - name: cap2/udp_1518B.pcap
+ cps : 12945
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 199
diff --git a/scripts/cap2/imix_fast_1g_100k_flows.yaml b/scripts/cap2/imix_fast_1g_100k_flows.yaml
new file mode 100755
index 00000000..a5fb6bc0
--- /dev/null
+++ b/scripts/cap2/imix_fast_1g_100k_flows.yaml
@@ -0,0 +1,53 @@
+#
+# Simple, faster IMIX test (7x64B, 5x594B, 1x1518B)
+# we duplicate the templates to utilize memory bandwidth better
+- duration : 3
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/udp_64B.pcap
+ cps : 90615
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 16666
+ - name: cap2/udp_594B.pcap
+ cps : 64725
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 16666
+ - name: cap2/udp_1518B.pcap
+ cps : 12945
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 16667
+ - name: cap2/udp_64B.pcap
+ cps : 90615
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 16667
+ - name: cap2/udp_594B.pcap
+ cps : 64725
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 16667
+ - name: cap2/udp_1518B.pcap
+ cps : 12945
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 16667
diff --git a/scripts/cap2/ipv4_vlan.yaml b/scripts/cap2/ipv4_vlan.yaml
new file mode 100755
index 00000000..63f7db7d
--- /dev/null
+++ b/scripts/cap2/ipv4_vlan.yaml
@@ -0,0 +1,21 @@
+- duration : 10
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.1.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ vlan : { enable : 1 , vlan0 : 100 , vlan1 : 200 }
+ cap_info :
+ - name: cap2/udp_64B.pcap
+ cps : 10.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ limit : 20
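A hedged note on the vlan knob used above (the vlan0/vlan1 split is inferred from the naming, not confirmed by this commit):

  vlan : { enable : 1 , vlan0 : 100 , vlan1 : 200 }
  # enable : 1   - add an 802.1Q tag to every generated packet
  # vlan0/vlan1  - VLAN IDs assumed to apply to the two sides of a dual-port pair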
diff --git a/scripts/cap2/ipv6.pcap b/scripts/cap2/ipv6.pcap
new file mode 100755
index 00000000..5a3e23be
--- /dev/null
+++ b/scripts/cap2/ipv6.pcap
Binary files differ
diff --git a/scripts/cap2/ipv6.yaml b/scripts/cap2/ipv6.yaml
new file mode 100755
index 00000000..3de11f6c
--- /dev/null
+++ b/scripts/cap2/ipv6.yaml
@@ -0,0 +1,22 @@
+- duration : 10
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.1.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ src_ipv6 : [0x2001,0x0232,0x1002,0x0051,0x0000,0x0000]
+ dst_ipv6 : [0x3001,0x0DB8,0x0003,0x0004,0x0000,0x0000]
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/ipv6.pcap
+ cps : 10.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ limit : 20
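The six 16-bit words of src_ipv6/dst_ipv6 cover only the high 96 bits of an IPv6 address; a plausible reading, stated here as an assumption, is that the generator's 32-bit IPv4 addresses fill in the low 32 bits:

  src_ipv6 : [0x2001,0x0232,0x1002,0x0051,0x0000,0x0000]
  # high 96 bits: 2001:232:1002:51::/96 (assumption)
  # e.g. client 16.0.0.1 (0x10000001) -> 2001:232:1002:51::1000:1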
diff --git a/scripts/cap2/ipv6_vlan.yaml b/scripts/cap2/ipv6_vlan.yaml
new file mode 100755
index 00000000..bb91a4f8
--- /dev/null
+++ b/scripts/cap2/ipv6_vlan.yaml
@@ -0,0 +1,23 @@
+- duration : 10
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.1.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ src_ipv6 : [0x2001,0x0232,0x1002,0x0051,0x0000,0x0000]
+ dst_ipv6 : [0x3001,0x0DB8,0x0003,0x0004,0x0000,0x0000]
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ vlan : { enable : 1 , vlan0 : 100 , vlan1 : 200 }
+ cap_info :
+ - name: cap2/ipv6.pcap
+ cps : 10.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ limit : 20
diff --git a/scripts/cap2/lb_ex1.yaml b/scripts/cap2/lb_ex1.yaml
new file mode 100755
index 00000000..ea7c38e7
--- /dev/null
+++ b/scripts/cap2/lb_ex1.yaml
@@ -0,0 +1,26 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "10.10.10.1"
+ clients_end : "10.10.10.10"
+ servers_start : "1.1.1.1"
+ servers_end : "1.1.1.8"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ one_app_server : false
+ cap_info :
+ - name: avl/delay_10_https_0.pcap
+ cps : 33.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ one_app_server : true
+ server_addr : "1.1.1.1"
+
+
+
diff --git a/scripts/cap2/limit_multi_pkt.yaml b/scripts/cap2/limit_multi_pkt.yaml
new file mode 100755
index 00000000..25e32e86
--- /dev/null
+++ b/scripts/cap2/limit_multi_pkt.yaml
@@ -0,0 +1,23 @@
+#
+# Test "limit" keyword using a pcap file that contains a flow with multiple packets
+#
+- duration : 10
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 15.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 6
diff --git a/scripts/cap2/limit_single_pkt.yaml b/scripts/cap2/limit_single_pkt.yaml
new file mode 100755
index 00000000..03802fe8
--- /dev/null
+++ b/scripts/cap2/limit_single_pkt.yaml
@@ -0,0 +1,23 @@
+#
+# Test "limit" keyword using a pcap file that contains a flow with a single packet
+#
+- duration : 10
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/udp_64B.pcap
+ cps : 6.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 15
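A hedged reading of the two limit tests above, inferred from their header comments: limit caps the number of flows generated from a template, regardless of how many packets each flow replays:

  # limit_multi_pkt.yaml : cps 15.0, limit 6  -> stop after 6 multi-packet dns.pcap flows
  # limit_single_pkt.yaml: cps 6.0,  limit 15 -> stop after 15 single-packet udp_64B.pcap flows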
diff --git a/scripts/cap2/mail_pop.pcap b/scripts/cap2/mail_pop.pcap
new file mode 100755
index 00000000..431b05d7
--- /dev/null
+++ b/scripts/cap2/mail_pop.pcap
Binary files differ
diff --git a/scripts/cap2/nat_test.yaml b/scripts/cap2/nat_test.yaml
new file mode 100755
index 00000000..d4fdc7db
--- /dev/null
+++ b/scripts/cap2/nat_test.yaml
@@ -0,0 +1,46 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+
+ clients_start : "16.0.0.3"
+ clients_end : "16.0.0.103"
+ servers_start : "48.0.0.3"
+ servers_end : "48.0.0.102"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ wlength : 107
+ one_app_server : false
+ cap_info :
+ - name: avl/delay_10_video_call_0.pcap
+ cps : 30
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_rtp_0.pcap
+ cps : 60
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_https_0.pcap
+ cps : 50
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_250k_0_0.pcap
+ cps : 19.82
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_250k_1_0.pcap
+ cps : 19.82
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
diff --git a/scripts/cap2/rtp_160k.pcap b/scripts/cap2/rtp_160k.pcap
new file mode 100755
index 00000000..4fbd5e52
--- /dev/null
+++ b/scripts/cap2/rtp_160k.pcap
Binary files differ
diff --git a/scripts/cap2/rtp_250k_rtp_only.pcap b/scripts/cap2/rtp_250k_rtp_only.pcap
new file mode 100755
index 00000000..ec96df3a
--- /dev/null
+++ b/scripts/cap2/rtp_250k_rtp_only.pcap
Binary files differ
diff --git a/scripts/cap2/rtp_250k_rtp_only_1.pcap b/scripts/cap2/rtp_250k_rtp_only_1.pcap
new file mode 100755
index 00000000..4ac299d8
--- /dev/null
+++ b/scripts/cap2/rtp_250k_rtp_only_1.pcap
Binary files differ
diff --git a/scripts/cap2/rtp_250k_rtp_only_2.pcap b/scripts/cap2/rtp_250k_rtp_only_2.pcap
new file mode 100755
index 00000000..e587bfaa
--- /dev/null
+++ b/scripts/cap2/rtp_250k_rtp_only_2.pcap
Binary files differ
diff --git a/scripts/cap2/rtsp.yaml b/scripts/cap2/rtsp.yaml
new file mode 100755
index 00000000..7a91c4e9
--- /dev/null
+++ b/scripts/cap2/rtsp.yaml
@@ -0,0 +1,24 @@
+- duration : 2.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ #cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/rtsp_short.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 1
+
diff --git a/scripts/cap2/rtsp_full1.yaml b/scripts/cap2/rtsp_full1.yaml
new file mode 100755
index 00000000..c150f54a
--- /dev/null
+++ b/scripts/cap2/rtsp_full1.yaml
@@ -0,0 +1,17 @@
+- duration : 5.0
+ min_src_ip : 0x80f1c2c3
+ max_src_ip : 0x90f1c2c3
+ min_dst_ip : 0xa0f2c2c3
+ max_dst_ip : 0xb0f2c2c3
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: avl/delay_10_rtp_250k_full.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 1
+
diff --git a/scripts/cap2/rtsp_full2.yaml b/scripts/cap2/rtsp_full2.yaml
new file mode 100755
index 00000000..eb75afec
--- /dev/null
+++ b/scripts/cap2/rtsp_full2.yaml
@@ -0,0 +1,24 @@
+- duration : 4.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: avl/delay_10_rtp_160k_full.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 1
+
diff --git a/scripts/cap2/rtsp_short.pcap b/scripts/cap2/rtsp_short.pcap
new file mode 100755
index 00000000..36d5b775
--- /dev/null
+++ b/scripts/cap2/rtsp_short.pcap
Binary files differ
diff --git a/scripts/cap2/rtsp_short1.yaml b/scripts/cap2/rtsp_short1.yaml
new file mode 100755
index 00000000..8e0fd53d
--- /dev/null
+++ b/scripts/cap2/rtsp_short1.yaml
@@ -0,0 +1,24 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/delay_10_rtp_250k_short.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 1
+
diff --git a/scripts/cap2/rtsp_short1_slow.yaml b/scripts/cap2/rtsp_short1_slow.yaml
new file mode 100755
index 00000000..7a3abb59
--- /dev/null
+++ b/scripts/cap2/rtsp_short1_slow.yaml
@@ -0,0 +1,24 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/delay_10_rtp_250k_short.pcap
+ cps : 0.01
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 1
+
diff --git a/scripts/cap2/rtsp_short2.yaml b/scripts/cap2/rtsp_short2.yaml
new file mode 100755
index 00000000..8e0fd53d
--- /dev/null
+++ b/scripts/cap2/rtsp_short2.yaml
@@ -0,0 +1,24 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/delay_10_rtp_250k_short.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 1
+
diff --git a/scripts/cap2/rtsp_short3.yaml b/scripts/cap2/rtsp_short3.yaml
new file mode 100755
index 00000000..8e0fd53d
--- /dev/null
+++ b/scripts/cap2/rtsp_short3.yaml
@@ -0,0 +1,24 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/delay_10_rtp_250k_short.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 1
+
diff --git a/scripts/cap2/sfr.yaml b/scripts/cap2/sfr.yaml
new file mode 100755
index 00000000..da6391d2
--- /dev/null
+++ b/scripts/cap2/sfr.yaml
@@ -0,0 +1,90 @@
+- duration : 40
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/Oracle.pcap
+ cps : 150.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/Video_Calls.pcap
+ cps : 11.4
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/rtp_160k.pcap
+ cps : 3.6
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/rtp_250k_rtp_only_1.pcap
+ cps : 4.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/rtp_250k_rtp_only_2.pcap
+ cps : 4.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/smtp.pcap
+ cps : 34.2
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/Voice_calls_rtp_only.pcap
+ cps : 66.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/citrix.pcap
+ cps : 105.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/dns.pcap
+ cps : 2400.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/exchange.pcap
+ cps : 630.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/http_browsing.pcap
+ cps : 267.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/http_get.pcap
+ cps : 345.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/http_post.pcap
+ cps : 345.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/https.pcap
+ cps : 111.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/mail_pop.pcap
+ cps : 34.2
+ ipg : 10000
+ rtt : 10000
+ w : 4
+
diff --git a/scripts/cap2/sfr2.yaml b/scripts/cap2/sfr2.yaml
new file mode 100755
index 00000000..910e36bf
--- /dev/null
+++ b/scripts/cap2/sfr2.yaml
@@ -0,0 +1,30 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/Oracle.pcap
+ cps : 1.0
+ ipg : 10
+ rtt : 10
+ w : 4
+ - name: cap2/smtp.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+
diff --git a/scripts/cap2/sfr3.yaml b/scripts/cap2/sfr3.yaml
new file mode 100755
index 00000000..bb60fe9d
--- /dev/null
+++ b/scripts/cap2/sfr3.yaml
@@ -0,0 +1,90 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/Oracle.pcap
+ cps : 0.5
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/Video_Calls.pcap
+ cps : 1.4
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/rtp_160k.pcap
+ cps : 2.6
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/rtp_250k_rtp_only_1.pcap
+ cps : 3.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/rtp_250k_rtp_only_2.pcap
+ cps : 4.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/smtp.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/Voice_calls_rtp_only.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/citrix.pcap
+ cps : 0.1
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/dns.pcap
+ cps : 0.2
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/exchange.pcap
+ cps : 0.3
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/http_browsing.pcap
+ cps : 0.4
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/http_get.pcap
+ cps : 0.5
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/http_post.pcap
+ cps : 0.6
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/https.pcap
+ cps : 0.7
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/mail_pop.pcap
+ cps : 0.8
+ ipg : 10000
+ rtt : 10000
+ w : 4
+
diff --git a/scripts/cap2/sfr4.yaml b/scripts/cap2/sfr4.yaml
new file mode 100755
index 00000000..49d7e9b3
--- /dev/null
+++ b/scripts/cap2/sfr4.yaml
@@ -0,0 +1,20 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/http_get.pcap
+ cps : 1.0
+ ipg : 100
+ rtt : 10000
+ w : 1
+
diff --git a/scripts/cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile.yaml b/scripts/cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile.yaml
new file mode 100755
index 00000000..b36771c4
--- /dev/null
+++ b/scripts/cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile.yaml
@@ -0,0 +1,83 @@
+- duration : 1800
+ min_src_ip : 0x10000001
+ max_src_ip : 0x90000001
+ min_dst_ip : 0xa0000001
+ max_dst_ip : 0xb0000001
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/Oracle.pcap
+ cps : 60
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/Video_Calls.pcap
+ cps : 9.6
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: cap2/rtp_160k.pcap
+ cps : 2.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: cap2/rtp_250k_rtp_only_1.pcap
+ cps : 2.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: cap2/rtp_250k_rtp_only_2.pcap
+ cps : 2.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: cap2/smtp.pcap
+ cps : 6.25
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/Voice_calls_rtp_only.pcap
+ cps : 35.4
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: cap2/citrix.pcap
+ cps : 42
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/dns.pcap
+ cps : 1500
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: cap2/exchange.pcap
+ cps : 372
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/http_browsing.pcap
+ cps : 770
+ ipg : 200000
+ rtt : 200000
+ w : 4
+ - name: cap2/http_get.pcap
+ cps : 430
+ ipg : 200000
+ rtt : 200000
+ w : 4
+ - name: cap2/http_post.pcap
+ cps : 430
+ ipg : 200000
+ rtt : 200000
+ w : 4
+ - name: cap2/https.pcap
+ cps : 70
+ ipg : 200000
+ rtt : 200000
+ w : 4
+ - name: cap2/mail_pop.pcap
+ cps : 37.5
+ ipg : 10000
+ rtt : 10000
+ w : 4
+
diff --git a/scripts/cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile_ipg_mix.yaml b/scripts/cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile_ipg_mix.yaml
new file mode 100755
index 00000000..2d8d9ce8
--- /dev/null
+++ b/scripts/cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile_ipg_mix.yaml
@@ -0,0 +1,240 @@
+- duration : 1800
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/Oracle.pcap
+ cps : 21.6
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/Oracle.pcap
+ cps : 4.8
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/Oracle.pcap
+ cps : 33.6
+ ipg : 1000
+ rtt : 1000
+ w : 4
+ - name: cap2/Video_Calls.pcap
+ cps : 3.456
+ ipg : 12
+ rtt : 12
+ w : 1
+ - name: cap2/Video_Calls.pcap
+ cps : 0.768
+ ipg : 200
+ rtt : 200
+ w : 1
+ - name: cap2/Video_Calls.pcap
+ cps : 5.376
+ ipg : 1000
+ rtt : 1000
+ w : 1
+ - name: cap2/rtp_160k.pcap
+ cps : 0.792
+ ipg : 12
+ rtt : 12
+ w : 1
+ - name: cap2/rtp_160k.pcap
+ cps : 0.176
+ ipg : 200
+ rtt : 200
+ w : 1
+ - name: cap2/rtp_160k.pcap
+ cps : 1.232
+ ipg : 1000
+ rtt : 1000
+ w : 1
+ - name: cap2/rtp_250k_rtp_only_1.pcap
+ cps : 0.792
+ ipg : 12
+ rtt : 12
+ w : 1
+ - name: cap2/rtp_250k_rtp_only_1.pcap
+ cps : 0.176
+ ipg : 200
+ rtt : 200
+ w : 1
+ - name: cap2/rtp_250k_rtp_only_1.pcap
+ cps : 1.232
+ ipg : 1000
+ rtt : 1000
+ w : 1
+ - name: cap2/rtp_250k_rtp_only_2.pcap
+ cps : 0.792
+ ipg : 12
+ rtt : 12
+ w : 1
+ - name: cap2/rtp_250k_rtp_only_2.pcap
+ cps : 0.176
+ ipg : 200
+ rtt : 200
+ w : 1
+ - name: cap2/rtp_250k_rtp_only_2.pcap
+ cps : 1.232
+ ipg : 1000
+ rtt : 1000
+ w : 1
+ - name: cap2/smtp.pcap
+ cps : 2.25
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/smtp.pcap
+ cps : 0.5
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/smtp.pcap
+ cps : 3.5
+ ipg : 1000
+ rtt : 1000
+ w : 4
+ - name: cap2/Voice_calls_rtp_only.pcap
+ cps : 12.744
+ ipg : 12
+ rtt : 12
+ w : 1
+ - name: cap2/Voice_calls_rtp_only.pcap
+ cps : 2.832
+ ipg : 200
+ rtt : 200
+ w : 1
+ - name: cap2/Voice_calls_rtp_only.pcap
+ cps : 19.824
+ ipg : 1000
+ rtt : 1000
+ w : 1
+ - name: cap2/citrix.pcap
+ cps : 15.12
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/citrix.pcap
+ cps : 3.36
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/citrix.pcap
+ cps : 23.52
+ ipg : 1000
+ rtt : 1000
+ w : 4
+ - name: cap2/dns.pcap
+ cps : 540
+ ipg : 12
+ rtt : 12
+ w : 1
+ - name: cap2/dns.pcap
+ cps : 120
+ ipg : 200
+ rtt : 200
+ w : 1
+ - name: cap2/dns.pcap
+ cps : 840
+ ipg : 1000
+ rtt : 1000
+ w : 1
+ - name: cap2/exchange.pcap
+ cps : 133.92
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/exchange.pcap
+ cps : 29.76
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/exchange.pcap
+ cps : 208.32
+ ipg : 1000
+ rtt : 1000
+ w : 4
+ - name: cap2/http_browsing.pcap
+ cps : 277.2
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/http_browsing.pcap
+ cps : 61.6
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/http_browsing.pcap
+ cps : 431.2
+ ipg : 200000
+ rtt : 200000
+ w : 4
+ - name: cap2/http_get.pcap
+ cps : 154.8
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/http_get.pcap
+ cps : 34.4
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/http_get.pcap
+ cps : 240.8
+ ipg : 200000
+ rtt : 200000
+ w : 4
+ - name: cap2/http_post.pcap
+ cps : 154.8
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/http_post.pcap
+ cps : 34.4
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/http_post.pcap
+ cps : 240.8
+ ipg : 200000
+ rtt : 200000
+ w : 4
+ - name: cap2/https.pcap
+ cps : 25.2
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/https.pcap
+ cps : 5.6
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/https.pcap
+ cps : 39.2
+ ipg : 200000
+ rtt : 200000
+ w : 4
+ - name: cap2/mail_pop.pcap
+ cps : 13.5
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/mail_pop.pcap
+ cps : 3
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/mail_pop.pcap
+ cps : 21
+ ipg : 1000
+ rtt : 1000
+ w : 4
+
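Comparing this _ipg_mix profile with the plain sfr_agg profile above, each template's base cps appears to be split across the three ipg buckets at fixed ratios of roughly 0.36 / 0.08 / 0.56 (e.g. Oracle: 21.6 + 4.8 + 33.6 = 60, the single-bucket value above). A short sketch reproducing that split; the ratios are inferred from the numbers, not documented anywhere:

    # base cps values taken from the non-mix profile above
    base_cps = {'Oracle': 60, 'Video_Calls': 9.6, 'smtp': 6.25, 'dns': 1500}
    ratios = (0.36, 0.08, 0.56)  # inferred split across the ipg buckets

    for name, cps in sorted(base_cps.items()):
        parts = [round(cps * r, 3) for r in ratios]
        print(name, parts, 'total:', round(sum(parts), 3))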
diff --git a/scripts/cap2/short_tcp.yaml b/scripts/cap2/short_tcp.yaml
new file mode 100755
index 00000000..2c58a002
--- /dev/null
+++ b/scripts/cap2/short_tcp.yaml
@@ -0,0 +1,20 @@
+- duration : 40
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: avl/delay_10_smtp_0.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
diff --git a/scripts/cap2/sip_short1.yaml b/scripts/cap2/sip_short1.yaml
new file mode 100755
index 00000000..e644bf56
--- /dev/null
+++ b/scripts/cap2/sip_short1.yaml
@@ -0,0 +1,24 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: avl/delay_10_sip_video_call_short.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 2
+
diff --git a/scripts/cap2/sip_short2.yaml b/scripts/cap2/sip_short2.yaml
new file mode 100755
index 00000000..e644bf56
--- /dev/null
+++ b/scripts/cap2/sip_short2.yaml
@@ -0,0 +1,24 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: avl/delay_10_sip_video_call_short.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 2
+
diff --git a/scripts/cap2/smtp.pcap b/scripts/cap2/smtp.pcap
new file mode 100755
index 00000000..2cb9d893
--- /dev/null
+++ b/scripts/cap2/smtp.pcap
Binary files differ
diff --git a/scripts/cap2/test_mac.yaml b/scripts/cap2/test_mac.yaml
new file mode 100755
index 00000000..8ae7eaea
--- /dev/null
+++ b/scripts/cap2/test_mac.yaml
@@ -0,0 +1,9 @@
+- min_ip : "1.1.1.1"
+ items :
+ - ip : "1.1.1.1"
+ mac : [0x2,0x0,0x0,0x1,0x0,0x0]
+
+ - ip : "1.1.1.2"
+ mac : [0x3,0x0,0x0,0x1,0x0,0x0]
+ - ip : "1.1.1.103"
+ mac : [0x3,0x0,0x0,0x1,0x0,0x0]
diff --git a/scripts/cap2/test_pcap_mode1.yaml b/scripts/cap2/test_pcap_mode1.yaml
new file mode 100755
index 00000000..3af7b088
--- /dev/null
+++ b/scripts/cap2/test_pcap_mode1.yaml
@@ -0,0 +1,24 @@
+- duration : 0.5
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: avl/citrix_0.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+
+
diff --git a/scripts/cap2/test_pcap_mode2.yaml b/scripts/cap2/test_pcap_mode2.yaml
new file mode 100755
index 00000000..6bb6ba3a
--- /dev/null
+++ b/scripts/cap2/test_pcap_mode2.yaml
@@ -0,0 +1,24 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ cap_ipg : true
+ cap_ipg_min : 30
+ cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: avl/citrix_0.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+
+
diff --git a/scripts/cap2/tuple_gen.yaml b/scripts/cap2/tuple_gen.yaml
new file mode 100755
index 00000000..873d9336
--- /dev/null
+++ b/scripts/cap2/tuple_gen.yaml
@@ -0,0 +1,10 @@
+- distribution : "seq"
+ clients_start : "10.0.0.0"
+ clients_end : "20.0.0.0"
+ servers_start : "30.0.0.0"
+ servers_end : "40.0.0.0"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+
+
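The block above is the standalone form of the generator section embedded in the traffic profiles. As an illustration only (not TRex's implementation), a "seq" distribution over such a range can be thought of as a sequential sweep of the address space:

    import socket, struct

    def ip_seq(start, end):
        # walk an IPv4 range one address at a time
        v = struct.unpack('!I', socket.inet_aton(start))[0]
        last = struct.unpack('!I', socket.inet_aton(end))[0]
        while v <= last:
            yield socket.inet_ntoa(struct.pack('!I', v))
            v += 1

    # first few client addresses of the range above
    gen = ip_seq('10.0.0.0', '20.0.0.0')
    for _ in range(4):
        print(next(gen))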
diff --git a/scripts/cap2/udp_1518B.pcap b/scripts/cap2/udp_1518B.pcap
new file mode 100755
index 00000000..f4f7af15
--- /dev/null
+++ b/scripts/cap2/udp_1518B.pcap
Binary files differ
diff --git a/scripts/cap2/udp_594B.pcap b/scripts/cap2/udp_594B.pcap
new file mode 100755
index 00000000..6c94cbc3
--- /dev/null
+++ b/scripts/cap2/udp_594B.pcap
Binary files differ
diff --git a/scripts/cap2/udp_64B.pcap b/scripts/cap2/udp_64B.pcap
new file mode 100755
index 00000000..699b9c80
--- /dev/null
+++ b/scripts/cap2/udp_64B.pcap
Binary files differ
diff --git a/scripts/cfg/cfg_example1.yaml b/scripts/cfg/cfg_example1.yaml
new file mode 100755
index 00000000..bfd7fd88
--- /dev/null
+++ b/scripts/cfg/cfg_example1.yaml
@@ -0,0 +1,27 @@
+- port_limit      : 2         # this option can limit the number of ports of the platform
+ version : 2
+  interfaces    : ["03:00.0","03:00.1"]  # list of the interfaces to bind; run ./dpdk_nic_bind.py --status to see them
+ interface_mask : [ "0000:11:00.00", "0000:11:00.01" ] # deprecated
+ scan_only_1g : true
+ enable_zmq_pub : true # enable publisher for stats data
+ zmq_pub_port : 4507
+  telnet_port       : 4508    # the telnet port in case it is enabled (with interactive mode)
+  port_info       :  # set the MAC addresses
+ - dest_mac : [0x1,0x0,0x0,0x1,0x0,0x00] # port 0
+ src_mac : [0x2,0x0,0x0,0x2,0x0,0x00]
+ - dest_mac : [0x3,0x0,0x0,0x3,0x0,0x00] # port 1
+ src_mac : [0x4,0x0,0x0,0x4,0x0,0x00]
+ - dest_mac : [0x5,0x0,0x0,0x5,0x0,0x00] # port 2
+ src_mac : [0x6,0x0,0x0,0x6,0x0,0x00]
+ - dest_mac : [0x7,0x0,0x0,0x7,0x0,0x01] # port 3
+ src_mac : [0x0,0x0,0x0,0x8,0x0,0x02]
+ - dest_mac : [0x0,0x0,0x0,0x9,0x0,0x03] # port 4
+ src_mac : [0x0,0x0,0x0,0xa,0x0,0x04]
+ - dest_mac : [0x0,0x0,0x0,0xb,0x0,0x05] # port 5
+ src_mac : [0x0,0x0,0x0,0xc,0x0,0x06]
+ - dest_mac : [0x0,0x0,0x0,0xd,0x0,0x07] # port 6
+ src_mac : [0x0,0x0,0x0,0xa,0x0,0x08]
+ - dest_mac : [0x0,0x0,0x0,0xb,0x0,0x09] # port 7
+ src_mac : [0x0,0x0,0x0,0xc,0x0,0x0a]
+
+
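The dest_mac / src_mac pairs in these cfg files are byte lists, one pair per port. A tiny helper, an assumption rather than part of TRex, to render them in the usual colon notation:

    def mac_to_str(mac_bytes):
        return ':'.join('%02x' % b for b in mac_bytes)

    print(mac_to_str([0x1, 0x0, 0x0, 0x1, 0x0, 0x00]))  # port 0 dest: 01:00:00:01:00:00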
diff --git a/scripts/cfg/cfg_example2.yaml b/scripts/cfg/cfg_example2.yaml
new file mode 100755
index 00000000..a381fb2f
--- /dev/null
+++ b/scripts/cfg/cfg_example2.yaml
@@ -0,0 +1,26 @@
+- port_limit      : 2         # this option can limit the number of ports of the platform
+ version : 2
+  interfaces    : ["03:00.0","03:00.1","13:00.1","13:00.0"]  # list of the interfaces to bind; run ./dpdk_nic_bind.py --status to see them
+ interface_mask : [ "0000:22:00.00", "0000:22:00.01" ] # deprecated
+ enable_zmq_pub : false # enable publisher for stats data
+ zmq_pub_port : 4500
+  telnet_port       : 4501    # the telnet port in case it is enabled (with interactive mode)
+  port_info       :  # set the MAC addresses
+ - dest_mac : [0x1,0x0,0x0,0x1,0x0,0x00] # port 0
+ src_mac : [0x2,0x0,0x0,0x2,0x0,0x00]
+ - dest_mac : [0x3,0x0,0x0,0x3,0x0,0x00] # port 1
+ src_mac : [0x4,0x0,0x0,0x4,0x0,0x00]
+ - dest_mac : [0x5,0x0,0x0,0x5,0x0,0x00] # port 2
+ src_mac : [0x6,0x0,0x0,0x6,0x0,0x00]
+ - dest_mac : [0x7,0x0,0x0,0x7,0x0,0x01] # port 3
+ src_mac : [0x0,0x0,0x0,0x8,0x0,0x02]
+ - dest_mac : [0x0,0x0,0x0,0x9,0x0,0x03] # port 4
+ src_mac : [0x0,0x0,0x0,0xa,0x0,0x04]
+ - dest_mac : [0x0,0x0,0x0,0xb,0x0,0x05] # port 5
+ src_mac : [0x0,0x0,0x0,0xc,0x0,0x06]
+ - dest_mac : [0x0,0x0,0x0,0xd,0x0,0x07] # port 6
+ src_mac : [0x0,0x0,0x0,0xa,0x0,0x08]
+ - dest_mac : [0x0,0x0,0x0,0xb,0x0,0x09] # port 7
+ src_mac : [0x0,0x0,0x0,0xc,0x0,0x0a]
+
+
diff --git a/scripts/cfg/ins1.yaml b/scripts/cfg/ins1.yaml
new file mode 100755
index 00000000..49278db4
--- /dev/null
+++ b/scripts/cfg/ins1.yaml
@@ -0,0 +1,25 @@
+- version : 2
+ interfaces : ["03:00.0","03:00.1"]
+ port_limit : 2
+ enable_zmq_pub : true # enable publisher for stats data
+ zmq_pub_port : 4507
+ prefix : setup1
+ limit_memory : 1024
+ c : 4
+# for a system with 1Gb/sec NICs or a VM, enable this
+  port_bandwidth_gb : 10   # port bandwidth in Gb/sec; for a VM put 1, for XL710 put 40
+ platform :
+ master_thread_id : 0
+ latency_thread_id : 5
+ dual_if :
+ - socket : 0
+ threads : [1,2,3,4]
+
+
+
+
+
+
+
+
+
diff --git a/scripts/cfg/ins2.yaml b/scripts/cfg/ins2.yaml
new file mode 100755
index 00000000..3a010750
--- /dev/null
+++ b/scripts/cfg/ins2.yaml
@@ -0,0 +1,25 @@
+- version : 2
+ interfaces : ["82:00.0","82:00.1"]
+ port_limit : 2
+ enable_zmq_pub : true # enable publisher for stats data
+ zmq_pub_port : 4510
+ #prefix : setup3
+ #limit_memory : 2048
+ c : 4
+# for a system with 1Gb/sec NICs or a VM, enable this
+  port_bandwidth_gb : 10   # port bandwidth in Gb/sec; for a VM put 1, for XL710 put 40
+ platform :
+ master_thread_id : 12
+ latency_thread_id : 13
+ dual_if :
+ - socket : 1
+ threads : [8,9,10,11]
+
+
+
+
+
+
+
+
+
diff --git a/scripts/cfg/ins3.yaml b/scripts/cfg/ins3.yaml
new file mode 100755
index 00000000..b03f7428
--- /dev/null
+++ b/scripts/cfg/ins3.yaml
@@ -0,0 +1,27 @@
+- version : 2
+ interfaces : ["03:00.0","03:00.1","82:00.0","82:00.1"]
+ port_limit : 4
+ enable_zmq_pub : true # enable publisher for stats data
+ zmq_pub_port : 4500
+ #prefix : setup3
+ #limit_memory : 2048
+ c : 4
+# for a system with 1Gb/sec NICs or a VM, enable this
+  port_bandwidth_gb : 10   # port bandwidth in Gb/sec; for a VM put 1, for XL710 put 40
+ platform :
+ master_thread_id : 0
+ latency_thread_id : 5
+ dual_if :
+ - socket : 1
+ threads : [1,2,3,4]
+ - socket : 0
+ threads : [8,9,10,11]
+
+
+
+
+
+
+
+
+
diff --git a/scripts/cfg/ucs_h0.yaml b/scripts/cfg/ucs_h0.yaml
new file mode 100755
index 00000000..3b620e03
--- /dev/null
+++ b/scripts/cfg/ucs_h0.yaml
@@ -0,0 +1,9 @@
+- port_limit : 2
+ version : 2
+  interfaces    : ["03:00.0","03:00.1","13:00.1","13:00.0"]  # list of the interfaces to bind; run ./dpdk_nic_bind.py --status to see them
+ interface_mask : [ "0000:11:00.00", "0000:11:00.01" ,"0000:0b:00.02","0000:0b:00.03"] # deprecated
+ enable_zmq_pub : true # enable publisher for stats data
+ zmq_pub_port : 4500
+  telnet_port       : 4501    # the telnet port in case it is enabled (with interactive mode)
+
+
diff --git a/scripts/cfg/ucs_h1.yaml b/scripts/cfg/ucs_h1.yaml
new file mode 100755
index 00000000..cb0caf54
--- /dev/null
+++ b/scripts/cfg/ucs_h1.yaml
@@ -0,0 +1,9 @@
+- port_limit : 2
+ version : 2
+  interfaces    : ["03:00.0","03:00.1","13:00.1","13:00.0"]  # list of the interfaces to bind; run ./dpdk_nic_bind.py --status to see them
+ interface_mask : [ "0000:11:00.00", "0000:11:00.01" ,"0000:0b:00.00","0000:0b:00.01"] # deprecated
+ enable_zmq_pub : true # enable publisher for stats data
+ zmq_pub_port : 4510
+  telnet_port       : 4511    # the telnet port in case it is enabled (with interactive mode)
+
+
diff --git a/scripts/cfg/xl710.yaml b/scripts/cfg/xl710.yaml
new file mode 100755
index 00000000..ed66078a
--- /dev/null
+++ b/scripts/cfg/xl710.yaml
@@ -0,0 +1,29 @@
+- version : 2
+ interfaces : ["08:00.0","08:00.1"]
+ port_limit : 2
+ enable_zmq_pub : true # enable publisher for stats data
+ zmq_pub_port : 4507
+ c : 4
+# for a system with 1Gb/sec NICs or a VM, enable this
+  port_bandwidth_gb : 40   # port bandwidth in Gb/sec; for a VM put 1, for XL710 put 40
+ platform :
+ master_thread_id : 0
+ latency_thread_id : 5
+ dual_if :
+ - socket : 0
+ threads : [1,2,3,4,6,7]
+  port_info       :  # set the MAC addresses
+ - dest_mac : [0x00,0x0c,0x29,0x55,0x37,0xc7] # port 0
+ src_mac : [0x00,0x0c,0x29,0x55,0x37,0xbd]
+ - dest_mac : [0x00,0x0c,0x29,0x55,0x37,0xbd] # port 1
+ src_mac : [0x00,0x0c,0x29,0x55,0x37,0xc7]
+
+
+
+
+
+
+
+
+
+
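The platform section hands cores out by role: a master thread, a latency thread, and per-dual_if worker threads. A sanity-check sketch; the rule that the IDs must not overlap is an assumption consistent with the examples above:

    platform = {
        'master_thread_id': 0,
        'latency_thread_id': 5,
        'dual_if': [{'socket': 0, 'threads': [1, 2, 3, 4, 6, 7]}],
    }

    used = {platform['master_thread_id'], platform['latency_thread_id']}
    for dif in platform['dual_if']:
        overlap = used.intersection(dif['threads'])
        assert not overlap, 'thread id collision: %s' % sorted(overlap)
        used.update(dif['threads'])
    print('thread ids in use:', sorted(used))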
diff --git a/scripts/daemon_server b/scripts/daemon_server
new file mode 100755
index 00000000..90fc614d
--- /dev/null
+++ b/scripts/daemon_server
@@ -0,0 +1,27 @@
+#!/usr/bin/python
+
+import os
+import sys
+
+core = 0
+
+if '--core' in sys.argv:
+ try:
+ idx = sys.argv.index('--core')
+ core = int(sys.argv[idx + 1])
+ if core > 31 or core < 0:
+ print "Error: please provide core argument between 0 to 31"
+ exit(-1)
+ del sys.argv[idx:idx+2]
+ except IndexError:
+ print "Error: please make sure core option provided with argument"
+ exit(-1)
+ except ValueError:
+ print "Error: please make sure core option provided with integer argument"
+ exit(-1)
+
+str_argv = ' '.join(sys.argv[1:])
+cmd = "taskset -c {core} python automation/trex_control_plane/server/trex_daemon_server.py {argv}".format(core = core, argv = str_argv)
+os.system(cmd)
+
+
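For illustration, with hypothetical arguments './daemon_server start --core 3' (the 'start' argument is an assumption about what the daemon accepts), the wrapper strips the --core pair and runs the daemon pinned to the requested core:

    core = 3
    remaining_argv = ['start']  # sys.argv[1:] after '--core 3' was deleted
    cmd = ("taskset -c {core} python "
           "automation/trex_control_plane/server/trex_daemon_server.py {argv}"
           ).format(core=core, argv=' '.join(remaining_argv))
    print(cmd)
    # taskset -c 3 python automation/trex_control_plane/server/trex_daemon_server.py start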
diff --git a/scripts/doc_process.py b/scripts/doc_process.py
new file mode 100755
index 00000000..d371cc02
--- /dev/null
+++ b/scripts/doc_process.py
@@ -0,0 +1,109 @@
+import sys,os
+import re
+import argparse
+
+
+class doc_driver(object):
+ args=None;
+
+def str_hex_to_ip(ip_hex):
+ bytes = ["".join(x) for x in zip(*[iter(ip_hex)]*2)]
+ bytes = [int(x, 16) for x in bytes]
+ s=".".join(str(x) for x in bytes)
+ return s
+
+class CTemplateFile:
+ def __init__ (self):
+ self.l=[];
+
+ def dump (self):
+ print "pkt,time sec,template,fid,flow-pkt-id,client_ip,client_port,server_ip ,desc"
+ for obj in self.l:
+ s='';
+ if obj[7]=='1':
+ s ='->'
+ else:
+ s ='<-'
+
+ print obj[0],",",obj[1],",",obj[6],",", obj[2],",",obj[4],",",str_hex_to_ip(obj[11]),",",obj[13],",",str_hex_to_ip(obj[12]),",",s
+
+ def dump_sj (self):
+ flows=0;
+ print "["
+ for obj in self.l:
+ print "[",
+ print obj[1],",",int("0x"+obj[2],16),",",obj[6],",",obj[4],
+ print "],"
+
+ print "]"
+
+
+
+ def load_file (self,file):
+ f=open(file,'r');
+ x=f.readlines()
+ f.close();
+ found=False;
+ for line in x:
+ if found :
+ line=line.rstrip('\n');
+ info=line.split(',');
+ if len(info)>5:
+ self.l+=[info];
+ else:
+ break;
+ else:
+ if 'pkt_id' in line:
+ found=True;
+
+
+
+
+def do_main ():
+ temp = CTemplateFile();
+ temp.load_file(doc_driver.args.file)
+ temp.dump();
+ temp.dump_sj();
+
+
+def process_options ():
+ parser = argparse.ArgumentParser(usage="""
+ doc_process -f <debug_output>
+ """,
+ description="process bp-sim output for docomentation",
+ epilog=" written by hhaim");
+
+ parser.add_argument("-f", "--file", dest="file",
+ metavar="file",
+ required=True)
+
+ parser.add_argument('--version', action='version',
+ version="0.1" )
+
+ doc_driver.args = parser.parse_args();
+
+
+
+def main ():
+ try:
+ process_options ();
+ do_main ()
+ exit(0);
+ except Exception, e:
+ print str(e);
+ exit(-1);
+
+
+
+if __name__ == "__main__":
+ main()
+
+
+
+
+
+
+
+
+
+
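For reference, the str_hex_to_ip helper above maps the 8-hex-digit addresses used elsewhere in this commit (e.g. min_src_ip : 0x10000001) to dotted-quad form:

    print(str_hex_to_ip('10000001'))  # -> 16.0.0.1
    print(str_hex_to_ip('30000000'))  # -> 48.0.0.0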
diff --git a/scripts/dpdk_nic_bind.py b/scripts/dpdk_nic_bind.py
new file mode 100755
index 00000000..08402227
--- /dev/null
+++ b/scripts/dpdk_nic_bind.py
@@ -0,0 +1,539 @@
+#! /usr/bin/python
+#
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+
+import sys, os, getopt, subprocess
+from os.path import exists, abspath, dirname, basename
+
+
+# The PCI device class for ETHERNET devices
+ETHERNET_CLASS = "0200"
+
+# global dict ethernet devices present. Dictionary indexed by PCI address.
+# Each device within this is itself a dictionary of device properties
+devices = {}
+# list of supported DPDK drivers
+dpdk_drivers = [ "igb_uio", "vfio-pci" ]
+
+# command-line arg flags
+b_flag = None
+status_flag = False
+force_flag = False
+args = []
+
+def usage():
+ '''Print usage information for the program'''
+ argv0 = basename(sys.argv[0])
+ print """
+Usage:
+------
+
+ %(argv0)s [options] DEVICE1 DEVICE2 ....
+
+where DEVICE1, DEVICE2 etc., are specified via PCI "domain:bus:slot.func" syntax
+or "bus:slot.func" syntax. For devices bound to Linux kernel drivers, they may
+also be referred to by Linux interface name e.g. eth0, eth1, em0, em1, etc.
+
+Options:
+ --help, --usage:
+ Display usage information and quit
+
+ --status:
+ Print the current status of all known network interfaces.
+ For each device, it displays the PCI domain, bus, slot and function,
+ along with a text description of the device. Depending upon whether the
+ device is being used by a kernel driver, the igb_uio driver, or no
+ driver, other relevant information will be displayed:
+ * the Linux interface name e.g. if=eth0
+ * the driver being used e.g. drv=igb_uio
+ * any suitable drivers not currently using that device
+ e.g. unused=igb_uio
+ NOTE: if this flag is passed along with a bind/unbind option, the status
+ display will always occur after the other operations have taken place.
+
+ -b driver, --bind=driver:
+ Select the driver to use or \"none\" to unbind the device
+
+ -u, --unbind:
+ Unbind a device (Equivalent to \"-b none\")
+
+ --force:
+ By default, devices which are used by Linux - as indicated by having
+ routes in the routing table - cannot be modified. Using the --force
+ flag overrides this behavior, allowing active links to be forcibly
+ unbound.
+ WARNING: This can lead to loss of network connection and should be used
+ with caution.
+
+Examples:
+---------
+
+To display current device status:
+ %(argv0)s --status
+
+To unbind eth1 from its current driver and bind it to igb_uio
+ %(argv0)s --bind=igb_uio eth1
+
+To unbind 0000:01:00.0 from using any driver
+ %(argv0)s -u 0000:01:00.0
+
+To bind 0000:02:00.0 and 0000:02:00.1 to the ixgbe kernel driver
+ %(argv0)s -b ixgbe 02:00.0 02:00.1
+
+ """ % locals() # replace items from local variables
+
+# This is roughly compatible with check_output function in subprocess module
+# which is only available in python 2.7.
+def check_output(args, stderr=None):
+ '''Run a command and capture its output'''
+ return subprocess.Popen(args, stdout=subprocess.PIPE,
+ stderr=stderr).communicate()[0]
+
+def find_module(mod):
+ '''find the .ko file for kernel module named mod.
+ Searches the $RTE_SDK/$RTE_TARGET directory, the kernel
+ modules directory and finally under the parent directory of
+ the script '''
+ # check $RTE_SDK/$RTE_TARGET directory
+ if 'RTE_SDK' in os.environ and 'RTE_TARGET' in os.environ:
+ path = "%s/%s/kmod/%s.ko" % (os.environ['RTE_SDK'],\
+ os.environ['RTE_TARGET'], mod)
+ if exists(path):
+ return path
+
+ # check using depmod
+ try:
+ depmod_out = check_output(["modinfo", "-n", mod], \
+ stderr=subprocess.STDOUT).lower()
+ if "error" not in depmod_out:
+ path = depmod_out.strip()
+ if exists(path):
+ return path
+ except: # if modinfo can't find module, it fails, so continue
+ pass
+
+ # check for a copy based off current path
+ tools_dir = dirname(abspath(sys.argv[0]))
+ if (tools_dir.endswith("tools")):
+ base_dir = dirname(tools_dir)
+ find_out = check_output(["find", base_dir, "-name", mod + ".ko"])
+ if len(find_out) > 0: #something matched
+ path = find_out.splitlines()[0]
+ if exists(path):
+ return path
+
+def check_modules():
+ '''Checks that igb_uio is loaded'''
+ global dpdk_drivers
+
+ fd = file("/proc/modules")
+ loaded_mods = fd.readlines()
+ fd.close()
+
+ # list of supported modules
+ mods = [{"Name" : driver, "Found" : False} for driver in dpdk_drivers]
+
+ # first check if module is loaded
+ for line in loaded_mods:
+ for mod in mods:
+ if line.startswith(mod["Name"]):
+ mod["Found"] = True
+ # special case for vfio_pci (module is named vfio-pci,
+ # but its .ko is named vfio_pci)
+ elif line.replace("_", "-").startswith(mod["Name"]):
+ mod["Found"] = True
+
+ # check if we have at least one loaded module
+ if True not in [mod["Found"] for mod in mods] and b_flag is not None:
+ print "Error - no supported modules are loaded"
+ sys.exit(1)
+
+ # change DPDK driver list to only contain drivers that are loaded
+ dpdk_drivers = [mod["Name"] for mod in mods if mod["Found"]]
+
+def has_driver(dev_id):
+ '''return true if a device is assigned to a driver. False otherwise'''
+ return "Driver_str" in devices[dev_id]
+
+def get_pci_device_details(dev_id):
+ '''This function gets additional details for a PCI device'''
+ device = {}
+
+ extra_info = check_output(["lspci", "-vmmks", dev_id]).splitlines()
+
+ # parse lspci details
+ for line in extra_info:
+ if len(line) == 0:
+ continue
+ name, value = line.split("\t", 1)
+ name = name.strip(":") + "_str"
+ device[name] = value
+ # check for a unix interface name
+ sys_path = "/sys/bus/pci/devices/%s/net/" % dev_id
+ if exists(sys_path):
+ device["Interface"] = ",".join(os.listdir(sys_path))
+ else:
+ device["Interface"] = ""
+ # check if a port is used for ssh connection
+ device["Ssh_if"] = False
+ device["Active"] = ""
+
+ return device
+
+def get_nic_details():
+ '''This function populates the "devices" dictionary. The keys used are
+ the pci addresses (domain:bus:slot.func). The values are themselves
+ dictionaries - one for each NIC.'''
+ global devices
+ global dpdk_drivers
+
+ # clear any old data
+ devices = {}
+ # first loop through and read details for all devices
+ # request machine readable format, with numeric IDs
+ dev = {};
+ dev_lines = check_output(["lspci", "-Dvmmn"]).splitlines()
+ for dev_line in dev_lines:
+ if (len(dev_line) == 0):
+ if dev["Class"] == ETHERNET_CLASS:
+ #convert device and vendor ids to numbers, then add to global
+ dev["Vendor"] = int(dev["Vendor"],16)
+ dev["Device"] = int(dev["Device"],16)
+ devices[dev["Slot"]] = dict(dev) # use dict to make copy of dev
+ else:
+ name, value = dev_line.split("\t", 1)
+ dev[name.rstrip(":")] = value
+
+ # check what is the interface if any for an ssh connection if
+ # any to this host, so we can mark it later.
+ ssh_if = []
+ route = check_output(["ip", "-o", "route"])
+ # filter out all lines for 169.254 routes
+ route = "\n".join(filter(lambda ln: not ln.startswith("169.254"),
+ route.splitlines()))
+ rt_info = route.split()
+ for i in xrange(len(rt_info) - 1):
+ if rt_info[i] == "dev":
+ ssh_if.append(rt_info[i+1])
+
+ # based on the basic info, get extended text details
+ for d in devices.keys():
+ # get additional info and add it to existing data
+ devices[d] = dict(devices[d].items() +
+ get_pci_device_details(d).items())
+
+ for _if in ssh_if:
+ if _if in devices[d]["Interface"].split(","):
+ devices[d]["Ssh_if"] = True
+ devices[d]["Active"] = "*Active*"
+ break;
+
+ # add igb_uio to list of supporting modules if needed
+ if "Module_str" in devices[d]:
+ for driver in dpdk_drivers:
+ if driver not in devices[d]["Module_str"]:
+ devices[d]["Module_str"] = devices[d]["Module_str"] + ",%s" % driver
+ else:
+ devices[d]["Module_str"] = ",".join(dpdk_drivers)
+
+ # make sure the driver and module strings do not have any duplicates
+ if has_driver(d):
+ modules = devices[d]["Module_str"].split(",")
+ if devices[d]["Driver_str"] in modules:
+ modules.remove(devices[d]["Driver_str"])
+ devices[d]["Module_str"] = ",".join(modules)
+
+def dev_id_from_dev_name(dev_name):
+ '''Take a device "name" - a string passed in by user to identify a NIC
+ device, and determine the device id - i.e. the domain:bus:slot.func - for
+ it, which can then be used to index into the devices array'''
+ dev = None
+ # check if it's already a suitable index
+ if dev_name in devices:
+ return dev_name
+ # check if it's an index just missing the domain part
+ elif "0000:" + dev_name in devices:
+ return "0000:" + dev_name
+ else:
+ # check if it's an interface name, e.g. eth1
+ for d in devices.keys():
+ if dev_name in devices[d]["Interface"].split(","):
+ return devices[d]["Slot"]
+ # if nothing else matches - error
+ print "Unknown device: %s. " \
+ "Please specify device in \"bus:slot.func\" format" % dev_name
+ sys.exit(1)
+
+def unbind_one(dev_id, force):
+ '''Unbind the device identified by "dev_id" from its current driver'''
+ dev = devices[dev_id]
+ if not has_driver(dev_id):
+ print "%s %s %s is not currently managed by any driver\n" % \
+ (dev["Slot"], dev["Device_str"], dev["Interface"])
+ return
+
+ # prevent us disconnecting ourselves
+ if dev["Ssh_if"] and not force:
+ print "Routing table indicates that interface %s is active" \
+ ". Skipping unbind" % (dev_id)
+ return
+
+ # write to /sys to unbind
+ filename = "/sys/bus/pci/drivers/%s/unbind" % dev["Driver_str"]
+ try:
+ f = open(filename, "a")
+ except:
+ print "Error: unbind failed for %s - Cannot open %s" % (dev_id, filename)
+        sys.exit(1)
+ f.write(dev_id)
+ f.close()
+
+def bind_one(dev_id, driver, force):
+ '''Bind the device given by "dev_id" to the driver "driver". If the device
+ is already bound to a different driver, it will be unbound first'''
+ dev = devices[dev_id]
+ saved_driver = None # used to rollback any unbind in case of failure
+
+ # prevent disconnection of our ssh session
+ if dev["Ssh_if"] and not force:
+ print "Routing table indicates that interface %s is active" \
+ ". Not modifying" % (dev_id)
+ return
+
+ # unbind any existing drivers we don't want
+ if has_driver(dev_id):
+ if dev["Driver_str"] == driver:
+ print "%s already bound to driver %s, skipping\n" % (dev_id, driver)
+ return
+ else:
+ saved_driver = dev["Driver_str"]
+ unbind_one(dev_id, force)
+ dev["Driver_str"] = "" # clear driver string
+
+ # if we are binding to one of DPDK drivers, add PCI id's to that driver
+ if driver in dpdk_drivers:
+ filename = "/sys/bus/pci/drivers/%s/new_id" % driver
+ try:
+ f = open(filename, "w")
+ except:
+ print "Error: bind failed for %s - Cannot open %s" % (dev_id, filename)
+ exit(-1)
+ return
+ try:
+ f.write("%04x %04x" % (dev["Vendor"], dev["Device"]))
+ f.close()
+ except:
+ print "Error: bind failed for %s - Cannot write new PCI ID to " \
+ "driver %s" % (dev_id, driver)
+ exit(-1)
+ return
+
+ # do the bind by writing to /sys
+ filename = "/sys/bus/pci/drivers/%s/bind" % driver
+ try:
+ f = open(filename, "a")
+ except:
+ print "Error: bind failed for %s - Cannot open %s" % (dev_id, filename)
+ if saved_driver is not None: # restore any previous driver
+ bind_one(dev_id, saved_driver, force)
+ return
+ try:
+ f.write(dev_id)
+ f.close()
+ except:
+ # for some reason, closing dev_id after adding a new PCI ID to new_id
+ # results in IOError. however, if the device was successfully bound,
+ # we don't care for any errors and can safely ignore IOError
+ tmp = get_pci_device_details(dev_id)
+ if "Driver_str" in tmp and tmp["Driver_str"] == driver:
+ return
+ print "Error: bind failed for %s - Cannot bind to driver %s" % (dev_id, driver)
+ if saved_driver is not None: # restore any previous driver
+ bind_one(dev_id, saved_driver, force)
+ return
+
+
+def unbind_all(dev_list, force=False):
+ """Unbind method, takes a list of device locations"""
+ dev_list = map(dev_id_from_dev_name, dev_list)
+ for d in dev_list:
+ unbind_one(d, force)
+
+def bind_all(dev_list, driver, force=False):
+ """Unbind method, takes a list of device locations"""
+ global devices
+
+ dev_list = map(dev_id_from_dev_name, dev_list)
+
+ for d in dev_list:
+ bind_one(d, driver, force)
+
+ # when binding devices to a generic driver (i.e. one that doesn't have a
+ # PCI ID table), some devices that are not bound to any other driver could
+ # be bound even if no one has asked them to. hence, we check the list of
+ # drivers again, and see if some of the previously-unbound devices were
+ # erroneously bound.
+ for d in devices.keys():
+ # skip devices that were already bound or that we know should be bound
+ if "Driver_str" in devices[d] or d in dev_list:
+ continue
+
+ # update information about this device
+ devices[d] = dict(devices[d].items() +
+ get_pci_device_details(d).items())
+
+ # check if updated information indicates that the device was bound
+ if "Driver_str" in devices[d]:
+ unbind_one(d, force)
+
+def display_devices(title, dev_list, extra_params = None):
+ '''Displays to the user the details of a list of devices given in "dev_list"
+ The "extra_params" parameter, if given, should contain a string with
+ %()s fields in it for replacement by the named fields in each device's
+ dictionary.'''
+ strings = [] # this holds the strings to print. We sort before printing
+ print "\n%s" % title
+ print "="*len(title)
+ if len(dev_list) == 0:
+ strings.append("<none>")
+ else:
+ for dev in dev_list:
+ if extra_params is not None:
+ strings.append("%s '%s' %s" % (dev["Slot"], \
+ dev["Device_str"], extra_params % dev))
+ else:
+ strings.append("%s '%s'" % (dev["Slot"], dev["Device_str"]))
+ # sort before printing, so that the entries appear in PCI order
+ strings.sort()
+ print "\n".join(strings) # print one per line
+
+def show_status():
+ '''Function called when the script is passed the "--status" option. Displays
+ to the user what devices are bound to the igb_uio driver, the kernel driver
+ or to no driver'''
+ global dpdk_drivers
+ kernel_drv = []
+ dpdk_drv = []
+ no_drv = []
+
+ # split our list of devices into the three categories above
+ for d in devices.keys():
+ if not has_driver(d):
+ no_drv.append(devices[d])
+ continue
+ if devices[d]["Driver_str"] in dpdk_drivers:
+ dpdk_drv.append(devices[d])
+ else:
+ kernel_drv.append(devices[d])
+
+ # print each category separately, so we can clearly see what's used by DPDK
+ display_devices("Network devices using DPDK-compatible driver", dpdk_drv, \
+ "drv=%(Driver_str)s unused=%(Module_str)s")
+ display_devices("Network devices using kernel driver", kernel_drv,
+ "if=%(Interface)s drv=%(Driver_str)s unused=%(Module_str)s %(Active)s")
+ display_devices("Other network devices", no_drv,\
+ "unused=%(Module_str)s")
+
+def parse_args():
+ '''Parses the command-line arguments given by the user and takes the
+ appropriate action for each'''
+ global b_flag
+ global status_flag
+ global force_flag
+ global args
+ if len(sys.argv) <= 1:
+ usage()
+ sys.exit(0)
+
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "b:u",
+ ["help", "usage", "status", "force",
+ "bind=", "unbind"])
+ except getopt.GetoptError, error:
+ print str(error)
+ print "Run '%s --usage' for further information" % sys.argv[0]
+ sys.exit(1)
+
+ for opt, arg in opts:
+ if opt == "--help" or opt == "--usage":
+ usage()
+ sys.exit(0)
+ if opt == "--status":
+ status_flag = True
+ if opt == "--force":
+ force_flag = True
+ if opt == "-b" or opt == "-u" or opt == "--bind" or opt == "--unbind":
+ if b_flag is not None:
+ print "Error - Only one bind or unbind may be specified\n"
+ sys.exit(1)
+ if opt == "-u" or opt == "--unbind":
+ b_flag = "none"
+ else:
+ b_flag = arg
+
+def do_arg_actions():
+ '''do the actual action requested by the user'''
+ global b_flag
+ global status_flag
+ global force_flag
+ global args
+
+ if b_flag is None and not status_flag:
+ print "Error: No action specified for devices. Please give a -b or -u option"
+ print "Run '%s --usage' for further information" % sys.argv[0]
+ sys.exit(1)
+
+ if b_flag is not None and len(args) == 0:
+ print "Error: No devices specified."
+ print "Run '%s --usage' for further information" % sys.argv[0]
+ sys.exit(1)
+
+ if b_flag == "none" or b_flag == "None":
+ unbind_all(args, force_flag)
+ elif b_flag is not None:
+ bind_all(args, b_flag, force_flag)
+ if status_flag:
+ if b_flag is not None:
+ get_nic_details() # refresh if we have changed anything
+ show_status()
+
+def main():
+ '''program main function'''
+ parse_args()
+ check_modules()
+ get_nic_details()
+ do_arg_actions()
+
+if __name__ == "__main__":
+ main()
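Besides its command-line interface, this module is imported as a library by dpdk_setup_ports.py below. A minimal sketch of that usage, listing the devices already bound to a DPDK driver:

    import dpdk_nic_bind as bind

    bind.get_nic_details()  # populates bind.devices from lspci
    for addr in sorted(bind.devices):
        dev = bind.devices[addr]
        if dev.get('Driver_str') in bind.dpdk_drivers:
            print(addr, dev['Device_str'])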
diff --git a/scripts/dpdk_setup_ports.py b/scripts/dpdk_setup_ports.py
new file mode 100755
index 00000000..8dfd742e
--- /dev/null
+++ b/scripts/dpdk_setup_ports.py
@@ -0,0 +1,232 @@
+#! /usr/bin/python
+# hhaim
+import sys,site
+site.addsitedir('python-lib')
+import yaml
+import os.path
+import os
+import dpdk_nic_bind
+import re
+import argparse;
+
+
+
+class map_driver(object):
+ args=None;
+ cfg_file='/etc/trex_cfg.yaml'
+
+class DpdkSetup(Exception):
+ pass
+
+class CIfMap:
+
+ def __init__(self, cfg_file):
+ self.m_cfg_file =cfg_file;
+ self.m_cfg_dict={};
+ self.m_devices={};
+
+ def dump_error (self,err):
+ s="""%s
+From this TRex version a configuration file must exist in the /etc/ folder.
+The name of the configuration file should be /etc/trex_cfg.yaml
+The minimum configuration file should include something like this:
+- version       : 2 # version 2 of the configuration file
+  interfaces    : ["03:00.0","03:00.1","13:00.1","13:00.0"]  # list of the interfaces to bind; run ./dpdk_nic_bind.py --status to see the list
+  port_limit    : 2 # number of ports to use; valid values are 2,4,6,8
+
+example of already bound devices
+
+$ ./dpdk_nic_bind.py --status
+
+Network devices using DPDK-compatible driver
+============================================
+0000:03:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv=igb_uio unused=
+0000:03:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv=igb_uio unused=
+0000:13:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv=igb_uio unused=
+0000:13:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv=igb_uio unused=
+
+Network devices using kernel driver
+===================================
+0000:02:00.0 '82545EM Gigabit Ethernet Controller (Copper)' if=eth2 drv=e1000 unused=igb_uio *Active*
+
+Other network devices
+=====================
+
+
+ """ % (err);
+ return s;
+
+
+ def raise_error (self,err):
+ s= self.dump_error (err)
+ raise DpdkSetup(s)
+
+ def load_config_file (self):
+
+ fcfg=self.m_cfg_file
+
+ if not os.path.isfile(fcfg) :
+ self.raise_error ("There is no valid configuration file %s " % fcfg)
+
+ try:
+ stream = open(fcfg, 'r')
+ self.m_cfg_dict= yaml.load(stream)
+ except Exception,e:
+ print e;
+ raise e
+
+ stream.close();
+
+ if not self.m_cfg_dict[0].has_key('version') :
+ self.raise_error ("Configuration file %s is old, should include version field" % fcfg )
+
+ if int(self.m_cfg_dict[0]['version'])<2 :
+ self.raise_error ("Configuration file %s is old, should include version field with value greater than 2" % fcfg)
+
+
+ if not self.m_cfg_dict[0].has_key('interfaces') :
+ self.raise_error ("Configuration file %s is old, should include interfaces field with 2,4,6,8 number of elemets" % fcfg)
+
+ if_list=self.m_cfg_dict[0]['interfaces']
+ if not (len(if_list) in [2,4,6,8]):
+ self.raise_error ("Configuration file %s should include interfaces field with 2,4,6,8 number of elemets" % fcfg)
+
+ def do_bind_one (self,key):
+ cmd='./dpdk_nic_bind.py --force --bind=igb_uio %s ' % ( key)
+ print cmd
+ res=os.system(cmd);
+ if res!=0:
+ raise DpdkSetup('')
+
+
+
+ def pci_name_to_full_name (self,pci_name):
+ c='[0-9A-Fa-f]';
+ sp='[:]'
+ s_short=c+c+sp+c+c+'[.]'+c;
+ s_full=c+c+c+c+sp+s_short
+ re_full = re.compile(s_full)
+ re_short = re.compile(s_short)
+
+ if re_short.match(pci_name):
+ return '0000:'+pci_name
+
+ if re_full.match(pci_name):
+ return pci_name
+
+ err=" %s is not a valid pci address \n" %pci_name;
+ raise DpdkSetup(err)
+
+
+ def run_dpdk_lspci (self):
+ dpdk_nic_bind.get_nic_details()
+ self.m_devices= dpdk_nic_bind.devices
+
+ def do_run (self):
+ self.load_config_file ()
+ self.run_dpdk_lspci ()
+
+ if_list=self.m_cfg_dict[0]['interfaces']
+
+ for obj in if_list:
+ key= self.pci_name_to_full_name (obj)
+ if not self.m_devices.has_key(key) :
+ err=" %s does not exist " %key;
+ raise DpdkSetup(err)
+
+
+ if self.m_devices[key].has_key('Driver_str'):
+ if self.m_devices[key]['Driver_str'] !='igb_uio' :
+ self.do_bind_one (key)
+ else:
+ self.do_bind_one (key)
+
+
+ def do_create (self):
+ print " not supported yet !"
+
+
+def parse_parent_cfg (parent_cfg):
+ l=parent_cfg.split(" ");
+ cfg_file='';
+ next=False;
+ for obj in l:
+ if next:
+ cfg_file=obj
+ next=False;
+ if obj == '--cfg':
+ next=True
+
+ return (cfg_file)
+
+def process_options ():
+ parser = argparse.ArgumentParser(usage="""
+
+Examples:
+---------
+
+To bind the interfaces to igb_uio using the trex configuration file
+  dpdk_setup_ports.py -l
+
+To create a default file
+  dpdk_setup_ports.py -c
+
+To show status
+  dpdk_setup_ports.py -s
+
+ """,
+ description=" unbind dpdk interfaces ",
+ epilog=" written by hhaim");
+
+ parser.add_argument("-l", "--load", action='store_true',
+ help=""" unbind the interfaces using the configuration file given """,
+ )
+
+ parser.add_argument("--cfg",
+ help=""" configuration file name """,
+ )
+
+ parser.add_argument("--parent",
+ help=""" parent configuration CLI, extract --cfg from it if given """,
+ )
+
+ parser.add_argument("-c", "--create", action='store_true',
+ help=""" try to create a configuration file. It is heuristic try to look into the file before """,
+ )
+
+ parser.add_argument("-s", "--show", action='store_true',
+ help=""" show the status """,
+ )
+
+ parser.add_argument('--version', action='version',
+ version="0.1" )
+
+ map_driver.args = parser.parse_args();
+
+ if map_driver.args.parent :
+ cfg = parse_parent_cfg (map_driver.args.parent)
+ if cfg != '':
+ map_driver.cfg_file = cfg;
+ if map_driver.args.cfg :
+ map_driver.cfg_file = map_driver.args.cfg;
+
+def main ():
+ try:
+ process_options ()
+
+ if map_driver.args.show:
+ res=os.system('./dpdk_nic_bind.py --status');
+ return(res);
+
+ obj =CIfMap(map_driver.cfg_file);
+
+ if map_driver.args.create:
+ obj.do_create();
+ else:
+ obj.do_run();
+ except Exception,e:
+ print e
+ exit(-1)
+
+main();
+
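A behavior sketch for pci_name_to_full_name above, following its two regexes; the example addresses come from the cfg files earlier in this commit. Note the module runs main() on import, so this is illustrative rather than something to paste into a REPL:

    m = CIfMap('/etc/trex_cfg.yaml')  # the cfg path is not touched here
    print(m.pci_name_to_full_name('03:00.0'))       # -> 0000:03:00.0
    print(m.pci_name_to_full_name('0000:82:00.1'))  # -> 0000:82:00.1
    # any other shape raises DpdkSetup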
diff --git a/scripts/exp/dns-0-ex.erf b/scripts/exp/dns-0-ex.erf
new file mode 100755
index 00000000..5ffffcb3
--- /dev/null
+++ b/scripts/exp/dns-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns-0.erf b/scripts/exp/dns-0.erf
new file mode 100644
index 00000000..d8d601b7
--- /dev/null
+++ b/scripts/exp/dns-0.erf
Binary files differ
diff --git a/scripts/exp/dns_e-0-ex.erf b/scripts/exp/dns_e-0-ex.erf
new file mode 100755
index 00000000..7ebfd69a
--- /dev/null
+++ b/scripts/exp/dns_e-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_e-0.erf b/scripts/exp/dns_e-0.erf
new file mode 100644
index 00000000..7ebfd69a
--- /dev/null
+++ b/scripts/exp/dns_e-0.erf
Binary files differ
diff --git a/scripts/exp/dns_flip-0-ex.erf b/scripts/exp/dns_flip-0-ex.erf
new file mode 100755
index 00000000..f6074ad7
--- /dev/null
+++ b/scripts/exp/dns_flip-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_flip-0.erf b/scripts/exp/dns_flip-0.erf
new file mode 100644
index 00000000..f6074ad7
--- /dev/null
+++ b/scripts/exp/dns_flip-0.erf
Binary files differ
diff --git a/scripts/exp/dns_ipv6-0-ex.erf b/scripts/exp/dns_ipv6-0-ex.erf
new file mode 100755
index 00000000..c47a6496
--- /dev/null
+++ b/scripts/exp/dns_ipv6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_ipv6-0.erf b/scripts/exp/dns_ipv6-0.erf
new file mode 100644
index 00000000..53dee235
--- /dev/null
+++ b/scripts/exp/dns_ipv6-0.erf
Binary files differ
diff --git a/scripts/exp/dns_ipv6_rxcheck-ex.erf b/scripts/exp/dns_ipv6_rxcheck-ex.erf
new file mode 100755
index 00000000..ebf95197
--- /dev/null
+++ b/scripts/exp/dns_ipv6_rxcheck-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_ipv6_rxcheck.erf b/scripts/exp/dns_ipv6_rxcheck.erf
new file mode 100644
index 00000000..ebf95197
--- /dev/null
+++ b/scripts/exp/dns_ipv6_rxcheck.erf
Binary files differ
diff --git a/scripts/exp/dns_one_server-0-ex.erf b/scripts/exp/dns_one_server-0-ex.erf
new file mode 100755
index 00000000..0d3d447b
--- /dev/null
+++ b/scripts/exp/dns_one_server-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_one_server-0.erf b/scripts/exp/dns_one_server-0.erf
new file mode 100644
index 00000000..ef76a69b
--- /dev/null
+++ b/scripts/exp/dns_one_server-0.erf
Binary files differ
diff --git a/scripts/exp/dns_p-0-ex.erf b/scripts/exp/dns_p-0-ex.erf
new file mode 100755
index 00000000..ec313584
--- /dev/null
+++ b/scripts/exp/dns_p-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_p-0.erf b/scripts/exp/dns_p-0.erf
new file mode 100644
index 00000000..ec313584
--- /dev/null
+++ b/scripts/exp/dns_p-0.erf
Binary files differ
diff --git a/scripts/exp/dns_rxcheck-ex.erf b/scripts/exp/dns_rxcheck-ex.erf
new file mode 100755
index 00000000..21a610a6
--- /dev/null
+++ b/scripts/exp/dns_rxcheck-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_rxcheck.erf b/scripts/exp/dns_rxcheck.erf
new file mode 100644
index 00000000..21a610a6
--- /dev/null
+++ b/scripts/exp/dns_rxcheck.erf
Binary files differ
diff --git a/scripts/exp/dns_single_server-0-ex.erf b/scripts/exp/dns_single_server-0-ex.erf
new file mode 100755
index 00000000..244211eb
--- /dev/null
+++ b/scripts/exp/dns_single_server-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_wlen-0-ex.erf b/scripts/exp/dns_wlen-0-ex.erf
new file mode 100755
index 00000000..069cd431
--- /dev/null
+++ b/scripts/exp/dns_wlen-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_wlen1-0-ex.erf b/scripts/exp/dns_wlen1-0-ex.erf
new file mode 100755
index 00000000..226a614a
--- /dev/null
+++ b/scripts/exp/dns_wlen1-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_wlen2-0-ex.erf b/scripts/exp/dns_wlen2-0-ex.erf
new file mode 100755
index 00000000..ae5d518e
--- /dev/null
+++ b/scripts/exp/dns_wlen2-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dyn_pyld1-0-ex.erf b/scripts/exp/dyn_pyld1-0-ex.erf
new file mode 100755
index 00000000..7d2089db
--- /dev/null
+++ b/scripts/exp/dyn_pyld1-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dyn_pyld1-0.erf b/scripts/exp/dyn_pyld1-0.erf
new file mode 100644
index 00000000..175a810c
--- /dev/null
+++ b/scripts/exp/dyn_pyld1-0.erf
Binary files differ
diff --git a/scripts/exp/http1_with_option-ex.pcap b/scripts/exp/http1_with_option-ex.pcap
new file mode 100755
index 00000000..ef5bf3c4
--- /dev/null
+++ b/scripts/exp/http1_with_option-ex.pcap
Binary files differ
diff --git a/scripts/exp/http1_with_option.pcap b/scripts/exp/http1_with_option.pcap
new file mode 100644
index 00000000..ef5bf3c4
--- /dev/null
+++ b/scripts/exp/http1_with_option.pcap
Binary files differ
diff --git a/scripts/exp/http1_with_option_ipv6-ex.pcap b/scripts/exp/http1_with_option_ipv6-ex.pcap
new file mode 100755
index 00000000..f70c1114
--- /dev/null
+++ b/scripts/exp/http1_with_option_ipv6-ex.pcap
Binary files differ
diff --git a/scripts/exp/http1_with_option_ipv6.pcap b/scripts/exp/http1_with_option_ipv6.pcap
new file mode 100644
index 00000000..f70c1114
--- /dev/null
+++ b/scripts/exp/http1_with_option_ipv6.pcap
Binary files differ
diff --git a/scripts/exp/http_plugin-0-ex.erf b/scripts/exp/http_plugin-0-ex.erf
new file mode 100755
index 00000000..f195e6ae
--- /dev/null
+++ b/scripts/exp/http_plugin-0-ex.erf
Binary files differ
diff --git a/scripts/exp/http_plugin-0.erf b/scripts/exp/http_plugin-0.erf
new file mode 100644
index 00000000..e2320eeb
--- /dev/null
+++ b/scripts/exp/http_plugin-0.erf
Binary files differ
diff --git a/scripts/exp/http_plugin_v6-0-ex.erf b/scripts/exp/http_plugin_v6-0-ex.erf
new file mode 100755
index 00000000..a0b43754
--- /dev/null
+++ b/scripts/exp/http_plugin_v6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/http_plugin_v6-0.erf b/scripts/exp/http_plugin_v6-0.erf
new file mode 100644
index 00000000..96e79eae
--- /dev/null
+++ b/scripts/exp/http_plugin_v6-0.erf
Binary files differ
diff --git a/scripts/exp/imix-0-ex.erf b/scripts/exp/imix-0-ex.erf
new file mode 100755
index 00000000..233e6b31
--- /dev/null
+++ b/scripts/exp/imix-0-ex.erf
Binary files differ
diff --git a/scripts/exp/imix-0.erf b/scripts/exp/imix-0.erf
new file mode 100644
index 00000000..c41a3006
--- /dev/null
+++ b/scripts/exp/imix-0.erf
Binary files differ
diff --git a/scripts/exp/imix_v6-0-ex.erf b/scripts/exp/imix_v6-0-ex.erf
new file mode 100755
index 00000000..56412091
--- /dev/null
+++ b/scripts/exp/imix_v6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/imix_v6-0.erf b/scripts/exp/imix_v6-0.erf
new file mode 100644
index 00000000..a85ed2b9
--- /dev/null
+++ b/scripts/exp/imix_v6-0.erf
Binary files differ
diff --git a/scripts/exp/ipv4_vlan-0-ex.erf b/scripts/exp/ipv4_vlan-0-ex.erf
new file mode 100755
index 00000000..abbdb652
--- /dev/null
+++ b/scripts/exp/ipv4_vlan-0-ex.erf
Binary files differ
diff --git a/scripts/exp/ipv4_vlan-0.erf b/scripts/exp/ipv4_vlan-0.erf
new file mode 100644
index 00000000..3a9ec6d1
--- /dev/null
+++ b/scripts/exp/ipv4_vlan-0.erf
Binary files differ
diff --git a/scripts/exp/ipv6-0-ex.erf b/scripts/exp/ipv6-0-ex.erf
new file mode 100755
index 00000000..85d30377
--- /dev/null
+++ b/scripts/exp/ipv6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/ipv6-0.erf b/scripts/exp/ipv6-0.erf
new file mode 100644
index 00000000..50f89ece
--- /dev/null
+++ b/scripts/exp/ipv6-0.erf
Binary files differ
diff --git a/scripts/exp/ipv6_vlan-0-ex.erf b/scripts/exp/ipv6_vlan-0-ex.erf
new file mode 100755
index 00000000..ac7cc39b
--- /dev/null
+++ b/scripts/exp/ipv6_vlan-0-ex.erf
Binary files differ
diff --git a/scripts/exp/ipv6_vlan-0.erf b/scripts/exp/ipv6_vlan-0.erf
new file mode 100644
index 00000000..771f5c03
--- /dev/null
+++ b/scripts/exp/ipv6_vlan-0.erf
Binary files differ
diff --git a/scripts/exp/limit_multi_pkt-0-ex.erf b/scripts/exp/limit_multi_pkt-0-ex.erf
new file mode 100755
index 00000000..23d536fc
--- /dev/null
+++ b/scripts/exp/limit_multi_pkt-0-ex.erf
Binary files differ
diff --git a/scripts/exp/limit_multi_pkt-0.erf b/scripts/exp/limit_multi_pkt-0.erf
new file mode 100644
index 00000000..a2ff8815
--- /dev/null
+++ b/scripts/exp/limit_multi_pkt-0.erf
Binary files differ
diff --git a/scripts/exp/limit_single_pkt-0-ex.erf b/scripts/exp/limit_single_pkt-0-ex.erf
new file mode 100755
index 00000000..3f7f0ff2
--- /dev/null
+++ b/scripts/exp/limit_single_pkt-0-ex.erf
Binary files differ
diff --git a/scripts/exp/limit_single_pkt-0.erf b/scripts/exp/limit_single_pkt-0.erf
new file mode 100644
index 00000000..548d2e3f
--- /dev/null
+++ b/scripts/exp/limit_single_pkt-0.erf
Binary files differ
diff --git a/scripts/exp/pcap_mode1-0-ex.erf b/scripts/exp/pcap_mode1-0-ex.erf
new file mode 100755
index 00000000..245b5ffb
--- /dev/null
+++ b/scripts/exp/pcap_mode1-0-ex.erf
Binary files differ
diff --git a/scripts/exp/pcap_mode1-0.erf b/scripts/exp/pcap_mode1-0.erf
new file mode 100644
index 00000000..ad5469f8
--- /dev/null
+++ b/scripts/exp/pcap_mode1-0.erf
Binary files differ
diff --git a/scripts/exp/pcap_mode2-0-ex.erf b/scripts/exp/pcap_mode2-0-ex.erf
new file mode 100755
index 00000000..e19274f7
--- /dev/null
+++ b/scripts/exp/pcap_mode2-0-ex.erf
Binary files differ
diff --git a/scripts/exp/pcap_mode2-0.erf b/scripts/exp/pcap_mode2-0.erf
new file mode 100644
index 00000000..348d1a4c
--- /dev/null
+++ b/scripts/exp/pcap_mode2-0.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1-0-ex.erf b/scripts/exp/rtsp_short1-0-ex.erf
new file mode 100755
index 00000000..7a7522b2
--- /dev/null
+++ b/scripts/exp/rtsp_short1-0-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1-0.erf b/scripts/exp/rtsp_short1-0.erf
new file mode 100644
index 00000000..09e180c6
--- /dev/null
+++ b/scripts/exp/rtsp_short1-0.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1_ipv6_rxcheck-ex.erf b/scripts/exp/rtsp_short1_ipv6_rxcheck-ex.erf
new file mode 100755
index 00000000..06ac6484
--- /dev/null
+++ b/scripts/exp/rtsp_short1_ipv6_rxcheck-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1_ipv6_rxcheck.erf b/scripts/exp/rtsp_short1_ipv6_rxcheck.erf
new file mode 100644
index 00000000..06ac6484
--- /dev/null
+++ b/scripts/exp/rtsp_short1_ipv6_rxcheck.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1_rxcheck-ex.erf b/scripts/exp/rtsp_short1_rxcheck-ex.erf
new file mode 100755
index 00000000..cd120df9
--- /dev/null
+++ b/scripts/exp/rtsp_short1_rxcheck-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1_rxcheck.erf b/scripts/exp/rtsp_short1_rxcheck.erf
new file mode 100644
index 00000000..cd120df9
--- /dev/null
+++ b/scripts/exp/rtsp_short1_rxcheck.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1_v6-0-ex.erf b/scripts/exp/rtsp_short1_v6-0-ex.erf
new file mode 100755
index 00000000..1b6d6391
--- /dev/null
+++ b/scripts/exp/rtsp_short1_v6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1_v6-0.erf b/scripts/exp/rtsp_short1_v6-0.erf
new file mode 100644
index 00000000..57373ba5
--- /dev/null
+++ b/scripts/exp/rtsp_short1_v6-0.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short2-0-ex.erf b/scripts/exp/rtsp_short2-0-ex.erf
new file mode 100755
index 00000000..7a7522b2
--- /dev/null
+++ b/scripts/exp/rtsp_short2-0-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short2-0.erf b/scripts/exp/rtsp_short2-0.erf
new file mode 100644
index 00000000..09e180c6
--- /dev/null
+++ b/scripts/exp/rtsp_short2-0.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short2_v6-0-ex.erf b/scripts/exp/rtsp_short2_v6-0-ex.erf
new file mode 100755
index 00000000..1b6d6391
--- /dev/null
+++ b/scripts/exp/rtsp_short2_v6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short2_v6-0.erf b/scripts/exp/rtsp_short2_v6-0.erf
new file mode 100644
index 00000000..57373ba5
--- /dev/null
+++ b/scripts/exp/rtsp_short2_v6-0.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short3-0-ex.erf b/scripts/exp/rtsp_short3-0-ex.erf
new file mode 100755
index 00000000..bae34983
--- /dev/null
+++ b/scripts/exp/rtsp_short3-0-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short3-0.erf b/scripts/exp/rtsp_short3-0.erf
new file mode 100644
index 00000000..93ad0fa4
--- /dev/null
+++ b/scripts/exp/rtsp_short3-0.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short3_v6-0-ex.erf b/scripts/exp/rtsp_short3_v6-0-ex.erf
new file mode 100755
index 00000000..e68dc787
--- /dev/null
+++ b/scripts/exp/rtsp_short3_v6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short3_v6-0.erf b/scripts/exp/rtsp_short3_v6-0.erf
new file mode 100644
index 00000000..281bd36f
--- /dev/null
+++ b/scripts/exp/rtsp_short3_v6-0.erf
Binary files differ
diff --git a/scripts/exp/sctp-ex.erf b/scripts/exp/sctp-ex.erf
new file mode 100755
index 00000000..bcbaf0c9
--- /dev/null
+++ b/scripts/exp/sctp-ex.erf
Binary files differ
diff --git a/scripts/exp/sctp.erf b/scripts/exp/sctp.erf
new file mode 100644
index 00000000..6eaac10c
--- /dev/null
+++ b/scripts/exp/sctp.erf
Binary files differ
diff --git a/scripts/exp/sfr2-0-ex.erf b/scripts/exp/sfr2-0-ex.erf
new file mode 100755
index 00000000..5e2b791f
--- /dev/null
+++ b/scripts/exp/sfr2-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sfr2-0.erf b/scripts/exp/sfr2-0.erf
new file mode 100644
index 00000000..bf5ff3ef
--- /dev/null
+++ b/scripts/exp/sfr2-0.erf
Binary files differ
diff --git a/scripts/exp/sfr3-0-ex.erf b/scripts/exp/sfr3-0-ex.erf
new file mode 100755
index 00000000..fa9b1008
--- /dev/null
+++ b/scripts/exp/sfr3-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sfr3-0.erf b/scripts/exp/sfr3-0.erf
new file mode 100644
index 00000000..bd14de4f
--- /dev/null
+++ b/scripts/exp/sfr3-0.erf
Binary files differ
diff --git a/scripts/exp/sfr_4-0-ex.erf b/scripts/exp/sfr_4-0-ex.erf
new file mode 100755
index 00000000..a0cddc92
--- /dev/null
+++ b/scripts/exp/sfr_4-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sfr_4-0.erf b/scripts/exp/sfr_4-0.erf
new file mode 100644
index 00000000..8cbf4275
--- /dev/null
+++ b/scripts/exp/sfr_4-0.erf
Binary files differ
diff --git a/scripts/exp/sip_short1-0-ex.erf b/scripts/exp/sip_short1-0-ex.erf
new file mode 100755
index 00000000..762fc157
--- /dev/null
+++ b/scripts/exp/sip_short1-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sip_short1-0.erf b/scripts/exp/sip_short1-0.erf
new file mode 100644
index 00000000..c78c215e
--- /dev/null
+++ b/scripts/exp/sip_short1-0.erf
Binary files differ
diff --git a/scripts/exp/sip_short1_v6-0-ex.erf b/scripts/exp/sip_short1_v6-0-ex.erf
new file mode 100755
index 00000000..cb027045
--- /dev/null
+++ b/scripts/exp/sip_short1_v6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sip_short1_v6-0.erf b/scripts/exp/sip_short1_v6-0.erf
new file mode 100644
index 00000000..a77b5c60
--- /dev/null
+++ b/scripts/exp/sip_short1_v6-0.erf
Binary files differ
diff --git a/scripts/exp/sip_short2-0-ex.erf b/scripts/exp/sip_short2-0-ex.erf
new file mode 100755
index 00000000..762fc157
--- /dev/null
+++ b/scripts/exp/sip_short2-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sip_short2-0.erf b/scripts/exp/sip_short2-0.erf
new file mode 100644
index 00000000..c78c215e
--- /dev/null
+++ b/scripts/exp/sip_short2-0.erf
Binary files differ
diff --git a/scripts/exp/sip_short2_v6-0-ex.erf b/scripts/exp/sip_short2_v6-0-ex.erf
new file mode 100755
index 00000000..cb027045
--- /dev/null
+++ b/scripts/exp/sip_short2_v6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sip_short2_v6-0.erf b/scripts/exp/sip_short2_v6-0.erf
new file mode 100644
index 00000000..a77b5c60
--- /dev/null
+++ b/scripts/exp/sip_short2_v6-0.erf
Binary files differ
diff --git a/scripts/exp/sip_short3-0-ex.erf b/scripts/exp/sip_short3-0-ex.erf
new file mode 100755
index 00000000..f6b0d5bb
--- /dev/null
+++ b/scripts/exp/sip_short3-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sip_short3-0.erf b/scripts/exp/sip_short3-0.erf
new file mode 100644
index 00000000..26f38414
--- /dev/null
+++ b/scripts/exp/sip_short3-0.erf
Binary files differ
diff --git a/scripts/exp/sip_short3_v6-0-ex.erf b/scripts/exp/sip_short3_v6-0-ex.erf
new file mode 100755
index 00000000..431fab0b
--- /dev/null
+++ b/scripts/exp/sip_short3_v6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sip_short3_v6-0.erf b/scripts/exp/sip_short3_v6-0.erf
new file mode 100644
index 00000000..1f347a29
--- /dev/null
+++ b/scripts/exp/sip_short3_v6-0.erf
Binary files differ
diff --git a/scripts/ko/3.11.10-301.fc20.x86_64/igb_uio.ko b/scripts/ko/3.11.10-301.fc20.x86_64/igb_uio.ko
new file mode 100755
index 00000000..2e9148a2
--- /dev/null
+++ b/scripts/ko/3.11.10-301.fc20.x86_64/igb_uio.ko
Binary files differ
diff --git a/scripts/ko/3.13.0-32-generic/igb_uio.ko b/scripts/ko/3.13.0-32-generic/igb_uio.ko
new file mode 100755
index 00000000..9e93fda2
--- /dev/null
+++ b/scripts/ko/3.13.0-32-generic/igb_uio.ko
Binary files differ
diff --git a/scripts/ko/3.16.0-37-generic/igb_uio.ko b/scripts/ko/3.16.0-37-generic/igb_uio.ko
new file mode 100755
index 00000000..94eac790
--- /dev/null
+++ b/scripts/ko/3.16.0-37-generic/igb_uio.ko
Binary files differ
diff --git a/scripts/ko/3.17.4-301.fc21.x86_64/igb_uio.ko b/scripts/ko/3.17.4-301.fc21.x86_64/igb_uio.ko
new file mode 100755
index 00000000..bbd87c6b
--- /dev/null
+++ b/scripts/ko/3.17.4-301.fc21.x86_64/igb_uio.ko
Binary files differ
diff --git a/scripts/ko/3.18.9-100.fc20.x86_64/igb_uio.ko b/scripts/ko/3.18.9-100.fc20.x86_64/igb_uio.ko
new file mode 100755
index 00000000..562a3cf5
--- /dev/null
+++ b/scripts/ko/3.18.9-100.fc20.x86_64/igb_uio.ko
Binary files differ
diff --git a/scripts/ko/3.19.1-201.fc21.x86_64/igb_uio.ko b/scripts/ko/3.19.1-201.fc21.x86_64/igb_uio.ko
new file mode 100755
index 00000000..0be8ce08
--- /dev/null
+++ b/scripts/ko/3.19.1-201.fc21.x86_64/igb_uio.ko
Binary files differ
diff --git a/scripts/ko/3.6.10-4.fc18.x86_64/igb_uio.ko b/scripts/ko/3.6.10-4.fc18.x86_64/igb_uio.ko
new file mode 100755
index 00000000..d711ee3e
--- /dev/null
+++ b/scripts/ko/3.6.10-4.fc18.x86_64/igb_uio.ko
Binary files differ
diff --git a/scripts/ko/src/Makefile b/scripts/ko/src/Makefile
new file mode 100755
index 00000000..7966b1af
--- /dev/null
+++ b/scripts/ko/src/Makefile
@@ -0,0 +1,38 @@
+# obj-m is a list of what kernel modules to build. The .o and other
+# objects will be automatically built from the corresponding .c file -
+# no need to list the source files explicitly.
+
+obj-m := igb_uio.o
+
+# KDIR is the location of the kernel source. The current standard is
+# to link to the associated source tree from the directory containing
+# the compiled modules.
+KDIR := /lib/modules/$(shell uname -r)/build
+
+# PWD is the current working directory and the location of our module
+# source files.
+PWD := $(shell pwd)
+
+# default is the default make target. The rule here says to run make
+# with a working directory of the directory containing the kernel
+# source and compile only the modules in the PWD (local) directory.
+default:
+ $(MAKE) -C $(KDIR) M=$(PWD) modules
+
+clean:
+	-rm -f *.o
+	-rm -f *.ko
+	-rm -f .*.cmd
+	-rm -f *.mod.c
+	-rm -f modules.order
+	-rm -f Module.symvers
+	-rm -rf .tmp_versions
+	-rm -f test.log
+
+
+
+# install the new driver
+install:
+ mkdir -p ../`uname -r`/
+ cp igb_uio.ko ../`uname -r`/
+
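The install target above copies the freshly built module into a sibling directory named after `uname -r`, which matches the scripts/ko/<kernel-release>/igb_uio.ko layout of the prebuilt modules added earlier in this commit. A minimal Python sketch of resolving the prebuilt module for the running kernel; find_prebuilt_ko is a hypothetical helper, not part of this commit:

    import os
    import platform

    def find_prebuilt_ko(ko_root='scripts/ko'):
        # platform.uname()[2] is the kernel release, e.g. '3.17.4-301.fc21.x86_64',
        # the same name the Makefile's install target uses for its directory.
        release = platform.uname()[2]
        candidate = os.path.join(ko_root, release, 'igb_uio.ko')
        return candidate if os.path.isfile(candidate) else None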
diff --git a/scripts/ko/src/compat.h b/scripts/ko/src/compat.h
new file mode 100755
index 00000000..c1d45a66
--- /dev/null
+++ b/scripts/ko/src/compat.h
@@ -0,0 +1,116 @@
+/*
+ * Minimal wrappers to allow compiling igb_uio on older kernels.
+ */
+
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b))
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+#define pci_cfg_access_lock pci_block_user_cfg_access
+#define pci_cfg_access_unlock pci_unblock_user_cfg_access
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
+#define HAVE_PTE_MASK_PAGE_IOMAP
+#endif
+
+#ifndef PCI_MSIX_ENTRY_SIZE
+#define PCI_MSIX_ENTRY_SIZE 16
+#define PCI_MSIX_ENTRY_LOWER_ADDR 0
+#define PCI_MSIX_ENTRY_UPPER_ADDR 4
+#define PCI_MSIX_ENTRY_DATA 8
+#define PCI_MSIX_ENTRY_VECTOR_CTRL 12
+#define PCI_MSIX_ENTRY_CTRL_MASKBIT 1
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34) && \
+ (!(defined(RHEL_RELEASE_CODE) && \
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 9)))
+
+static int pci_num_vf(struct pci_dev *dev)
+{
+ struct iov {
+ int pos;
+ int nres;
+ u32 cap;
+ u16 ctrl;
+ u16 total;
+ u16 initial;
+ u16 nr_virtfn;
+ } *iov = (struct iov *)dev->sriov;
+
+ if (!dev->is_physfn)
+ return 0;
+
+ return iov->nr_virtfn;
+}
+
+#endif /* < 2.6.34 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && \
+ (!(defined(RHEL_RELEASE_CODE) && \
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)))
+
+#define kstrtoul strict_strtoul
+
+#endif /* < 2.6.39 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0) && \
+ (!(defined(RHEL_RELEASE_CODE) && \
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3)))
+
+/* Check whether INTX can be used to control IRQs:
+ * set the INTX_DISABLE flag and read it back.
+ */
+static bool pci_intx_mask_supported(struct pci_dev *pdev)
+{
+ bool mask_supported = false;
+ uint16_t orig, new;
+
+ pci_block_user_cfg_access(pdev);
+ pci_read_config_word(pdev, PCI_COMMAND, &orig);
+ pci_write_config_word(pdev, PCI_COMMAND,
+ orig ^ PCI_COMMAND_INTX_DISABLE);
+ pci_read_config_word(pdev, PCI_COMMAND, &new);
+
+ if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
+ dev_err(&pdev->dev, "Command register changed from "
+ "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
+ } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
+ mask_supported = true;
+ pci_write_config_word(pdev, PCI_COMMAND, orig);
+ }
+ pci_unblock_user_cfg_access(pdev);
+
+ return mask_supported;
+}
+
+static bool pci_check_and_mask_intx(struct pci_dev *pdev)
+{
+ bool pending;
+ uint32_t status;
+
+ pci_block_user_cfg_access(pdev);
+ pci_read_config_dword(pdev, PCI_COMMAND, &status);
+
+	/* if the interrupt is not ours, leave the device untouched */
+ pending = (((status >> 16) & PCI_STATUS_INTERRUPT) != 0);
+ if (pending) {
+ uint16_t old, new;
+
+ old = status;
+ if (status != 0)
+ new = old & (~PCI_COMMAND_INTX_DISABLE);
+ else
+ new = old | PCI_COMMAND_INTX_DISABLE;
+
+ if (old != new)
+ pci_write_config_word(pdev, PCI_COMMAND, new);
+ }
+ pci_unblock_user_cfg_access(pdev);
+
+ return pending;
+}
+
+#endif /* < 3.3.0 */
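The pci_intx_mask_supported() fallback above probes a device by XOR-ing the INTX_DISABLE bit into the PCI command register, reading the register back, and accepting the device only if exactly that one bit changed. A pure-Python illustration of the read-back check, assuming the two register snapshots are already in hand:

    PCI_COMMAND_INTX_DISABLE = 0x400  # bit 10 of the PCI command register

    def intx_mask_supported(orig, new):
        # The probe wrote orig ^ INTX_DISABLE, so masking is supported only
        # if the read-back value differs from orig in exactly that one bit.
        if (new ^ orig) & ~PCI_COMMAND_INTX_DISABLE:
            return False  # unrelated bits flipped: driver or hardware bug
        return bool((new ^ orig) & PCI_COMMAND_INTX_DISABLE)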
diff --git a/scripts/ko/src/igb_uio.c b/scripts/ko/src/igb_uio.c
new file mode 100755
index 00000000..faeb0b68
--- /dev/null
+++ b/scripts/ko/src/igb_uio.c
@@ -0,0 +1,643 @@
+/*-
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/uio_driver.h>
+#include <linux/io.h>
+#include <linux/msi.h>
+#include <linux/version.h>
+
+#ifdef CONFIG_XEN_DOM0
+#include <xen/xen.h>
+#endif
+#include "rte_pci_dev_features.h"
+
+#include "compat.h"
+
+#ifdef RTE_PCI_CONFIG
+#define PCI_SYS_FILE_BUF_SIZE 10
+#define PCI_DEV_CAP_REG 0xA4
+#define PCI_DEV_CTRL_REG 0xA8
+#define PCI_DEV_CAP_EXT_TAG_MASK 0x20
+#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
+#define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
+#endif
+
+/**
+ * A structure describing the private information for a uio device.
+ */
+struct rte_uio_pci_dev {
+ struct uio_info info;
+ struct pci_dev *pdev;
+ enum rte_intr_mode mode;
+};
+
+static char *intr_mode = NULL;
+static enum rte_intr_mode igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
+
+static inline struct rte_uio_pci_dev *
+igbuio_get_uio_pci_dev(struct uio_info *info)
+{
+ return container_of(info, struct rte_uio_pci_dev, info);
+}
+
+/* sriov sysfs */
+static ssize_t
+show_max_vfs(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, 10, "%u\n",
+ pci_num_vf(container_of(dev, struct pci_dev, dev)));
+}
+
+static ssize_t
+store_max_vfs(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err = 0;
+ unsigned long max_vfs;
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+
+ if (0 != kstrtoul(buf, 0, &max_vfs))
+ return -EINVAL;
+
+ if (0 == max_vfs)
+ pci_disable_sriov(pdev);
+ else if (0 == pci_num_vf(pdev))
+ err = pci_enable_sriov(pdev, max_vfs);
+	else /* changing max_vfs while VFs are already enabled is not allowed */
+ err = -EINVAL;
+
+ return err ? err : count;
+}
+
+#ifdef RTE_PCI_CONFIG
+static ssize_t
+show_extended_tag(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
+ uint32_t val = 0;
+
+ pci_read_config_dword(pci_dev, PCI_DEV_CAP_REG, &val);
+ if (!(val & PCI_DEV_CAP_EXT_TAG_MASK)) /* Not supported */
+ return snprintf(buf, PCI_SYS_FILE_BUF_SIZE, "%s\n", "invalid");
+
+ val = 0;
+ pci_bus_read_config_dword(pci_dev->bus, pci_dev->devfn,
+ PCI_DEV_CTRL_REG, &val);
+
+ return snprintf(buf, PCI_SYS_FILE_BUF_SIZE, "%s\n",
+ (val & PCI_DEV_CTRL_EXT_TAG_MASK) ? "on" : "off");
+}
+
+static ssize_t
+store_extended_tag(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
+ uint32_t val = 0, enable;
+
+ if (strncmp(buf, "on", 2) == 0)
+ enable = 1;
+ else if (strncmp(buf, "off", 3) == 0)
+ enable = 0;
+ else
+ return -EINVAL;
+
+ pci_cfg_access_lock(pci_dev);
+ pci_bus_read_config_dword(pci_dev->bus, pci_dev->devfn,
+ PCI_DEV_CAP_REG, &val);
+ if (!(val & PCI_DEV_CAP_EXT_TAG_MASK)) { /* Not supported */
+ pci_cfg_access_unlock(pci_dev);
+ return -EPERM;
+ }
+
+ val = 0;
+ pci_bus_read_config_dword(pci_dev->bus, pci_dev->devfn,
+ PCI_DEV_CTRL_REG, &val);
+ if (enable)
+ val |= PCI_DEV_CTRL_EXT_TAG_MASK;
+ else
+ val &= ~PCI_DEV_CTRL_EXT_TAG_MASK;
+ pci_bus_write_config_dword(pci_dev->bus, pci_dev->devfn,
+ PCI_DEV_CTRL_REG, val);
+ pci_cfg_access_unlock(pci_dev);
+
+ return count;
+}
+
+static ssize_t
+show_max_read_request_size(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
+ int val = pcie_get_readrq(pci_dev);
+
+ return snprintf(buf, PCI_SYS_FILE_BUF_SIZE, "%d\n", val);
+}
+
+static ssize_t
+store_max_read_request_size(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
+ unsigned long size = 0;
+ int ret;
+
+ if (0 != kstrtoul(buf, 0, &size))
+ return -EINVAL;
+
+ ret = pcie_set_readrq(pci_dev, (int)size);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+#endif
+
+static DEVICE_ATTR(max_vfs, S_IRUGO | S_IWUSR, show_max_vfs, store_max_vfs);
+#ifdef RTE_PCI_CONFIG
+static DEVICE_ATTR(extended_tag, S_IRUGO | S_IWUSR, show_extended_tag,
+ store_extended_tag);
+static DEVICE_ATTR(max_read_request_size, S_IRUGO | S_IWUSR,
+ show_max_read_request_size, store_max_read_request_size);
+#endif
+
+static struct attribute *dev_attrs[] = {
+ &dev_attr_max_vfs.attr,
+#ifdef RTE_PCI_CONFIG
+ &dev_attr_extended_tag.attr,
+ &dev_attr_max_read_request_size.attr,
+#endif
+ NULL,
+};
+
+static const struct attribute_group dev_attr_grp = {
+ .attrs = dev_attrs,
+};
+/*
+ * Mask or unmask generation of MSI-X messages for a single vector.
+ */
+static void
+igbuio_msix_mask_irq(struct msi_desc *desc, int32_t state)
+{
+ u32 mask_bits = desc->masked;
+ unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+ if (state != 0)
+ mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ else
+ mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
+
+ if (mask_bits != desc->masked) {
+ writel(mask_bits, desc->mask_base + offset);
+ readl(desc->mask_base);
+ desc->masked = mask_bits;
+ }
+}
+
+/**
+ * This is the irqcontrol callback to be registered to uio_info.
+ * It can be used to disable/enable interrupt from user space processes.
+ *
+ * @param info
+ * pointer to uio_info.
+ * @param irq_state
+ * state value. 1 to enable interrupt, 0 to disable interrupt.
+ *
+ * @return
+ * - On success, 0.
+ * - On failure, a negative value.
+ */
+static int
+igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state)
+{
+ struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
+ struct pci_dev *pdev = udev->pdev;
+
+ pci_cfg_access_lock(pdev);
+ if (udev->mode == RTE_INTR_MODE_LEGACY)
+ pci_intx(pdev, !!irq_state);
+
+ else if (udev->mode == RTE_INTR_MODE_MSIX) {
+ struct msi_desc *desc;
+
+ list_for_each_entry(desc, &pdev->msi_list, list)
+ igbuio_msix_mask_irq(desc, irq_state);
+ }
+ pci_cfg_access_unlock(pdev);
+
+ return 0;
+}
+
+/**
+ * Interrupt handler: checks whether the interrupt belongs to this device.
+ * If it does, it is masked here and re-enabled later from user space.
+ */
+static irqreturn_t
+igbuio_pci_irqhandler(int irq, struct uio_info *info)
+{
+ struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
+
+	/* Legacy mode needs to mask the interrupt in hardware */
+ if (udev->mode == RTE_INTR_MODE_LEGACY &&
+ !pci_check_and_mask_intx(udev->pdev))
+ return IRQ_NONE;
+
+	/* Message-signalled mode: the IRQ is not shared and is automasked */
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_XEN_DOM0
+static int
+igbuio_dom0_mmap_phys(struct uio_info *info, struct vm_area_struct *vma)
+{
+ int idx;
+
+ idx = (int)vma->vm_pgoff;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#ifdef HAVE_PTE_MASK_PAGE_IOMAP
+ vma->vm_page_prot.pgprot |= _PAGE_IOMAP;
+#endif
+
+ return remap_pfn_range(vma,
+ vma->vm_start,
+ info->mem[idx].addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+/**
+ * uio device mmap method; dispatches to the igbuio physical mmap when
+ * running in a Xen Dom0 environment.
+ */
+static int
+igbuio_dom0_pci_mmap(struct uio_info *info, struct vm_area_struct *vma)
+{
+ int idx;
+
+ if (vma->vm_pgoff >= MAX_UIO_MAPS)
+ return -EINVAL;
+
+ if (info->mem[vma->vm_pgoff].size == 0)
+ return -EINVAL;
+
+ idx = (int)vma->vm_pgoff;
+ switch (info->mem[idx].memtype) {
+ case UIO_MEM_PHYS:
+ return igbuio_dom0_mmap_phys(info, vma);
+ case UIO_MEM_LOGICAL:
+ case UIO_MEM_VIRTUAL:
+ default:
+ return -EINVAL;
+ }
+}
+#endif
+
+/* Remap pci resources described by bar #pci_bar in uio resource n. */
+static int
+igbuio_pci_setup_iomem(struct pci_dev *dev, struct uio_info *info,
+ int n, int pci_bar, const char *name)
+{
+ unsigned long addr, len;
+ void *internal_addr;
+
+ if (sizeof(info->mem) / sizeof(info->mem[0]) <= n)
+ return -EINVAL;
+
+ addr = pci_resource_start(dev, pci_bar);
+ len = pci_resource_len(dev, pci_bar);
+ if (addr == 0 || len == 0)
+ return -1;
+ internal_addr = ioremap(addr, len);
+ if (internal_addr == NULL)
+ return -1;
+ info->mem[n].name = name;
+ info->mem[n].addr = addr;
+ info->mem[n].internal_addr = internal_addr;
+ info->mem[n].size = len;
+ info->mem[n].memtype = UIO_MEM_PHYS;
+ return 0;
+}
+
+/* Get pci port io resources described by bar #pci_bar in uio resource n. */
+static int
+igbuio_pci_setup_ioport(struct pci_dev *dev, struct uio_info *info,
+ int n, int pci_bar, const char *name)
+{
+ unsigned long addr, len;
+
+ if (sizeof(info->port) / sizeof(info->port[0]) <= n)
+ return -EINVAL;
+
+ addr = pci_resource_start(dev, pci_bar);
+ len = pci_resource_len(dev, pci_bar);
+ if (addr == 0 || len == 0)
+ return -EINVAL;
+
+ info->port[n].name = name;
+ info->port[n].start = addr;
+ info->port[n].size = len;
+ info->port[n].porttype = UIO_PORT_X86;
+
+ return 0;
+}
+
+/* Unmap previously ioremap'd resources */
+static void
+igbuio_pci_release_iomem(struct uio_info *info)
+{
+ int i;
+
+ for (i = 0; i < MAX_UIO_MAPS; i++) {
+ if (info->mem[i].internal_addr)
+ iounmap(info->mem[i].internal_addr);
+ }
+}
+
+static int
+igbuio_setup_bars(struct pci_dev *dev, struct uio_info *info)
+{
+ int i, iom, iop, ret;
+ unsigned long flags;
+ static const char *bar_names[PCI_STD_RESOURCE_END + 1] = {
+ "BAR0",
+ "BAR1",
+ "BAR2",
+ "BAR3",
+ "BAR4",
+ "BAR5",
+ };
+
+ iom = 0;
+ iop = 0;
+
+ for (i = 0; i != sizeof(bar_names) / sizeof(bar_names[0]); i++) {
+ if (pci_resource_len(dev, i) != 0 &&
+ pci_resource_start(dev, i) != 0) {
+ flags = pci_resource_flags(dev, i);
+ if (flags & IORESOURCE_MEM) {
+ ret = igbuio_pci_setup_iomem(dev, info, iom,
+ i, bar_names[i]);
+ if (ret != 0)
+ return ret;
+ iom++;
+ } else if (flags & IORESOURCE_IO) {
+ ret = igbuio_pci_setup_ioport(dev, info, iop,
+ i, bar_names[i]);
+ if (ret != 0)
+ return ret;
+ iop++;
+ }
+ }
+ }
+
+	return (iom != 0) ? 0 : -ENOENT; /* all setups succeeded if iom != 0 */
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
+static int __devinit
+#else
+static int
+#endif
+igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct rte_uio_pci_dev *udev;
+ struct msix_entry msix_entry;
+ int err;
+
+ udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
+ if (!udev)
+ return -ENOMEM;
+
+ /*
+ * enable device: ask low-level code to enable I/O and
+ * memory
+ */
+ err = pci_enable_device(dev);
+ if (err != 0) {
+ dev_err(&dev->dev, "Cannot enable PCI device\n");
+ goto fail_free;
+ }
+
+ /*
+ * reserve device's PCI memory regions for use by this
+ * module
+ */
+ err = pci_request_regions(dev, "igb_uio");
+ if (err != 0) {
+ dev_err(&dev->dev, "Cannot request regions\n");
+ goto fail_disable;
+ }
+
+ /* enable bus mastering on the device */
+ pci_set_master(dev);
+
+ /* remap IO memory */
+ err = igbuio_setup_bars(dev, &udev->info);
+ if (err != 0)
+ goto fail_release_iomem;
+
+ /* set 64-bit DMA mask */
+ err = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
+ if (err != 0) {
+ dev_err(&dev->dev, "Cannot set DMA mask\n");
+ goto fail_release_iomem;
+ }
+
+ err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
+ if (err != 0) {
+ dev_err(&dev->dev, "Cannot set consistent DMA mask\n");
+ goto fail_release_iomem;
+ }
+
+	/* fill in the uio info structure */
+ udev->info.name = "igb_uio";
+ udev->info.version = "0.1";
+ udev->info.handler = igbuio_pci_irqhandler;
+ udev->info.irqcontrol = igbuio_pci_irqcontrol;
+#ifdef CONFIG_XEN_DOM0
+	/* check whether the driver runs on Xen Dom0 */
+ if (xen_initial_domain())
+ udev->info.mmap = igbuio_dom0_pci_mmap;
+#endif
+ udev->info.priv = udev;
+ udev->pdev = dev;
+
+ switch (igbuio_intr_mode_preferred) {
+ case RTE_INTR_MODE_MSIX:
+ /* Only 1 msi-x vector needed */
+ msix_entry.entry = 0;
+ if (pci_enable_msix(dev, &msix_entry, 1) == 0) {
+ dev_dbg(&dev->dev, "using MSI-X");
+ udev->info.irq = msix_entry.vector;
+ udev->mode = RTE_INTR_MODE_MSIX;
+ break;
+ }
+ /* fall back to INTX */
+ case RTE_INTR_MODE_LEGACY:
+ if (pci_intx_mask_supported(dev)) {
+ dev_dbg(&dev->dev, "using INTX");
+ udev->info.irq_flags = IRQF_SHARED;
+ udev->info.irq = dev->irq;
+ udev->mode = RTE_INTR_MODE_LEGACY;
+ break;
+ }
+ dev_notice(&dev->dev, "PCI INTX mask not supported\n");
+ /* fall back to no IRQ */
+ case RTE_INTR_MODE_NONE:
+ udev->mode = RTE_INTR_MODE_NONE;
+ udev->info.irq = 0;
+ break;
+
+ default:
+ dev_err(&dev->dev, "invalid IRQ mode %u",
+ igbuio_intr_mode_preferred);
+ err = -EINVAL;
+ goto fail_release_iomem;
+ }
+
+ err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
+ if (err != 0)
+ goto fail_release_iomem;
+
+ /* register uio driver */
+ err = uio_register_device(&dev->dev, &udev->info);
+ if (err != 0)
+ goto fail_remove_group;
+
+ pci_set_drvdata(dev, udev);
+
+ dev_info(&dev->dev, "uio device registered with irq %lx\n",
+ udev->info.irq);
+
+ return 0;
+
+fail_remove_group:
+ sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
+fail_release_iomem:
+ igbuio_pci_release_iomem(&udev->info);
+ if (udev->mode == RTE_INTR_MODE_MSIX)
+ pci_disable_msix(udev->pdev);
+ pci_release_regions(dev);
+fail_disable:
+ pci_disable_device(dev);
+fail_free:
+ kfree(udev);
+
+ return err;
+}
+
+static void
+igbuio_pci_remove(struct pci_dev *dev)
+{
+ struct uio_info *info = pci_get_drvdata(dev);
+ struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
+
+ if (info->priv == NULL) {
+ pr_notice("Not igbuio device\n");
+ return;
+ }
+
+ sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
+ uio_unregister_device(info);
+ igbuio_pci_release_iomem(info);
+ if (udev->mode == RTE_INTR_MODE_MSIX)
+ pci_disable_msix(dev);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ pci_set_drvdata(dev, NULL);
+ kfree(info);
+}
+
+static int
+igbuio_config_intr_mode(char *intr_str)
+{
+ if (!intr_str) {
+ pr_info("Use MSIX interrupt by default\n");
+ return 0;
+ }
+
+ if (!strcmp(intr_str, RTE_INTR_MODE_MSIX_NAME)) {
+ igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
+ pr_info("Use MSIX interrupt\n");
+ } else if (!strcmp(intr_str, RTE_INTR_MODE_LEGACY_NAME)) {
+ igbuio_intr_mode_preferred = RTE_INTR_MODE_LEGACY;
+ pr_info("Use legacy interrupt\n");
+ } else {
+ pr_info("Error: bad parameter - %s\n", intr_str);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct pci_driver igbuio_pci_driver = {
+ .name = "igb_uio",
+ .id_table = NULL,
+ .probe = igbuio_pci_probe,
+ .remove = igbuio_pci_remove,
+};
+
+static int __init
+igbuio_pci_init_module(void)
+{
+ int ret;
+
+ ret = igbuio_config_intr_mode(intr_mode);
+ if (ret < 0)
+ return ret;
+
+ return pci_register_driver(&igbuio_pci_driver);
+}
+
+static void __exit
+igbuio_pci_exit_module(void)
+{
+ pci_unregister_driver(&igbuio_pci_driver);
+}
+
+module_init(igbuio_pci_init_module);
+module_exit(igbuio_pci_exit_module);
+
+module_param(intr_mode, charp, S_IRUGO);
+MODULE_PARM_DESC(intr_mode,
+"igb_uio interrupt mode (default=msix):\n"
+" " RTE_INTR_MODE_MSIX_NAME " Use MSIX interrupt\n"
+" " RTE_INTR_MODE_LEGACY_NAME " Use Legacy interrupt\n"
+"\n");
+
+MODULE_DESCRIPTION("UIO driver for Intel IGB PCI cards");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel Corporation");
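Because the driver registers with a NULL id_table, devices are attached at runtime through sysfs rather than matched by PCI id at load time. A minimal sketch of rebinding a NIC to igb_uio once the module is loaded; the PCI address and id pair below are hypothetical examples, and the interrupt mode can be picked at load time via the intr_mode parameter declared above (e.g. insmod igb_uio.ko intr_mode=legacy):

    PCI_ADDR = '0000:03:00.0'    # hypothetical PCI address
    VENDOR_DEVICE = '8086 10fb'  # hypothetical vendor/device id pair

    def bind_to_igb_uio():
        # detach the device from whatever kernel driver currently owns it
        with open('/sys/bus/pci/devices/%s/driver/unbind' % PCI_ADDR, 'w') as f:
            f.write(PCI_ADDR)
        # teach igb_uio about this id; the driver core may bind immediately
        with open('/sys/bus/pci/drivers/igb_uio/new_id', 'w') as f:
            f.write(VENDOR_DEVICE)
        try:
            with open('/sys/bus/pci/drivers/igb_uio/bind', 'w') as f:
                f.write(PCI_ADDR)
        except IOError:
            pass  # already bound as a side effect of the new_id write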
diff --git a/scripts/ko/src/readme.txt b/scripts/ko/src/readme.txt
new file mode 100755
index 00000000..a8d028c9
--- /dev/null
+++ b/scripts/ko/src/readme.txt
@@ -0,0 +1,16 @@
+
+
+1. Make sure the kernel headers are installed:
+
+Fedora
+  $sudo yum install kernel-headers
+or
+Ubuntu
+  $sudo apt-get install linux-headers-$(uname -r)
+
+2. From this directory run:
+
+$make
+$sudo modprobe uio
+$sudo insmod igb_uio.ko
+
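The steps above are easy to script; a minimal sketch using subprocess, assuming it runs from scripts/ko/src on a machine with matching kernel headers installed:

    import subprocess

    def build_and_load():
        # mirrors the manual steps: build, load uio, then insert igb_uio
        subprocess.check_call(['make'])
        subprocess.check_call(['sudo', 'modprobe', 'uio'])
        subprocess.check_call(['sudo', 'insmod', 'igb_uio.ko'])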
diff --git a/scripts/ko/src/rte_pci_dev_feature_defs.h b/scripts/ko/src/rte_pci_dev_feature_defs.h
new file mode 100755
index 00000000..6316b6dd
--- /dev/null
+++ b/scripts/ko/src/rte_pci_dev_feature_defs.h
@@ -0,0 +1,45 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PCI_DEV_DEFS_H_
+#define _RTE_PCI_DEV_DEFS_H_
+
+/* interrupt mode */
+enum rte_intr_mode {
+ RTE_INTR_MODE_NONE = 0,
+ RTE_INTR_MODE_LEGACY,
+ RTE_INTR_MODE_MSI,
+ RTE_INTR_MODE_MSIX
+};
+
+#endif /* _RTE_PCI_DEV_DEFS_H_ */
diff --git a/scripts/ko/src/rte_pci_dev_features.h b/scripts/ko/src/rte_pci_dev_features.h
new file mode 100755
index 00000000..dc5bc34f
--- /dev/null
+++ b/scripts/ko/src/rte_pci_dev_features.h
@@ -0,0 +1,44 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PCI_DEV_FEATURES_H
+#define _RTE_PCI_DEV_FEATURES_H
+
+#include "rte_pci_dev_feature_defs.h"
+
+#define RTE_INTR_MODE_NONE_NAME "none"
+#define RTE_INTR_MODE_LEGACY_NAME "legacy"
+#define RTE_INTR_MODE_MSI_NAME "msi"
+#define RTE_INTR_MODE_MSIX_NAME "msix"
+
+#endif
diff --git a/scripts/libzmq.so.3 b/scripts/libzmq.so.3
new file mode 100755
index 00000000..16980c27
--- /dev/null
+++ b/scripts/libzmq.so.3
Binary files differ
diff --git a/scripts/libzmq.so.3.1.0 b/scripts/libzmq.so.3.1.0
new file mode 100755
index 00000000..16980c27
--- /dev/null
+++ b/scripts/libzmq.so.3.1.0
Binary files differ
diff --git a/scripts/python-lib/yaml/__init__.py b/scripts/python-lib/yaml/__init__.py
new file mode 100755
index 00000000..76e19e13
--- /dev/null
+++ b/scripts/python-lib/yaml/__init__.py
@@ -0,0 +1,315 @@
+
+from error import *
+
+from tokens import *
+from events import *
+from nodes import *
+
+from loader import *
+from dumper import *
+
+__version__ = '3.11'
+
+try:
+ from cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
+def load(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+def load_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ return load(stream, SafeLoader)
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ return load_all(stream, SafeLoader)
+
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ from StringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=Loader, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=Loader):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_constructor(tag, constructor)
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(object):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __metaclass__ = YAMLObjectMetaclass
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = Loader
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+ from_yaml = classmethod(from_yaml)
+
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+ to_yaml = classmethod(to_yaml)
+
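The file above is the entry point of a bundled PyYAML 3.11 for Python 2. A short round trip through its public API, with hypothetical example values, assuming scripts/python-lib is on sys.path:

    import yaml

    doc = yaml.safe_load("duration: 10\nrate: 100.5\nports: [0, 1]\n")
    assert doc == {'duration': 10, 'rate': 100.5, 'ports': [0, 1]}

    # safe_dump resolves only basic YAML tags, refusing arbitrary objects
    print yaml.safe_dump(doc, default_flow_style=False)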
diff --git a/scripts/python-lib/yaml/__init__.pyc b/scripts/python-lib/yaml/__init__.pyc
new file mode 100644
index 00000000..dcaf27c0
--- /dev/null
+++ b/scripts/python-lib/yaml/__init__.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/composer.py b/scripts/python-lib/yaml/composer.py
new file mode 100755
index 00000000..06e5ac78
--- /dev/null
+++ b/scripts/python-lib/yaml/composer.py
@@ -0,0 +1,139 @@
+
+__all__ = ['Composer', 'ComposerError']
+
+from error import MarkedYAMLError
+from events import *
+from nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer(object):
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+        # Are there more documents available?
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor.encode('utf-8'), event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+ raise ComposerError("found duplicate anchor %r; first occurence"
+ % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
+ "second occurence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
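Unlike load(), the compose() entry point stops at the representation stage and returns the node graph this Composer builds, rather than native Python objects. A small illustration, assuming the bundled yaml package is importable:

    import yaml

    node = yaml.compose("a: 1")
    print node.tag                # tag:yaml.org,2002:map
    key, value = node.value[0]
    print key.value, value.value  # a 1 -- both still scalar strings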
diff --git a/scripts/python-lib/yaml/composer.pyc b/scripts/python-lib/yaml/composer.pyc
new file mode 100644
index 00000000..7e5b71c1
--- /dev/null
+++ b/scripts/python-lib/yaml/composer.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/constructor.py b/scripts/python-lib/yaml/constructor.py
new file mode 100755
index 00000000..635faac3
--- /dev/null
+++ b/scripts/python-lib/yaml/constructor.py
@@ -0,0 +1,675 @@
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor(object):
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+        # Are there more documents available?
+ return self.check_node()
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = generator.next()
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ try:
+ hash(key)
+ except TypeError, exc:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unacceptable key (%s)" % exc, key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ def add_constructor(cls, tag, constructor):
+ if not 'yaml_constructors' in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+ add_constructor = classmethod(add_constructor)
+
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if not 'yaml_multi_constructors' in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+ add_multi_constructor = classmethod(add_multi_constructor)
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == u'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return BaseConstructor.construct_scalar(self, node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == u'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == u'tag:yaml.org,2002:value':
+ key_node.tag = u'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return BaseConstructor.construct_mapping(self, node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ u'yes': True,
+ u'no': False,
+ u'true': True,
+ u'false': False,
+ u'on': True,
+ u'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
+
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return str(value).decode('base64')
+ except (binascii.Error, UnicodeEncodeError), exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ ur'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return value.encode('ascii')
+ except UnicodeEncodeError:
+ return value
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
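
The add_constructor calls above populate a per-class tag table that construct_object consults at load time. The same hook is open to user code; below is a minimal sketch, assuming this vendored package is importable as yaml. The !point tag and construct_point helper are illustrative only, not part of the library.

    import yaml

    # Hypothetical user-defined tag (illustrative names).
    def construct_point(loader, node):
        x, y = loader.construct_sequence(node)
        return (x, y)

    yaml.SafeLoader.add_constructor(u'!point', construct_point)
    print(yaml.safe_load(u"!point [3, 4]"))   # -> (3, 4)
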
+class Constructor(SafeConstructor):
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node).encode('utf-8')
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_long(self, node):
+ return long(self.construct_yaml_int(node))
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ try:
+ __import__(name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if u'.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = '__builtin__'
+ object_name = name
+ try:
+ __import__(module_name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r" % (object_name.encode('utf-8'),
+ module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ # Empty old-style class: type(self.classobj) is the classic-class type,
+ # which make_python_instance uses below to spot classes without __new__.
+ class classobj: pass
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if newobj and isinstance(cls, type(self.classobj)) \
+ and not args and not kwds:
+ instance = self.classobj()
+ instance.__class__ = cls
+ return instance
+ elif newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created, check make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/none',
+ Constructor.construct_yaml_null)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/bool',
+ Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/str',
+ Constructor.construct_python_str)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/unicode',
+ Constructor.construct_python_unicode)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/int',
+ Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/long',
+ Constructor.construct_python_long)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/float',
+ Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/complex',
+ Constructor.construct_python_complex)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/list',
+ Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/tuple',
+ Constructor.construct_python_tuple)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/dict',
+ Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/name:',
+ Constructor.construct_python_name)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/module:',
+ Constructor.construct_python_module)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object:',
+ Constructor.construct_python_object)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/apply:',
+ Constructor.construct_python_object_apply)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/new:',
+ Constructor.construct_python_object_new)
+
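
The multi-constructors registered above are what let the full Constructor resolve arbitrary !!python/ tags, and they are the reason only SafeConstructor should see untrusted input. A small sketch, assuming the package is importable as yaml:

    import yaml

    # Full Loader: object/apply calls the named callable (here the builtin
    # complex), per construct_python_object_apply above.
    print(yaml.load(u"!!python/object/apply:complex [1, 2]",
                    Loader=yaml.Loader))            # -> (1+2j)

    # SafeLoader has no python/ entries, so the same document falls through
    # to construct_undefined and is rejected.
    try:
        yaml.safe_load(u"!!python/object/apply:complex [1, 2]")
    except yaml.YAMLError as exc:
        print("rejected: %s" % exc)
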
diff --git a/scripts/python-lib/yaml/constructor.pyc b/scripts/python-lib/yaml/constructor.pyc
new file mode 100644
index 00000000..2c0383fa
--- /dev/null
+++ b/scripts/python-lib/yaml/constructor.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/cyaml.py b/scripts/python-lib/yaml/cyaml.py
new file mode 100755
index 00000000..68dcd751
--- /dev/null
+++ b/scripts/python-lib/yaml/cyaml.py
@@ -0,0 +1,85 @@
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+from _yaml import CParser, CEmitter
+
+from constructor import *
+
+from serializer import *
+from representer import *
+
+from resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/scripts/python-lib/yaml/cyaml.pyc b/scripts/python-lib/yaml/cyaml.pyc
new file mode 100644
index 00000000..feebb050
--- /dev/null
+++ b/scripts/python-lib/yaml/cyaml.pyc
Binary files differ
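cyaml.py only wires the libyaml-backed CParser and CEmitter to the pure-Python constructors, representers, and resolvers; whether the _yaml extension imports decides availability at runtime. The conventional fallback pattern, as a sketch assuming the package is importable as yaml:

    import yaml

    try:
        from yaml import CSafeLoader as PreferredLoader   # libyaml present
    except ImportError:
        from yaml import SafeLoader as PreferredLoader    # pure Python

    print(yaml.load(u"x: [1, 2, 3]", Loader=PreferredLoader))
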
diff --git a/scripts/python-lib/yaml/dumper.py b/scripts/python-lib/yaml/dumper.py
new file mode 100755
index 00000000..f811d2c9
--- /dev/null
+++ b/scripts/python-lib/yaml/dumper.py
@@ -0,0 +1,62 @@
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from emitter import *
+from serializer import *
+from representer import *
+from resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/scripts/python-lib/yaml/dumper.pyc b/scripts/python-lib/yaml/dumper.pyc
new file mode 100644
index 00000000..dfe78317
--- /dev/null
+++ b/scripts/python-lib/yaml/dumper.pyc
Binary files differ
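Each Dumper variant above simply routes its constructor keywords to the right mixin: layout options go to Emitter, document framing to Serializer, and style defaults to Representer. A sketch of those options surfacing through yaml.dump, assuming the package is importable as yaml:

    import yaml

    doc = {'name': 'example', 'ports': [0, 1]}
    print(yaml.dump(doc,
                    default_flow_style=False,   # Representer: block style
                    explicit_start=True,        # Serializer: leading '---'
                    indent=4))                  # Emitter: indent width
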
diff --git a/scripts/python-lib/yaml/emitter.py b/scripts/python-lib/yaml/emitter.py
new file mode 100755
index 00000000..e5bcdccc
--- /dev/null
+++ b/scripts/python-lib/yaml/emitter.py
@@ -0,0 +1,1140 @@
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
+
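
The grammar above is exactly what yaml.emit consumes; a minimal sketch that hand-builds one document from events (assuming the package is importable as yaml):

    import yaml
    from yaml.events import (StreamStartEvent, DocumentStartEvent,
                             ScalarEvent, DocumentEndEvent, StreamEndEvent)

    events = [
        StreamStartEvent(),
        DocumentStartEvent(explicit=True),
        ScalarEvent(anchor=None, tag=None, implicit=(True, True),
                    value=u'hello'),
        DocumentEndEvent(explicit=False),
        StreamEndEvent(),
    ]
    print(yaml.emit(events))    # -> "--- hello\n"
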
+__all__ = ['Emitter', 'EmitterError']
+
+from error import YAMLError
+from events import *
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis(object):
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter(object):
+
+ DEFAULT_TAG_PREFIXES = {
+ u'!' : u'!',
+ u'tag:yaml.org,2002:' : u'!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+ # Encoding can be overridden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator.
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = u'\n'
+ if line_break in [u'\r', u'\n', u'\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for the next few events before emitting.
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
+
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not getattr(self.stream, 'encoding', None):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = self.event.tags.keys()
+ handles.sort()
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator(u'---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor(u'&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor(u'*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator(u'[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator(u'{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(u':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator(u'-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(u':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == u'')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = u'!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return u'%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != u'!' or handle[-1] != u'!':
+ raise EmitterError("tag handle must start and end with '!': %r"
+ % (handle.encode('utf-8')))
+ for ch in handle[1:-1]:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch.encode('utf-8'), handle.encode('utf-8')))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == u'!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(prefix[start:end])
+ return u''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == u'!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = self.tag_prefixes.keys()
+ prefixes.sort()
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == u'!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == u'!' and handle != u'!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = u''.join(chunks)
+ if handle:
+ return u'%s%s' % (handle, suffix_text)
+ else:
+ return u'!<%s>' % suffix_text
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch.encode('utf-8'), anchor.encode('utf-8')))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith(u'---') or scalar.startswith(u'...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+ preceded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in u'#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in u'?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+ # Some indicators cannot appear within a scalar as well.
+ if ch in u',?[]{}':
+ flow_indicators = True
+ if ch == u':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'#' and preceded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in u'\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
+ if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == u' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in u'\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+ # Spaces followed by breaks, as well as special characters, are only
+ # allowed for double-quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
+
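
The flags computed by analyze_scalar are what choose_scalar_style consults below; a quick illustration of content driving the emitted style (a sketch, assuming the package is importable as yaml):

    import yaml

    print(yaml.dump({'a': u'abc'}, default_flow_style=False))   # a: abc
    # A leading space clears allow_*_plain, so the value gets quoted:
    print(yaml.dump({'a': u' abc'}, default_flow_style=False))  # a: ' abc'
    # A line break sets multiline, which also rules out plain style:
    print(yaml.dump({'a': u'a\nb'}, default_flow_style=False))
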
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write(u'\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = u' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = u' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = u'%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = u'%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator(u'\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != u' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == u'\'':
+ data = u'\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator(u'\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ u'\0': u'0',
+ u'\x07': u'a',
+ u'\x08': u'b',
+ u'\x09': u't',
+ u'\x0A': u'n',
+ u'\x0B': u'v',
+ u'\x0C': u'f',
+ u'\x0D': u'r',
+ u'\x1B': u'e',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'\x85': u'N',
+ u'\xA0': u'_',
+ u'\u2028': u'L',
+ u'\u2029': u'P',
+ }
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator(u'"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
+ or not (u'\x20' <= ch <= u'\x7E'
+ or (self.allow_unicode
+ and (u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= u'\xFF':
+ data = u'\\x%02X' % ord(ch)
+ elif ch <= u'\uFFFF':
+ data = u'\\u%04X' % ord(ch)
+ else:
+ data = u'\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+u'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == u' ':
+ data = u'\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator(u'"', False)
+
+ def determine_block_hints(self, text):
+ hints = u''
+ if text:
+ if text[0] in u' \n\x85\u2028\u2029':
+ hints += unicode(self.best_indent)
+ if text[-1] not in u'\n\x85\u2028\u2029':
+ hints += u'-'
+ elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
+ hints += u'+'
+ return hints
+
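
determine_block_hints encodes the indentation and chomping indicators for block scalars; for instance, a literal scalar without a trailing break gets the '-' (strip) hint. A sketch, assuming the package is importable as yaml:

    import yaml

    print(yaml.dump(u'no trailing break', default_style='|'))
    # |-
    #   no trailing break
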
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'>'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != u' ' \
+ and text[start] == u'\n':
+ self.write_line_break()
+ leading_space = (ch == u' ')
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ spaces = (ch == u' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'|'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u'\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = u' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
diff --git a/scripts/python-lib/yaml/emitter.pyc b/scripts/python-lib/yaml/emitter.pyc
new file mode 100644
index 00000000..5123fcd2
--- /dev/null
+++ b/scripts/python-lib/yaml/emitter.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/error.py b/scripts/python-lib/yaml/error.py
new file mode 100755
index 00000000..577686db
--- /dev/null
+++ b/scripts/python-lib/yaml/error.py
@@ -0,0 +1,75 @@
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark(object):
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end].encode('utf-8')
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
diff --git a/scripts/python-lib/yaml/error.pyc b/scripts/python-lib/yaml/error.pyc
new file mode 100644
index 00000000..a1fcd243
--- /dev/null
+++ b/scripts/python-lib/yaml/error.pyc
Binary files differ
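Mark.get_snippet is what gives PyYAML errors their caret-under-the-problem output, and MarkedYAMLError.__str__ stitches the context and problem marks together. A sketch of this surfacing through a parse failure (assuming the package is importable as yaml):

    import yaml

    try:
        yaml.safe_load(u"a: [1, 2")     # unterminated flow sequence
    except yaml.YAMLError as exc:
        print(exc)   # includes line/column marks and a '^' snippet
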
diff --git a/scripts/python-lib/yaml/events.py b/scripts/python-lib/yaml/events.py
new file mode 100755
index 00000000..f79ad389
--- /dev/null
+++ b/scripts/python-lib/yaml/events.py
@@ -0,0 +1,86 @@
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
diff --git a/scripts/python-lib/yaml/events.pyc b/scripts/python-lib/yaml/events.pyc
new file mode 100644
index 00000000..0b022f68
--- /dev/null
+++ b/scripts/python-lib/yaml/events.pyc
Binary files differ
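These event classes are what the parser produces and the emitter consumes; yaml.parse exposes the stream directly. A sketch (assuming the package is importable as yaml):

    import yaml

    for event in yaml.parse(u"- a\n- b\n"):
        print(event)
    # StreamStartEvent(), DocumentStartEvent(), SequenceStartEvent(...),
    # ScalarEvent(... value=u'a'), ScalarEvent(... value=u'b'),
    # SequenceEndEvent(), DocumentEndEvent(), StreamEndEvent()
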
diff --git a/scripts/python-lib/yaml/loader.py b/scripts/python-lib/yaml/loader.py
new file mode 100755
index 00000000..293ff467
--- /dev/null
+++ b/scripts/python-lib/yaml/loader.py
@@ -0,0 +1,40 @@
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
+
+from reader import *
+from scanner import *
+from parser import *
+from composer import *
+from constructor import *
+from resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
diff --git a/scripts/python-lib/yaml/loader.pyc b/scripts/python-lib/yaml/loader.pyc
new file mode 100644
index 00000000..17e3e00e
--- /dev/null
+++ b/scripts/python-lib/yaml/loader.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/nodes.py b/scripts/python-lib/yaml/nodes.py
new file mode 100755
index 00000000..c4f070c4
--- /dev/null
+++ b/scripts/python-lib/yaml/nodes.py
@@ -0,0 +1,49 @@
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
diff --git a/scripts/python-lib/yaml/nodes.pyc b/scripts/python-lib/yaml/nodes.pyc
new file mode 100644
index 00000000..58922515
--- /dev/null
+++ b/scripts/python-lib/yaml/nodes.pyc
Binary files differ
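The node classes carry the composed representation between parsing and construction, and their id strings are what the constructors' error messages above report. yaml.compose returns this tree directly; a sketch (assuming the package is importable as yaml):

    import yaml

    node = yaml.compose(u"a: [1, 2]")
    print("%s %s" % (node.id, node.tag))        # mapping tag:yaml.org,2002:map
    key_node, value_node = node.value[0]
    print("%s -> %s" % (key_node.value, value_node.id))   # a -> sequence
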
diff --git a/scripts/python-lib/yaml/parser.py b/scripts/python-lib/yaml/parser.py
new file mode 100755
index 00000000..f9e3057f
--- /dev/null
+++ b/scripts/python-lib/yaml/parser.py
@@ -0,0 +1,589 @@
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { BLOCK-ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
+__all__ = ['Parser', 'ParserError']
+
+from error import MarkedYAMLError
+from tokens import *
+from events import *
+from scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser(object):
+    # Since writing a recursive descent parser is a straightforward task, we
+    # do not add many comments here.
+
+ DEFAULT_TAGS = {
+ u'!': u'!',
+ u'!!': u'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
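+    # A minimal usage sketch of this event API (not itself part of this
+    # module): composed with Scanner and Reader, as the top-level yaml
+    # package does, the three methods above are driven like this:
+    #
+    #     import yaml
+    #     for event in yaml.parse(u'a: [1, 2]'):
+    #         print event
+    #     # StreamStartEvent, DocumentStartEvent, MappingStartEvent, ...
+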
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == u'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == u'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle.encode('utf-8'),
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
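+    # For example, a document that uses both directives:
+    #
+    #     %YAML 1.1
+    #     %TAG !e! tag:example.com,2011:
+    #     --- !e!point {x: 1, y: 2}
+    #
+    # declares the version and binds the '!e!' handle, so '!e!point'
+    # resolves to 'tag:example.com,2011:point'. (Illustrative document; the
+    # handle and prefix are arbitrary.)
+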
+ # block_node_or_indentless_sequence ::= ALIAS
+    #               | properties (block_content | indentless_sequence)?
+    #               | block_content
+    #               | indentless_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle.encode('utf-8'),
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == u'!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == u'!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == u'!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), u'',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
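+    # For example, in
+    #
+    #     key:
+    #     - one
+    #     - two
+    #
+    # the entries form an indentless sequence: the '-' indicators are not
+    # indented past 'key', so the scanner emits no BLOCK-SEQUENCE-START
+    # token and this state handles the bare BLOCK-ENTRY tokens.
+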
+    # block_mapping     ::= BLOCK-MAPPING-START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
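+    # For example, the block mapping
+    #
+    #     a: 1
+    #     b: 2
+    #
+    # yields MappingStartEvent, then alternating key/value ScalarEvents,
+    # then MappingEndEvent.
+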
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+    # Note that while the production rules for flow_sequence_entry and
+    # flow_mapping_entry are identical, their interpretations differ.
+    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+    # generates an inline mapping (set syntax), as in the example below.
+
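+    # For example, `[a: 1, b]` is a flow sequence whose first entry is the
+    # one-pair inline mapping {a: 1} and whose second entry is the scalar b.
+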
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), u'', mark, mark)
+
diff --git a/scripts/python-lib/yaml/parser.pyc b/scripts/python-lib/yaml/parser.pyc
new file mode 100644
index 00000000..ad508088
--- /dev/null
+++ b/scripts/python-lib/yaml/parser.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/reader.py b/scripts/python-lib/yaml/reader.py
new file mode 100755
index 00000000..3249e6b9
--- /dev/null
+++ b/scripts/python-lib/yaml/reader.py
@@ -0,0 +1,190 @@
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code here.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+# Parser does not use it for any other purpose.
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(length=1) - return the next `length` characters
+# reader.forward(length=1) - move the current position forward by `length` characters.
+# reader.index - the number of the current character.
+# reader.line, reader.column - the line and the column of the current character.
+
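+# A minimal sketch of the interface described above (assuming the module is
+# imported as yaml.reader, per the package layout added in this commit):
+#
+#     from yaml.reader import Reader
+#     r = Reader(u'abc')
+#     r.peek()       # u'a'  - look at the current character
+#     r.prefix(2)    # u'ab' - look at the next two characters
+#     r.forward()    # consume u'a'; now r.index == 1 and r.peek() == u'b'
+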
+__all__ = ['Reader', 'ReaderError']
+
+from error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, str):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to unicode,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `str` object,
+ # - a `unicode` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `unicode`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = u''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, unicode):
+ self.name = "<unicode string>"
+ self.check_printable(stream)
+ self.buffer = stream+u'\0'
+ elif isinstance(stream, str):
+ self.name = "<string>"
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = ''
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in u'\n\x85\u2028\u2029' \
+ or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != u'\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and len(self.raw_buffer) < 2:
+ self.update_raw()
+ if not isinstance(self.raw_buffer, unicode):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError, exc:
+ character = exc.object[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += u'\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=1024):
+ data = self.stream.read(size)
+ if data:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ else:
+ self.eof = True
+
+#try:
+# import psyco
+# psyco.bind(Reader)
+#except ImportError:
+# pass
+
diff --git a/scripts/python-lib/yaml/reader.pyc b/scripts/python-lib/yaml/reader.pyc
new file mode 100644
index 00000000..4e917e38
--- /dev/null
+++ b/scripts/python-lib/yaml/reader.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/representer.py b/scripts/python-lib/yaml/representer.py
new file mode 100755
index 00000000..5f4fc70d
--- /dev/null
+++ b/scripts/python-lib/yaml/representer.py
@@ -0,0 +1,484 @@
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import sys, copy_reg, types
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter(object):
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=None):
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def get_classobj_bases(self, cls):
+ bases = [cls]
+ for base in cls.__bases__:
+ bases.extend(self.get_classobj_bases(base))
+ return bases
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if type(data) is types.InstanceType:
+ data_types = self.get_classobj_bases(data.__class__)+list(data_types)
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, unicode(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ def add_representer(cls, data_type, representer):
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+ add_representer = classmethod(add_representer)
+
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+ add_multi_representer = classmethod(add_multi_representer)
+
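+    # A minimal sketch of registering a custom representer (the Point class,
+    # the '!point' tag, and represent_point below are illustrative, not part
+    # of this module):
+    #
+    #     class Point(object):
+    #         def __init__(self, x, y):
+    #             self.x, self.y = x, y
+    #
+    #     def represent_point(dumper, data):
+    #         return dumper.represent_mapping(u'!point',
+    #                                         {'x': data.x, 'y': data.y})
+    #
+    #     Representer.add_representer(Point, represent_point)
+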
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = mapping.items()
+ mapping.sort()
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data in [None, ()]:
+ return True
+ if isinstance(data, (str, unicode, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:null',
+ u'null')
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:str', data)
+
+ def represent_bool(self, data):
+ if data:
+ value = u'true'
+ else:
+ value = u'false'
+ return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ def represent_long(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+ if data != data or (data == 0.0 and data == 1.0):
+ value = u'.nan'
+ elif data == self.inf_value:
+ value = u'.inf'
+ elif data == -self.inf_value:
+ value = u'-.inf'
+ else:
+ value = unicode(repr(data)).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if u'.' not in value and u'e' in value:
+ value = value.replace(u'e', u'.0e', 1)
+ return self.represent_scalar(u'tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping(u'tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping(u'tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = unicode(data.isoformat())
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = unicode(data.isoformat(' '))
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object: %s" % data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(unicode,
+ SafeRepresenter.represent_unicode)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(long,
+ SafeRepresenter.represent_long)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:python/str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ tag = None
+ try:
+ data.encode('ascii')
+ tag = u'tag:yaml.org,2002:python/unicode'
+ except UnicodeEncodeError:
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data)
+
+ def represent_long(self, data):
+ tag = u'tag:yaml.org,2002:int'
+ if int(data) is not data:
+ tag = u'tag:yaml.org,2002:python/long'
+ return self.represent_scalar(tag, unicode(data))
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = u'%r' % data.real
+ elif data.real == 0.0:
+ data = u'%rj' % data.imag
+ elif data.imag > 0:
+ data = u'%r+%rj' % (data.real, data.imag)
+ else:
+ data = u'%r%rj' % (data.real, data.imag)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = u'%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
+
+ def represent_instance(self, data):
+ # For instances of classic classes, we use __getinitargs__ and
+ # __getstate__ to serialize the data.
+
+ # If data.__getinitargs__ exists, the object must be reconstructed by
+        # calling cls(*args), where args is a tuple returned by
+ # __getinitargs__. Otherwise, the cls.__init__ method should never be
+ # called and the class instance is created by instantiating a trivial
+ # class and assigning to the instance's __class__ variable.
+
+ # If data.__getstate__ exists, it returns the state of the object.
+ # Otherwise, the state of the object is data.__dict__.
+
+ # We produce either a !!python/object or !!python/object/new node.
+ # If data.__getinitargs__ does not exist and state is a dictionary, we
+        # produce a !!python/object node. Otherwise we produce a
+ # !!python/object/new node.
+
+ cls = data.__class__
+ class_name = u'%s.%s' % (cls.__module__, cls.__name__)
+ args = None
+ state = None
+ if hasattr(data, '__getinitargs__'):
+ args = list(data.__getinitargs__())
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__
+ if args is None and isinstance(state, dict):
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+class_name, state)
+ if isinstance(state, dict) and not state:
+ return self.represent_sequence(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ value['state'] = state
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, value)
+
+ def represent_object(self, data):
+ # We use __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+        # For reconstruction, we call function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
+
+ cls = type(data)
+ if cls in copy_reg.dispatch_table:
+ reduce = copy_reg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent object: %r" % data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = u'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = u'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = u'%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+Representer.add_representer(str,
+ Representer.represent_str)
+
+Representer.add_representer(unicode,
+ Representer.represent_unicode)
+
+Representer.add_representer(long,
+ Representer.represent_long)
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(types.ClassType,
+ Representer.represent_name)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(types.InstanceType,
+ Representer.represent_instance)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
diff --git a/scripts/python-lib/yaml/representer.pyc b/scripts/python-lib/yaml/representer.pyc
new file mode 100644
index 00000000..f6cd56c9
--- /dev/null
+++ b/scripts/python-lib/yaml/representer.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/resolver.py b/scripts/python-lib/yaml/resolver.py
new file mode 100755
index 00000000..6b5ab875
--- /dev/null
+++ b/scripts/python-lib/yaml/resolver.py
@@ -0,0 +1,224 @@
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from error import *
+from nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver(object):
+
+ DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if not 'yaml_implicit_resolvers' in cls.__dict__:
+ cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+ add_implicit_resolver = classmethod(add_implicit_resolver)
+
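+    # A minimal sketch (the '!hex' tag is illustrative, not registered by
+    # this module):
+    #
+    #     import re
+    #     Resolver.add_implicit_resolver(u'!hex',
+    #         re.compile(ur'^0x[0-9a-fA-F]+$'), list(u'0'))
+    #
+    # Afterwards a plain scalar such as 0xFF resolves to the '!hex' tag.
+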
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+        # `path` is a pattern that is matched against the path from the
+        # root to the node that is being considered. `path` elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of a node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+        # a mapping value that corresponds to a scalar key whose content is
+        # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value with the index equal to `index_check`.
+ if not 'yaml_path_resolvers' in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, basestring) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (basestring, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+ add_path_resolver = classmethod(add_path_resolver)
+
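+    # For example, to force scalar values of a top-level 'version' key to
+    # resolve as plain strings (an illustrative use of the experimental API
+    # above):
+    #
+    #     Resolver.add_path_resolver(u'tag:yaml.org,2002:str',
+    #                                ['version'], kind=str)
+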
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, basestring):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, basestring):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == u'':
+ resolvers = self.yaml_implicit_resolvers.get(u'', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ resolvers += self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
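+    # For example, with the implicit resolvers registered below,
+    # resolve(ScalarNode, u'123', (True, False)) returns
+    # u'tag:yaml.org,2002:int'; an unmatched plain scalar falls back to
+    # DEFAULT_SCALAR_TAG.
+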
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:bool',
+ re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list(u'yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:float',
+ re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list(u'-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:int',
+ re.compile(ur'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list(u'-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:merge',
+ re.compile(ur'^(?:<<)$'),
+ [u'<'])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:null',
+ re.compile(ur'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ [u'~', u'n', u'N', u''])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:timestamp',
+ re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list(u'0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:value',
+ re.compile(ur'^(?:=)$'),
+ [u'='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:yaml',
+ re.compile(ur'^(?:!|&|\*)$'),
+ list(u'!&*'))
+
diff --git a/scripts/python-lib/yaml/resolver.pyc b/scripts/python-lib/yaml/resolver.pyc
new file mode 100644
index 00000000..d776f578
--- /dev/null
+++ b/scripts/python-lib/yaml/resolver.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/scanner.py b/scripts/python-lib/yaml/scanner.py
new file mode 100755
index 00000000..5228fad6
--- /dev/null
+++ b/scripts/python-lib/yaml/scanner.py
@@ -0,0 +1,1457 @@
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
+
+__all__ = ['Scanner', 'ScannerError']
+
+from error import MarkedYAMLError
+from tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey(object):
+    # See the simple keys treatment below.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner(object):
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+        # Reader does the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+        # Have we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+        # Examples of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+        # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+        # Return the next token, but do not delete it from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
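+    # A minimal usage sketch of the token API (composed with Reader, as the
+    # top-level yaml package does):
+    #
+    #     import yaml
+    #     for token in yaml.scan(u'a: 1'):
+    #         print token
+    #     # StreamStartToken, BlockMappingStartToken, KeyToken,
+    #     # ScalarToken(u'a'), ValueToken, ScalarToken(u'1'),
+    #     # BlockEndToken, StreamEndToken
+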
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == u'\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == u'%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == u'-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == u'.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == u'\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == u'[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == u'{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == u']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == u'}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == u',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == u'-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == u'?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == u':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == u'*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == u'&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == u'!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == u'|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == u'>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == u'\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == u'\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token"
+ % ch.encode('utf-8'), self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in self.possible_simple_keys.keys():
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+                            "could not find expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # A simple key is required only if it is the first token in the current
+ # line. Therefore it is always allowed.
+ assert self.allow_simple_key or not required
+
+        # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+                        "could not find expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+        #     "invalid indentation or unclosed '[' or '{'",
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+        # restrictive than the specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
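The indent stack that unwind_indent and add_indent maintain can be restated in isolation. A standalone sketch, with module-level variables standing in for the scanner attributes:

    indents = []
    indent = -1

    def add_indent(column):
        # Push the previous level when the block gets deeper.
        global indent
        if indent < column:
            indents.append(indent)
            indent = column
            return True
        return False

    def unwind_indent(column):
        # Pop one level, i.e. one BLOCK-END token, per closed block.
        global indent
        ends = 0
        while indent > column:
            indent = indents.pop()
            ends += 1
        return ends

    add_indent(0)           # top-level block mapping
    add_indent(2)           # nested mapping at column 2
    print unwind_indent(0)  # 1: a single BLOCK-END closes the nesting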
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
+        # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+        # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+        # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+        # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+        # Reset simple keys. Note that there cannot be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+            # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+        # Do we have a simple key pending at the current flow level?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+            # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'---' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'...' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
+ and (ch == u'-' or (not self.flow_level and ch in u'?:')))
+
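The check_plain predicate above can be restated as a standalone function for quick testing. A sketch; block context is assumed unless a flow level is passed:

    def can_start_plain(ch, following, flow_level=0):
        special = u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`'
        if ch not in special:
            return True
        return (following not in u'\0 \t\r\n\x85\u2028\u2029'
                and (ch == u'-' or (not flow_level and ch in u'?:')))

    assert can_start_plain(u'a', u'b')         # ordinary letter
    assert can_start_plain(u'-', u'1')         # '-1' is a plain scalar
    assert not can_start_plain(u'-', u' ')     # '- ' starts a block entry
    assert not can_start_plain(u':', u'x', 1)  # ':' never starts plain in flow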
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+        # specification requires. Any such mark will be considered part
+        # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == u'\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
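A quick check, assuming PyYAML, that comments and a leading byte order mark never surface as tokens; scan_to_next_token consumes them silently:

    import yaml

    ids = [t.id for t in yaml.scan(u'\ufeff# only a comment\nkey: value\n')]
    print ids
    # ['<stream start>', '<block mapping start>', '?', '<scalar>',
    #  ':', '<scalar>', '<block end>', '<stream end>']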
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == u'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == u'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not (u'0' <= ch <= u'9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 0
+ while u'0' <= self.peek(length) <= u'9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == u' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != u' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+        # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == u'*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == u'<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != u'>':
+                raise ScannerError("while scanning a tag", start_mark,
+ "expected '>', but found %r" % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ elif ch in u'\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = u'!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in u'\0 \r\n\x85\u2028\u2029':
+ if ch == u'!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = u'!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = u''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != u'\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in u' \t'
+ length = 0
+ while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != u'\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == u'\n' \
+ and leading_non_space and self.peek() not in u' \t':
+ if not breaks:
+ chunks.append(u' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == u'\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(u' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return chomping, increment
+
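The chomping indicator parsed above is the whole difference between these three loads. A small demonstration, assuming PyYAML: '-' strips the final break, no indicator clips to a single '\n', and '+' keeps every trailing break.

    import yaml

    print repr(yaml.load('|-\n text\n\n'))  # 'text'      (strip)
    print repr(yaml.load('|\n text\n\n'))   # 'text\n'    (clip, the default)
    print repr(yaml.load('|+\n text\n\n'))  # 'text\n\n'  (keep)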
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() != u' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ while self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+        # Note that we loosen indentation rules for quoted scalars. Quoted
+        # scalars don't need to adhere to indentation because " and ' clearly
+        # mark their beginning and end. Therefore we are less
+        # restrictive than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ u'0': u'\0',
+ u'a': u'\x07',
+ u'b': u'\x08',
+ u't': u'\x09',
+ u'\t': u'\x09',
+ u'n': u'\x0A',
+ u'v': u'\x0B',
+ u'f': u'\x0C',
+ u'r': u'\x0D',
+ u'e': u'\x1B',
+ u' ': u'\x20',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'N': u'\x85',
+ u'_': u'\xA0',
+ u'L': u'\u2028',
+ u'P': u'\u2029',
+ }
+
+ ESCAPE_CODES = {
+ u'x': 2,
+ u'u': 4,
+ u'U': 8,
+ }
+
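ESCAPE_CODES maps the escape letter to the number of hex digits that must follow it. A sketch of the decoding step, factored out of scan_flow_scalar_non_spaces below:

    ESCAPE_CODES = {u'x': 2, u'u': 4, u'U': 8}

    def decode_fixed_escape(letter, digits):
        # The letter selects the width; the digits form one code point.
        assert len(digits) == ESCAPE_CODES[letter]
        return unichr(int(digits, 16))

    print decode_fixed_escape(u'x', u'41')          # A
    print repr(decode_fixed_escape(u'u', u'263A'))  # u'\u263a'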
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == u'\'' and self.peek(1) == u'\'':
+ chunks.append(u'\'')
+ self.forward(2)
+ elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == u'\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+                                    "expected escape sequence of %d hexadecimal digits, but found %r" %
+ (length, self.peek(k).encode('utf-8')), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(unichr(code))
+ self.forward(length)
+ elif ch in u'\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in u' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == u'\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in u' \t':
+ self.forward()
+ if self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',', ':' and '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+        # Indentation rules are loosened for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == u'#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in u'\0 \t\r\n\x85\u2028\u2029' \
+ or (not self.flow_level and ch == u':' and
+ self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
+ or (self.flow_level and ch in u',:?[]{}'):
+ break
+ length += 1
+ # It's not clear what we should do with ':' in the flow context.
+ if (self.flow_level and ch == u':'
+ and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
+ self.forward(length)
+ raise ScannerError("while scanning a plain scalar", start_mark,
+ "found unexpected ':'", self.get_mark(),
+ "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == u'#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in u' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+        # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != u'!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != u' ':
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != u'!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == u'%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+            raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected URI, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return u''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ bytes = []
+ mark = self.get_mark()
+ while self.peek() == u'%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+                            "expected URI escape sequence of 2 hexadecimal digits, but found %r" %
+ (self.peek(k).encode('utf-8')), self.get_mark())
+ bytes.append(chr(int(self.prefix(2), 16)))
+ self.forward(2)
+ try:
+ value = unicode(''.join(bytes), 'utf-8')
+ except UnicodeDecodeError, exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+        #   '\u2029'    :   '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in u'\r\n\x85':
+ if self.prefix(2) == u'\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return u'\n'
+ elif ch in u'\u2028\u2029':
+ self.forward()
+ return ch
+ return u''
+
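The transform table above, restated as a standalone function over a text prefix; it returns the normalized break and how many characters were consumed:

    def normalize_break(text):
        if text.startswith(u'\r\n'):
            return u'\n', 2
        ch = text[0:1]
        if ch in u'\r\n\x85':
            return u'\n', 1
        if ch in u'\u2028\u2029':
            return ch, 1
        return u'', 0

    print normalize_break(u'\r\nrest')    # (u'\n', 2)
    print normalize_break(u'\x85rest')    # (u'\n', 1)
    print normalize_break(u'\u2028rest')  # (u'\u2028', 1)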
+#try:
+# import psyco
+# psyco.bind(Scanner)
+#except ImportError:
+# pass
+
diff --git a/scripts/python-lib/yaml/scanner.pyc b/scripts/python-lib/yaml/scanner.pyc
new file mode 100644
index 00000000..b22af786
--- /dev/null
+++ b/scripts/python-lib/yaml/scanner.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/serializer.py b/scripts/python-lib/yaml/serializer.py
new file mode 100755
index 00000000..0bf1e96d
--- /dev/null
+++ b/scripts/python-lib/yaml/serializer.py
@@ -0,0 +1,111 @@
+
+__all__ = ['Serializer', 'SerializerError']
+
+from error import YAMLError
+from events import *
+from nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer(object):
+
+ ANCHOR_TEMPLATE = u'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
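A short usage sketch, assuming the full PyYAML package: a node that is reachable twice gets an anchor from ANCHOR_TEMPLATE on first emission and an alias afterwards, which is the anchor_node/serialize_node pair above at work.

    import yaml

    shared = [1, 2]
    print yaml.dump({'a': shared, 'b': shared})
    # a: &id001 [1, 2]
    # b: *id001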
diff --git a/scripts/python-lib/yaml/serializer.pyc b/scripts/python-lib/yaml/serializer.pyc
new file mode 100644
index 00000000..da4989fa
--- /dev/null
+++ b/scripts/python-lib/yaml/serializer.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/tokens.py b/scripts/python-lib/yaml/tokens.py
new file mode 100755
index 00000000..4d0b48a3
--- /dev/null
+++ b/scripts/python-lib/yaml/tokens.py
@@ -0,0 +1,104 @@
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
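Each token class carries an id used in parser error messages; a compact way to see them, assuming PyYAML:

    import yaml

    print ' '.join(token.id for token in yaml.scan('[a, b]'))
    # <stream start> [ <scalar> , <scalar> ] <stream end>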
diff --git a/scripts/python-lib/yaml/tokens.pyc b/scripts/python-lib/yaml/tokens.pyc
new file mode 100644
index 00000000..1e0f5147
--- /dev/null
+++ b/scripts/python-lib/yaml/tokens.pyc
Binary files differ
diff --git a/scripts/stty_r b/scripts/stty_r
new file mode 100755
index 00000000..fdc3366b
--- /dev/null
+++ b/scripts/stty_r
@@ -0,0 +1,2 @@
+#! /bin/bash
+stty 500:5:bf:8a3b:3:1c:8:15:4:0:1:0:11:13:1a:0:12:f:17:16:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0
diff --git a/scripts/t-rex-64 b/scripts/t-rex-64
new file mode 100755
index 00000000..d33cc3e8
--- /dev/null
+++ b/scripts/t-rex-64
@@ -0,0 +1,25 @@
+#! /bin/bash
+./trex-cfg $@
+RESULT=$?
+if [ $RESULT -ne 0 ]; then
+ exit $RESULT
+fi
+
+
+cd $(dirname $0)
+export LD_LIBRARY_PATH=$PWD
+saveterm="$(stty -g)"
+# if we have a new core, run the optimized trex
+if grep -q avx /proc/cpuinfo ; then
+ ./_$(basename $0) $@
+ if [ $? -eq 132 ]; then
+        echo " WARNING: this program is optimized for newer Intel processors. "
+        echo " The ./t-rex-64-o application should work on any Intel processor, but may be slower. "
+        echo " Trying to run t-rex-64-o ... "
+ ./_t-rex-64-o $@
+ fi
+else
+ ./_t-rex-64-o $@
+fi
+stty $saveterm
+
diff --git a/scripts/t-rex-64-debug b/scripts/t-rex-64-debug
new file mode 100755
index 00000000..4f3761d9
--- /dev/null
+++ b/scripts/t-rex-64-debug
@@ -0,0 +1,9 @@
+#! /bin/bash
+./trex-cfg
+cd $(dirname $0)
+export LD_LIBRARY_PATH=$PWD
+saveterm="$(stty -g)"
+./_$(basename $0) $@
+stty $saveterm
+
+
diff --git a/scripts/t-rex-64-debug-gdb b/scripts/t-rex-64-debug-gdb
new file mode 100755
index 00000000..087afb71
--- /dev/null
+++ b/scripts/t-rex-64-debug-gdb
@@ -0,0 +1,4 @@
+#! /bin/bash
+export LD_LIBRARY_PATH=`pwd`
+gdb --args ./_t-rex-64-debug $@
+
diff --git a/scripts/t-rex-64-debug-o b/scripts/t-rex-64-debug-o
new file mode 100755
index 00000000..4f3761d9
--- /dev/null
+++ b/scripts/t-rex-64-debug-o
@@ -0,0 +1,9 @@
+#! /bin/bash
+./trex-cfg
+cd $(dirname $0)
+export LD_LIBRARY_PATH=$PWD
+saveterm="$(stty -g)"
+./_$(basename $0) $@
+stty $saveterm
+
+
diff --git a/scripts/t-rex-64-debug-o-gdb b/scripts/t-rex-64-debug-o-gdb
new file mode 100755
index 00000000..629530ae
--- /dev/null
+++ b/scripts/t-rex-64-debug-o-gdb
@@ -0,0 +1,4 @@
+#! /bin/bash
+export LD_LIBRARY_PATH=`pwd`
+gdb --args ./_t-rex-64-debug-o $@
+
diff --git a/scripts/t-rex-64-o b/scripts/t-rex-64-o
new file mode 100755
index 00000000..4f3761d9
--- /dev/null
+++ b/scripts/t-rex-64-o
@@ -0,0 +1,9 @@
+#! /bin/bash
+./trex-cfg
+cd $(dirname $0)
+export LD_LIBRARY_PATH=$PWD
+saveterm="$(stty -g)"
+./_$(basename $0) $@
+stty $saveterm
+
+
diff --git a/scripts/trex-cfg b/scripts/trex-cfg
new file mode 100755
index 00000000..2aebf026
--- /dev/null
+++ b/scripts/trex-cfg
@@ -0,0 +1,57 @@
+#! /bin/bash
+SYS=`uname -r`
+if [ -f /etc/debian_version ]; then
+ OS=debian
+elif [ -f /etc/redhat-release ]; then
+ OS=redhat
+ systemctl stop firewalld.service
+else
+ OS=unknown
+fi
+
+
+if [ ! -d /mnt/huge ]; then
+    echo "Creating /mnt/huge mount point for hugepages"
+    mkdir -p /mnt/huge
+fi
+
+if ! mount | grep hugetlbfs >> /dev/null ; then
+ mount -t hugetlbfs nodev /mnt/huge
+fi
+
+
+for file in /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages /sys/devices/system/node/node1/hugepages/hugepages-2048kB/nr_hugepages ; do
+ if [ -e $file ] ; then
+ if [ "$(cat $file)" != "2048" ] ; then
+ echo 2048 > $file
+ fi
+ fi
+done
+
+if ! lsmod | grep -q igb_uio ; then
+    echo "Loading the drivers for the first time"
+ modprobe uio
+ km=ko/$SYS/igb_uio.ko
+ if [ -e $km ] ; then
+ insmod $km
+ else
+        echo "ERROR: no igb_uio kernel module was found for this kernel version"
+ echo "Please run the following commands: "
+ echo "\$cd ko/src "
+ echo "\$make "
+ echo "\$make install "
+ echo "\$cd - "
+ echo "and try to run it again "
+ exit 1
+ fi
+fi
+
+# try to bind the ports from the configuration file (new DPDK)
+PARENT_ARGS="$0 $@"
+if ! ./dpdk_setup_ports.py --parent "$PARENT_ARGS"; then
+ exit 1
+fi
+
+
diff --git a/scripts/trex_daemon_server b/scripts/trex_daemon_server
new file mode 100755
index 00000000..3494e303
--- /dev/null
+++ b/scripts/trex_daemon_server
@@ -0,0 +1,25 @@
+#!/usr/bin/python
+
+import os
+import sys
+
+core = 0
+
+if '--core' in sys.argv:
+ try:
+ idx = sys.argv.index('--core')
+ core = int(sys.argv[idx + 1])
+ if core > 31 or core < 0:
+        print "Error: please provide a core argument between 0 and 31"
+ exit(-1)
+ del sys.argv[idx:idx+2]
+ except IndexError:
+        print "Error: please make sure the --core option is followed by an argument"
+ exit(-1)
+ except ValueError:
+        print "Error: please make sure the --core option is followed by an integer argument"
+ exit(-1)
+
+str_argv = ' '.join(sys.argv[1:])
+cmd = "taskset -c {core} python automation/trex_control_plane/server/trex_daemon_server.py {argv}".format(core = core, argv = str_argv)
+os.system(cmd)
diff --git a/scripts/version.txt b/scripts/version.txt
new file mode 100755
index 00000000..ef163be8
--- /dev/null
+++ b/scripts/version.txt
@@ -0,0 +1,4 @@
+
+moved to doc/release_notes.html
+
+