path: root/scripts/automation
Diffstat (limited to 'scripts/automation')
-rw-r--r-- scripts/automation/__init__.py | 0
-rwxr-xr-x scripts/automation/config/trex-dan.cfg | 33
-rwxr-xr-x scripts/automation/config/trex-dev3.cfg | 34
-rwxr-xr-x scripts/automation/config/trex-esp80-hhaim.cfg | 31
-rwxr-xr-x scripts/automation/config/trex-hhaim.cfg | 33
-rwxr-xr-x scripts/automation/config/trex01-1g.cfg | 35
-rwxr-xr-x scripts/automation/graph_template.html | 80
-rwxr-xr-x scripts/automation/h_avc.py | 195
-rwxr-xr-x scripts/automation/phantom/phantomjs | bin 0 -> 38346752 bytes
-rwxr-xr-x scripts/automation/phantom/rasterize.js | 32
-rwxr-xr-x scripts/automation/readme.txt | 15
-rwxr-xr-x scripts/automation/regression/CPlatform.py | 945
-rwxr-xr-x scripts/automation/regression/CProgressDisp.py | 87
-rwxr-xr-x scripts/automation/regression/CShowParser.py | 228
-rwxr-xr-x scripts/automation/regression/CustomLogger.py | 36
-rwxr-xr-x scripts/automation/regression/aggregate_results.py | 659
-rw-r--r-- scripts/automation/regression/functional_tests/config.yaml | 74
-rw-r--r-- scripts/automation/regression/functional_tests/cpp_gtests_test.py | 46
-rw-r--r-- scripts/automation/regression/functional_tests/filters_test.py | 100
-rwxr-xr-x scripts/automation/regression/functional_tests/functional_general_test.py | 22
-rw-r--r-- scripts/automation/regression/functional_tests/golden/basic_imix_golden.cap | bin 0 -> 198474 bytes
-rw-r--r-- scripts/automation/regression/functional_tests/golden/basic_imix_vm_golden.cap | bin 0 -> 316552 bytes
-rw-r--r-- scripts/automation/regression/functional_tests/golden/basic_tuple_gen_golden.cap | bin 0 -> 38024 bytes
-rw-r--r-- scripts/automation/regression/functional_tests/golden/udp_590.cap | bin 0 -> 630 bytes
-rwxr-xr-x scripts/automation/regression/functional_tests/hltapi_stream_builder_test.py | 629
-rwxr-xr-x scripts/automation/regression/functional_tests/misc_methods_test.py | 61
-rwxr-xr-x scripts/automation/regression/functional_tests/pkt_bld_general_test.py | 28
-rwxr-xr-x scripts/automation/regression/functional_tests/platform_cmd_cache_test.py | 60
-rwxr-xr-x scripts/automation/regression/functional_tests/platform_cmd_link_test.py | 62
-rwxr-xr-x scripts/automation/regression/functional_tests/platform_device_cfg_test.py | 20
-rwxr-xr-x scripts/automation/regression/functional_tests/platform_dual_if_obj_test.py | 31
-rwxr-xr-x scripts/automation/regression/functional_tests/platform_if_manager_test.py | 40
-rwxr-xr-x scripts/automation/regression/functional_tests/platform_if_obj_test.py | 49
-rw-r--r-- scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py | 369
-rw-r--r-- scripts/automation/regression/functional_tests/stl_basic_tests.py | 367
-rwxr-xr-x scripts/automation/regression/functional_tests/trex_cfg_creator_test.py | 698
-rwxr-xr-x scripts/automation/regression/hltapi_playground.py | 193
-rwxr-xr-x scripts/automation/regression/interactive_platform | 4
-rwxr-xr-x scripts/automation/regression/interactive_platform.py | 338
-rwxr-xr-x scripts/automation/regression/interfaces_e.py | 8
-rwxr-xr-x scripts/automation/regression/misc_methods.py | 284
-rwxr-xr-x scripts/automation/regression/outer_packages.py | 71
-rwxr-xr-x scripts/automation/regression/platform_cmd_link.py | 488
-rw-r--r-- scripts/automation/regression/reports/.keep | 0
-rwxr-xr-x scripts/automation/regression/setups/dave/benchmark.yaml | 118
-rwxr-xr-x scripts/automation/regression/setups/dave/config.yaml | 94
-rw-r--r-- scripts/automation/regression/setups/dummy/config.yaml | 11
-rw-r--r-- scripts/automation/regression/setups/kiwi02/benchmark.yaml | 298
-rw-r--r-- scripts/automation/regression/setups/kiwi02/config.yaml | 95
-rw-r--r-- scripts/automation/regression/setups/trex-dan/benchmark.yaml | 253
-rw-r--r-- scripts/automation/regression/setups/trex-dan/config.yaml | 68
-rw-r--r-- scripts/automation/regression/setups/trex04/benchmark.yaml | 155
-rw-r--r-- scripts/automation/regression/setups/trex04/config.yaml | 39
-rw-r--r-- scripts/automation/regression/setups/trex07/benchmark.yaml | 244
-rw-r--r-- scripts/automation/regression/setups/trex07/config.yaml | 66
-rw-r--r-- scripts/automation/regression/setups/trex08/benchmark.yaml | 181
-rw-r--r-- scripts/automation/regression/setups/trex08/config.yaml | 40
-rw-r--r-- scripts/automation/regression/setups/trex09/benchmark.yaml | 234
-rw-r--r-- scripts/automation/regression/setups/trex09/config.yaml | 38
-rw-r--r-- scripts/automation/regression/setups/trex10/benchmark.yaml | 60
-rw-r--r-- scripts/automation/regression/setups/trex10/config.yaml | 38
-rw-r--r-- scripts/automation/regression/setups/trex11/benchmark.yaml | 155
-rw-r--r-- scripts/automation/regression/setups/trex11/config.yaml | 38
-rw-r--r-- scripts/automation/regression/setups/trex12/benchmark.yaml | 182
-rw-r--r-- scripts/automation/regression/setups/trex12/config.yaml | 40
-rw-r--r-- scripts/automation/regression/setups/trex14/benchmark.yaml | 245
-rw-r--r-- scripts/automation/regression/setups/trex14/config.yaml | 67
-rw-r--r-- scripts/automation/regression/setups/trex15/benchmark.yaml | 155
-rw-r--r-- scripts/automation/regression/setups/trex15/config.yaml | 39
-rw-r--r-- scripts/automation/regression/setups/trex17/benchmark.yaml | 155
-rw-r--r-- scripts/automation/regression/setups/trex17/config.yaml | 39
-rw-r--r-- scripts/automation/regression/setups/trex24/benchmark.yaml | 155
-rw-r--r-- scripts/automation/regression/setups/trex24/config.yaml | 39
-rw-r--r-- scripts/automation/regression/setups/trex25/benchmark.yaml | 254
-rw-r--r-- scripts/automation/regression/setups/trex25/config.yaml | 93
-rwxr-xr-x scripts/automation/regression/sshpass.exp | 17
-rw-r--r-- scripts/automation/regression/stateful_tests/__init__.py | 0
-rwxr-xr-x scripts/automation/regression/stateful_tests/tests_exceptions.py | 37
-rwxr-xr-x scripts/automation/regression/stateful_tests/trex_client_pkg_test.py | 34
-rwxr-xr-x scripts/automation/regression/stateful_tests/trex_general_test.py | 363
-rwxr-xr-x scripts/automation/regression/stateful_tests/trex_imix_test.py | 213
-rwxr-xr-x scripts/automation/regression/stateful_tests/trex_ipv6_test.py | 103
-rwxr-xr-x scripts/automation/regression/stateful_tests/trex_nat_test.py | 169
-rwxr-xr-x scripts/automation/regression/stateful_tests/trex_nbar_test.py | 123
-rwxr-xr-x scripts/automation/regression/stateful_tests/trex_rx_test.py | 280
-rwxr-xr-x scripts/automation/regression/stateless_tests/__init__.py | 0
-rwxr-xr-x scripts/automation/regression/stateless_tests/stl_benchmark_test.py | 75
-rw-r--r-- scripts/automation/regression/stateless_tests/stl_client_test.py | 350
-rwxr-xr-x scripts/automation/regression/stateless_tests/stl_examples_test.py | 31
-rw-r--r-- scripts/automation/regression/stateless_tests/stl_general_test.py | 113
-rw-r--r-- scripts/automation/regression/stateless_tests/stl_performance_test.py | 351
-rw-r--r-- scripts/automation/regression/stateless_tests/stl_rx_test.py | 568
-rwxr-xr-x scripts/automation/regression/stateless_tests/trex_client_pkg_test.py | 39
-rw-r--r-- scripts/automation/regression/test_pcaps/pcap_dual_test.erf | bin 0 -> 101488 bytes
-rw-r--r-- scripts/automation/regression/trex.py | 457
-rwxr-xr-x scripts/automation/regression/trex_unit_test.py | 437
-rwxr-xr-x scripts/automation/report_template.html | 96
-rwxr-xr-x scripts/automation/sshpass.exp | 17
-rwxr-xr-x scripts/automation/trex_control_plane/__init__.py | 1
-rw-r--r-- scripts/automation/trex_control_plane/client_utils/__init__.py | 1
-rw-r--r-- scripts/automation/trex_control_plane/client_utils/external_packages.py | 72
-rw-r--r-- scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py | 212
-rw-r--r-- scripts/automation/trex_control_plane/client_utils/yaml_utils.py | 163
-rwxr-xr-x scripts/automation/trex_control_plane/common/__init__.py | 1
-rwxr-xr-x scripts/automation/trex_control_plane/common/external_packages.py | 28
-rwxr-xr-x scripts/automation/trex_control_plane/common/text_opts.py | 198
-rwxr-xr-x scripts/automation/trex_control_plane/common/trex_exceptions.py | 140
-rw-r--r-- scripts/automation/trex_control_plane/common/trex_status.py | 8
-rwxr-xr-x scripts/automation/trex_control_plane/common/trex_status_e.py | 8
-rwxr-xr-x scripts/automation/trex_control_plane/doc/Makefile | 192
-rwxr-xr-x scripts/automation/trex_control_plane/doc/_static/no_scrollbars.css | 10
-rw-r--r-- scripts/automation/trex_control_plane/doc/_templates/layout.html | 17
-rwxr-xr-x scripts/automation/trex_control_plane/doc/api/client_code.rst | 17
-rwxr-xr-x scripts/automation/trex_control_plane/doc/api/exceptions.rst | 7
-rwxr-xr-x scripts/automation/trex_control_plane/doc/api/index.rst | 18
-rwxr-xr-x scripts/automation/trex_control_plane/doc/api/json_fields.rst | 233
-rwxr-xr-x scripts/automation/trex_control_plane/doc/client_utils.rst | 19
-rwxr-xr-x scripts/automation/trex_control_plane/doc/conf.py | 312
-rwxr-xr-x scripts/automation/trex_control_plane/doc/docs_utilities.py | 37
-rwxr-xr-x scripts/automation/trex_control_plane/doc/index.rst | 96
-rwxr-xr-x scripts/automation/trex_control_plane/doc/json_dictionary.yaml | 252
-rwxr-xr-x scripts/automation/trex_control_plane/doc/packet_generator/examples.rst | 5
-rwxr-xr-x scripts/automation/trex_control_plane/doc/packet_generator/export_format.yaml | 47
-rwxr-xr-x scripts/automation/trex_control_plane/doc/packet_generator/index.rst | 17
-rwxr-xr-x scripts/automation/trex_control_plane/doc/packet_generator/packet_builder_code.bak | 12
-rwxr-xr-x scripts/automation/trex_control_plane/doc/packet_generator/stream_export.rst | 29
-rwxr-xr-x scripts/automation/trex_control_plane/doc/usage_examples.rst | 68
-rw-r--r-- scripts/automation/trex_control_plane/doc_stl/Makefile | 192
-rw-r--r-- scripts/automation/trex_control_plane/doc_stl/_static/no_scrollbars.css | 10
-rw-r--r-- scripts/automation/trex_control_plane/doc_stl/_templates/layout.html | 17
-rwxr-xr-x scripts/automation/trex_control_plane/doc_stl/api/client_code.rst | 260
-rwxr-xr-x scripts/automation/trex_control_plane/doc_stl/api/field_engine.rst | 254
-rwxr-xr-x scripts/automation/trex_control_plane/doc_stl/api/index.rst | 37
-rwxr-xr-x scripts/automation/trex_control_plane/doc_stl/api/profile_code.rst | 140
-rwxr-xr-x scripts/automation/trex_control_plane/doc_stl/api/scapy_builder.rst | 44
-rw-r--r-- scripts/automation/trex_control_plane/doc_stl/conf.py | 328
-rw-r--r-- scripts/automation/trex_control_plane/doc_stl/index.rst | 101
-rwxr-xr-x scripts/automation/trex_control_plane/examples/__init__.py | 1
-rwxr-xr-x scripts/automation/trex_control_plane/examples/client_interactive_example.py | 254
-rwxr-xr-x scripts/automation/trex_control_plane/examples/client_tcl_example.tcl | 28
-rw-r--r-- scripts/automation/trex_control_plane/examples/interactive_stateless.py | 128
-rwxr-xr-x scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py | 105
-rwxr-xr-x scripts/automation/trex_control_plane/examples/stateless_example.py | 30
-rwxr-xr-x scripts/automation/trex_control_plane/examples/trex_root_path.py | 15
-rwxr-xr-x scripts/automation/trex_control_plane/examples/trex_tcl_client.tcl | 228
-rwxr-xr-x scripts/automation/trex_control_plane/examples/zmq_server_client.py | 45
-rwxr-xr-x scripts/automation/trex_control_plane/server/CCustomLogger.py | 106
-rwxr-xr-x scripts/automation/trex_control_plane/server/outer_packages.py | 38
-rwxr-xr-x scripts/automation/trex_control_plane/server/singleton_daemon.py | 176
-rwxr-xr-x scripts/automation/trex_control_plane/server/trex_launch_thread.py | 96
-rwxr-xr-x scripts/automation/trex_control_plane/server/trex_server.py | 640
-rw-r--r-- scripts/automation/trex_control_plane/server/zipmsg.py | 32
-rwxr-xr-x scripts/automation/trex_control_plane/server/zmq_monitor_thread.py | 86
-rwxr-xr-x scripts/automation/trex_control_plane/stf/examples/stf_example.py | 54
-rwxr-xr-x scripts/automation/trex_control_plane/stf/examples/stf_path.py | 4
-rwxr-xr-x scripts/automation/trex_control_plane/stf/trex_stf_lib/CCustomLogger.py | 100
-rwxr-xr-x scripts/automation/trex_control_plane/stf/trex_stf_lib/__init__.py | 1
-rwxr-xr-x scripts/automation/trex_control_plane/stf/trex_stf_lib/external_packages.py | 28
-rwxr-xr-x scripts/automation/trex_control_plane/stf/trex_stf_lib/general_utils.py | 95
-rwxr-xr-x scripts/automation/trex_control_plane/stf/trex_stf_lib/outer_packages.py | 30
-rwxr-xr-x scripts/automation/trex_control_plane/stf/trex_stf_lib/text_opts.py | 192
-rwxr-xr-x scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py | 1561
-rwxr-xr-x scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_daemon_server.py | 79
-rwxr-xr-x scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_exceptions.py | 141
-rw-r--r-- scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_status.py | 8
-rwxr-xr-x scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_status_e.py | 11
-rw-r--r-- scripts/automation/trex_control_plane/stl/console/__init__.py | 0
-rw-r--r-- scripts/automation/trex_control_plane/stl/console/stl_path.py | 7
-rwxr-xr-x scripts/automation/trex_control_plane/stl/console/trex_console.py | 889
-rwxr-xr-x scripts/automation/trex_control_plane/stl/console/trex_root_path.py | 15
-rw-r--r-- scripts/automation/trex_control_plane/stl/console/trex_tui.py | 1250
-rw-r--r-- scripts/automation/trex_control_plane/stl/examples/hlt_udp_simple.py | 114
-rwxr-xr-x scripts/automation/trex_control_plane/stl/examples/rpc_proxy_server.py | 167
-rw-r--r-- scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows.py | 118
-rw-r--r-- scripts/automation/trex_control_plane/stl/examples/stl_flow_latency_stats.py | 144
-rw-r--r-- scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py | 110
-rw-r--r-- scripts/automation/trex_control_plane/stl/examples/stl_imix.py | 126
-rw-r--r-- scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir.py | 113
-rw-r--r-- scripts/automation/trex_control_plane/stl/examples/stl_path.py | 7
-rw-r--r-- scripts/automation/trex_control_plane/stl/examples/stl_pcap.py | 117
-rw-r--r-- scripts/automation/trex_control_plane/stl/examples/stl_pcap_remote.py | 123
-rw-r--r-- scripts/automation/trex_control_plane/stl/examples/stl_profile.py | 58
-rw-r--r-- scripts/automation/trex_control_plane/stl/examples/stl_run_udp_simple.py | 218
-rw-r--r-- scripts/automation/trex_control_plane/stl/examples/stl_simple_burst.py | 71
-rw-r--r-- scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py | 60
-rw-r--r-- scripts/automation/trex_control_plane/stl/examples/stl_simple_pin_core.py | 72
-rwxr-xr-x scripts/automation/trex_control_plane/stl/examples/using_rpc_proxy.py | 149
-rwxr-xr-x scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py | 798
-rw-r--r-- scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_client.py | 116
-rwxr-xr-x scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_server.py | 188
-rw-r--r-- scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py | 84
-rw-r--r-- scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py | 155
-rwxr-xr-x scripts/automation/trex_control_plane/stl/services/scapy_server/zmq_for_scapy_server_test.py | 14
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/__init__.py | 7
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/api.py | 18
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py | 440
-rwxr-xr-x scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py | 3370
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_exceptions.py | 71
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py | 65
-rwxr-xr-x scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_hltapi.py | 1595
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py | 284
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_interface.py | 43
-rwxr-xr-x scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py | 1698
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py | 794
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py | 620
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py | 1549
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py | 78
-rwxr-xr-x scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py | 1346
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py | 167
-rwxr-xr-x scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py | 297
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/__init__.py | 0
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py | 88
-rwxr-xr-x scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/constants.py | 26
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/filters.py | 144
-rwxr-xr-x scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py | 596
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/pcap.py | 29
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py | 195
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py | 35
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/zipmsg.py | 32
-rwxr-xr-x scripts/automation/trex_control_plane/unit_tests/__init__.py | 1
-rwxr-xr-x scripts/automation/trex_control_plane/unit_tests/client_launching_test.py | 31
-rwxr-xr-x scripts/automation/trex_control_plane/unit_tests/control_plane_general_test.py | 72
-rwxr-xr-x scripts/automation/trex_control_plane/unit_tests/control_plane_unit_test.py | 73
-rwxr-xr-x scripts/automation/trex_control_plane/unit_tests/functional_test.py | 160
-rwxr-xr-x scripts/automation/trex_control_plane/unit_tests/nose_outer_packages.py | 27
-rwxr-xr-x scripts/automation/trex_control_plane/unit_tests/sock.py | 552
-rwxr-xr-x scripts/automation/trex_control_plane/unit_tests/test.py | 36
-rwxr-xr-x scripts/automation/trex_perf.py | 1266
-rwxr-xr-x scripts/automation/wkhtmltopdf-amd64 | bin 0 -> 8301444 bytes
229 files changed, 43049 insertions(+), 0 deletions(-)
diff --git a/scripts/automation/__init__.py b/scripts/automation/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/automation/__init__.py
diff --git a/scripts/automation/config/trex-dan.cfg b/scripts/automation/config/trex-dan.cfg
new file mode 100755
index 00000000..110f22e9
--- /dev/null
+++ b/scripts/automation/config/trex-dan.cfg
@@ -0,0 +1,33 @@
+# Configuration for the trex-dan TRex machine
+#
+# machine_name - DNS name or IP of the TRex machine, used to ssh to the box
+# machine_type - 1G or 10G TRex machine
+# config_file - [Optional] configuration file for TRex if needed
+# is_dual - should TRex inject with -p ?
+# cores - how many cores should be used
+# limit_ports - how many ports should be used
+# latency - rate of latency packets injected by TRex
+# misc_params - [Optional] misc parameters to be passed to TRex
+
+[trex]
+machine_name=trex-dan
+machine_port=8090
+history_size=100
+machine_type=1G
+config_file=
+is_dual=yes
+cores=2
+limit_ports=2
+latency=1000
+latency_condition=10000
+misc_params=
+
+# Configuration for the router connected to the TRex
+#
+# interface - interface that can be used to communicate with the router
+
+[router]
+type=ASR
+interface=10.56.199.247
+password=lab
+
diff --git a/scripts/automation/config/trex-dev3.cfg b/scripts/automation/config/trex-dev3.cfg
new file mode 100755
index 00000000..f71a9493
--- /dev/null
+++ b/scripts/automation/config/trex-dev3.cfg
@@ -0,0 +1,34 @@
+# Configuration for the trex-dev3 TRex machine
+#
+# machine_name - DNS name or IP of the TRex machine, used to ssh to the box
+# password - root password for the TRex machine
+# machine_type - 1G or 10G TRex machine
+# config_file - [Optional] configuration file for TRex if needed
+# is_dual - should TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# exec - executable name (which will be under the version_path)
+# cores - how many cores should be used
+# limit_ports - how many ports should be used
+# latency - rate of latency packets injected by TRex
+# misc_params - [Optional] misc parameters to be passed to TRex
+
+[trex]
+machine_name=trex-dev3
+machine_type=10G
+config_file=
+is_dual=yes
+version_path=/auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.52
+exec=t-rex-64
+cores=2
+limit_ports=2
+latency=1000
+# misc_params="--nc"
+
+# Configuration for the router connected to the TRex
+#
+# interface - interface that can be used to communicate with the router
+
+[router]
+type=ASR
+interface=10.56.128.198
+password=lab
diff --git a/scripts/automation/config/trex-esp80-hhaim.cfg b/scripts/automation/config/trex-esp80-hhaim.cfg
new file mode 100755
index 00000000..56b7ad01
--- /dev/null
+++ b/scripts/automation/config/trex-esp80-hhaim.cfg
@@ -0,0 +1,31 @@
+# Configuration for the trex-esp80-hhaim TRex machine
+#
+# machine_name - DNS name or IP of the TRex machine, used to ssh to the box
+# password - root password for the TRex machine
+# machine_type - 1G or 10G TRex machine
+# config_file - configuration file for TRex, can be "" if default
+# is_dual - should TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# exec - executable name (which will be under the version_path)
+# cores - how many cores should be used
+# limit_ports - how many ports should be used
+# latency - rate of latency packets injected by TRex
+
+[trex]
+machine_name=csi-kiwi-02
+machine_type=10G
+config_file=
+is_dual=yes
+version_path=/auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.32/
+exec=t-rex-64
+limit_ports=4
+latency=1000
+misc_params=
+
+# Configuration for the router connected to the TRex
+#
+# interface - interface that can be used to communicate with the router
+
+[router]
+interface=10.56.192.57
+password=cisco
diff --git a/scripts/automation/config/trex-hhaim.cfg b/scripts/automation/config/trex-hhaim.cfg
new file mode 100755
index 00000000..44eba6f2
--- /dev/null
+++ b/scripts/automation/config/trex-hhaim.cfg
@@ -0,0 +1,33 @@
+# Configuration for the trex-hhaim TRex machine
+#
+# machine_name - DNS name or IP of the TRex machine, used to ssh to the box
+# machine_type - 1G or 10G TRex machine
+# config_file - [Optional] configuration file for TRex if needed
+# is_dual - should TRex inject with -p ?
+# cores - how many cores should be used
+# limit_ports - how many ports should be used
+# latency - rate of latency packets injected by TRex
+# misc_params - [Optional] misc parameters to be passed to TRex
+
+[trex]
+machine_name=10.56.217.210
+machine_port=8090
+history_size=100
+machine_type=10G
+config_file=
+is_dual=yes
+cores=4
+limit_ports=4
+latency=1000
+latency_condition=1000
+misc_params=
+
+# Configuration for the router connected to the TRex
+#
+# interface - interface that can be used to communicate with the router
+
+[router]
+type=ASR
+interface=10.56.192.57
+password=cisco
+
diff --git a/scripts/automation/config/trex01-1g.cfg b/scripts/automation/config/trex01-1g.cfg
new file mode 100755
index 00000000..cf4a7f2a
--- /dev/null
+++ b/scripts/automation/config/trex01-1g.cfg
@@ -0,0 +1,35 @@
+# Configuration for the trex01-1g TRex machine
+#
+# machine_name - DNS name or IP of the TRex machine, used to ssh to the box
+# password - root password for the TRex machine
+# machine_type - 1G or 10G TRex machine
+# config_file - [Optional] configuration file for TRex if needed
+# is_dual - should TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# exec - executable name (which will be under the version_path)
+# cores - how many cores should be used
+# limit_ports - how many ports should be used
+# latency - rate of latency packets injected by TRex
+# misc_params - [Optional] misc parameters to be passed to TRex
+
+[trex]
+machine_name=trex01-1g
+password=password
+machine_type=1G
+config_file=
+is_dual=yes
+version_path=/auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.35
+exec=t-rex-64
+cores=2
+limit_ports=2
+latency=1000
+misc_params="--nc"
+
+# Configuration for the router connected to the TRex
+#
+# interface - interface that can be used to communicate with the router
+
+[router]
+type=ASR
+interface=10.56.30.49
+password=cisco
diff --git a/scripts/automation/graph_template.html b/scripts/automation/graph_template.html
new file mode 100755
index 00000000..984fbc49
--- /dev/null
+++ b/scripts/automation/graph_template.html
@@ -0,0 +1,80 @@
+
+<html>
+ <head>
+ <script type="text/javascript" src="https://www.google.com/jsapi"></script>
+ <script type="text/javascript">
+ google.load("visualization", "1", {packages:["corechart"]});
+ google.load("visualization", "1", {packages:["table"]});
+ google.setOnLoadCallback(drawChart);
+ function drawChart() {
+
+ var cpu_data = google.visualization.arrayToDataTable([
+ ['Bandwidth [Mbps]', 'CPU [%]', 'Max. Latency [usec]', 'Avg. Latency [usec]'],
+ !@#$template_fill_graph!@#$
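+          // rows substituted here by the report generator, matching the header above; e.g. [500.0, 0.35, 120, 85] (illustrative values)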
+ ])
+
+ var cpu_options = {
+ title: '!@#$template_fill_head!@#$',
+ hAxis: { title: 'Bandwidth [Mbps]', format:'#.##'},
+ vAxes:[
+ {title: 'CPU Util [%]',format:'#%', minValue:0, maxValue: 1}, // Left axis
+ {title: 'Latency [usec]'}, // Right axis
+ ],
+ series: {0: {targetAxisIndex:0},
+ 1: {targetAxisIndex:1},
+ 2: {targetAxisIndex:1},
+ },
+ colors: ["green", "red", "blue"],
+ };
+
+ var chart = new google.visualization.LineChart(document.getElementById('chart_div'));
+
+ chart.draw(cpu_data, cpu_options);
+
+ var plot_data = new google.visualization.DataTable();
+ plot_data.addColumn('number', 'BW [Mbps]');
+ plot_data.addColumn('number', 'PPS [Kpps]');
+ plot_data.addColumn('number', 'CPU Util. [%]');
+ plot_data.addColumn('number', 'BW / CPU');
+ plot_data.addColumn('number', 'Max. Latency [usec]');
+ plot_data.addColumn('number', 'Avg. Latency [usec]');
+ plot_data.addColumn('number', 'Pkt Drop [pkts]');
+ plot_data.addRows([
+ !@#$template_fill_table!@#$
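+          // rows substituted here by the report generator: BW, PPS, CPU, BW/CPU, max latency, avg latency, pkt drop; e.g. [500.0, 750, 0.35, 1428.57, 120, 85, 0] (illustrative values)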
+ ]);
+
+ var formatter = new google.visualization.NumberFormat(
+ {fractionDigits:2});
+      formatter.format(plot_data, 0); // Apply formatter to Bandwidth column
+
+      formatter = new google.visualization.NumberFormat(
+ {fractionDigits: 0});
+ formatter.format(plot_data, 1); // Apply formatter to PPS column
+
+ formatter = new google.visualization.NumberFormat(
+ {pattern:'#,###%'});
+ formatter.format(plot_data, 2); // Apply formatter to CPU util column
+
+ formatter = new google.visualization.NumberFormat(
+ {fractionDigits: 2});
+ formatter.format(plot_data, 3); // Apply formatter to BW / CPU column
+
+ formatter = new google.visualization.NumberFormat(
+ {fractionDigits: 0});
+      formatter.format(plot_data, 4); // Apply formatter to Max. Latency column
+      formatter.format(plot_data, 5); // Apply formatter to Avg. Latency column
+      formatter.format(plot_data, 6); // Apply formatter to Pkt Drop column
+
+ var table = new google.visualization.Table(document.getElementById('table_div'));
+
+ table.draw(plot_data, {showRowNumber: true});
+ }
+
+ </script>
+ </head>
+ <body>
+ <div id="chart_div" style="width: 900px; height: 500px; position: relative;"></div>
+ <div id="table_div" style="display: table"></div>
+ </body>
+</html>
+
diff --git a/scripts/automation/h_avc.py b/scripts/automation/h_avc.py
new file mode 100755
index 00000000..75548d92
--- /dev/null
+++ b/scripts/automation/h_avc.py
@@ -0,0 +1,195 @@
+#!/router/bin/python-2.4.3
+import time, os, sys, string
+from os.path import exists
+from os import system, remove, chdir
+import re
+import random
+import copy
+import telnetlib
+import datetime
+import collections
+from trex_perf import TrexRunException
+
+
+class RouterIOException(Exception):
+ def __init__ (self, reason):
+ # generate the error message
+ #self.message = "\nSummary of error:\n\n %s\n" % (reason)
+ self.message = reason
+
+ def __str__(self):
+ return self.message
+
+# basic router class
+class Router:
+ def __init__ (self, host, port, password, str_wait = "#"):
+ self.host = host
+        self.port = port
+        self.pr = str_wait
+ self.password = password
+ self.to = 60
+ self.cpu_util_histo = []
+
+ # private function - command send
+ def _command (self, command, match = None, timeout = None):
+        to = timeout if (timeout is not None) else self.to
+        m = match if (match is not None) else [self.pr]
+
+ if not isinstance(m, list):
+ m = [m]
+
+ total_to = 0
+ while True:
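+        # resend the command in 2-second expect slices until a prompt matches or the overall timeout is exhausted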
+ self.tn.write(command + "\n")
+ ret = self.tn.expect(m, timeout = 2)
+ total_to += 2
+
+ if ret[0] != -1:
+ result = {}
+ result['match_index'] = ret[0]
+ result['output'] = ret[2]
+ return (result)
+
+            if total_to >= to:
+                raise RouterIOException("Failed to process command on router: %s" % command)
+
+ # connect to router by telnet
+ def connect (self):
+ # create telnet session
+ self.tn = telnetlib.Telnet ()
+
+ try:
+ self.tn.open(self.host, self.port)
+ except IOError:
+ raise RouterIOException("Failed To Connect To Router interface at '%s' : '%s'" % (self.host, self.port))
+
+        # get a ready console and decide whether a password is needed
+ ret = self._command("", ["Password", ">", "#"])
+ if ret['match_index'] == 0:
+ self._command(self.password, [">", "#"])
+
+        # calling enable can't hurt, even if we're already in enable mode
+ ret = self._command("enable 15", ["Password", "#"])
+ if (ret['match_index'] == 0):
+ self._command(self.password, "#")
+
+ self._command("terminal length 0")
+
+ def close (self):
+ self.tn.close ()
+ self.tn = None
+
+ # implemented through derived classes
+ def sample_cpu (self):
+ raise Exception("abstract method called")
+
+ def get_last_cpu_util (self):
+ if not self.cpu_util_histo:
+ return (0)
+ else:
+            return self.cpu_util_histo[-1]
+
+ def get_cpu_util_histo (self):
+ return self.cpu_util_histo
+
+ def get_filtered_cpu_util_histo (self):
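+        # drop the first 15% of the samples (warm-up noise), then keep only
+        # samples within 90% of the most common value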
+ trim_start = int(0.15 * len(self.cpu_util_histo))
+
+ filtered = self.cpu_util_histo[trim_start:]
+ if not filtered:
+ return [0]
+
+ m = collections.Counter(filtered).most_common(n = 1)[0][0]
+ #m = max(self.cpu_util_histo)
+ filtered = [x for x in filtered if (x > (0.9*m))]
+ return filtered
+
+ def clear_sampling_stats (self):
+ self.cpu_util_histo = []
+
+
+ # add a sample to the database
+ def sample_stats (self):
+ # sample CPU util
+ cpu_util = self.sample_cpu()
+ self.cpu_util_histo.append(cpu_util)
+
+ def get_stats (self):
+ stats = {}
+
+ filtered_cpu_util = self.get_filtered_cpu_util_histo()
+
+ if not filtered_cpu_util:
+ stats['cpu_util'] = 0
+ else:
+ stats['cpu_util'] = sum(filtered_cpu_util)/len(filtered_cpu_util)
+
+ stats['cpu_histo'] = self.get_cpu_util_histo()
+
+ return stats
+
+
+class ASR1k(Router):
+    def __init__ (self, host, password, port = 23, str_wait = "#"):
+        # Router expects (host, port, password); default to the standard telnet port
+        Router.__init__(self, host, port, password, str_wait)
+
+ def sample_cpu (self):
+ cpu_show_cmd = "show platform hardware qfp active datapath utilization | inc Load"
+ output = self._command(cpu_show_cmd)['output']
+        lines = output.split('\n')
+
+ cpu_util = -1.0
+ # search for the line
+ for l in lines:
+            m = re.match(r"\W*Processing: Load\D*(\d+)\D*(\d+)\D*(\d+)\D*(\d+)\D*", l)
+ if m:
+ cpu_util = float(m.group(1))
+
+ if (cpu_util == -1.0):
+ raise Exception("cannot determine CPU util. for asr1k")
+
+ return cpu_util
+
+
+class ISR(Router):
+    def __init__ (self, host, password, port = 23, str_wait = "#"):
+        # Router expects (host, port, password); default to the standard telnet port
+        Router.__init__(self, host, port, password, str_wait)
+
+ def sample_cpu (self):
+ cpu_show_cmd = "show processes cpu sorted | inc CPU utilization"
+ output = self._command(cpu_show_cmd)['output']
+        lines = output.split('\n')
+
+ cpu_util = -1.0
+
+ # search for the line
+ for l in lines:
+            m = re.match(r"\W*CPU utilization for five seconds: (\d+)%/(\d+)%", l)
+ if m:
+ max_cpu_util = float(m.group(1))
+ min_cpu_util = float(m.group(2))
+ cpu_util = (min_cpu_util + max_cpu_util)/2
+
+ if (cpu_util == -1.0):
+ raise Exception("cannot determine CPU util. for ISR")
+
+ return cpu_util
+
+
+
+if __name__ == "__main__":
+ #router = ASR1k("pqemb19ts", "cisco", port=2052)
+ router = ISR("10.56.198.7", "lab")
+ router.connect()
+ for i in range(1, 10):
+ router.sample_stats()
+ time.sleep(1)
+
+
+
+
+
diff --git a/scripts/automation/phantom/phantomjs b/scripts/automation/phantom/phantomjs
new file mode 100755
index 00000000..af9e4ab1
--- /dev/null
+++ b/scripts/automation/phantom/phantomjs
Binary files differ
diff --git a/scripts/automation/phantom/rasterize.js b/scripts/automation/phantom/rasterize.js
new file mode 100755
index 00000000..165bcfa7
--- /dev/null
+++ b/scripts/automation/phantom/rasterize.js
@@ -0,0 +1,32 @@
+var page = require('webpage').create(),
+ system = require('system'),
+ address, output, size;
+
+if (system.args.length < 3 || system.args.length > 5) {
+ console.log('Usage: rasterize.js URL filename [paperwidth*paperheight|paperformat] [zoom]');
+ console.log(' paper (pdf output) examples: "5in*7.5in", "10cm*20cm", "A4", "Letter"');
+ phantom.exit(1);
+} else {
+ address = system.args[1];
+ output = system.args[2];
+ page.viewportSize = { width: 600, height: 600 };
+ if (system.args.length > 3 && system.args[2].substr(-4) === ".pdf") {
+ size = system.args[3].split('*');
+ page.paperSize = size.length === 2 ? { width: size[0], height: size[1], margin: '0px' }
+ : { format: system.args[3], orientation: 'portrait', margin: '1cm' };
+ }
+ if (system.args.length > 4) {
+ page.zoomFactor = system.args[4];
+ }
+ page.open(address, function (status) {
+ if (status !== 'success') {
+ console.log('Unable to load the address!');
+ phantom.exit();
+ } else {
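+            // short grace period (200 ms) to let the page finish rendering before capturing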
+ window.setTimeout(function () {
+ page.render(output);
+ phantom.exit();
+ }, 200);
+ }
+ });
+}
diff --git a/scripts/automation/readme.txt b/scripts/automation/readme.txt
new file mode 100755
index 00000000..152eee16
--- /dev/null
+++ b/scripts/automation/readme.txt
@@ -0,0 +1,15 @@
+README - trex_perf.py
+=====================
+
+This script uses the TRex RESTful client-server control plane architecture and tries to find the maximum M (platform factor) for TRex before hitting one of two stopping conditions:
+(*) Packet drops
+(*) High latency.
+    Since the high-latency threshold changes from one platform to another, and might suffer from a kickoff peak (especially on a VM), it is the user's responsibility to provide the latency condition.
+    A common value for non-VM machines is 1000, whereas on VM machines values around 5000 are more common.
+
+Please note that the '-f' and '-c' options are mandatory.
+
+Also, it is the user's responsibility to make sure a TRex server is running and listening for the relevant client requests coming from this script.
+
+Example of finding the max M (between 10 and 100) with the imix_fast_1g.yaml traffic profile:
+./trex_perf.py -m 10 100 -c config/trex-hhaim.cfg all drop -f cap2/imix_fast_1g.yaml
diff --git a/scripts/automation/regression/CPlatform.py b/scripts/automation/regression/CPlatform.py
new file mode 100755
index 00000000..0017e7db
--- /dev/null
+++ b/scripts/automation/regression/CPlatform.py
@@ -0,0 +1,945 @@
+#!/router/bin/python
+
+from interfaces_e import IFType
+from platform_cmd_link import *
+import CustomLogger
+import misc_methods
+import re
+import time
+import CProgressDisp
+from CShowParser import CShowParser
+
+class CPlatform(object):
+ def __init__(self, silent_mode):
+ self.if_mngr = CIfManager()
+ self.cmd_link = CCommandLink(silent_mode)
+ self.nat_config = None
+ self.stat_route_config = None
+ self.running_image = None
+ self.needed_image_path = None
+ self.tftp_cfg = None
+ self.config_history = { 'basic_if_config' : False, 'tftp_server_config' : False }
+
+ def configure_basic_interfaces(self, mtu = 9050):
+
+ cache = CCommandCache()
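+        # per-interface command sets are accumulated in the cache and deployed in a single pass below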
+ for dual_if in self.if_mngr.get_dual_if_list():
+ client_if_command_set = []
+ server_if_command_set = []
+
+ client_if_command_set.append ('mac-address {mac}'.format( mac = dual_if.client_if.get_src_mac_addr()) )
+ client_if_command_set.append ('mtu %s' % mtu)
+ client_if_command_set.append ('ip address {ip} 255.255.255.0'.format( ip = dual_if.client_if.get_ipv4_addr() ))
+ client_if_command_set.append ('ipv6 address {ip}/64'.format( ip = dual_if.client_if.get_ipv6_addr() ))
+
+ cache.add('IF', client_if_command_set, dual_if.client_if.get_name())
+
+ server_if_command_set.append ('mac-address {mac}'.format( mac = dual_if.server_if.get_src_mac_addr()) )
+ server_if_command_set.append ('mtu %s' % mtu)
+ server_if_command_set.append ('ip address {ip} 255.255.255.0'.format( ip = dual_if.server_if.get_ipv4_addr() ))
+ server_if_command_set.append ('ipv6 address {ip}/64'.format( ip = dual_if.server_if.get_ipv6_addr() ))
+
+ cache.add('IF', server_if_command_set, dual_if.server_if.get_name())
+
+ self.cmd_link.run_single_command(cache)
+ self.config_history['basic_if_config'] = True
+
+
+
+ def configure_basic_filtered_interfaces(self, intf_list, mtu = 9050):
+
+ cache = CCommandCache()
+ for intf in intf_list:
+ if_command_set = []
+
+ if_command_set.append ('mac-address {mac}'.format( mac = intf.get_src_mac_addr()) )
+ if_command_set.append ('mtu %s' % mtu)
+ if_command_set.append ('ip address {ip} 255.255.255.0'.format( ip = intf.get_ipv4_addr() ))
+ if_command_set.append ('ipv6 address {ip}/64'.format( ip = intf.get_ipv6_addr() ))
+
+ cache.add('IF', if_command_set, intf.get_name())
+
+ self.cmd_link.run_single_command(cache)
+
+
+ def load_clean_config (self, config_filename = "clean_config.cfg", cfg_drive = "bootflash"):
+ for i in range(5):
+ self.clear_nat_translations()
+ cache = CCommandCache()
+ cache.add('EXEC', "configure replace {drive}:{file} force".format(drive = cfg_drive, file = config_filename))
+ res = self.cmd_link.run_single_command(cache)
+ if 'Rollback Done' not in res:
+ print('Failed to load clean config, trying again')
+ time.sleep(2)
+ if i < 4:
+ continue
+ raise Exception('Could not load clean config, response: %s' % res)
+
+ def config_pbr (self, mode = 'config'):
+ idx = 1
+ unconfig_str = '' if mode=='config' else 'no '
+
+ cache = CCommandCache()
+ pre_commit_cache = CCommandCache()
+ pre_commit_set = set([])
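+        # VRF definitions are collected separately so they can be committed before the interface configs that reference them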
+
+ for dual_if in self.if_mngr.get_dual_if_list():
+ client_if_command_set = []
+ server_if_command_set = []
+ conf_t_command_set = []
+ client_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.server_if.get_ipv4_addr() )
+ server_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.client_if.get_ipv4_addr() )
+
+ if dual_if.is_duplicated():
+ # define the relevant VRF name
+ pre_commit_set.add('{mode}ip vrf {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )
+
+ # assign VRF to interfaces, config interfaces with relevant route-map
+ client_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )
+ client_if_command_set.append ('{mode}ip policy route-map {dup}_{p1}_to_{p2}'.format(
+ mode = unconfig_str,
+ dup = dual_if.get_vrf_name(),
+ p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
+ server_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )
+ server_if_command_set.append ('{mode}ip policy route-map {dup}_{p2}_to_{p1}'.format(
+ mode = unconfig_str,
+ dup = dual_if.get_vrf_name(),
+ p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
+
+ # config route-map routing
+ conf_t_command_set.append('{mode}route-map {dup}_{p1}_to_{p2} permit 10'.format(
+ mode = unconfig_str,
+ dup = dual_if.get_vrf_name(),
+ p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
+ if mode == 'config':
+ conf_t_command_set.append('set ip next-hop {next_hop}'.format(
+ next_hop = client_net_next_hop) )
+ conf_t_command_set.append('{mode}route-map {dup}_{p2}_to_{p1} permit 10'.format(
+ mode = unconfig_str,
+ dup = dual_if.get_vrf_name(),
+ p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
+ if mode == 'config':
+ conf_t_command_set.append('set ip next-hop {next_hop}'.format(
+ next_hop = server_net_next_hop) )
+ conf_t_command_set.append('exit')
+
+ # config global arp to interfaces net address and vrf
+ if dual_if.client_if.get_dest_mac():
+ conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format(
+ mode = unconfig_str,
+ dup = dual_if.get_vrf_name(),
+ next_hop = server_net_next_hop,
+ dest_mac = dual_if.client_if.get_dest_mac()))
+ if dual_if.server_if.get_dest_mac():
+ conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format(
+ mode = unconfig_str,
+ dup = dual_if.get_vrf_name(),
+ next_hop = client_net_next_hop,
+ dest_mac = dual_if.server_if.get_dest_mac()))
+ else:
+ # config interfaces with relevant route-map
+ client_if_command_set.append ('{mode}ip policy route-map {p1}_to_{p2}'.format(
+ mode = unconfig_str,
+ p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
+ server_if_command_set.append ('{mode}ip policy route-map {p2}_to_{p1}'.format(
+ mode = unconfig_str,
+ p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
+
+ # config route-map routing
+ conf_t_command_set.append('{mode}route-map {p1}_to_{p2} permit 10'.format(
+ mode = unconfig_str,
+ p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
+ if mode == 'config':
+ conf_t_command_set.append('set ip next-hop {next_hop}'.format(
+ next_hop = client_net_next_hop) )
+ conf_t_command_set.append('{mode}route-map {p2}_to_{p1} permit 10'.format(
+ mode = unconfig_str,
+ p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
+ if mode == 'config':
+ conf_t_command_set.append('set ip next-hop {next_hop}'.format(
+ next_hop = server_net_next_hop) )
+ conf_t_command_set.append('exit')
+
+ # config global arp to interfaces net address
+ if dual_if.client_if.get_dest_mac():
+ conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format(
+ mode = unconfig_str,
+ next_hop = server_net_next_hop,
+ dest_mac = dual_if.client_if.get_dest_mac()))
+ if dual_if.server_if.get_dest_mac():
+ conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format(
+ mode = unconfig_str,
+ next_hop = client_net_next_hop,
+ dest_mac = dual_if.server_if.get_dest_mac()))
+
+ # assign generated config list to cache
+ cache.add('IF', server_if_command_set, dual_if.server_if.get_name())
+ cache.add('IF', client_if_command_set, dual_if.client_if.get_name())
+ cache.add('CONF', conf_t_command_set)
+ idx += 2
+
+ # finish handling pre-config cache
+ pre_commit_set = list(pre_commit_set)
+ if len(pre_commit_set):
+ pre_commit_set.append('exit')
+ pre_commit_cache.add('CONF', pre_commit_set )
+ # deploy the configs (order is important!)
+ self.cmd_link.run_command( [pre_commit_cache, cache] )
+ if self.config_history['basic_if_config']:
+            # in this case, duplicated interfaces will lose their IP addresses.
+ # re-config IPv4 addresses
+ self.configure_basic_filtered_interfaces(self.if_mngr.get_duplicated_if() )
+
+ def config_no_pbr (self):
+ self.config_pbr(mode = 'unconfig')
+
+ def config_static_routing (self, stat_route_obj, mode = 'config'):
+
+ if mode == 'config':
+ self.stat_route_config = stat_route_obj # save the latest static route config for future removal purposes
+
+ unconfig_str = '' if mode=='config' else 'no '
+ cache = CCommandCache()
+ pre_commit_cache = CCommandCache()
+ pre_commit_set = set([])
+ current_dup_intf = None
+ # client_net = None
+ # server_net = None
+ client_net = stat_route_obj.client_net_start
+ server_net = stat_route_obj.server_net_start
+ conf_t_command_set = []
+
+ for dual_if in self.if_mngr.get_dual_if_list():
+
+ # handle duplicated addressing generation
+ if dual_if.is_duplicated():
+ if dual_if.get_vrf_name() != current_dup_intf:
+                    # if this is a duplicated interface, and it differs from the one processed so far, reset the static route addressing
+ current_dup_intf = dual_if.get_vrf_name()
+ client_net = stat_route_obj.client_net_start
+ server_net = stat_route_obj.server_net_start
+ else:
+ if current_dup_intf is not None:
+ current_dup_intf = None
+ client_net = stat_route_obj.client_net_start
+ server_net = stat_route_obj.server_net_start
+
+ client_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.server_if.get_ipv4_addr() )
+ server_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.client_if.get_ipv4_addr() )
+
+ # handle static route configuration for the interfaces
+ if dual_if.is_duplicated():
+ client_if_command_set = []
+ server_if_command_set = []
+
+ # define the relevant VRF name
+ pre_commit_set.add('{mode}ip vrf {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )
+
+ # assign VRF to interfaces, config interfaces with relevant route-map
+ client_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )
+ server_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )
+
+ conf_t_command_set.append( "{mode}ip route vrf {dup} {next_net} {dest_mask} {next_hop}".format(
+ mode = unconfig_str,
+ dup = dual_if.get_vrf_name(),
+ next_net = client_net,
+ dest_mask = stat_route_obj.client_mask,
+ next_hop = client_net_next_hop))
+ conf_t_command_set.append( "{mode}ip route vrf {dup} {next_net} {dest_mask} {next_hop}".format(
+ mode = unconfig_str,
+ dup = dual_if.get_vrf_name(),
+ next_net = server_net,
+ dest_mask = stat_route_obj.server_mask,
+ next_hop = server_net_next_hop))
+
+ # config global arp to interfaces net address and vrf
+ if dual_if.client_if.get_dest_mac():
+ conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format(
+ mode = unconfig_str,
+ dup = dual_if.get_vrf_name(),
+ next_hop = server_net_next_hop,
+ dest_mac = dual_if.client_if.get_dest_mac()))
+ if dual_if.server_if.get_dest_mac():
+ conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format(
+ mode = unconfig_str,
+ dup = dual_if.get_vrf_name(),
+ next_hop = client_net_next_hop,
+ dest_mac = dual_if.server_if.get_dest_mac()))
+
+ # assign generated interfaces config list to cache
+ cache.add('IF', server_if_command_set, dual_if.server_if.get_name())
+ cache.add('IF', client_if_command_set, dual_if.client_if.get_name())
+
+ else:
+ conf_t_command_set.append( "{mode}ip route {next_net} {dest_mask} {next_hop}".format(
+ mode = unconfig_str,
+ next_net = client_net,
+ dest_mask = stat_route_obj.client_mask,
+ next_hop = server_net_next_hop))
+ conf_t_command_set.append( "{mode}ip route {next_net} {dest_mask} {next_hop}".format(
+ mode = unconfig_str,
+ next_net = server_net,
+ dest_mask = stat_route_obj.server_mask,
+ next_hop = client_net_next_hop))
+
+ # config global arp to interfaces net address
+ if dual_if.client_if.get_dest_mac():
+ conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format(
+ mode = unconfig_str,
+ next_hop = server_net_next_hop,
+ dest_mac = dual_if.client_if.get_dest_mac()))
+ if dual_if.server_if.get_dest_mac():
+ conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format(
+ mode = unconfig_str,
+ next_hop = client_net_next_hop,
+ dest_mac = dual_if.server_if.get_dest_mac()))
+
+ # bump up to the next client network address
+ client_net = misc_methods.get_single_net_client_addr(client_net, stat_route_obj.net_increment)
+ server_net = misc_methods.get_single_net_client_addr(server_net, stat_route_obj.net_increment)
+
+
+ # finish handling pre-config cache
+ pre_commit_set = list(pre_commit_set)
+ if len(pre_commit_set):
+ pre_commit_set.append('exit')
+ pre_commit_cache.add('CONF', pre_commit_set )
+ # assign generated config list to cache
+ cache.add('CONF', conf_t_command_set)
+ # deploy the configs (order is important!)
+ self.cmd_link.run_command( [pre_commit_cache, cache] )
+ if self.config_history['basic_if_config']:
+            # in this case, duplicated interfaces will lose their IP addresses.
+ # re-config IPv4 addresses
+ self.configure_basic_filtered_interfaces(self.if_mngr.get_duplicated_if() )
+
+
+ def config_no_static_routing (self, stat_route_obj = None):
+
+ if stat_route_obj is None and self.stat_route_config is not None:
+ self.config_static_routing(self.stat_route_config, mode = 'unconfig')
+            self.stat_route_config = None # reverse current static route config back to None (no static route config is known to run).
+ elif stat_route_obj is not None:
+ self.config_static_routing(stat_route_obj, mode = 'unconfig')
+ else:
+ raise UserWarning('No static route configuration is available for removal.')
+
+ def config_nbar_pd (self, mode = 'config'):
+ unconfig_str = '' if mode=='config' else 'no '
+ cache = CCommandCache()
+
+ for intf in self.if_mngr.get_if_list(if_type = IFType.Client):
+ cache.add('IF', "{mode}ip nbar protocol-discovery".format( mode = unconfig_str ), intf.get_name())
+
+ self.cmd_link.run_single_command( cache )
+
+ def config_no_nbar_pd (self):
+ self.config_nbar_pd (mode = 'unconfig')
+
+
+ def config_nat_verify (self, mode = 'config'):
+
+ # toggle all duplicate interfaces
+ # dup_ifs = self.if_mngr.get_duplicated_if()
+ if mode=='config':
+ self.toggle_duplicated_intf(action = 'down')
+ # self.__toggle_interfaces(dup_ifs, action = 'down' )
+ else:
+ # if we're in 'unconfig', toggle duplicated interfaces back up
+ self.toggle_duplicated_intf(action = 'up')
+ # self.__toggle_interfaces(dup_ifs)
+
+ def config_no_nat_verify (self):
+ self.config_nat_verify(mode = 'unconfig')
+
+ def config_nat (self, nat_obj, mode = 'config'):
+
+ if mode == 'config':
+ self.nat_config = nat_obj # save the latest nat config for future removal purposes
+
+ cache = CCommandCache()
+ conf_t_command_set = []
+ client_net = nat_obj.clients_net_start
+ pool_net = nat_obj.nat_pool_start
+ unconfig_str = '' if mode=='config' else 'no '
+
+ # toggle all duplicate interfaces
+ # dup_ifs = self.if_mngr.get_duplicated_if()
+ if mode=='config':
+ self.toggle_duplicated_intf(action = 'down')
+ # self.__toggle_interfaces(dup_ifs, action = 'down' )
+ else:
+ # if we're in 'unconfig', toggle duplicated interfaces back up
+ self.toggle_duplicated_intf(action = 'up')
+ # self.__toggle_interfaces(dup_ifs)
+
+ for dual_if in self.if_mngr.get_dual_if_list(is_duplicated = False):
+ cache.add('IF', "{mode}ip nat inside".format( mode = unconfig_str ), dual_if.client_if.get_name())
+ cache.add('IF', "{mode}ip nat outside".format( mode = unconfig_str ), dual_if.server_if.get_name())
+ pool_id = dual_if.get_id() + 1
+
+ conf_t_command_set.append("{mode}ip nat pool pool{pool_num} {start_addr} {end_addr} netmask {mask}".format(
+ mode = unconfig_str,
+ pool_num = pool_id,
+ start_addr = pool_net,
+ end_addr = CNatConfig.calc_pool_end(pool_net, nat_obj.nat_netmask),
+ mask = nat_obj.nat_netmask))
+
+ conf_t_command_set.append("{mode}ip nat inside source list {num} pool pool{pool_num} overload".format(
+ mode = unconfig_str,
+ num = pool_id,
+ pool_num = pool_id ))
+ conf_t_command_set.append("{mode}access-list {num} permit {net_addr} {net_wildcard}".format(
+ mode = unconfig_str,
+ num = pool_id,
+ net_addr = client_net,
+ net_wildcard = nat_obj.client_acl_wildcard))
+
+ # bump up to the next client address
+ client_net = misc_methods.get_single_net_client_addr(client_net, nat_obj.net_increment)
+ pool_net = misc_methods.get_single_net_client_addr(pool_net, nat_obj.net_increment)
+
+
+ # assign generated config list to cache
+ cache.add('CONF', conf_t_command_set)
+
+ # deploy the configs (order is important!)
+ return self.cmd_link.run_single_command( cache )
+
+
+ def config_no_nat (self, nat_obj = None):
+ # first, clear all nat translations
+ self.clear_nat_translations()
+
+ # then, decompose the known config
+ if nat_obj is None and self.nat_config is not None:
+ self.config_nat(self.nat_config, mode = 'unconfig')
+ self.nat_config = None # reverse current NAT config back to None (no nat config is known to run).
+ elif nat_obj is not None:
+ self.config_nat(nat_obj, mode = 'unconfig')
+ else:
+ raise UserWarning('No NAT configuration is available for removal.')
+
+
+ def config_zbf (self, mode = 'config'):
+ cache = CCommandCache()
+ pre_commit_cache = CCommandCache()
+ conf_t_command_set = []
+
+ # toggle all duplicate interfaces down
+ self.toggle_duplicated_intf(action = 'down')
+ # dup_ifs = self.if_mngr.get_duplicated_if()
+ # self.__toggle_interfaces(dup_ifs, action = 'down' )
+
+ # define security zones and security service policy to be applied on the interfaces
+ conf_t_command_set.append('class-map type inspect match-any c1')
+ conf_t_command_set.append('match protocol tcp')
+ conf_t_command_set.append('match protocol udp')
+ conf_t_command_set.append('policy-map type inspect p1')
+ conf_t_command_set.append('class type inspect c1')
+ conf_t_command_set.append('inspect')
+ conf_t_command_set.append('class class-default')
+ conf_t_command_set.append('pass')
+
+ conf_t_command_set.append('zone security z_in')
+ conf_t_command_set.append('zone security z_out')
+
+ conf_t_command_set.append('zone-pair security in2out source z_in destination z_out')
+ conf_t_command_set.append('service-policy type inspect p1')
+ conf_t_command_set.append('zone-pair security out2in source z_out destination z_in')
+ conf_t_command_set.append('service-policy type inspect p1')
+ conf_t_command_set.append('exit')
+
+ pre_commit_cache.add('CONF', conf_t_command_set)
+
+ for dual_if in self.if_mngr.get_dual_if_list(is_duplicated = False):
+ cache.add('IF', "zone-member security z_in", dual_if.client_if.get_name() )
+ cache.add('IF', "zone-member security z_out", dual_if.server_if.get_name() )
+
+ self.cmd_link.run_command( [pre_commit_cache, cache] )
+
+ def config_no_zbf (self):
+ cache = CCommandCache()
+ conf_t_command_set = []
+
+ # define security zones and security service policy to be applied on the interfaces
+ conf_t_command_set.append('no zone-pair security in2out source z_in destination z_out')
+ conf_t_command_set.append('no zone-pair security out2in source z_out destination z_in')
+
+ conf_t_command_set.append('no policy-map type inspect p1')
+ conf_t_command_set.append('no class-map type inspect match-any c1')
+
+ conf_t_command_set.append('no zone security z_in')
+ conf_t_command_set.append('no zone security z_out')
+
+ cache.add('CONF', conf_t_command_set)
+
+ for dual_if in self.if_mngr.get_dual_if_list(is_duplicated = False):
+ cache.add('IF', "no zone-member security z_in", dual_if.client_if.get_name() )
+ cache.add('IF', "no zone-member security z_out", dual_if.server_if.get_name() )
+
+ self.cmd_link.run_command( [cache] )
+ # toggle all duplicate interfaces back up
+ self.toggle_duplicated_intf(action = 'up')
+ # dup_ifs = self.if_mngr.get_duplicated_if()
+ # self.__toggle_interfaces(dup_ifs)
+
+
+ def config_ipv6_pbr (self, mode = 'config'):
+ idx = 1
+ unconfig_str = '' if mode=='config' else 'no '
+ cache = CCommandCache()
+ conf_t_command_set = []
+
+ conf_t_command_set.append('{mode}ipv6 unicast-routing'.format(mode = unconfig_str) )
+
+ for dual_if in self.if_mngr.get_dual_if_list():
+ client_if_command_set = []
+ server_if_command_set = []
+
+ client_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.server_if.get_ipv6_addr(), {'7':1}, ip_type = 'ipv6' )
+ server_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.client_if.get_ipv6_addr(), {'7':1}, ip_type = 'ipv6' )
+
+
+ client_if_command_set.append ('{mode}ipv6 enable'.format(mode = unconfig_str))
+ server_if_command_set.append ('{mode}ipv6 enable'.format(mode = unconfig_str))
+
+ if dual_if.is_duplicated():
+ prefix = 'ipv6_' + dual_if.get_vrf_name()
+ else:
+ prefix = 'ipv6'
+
+ # config interfaces with relevant route-map
+ client_if_command_set.append ('{mode}ipv6 policy route-map {pre}_{p1}_to_{p2}'.format(
+ mode = unconfig_str,
+ pre = prefix,
+ p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
+ server_if_command_set.append ('{mode}ipv6 policy route-map {pre}_{p2}_to_{p1}'.format(
+ mode = unconfig_str,
+ pre = prefix,
+ p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
+
+ # config global arp to interfaces net address and vrf
+ if dual_if.client_if.get_ipv6_dest_mac():
+ conf_t_command_set.append('{mode}ipv6 neighbor {next_hop} {intf} {dest_mac}'.format(
+ mode = unconfig_str,
+ next_hop = server_net_next_hop,
+ intf = dual_if.client_if.get_name(),
+ dest_mac = dual_if.client_if.get_ipv6_dest_mac()))
+ if dual_if.server_if.get_ipv6_dest_mac():
+ conf_t_command_set.append('{mode}ipv6 neighbor {next_hop} {intf} {dest_mac}'.format(
+ mode = unconfig_str,
+ next_hop = client_net_next_hop,
+ intf = dual_if.server_if.get_name(),
+ dest_mac = dual_if.server_if.get_ipv6_dest_mac()))
+
+ conf_t_command_set.append('{mode}route-map {pre}_{p1}_to_{p2} permit 10'.format(
+ mode = unconfig_str,
+ pre = prefix,
+ p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
+ if (mode == 'config'):
+ conf_t_command_set.append('set ipv6 next-hop {next_hop}'.format(next_hop = client_net_next_hop ) )
+ conf_t_command_set.append('{mode}route-map {pre}_{p2}_to_{p1} permit 10'.format(
+ mode = unconfig_str,
+ pre = prefix,
+ p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
+ if (mode == 'config'):
+ conf_t_command_set.append('set ipv6 next-hop {next_hop}'.format(next_hop = server_net_next_hop ) )
+ conf_t_command_set.append('exit')
+
+ # assign generated config list to cache
+ cache.add('IF', server_if_command_set, dual_if.server_if.get_name())
+ cache.add('IF', client_if_command_set, dual_if.client_if.get_name())
+ idx += 2
+
+ cache.add('CONF', conf_t_command_set)
+
+ # deploy the configs (order is important!)
+ self.cmd_link.run_command( [cache] )
+
+ def config_no_ipv6_pbr (self):
+ self.config_ipv6_pbr(mode = 'unconfig')
+
+ # show methods
+ def get_cpu_util (self):
+ response = self.cmd_link.run_single_command('show platform hardware qfp active datapath utilization | inc Load')
+ return CShowParser.parse_cpu_util_stats(response)
+
+ def get_cft_stats (self):
+ response = self.cmd_link.run_single_command('test platform hardware qfp active infrastructure cft datapath function cft-cpp-show-all-instances')
+ return CShowParser.parse_cft_stats(response)
+
+ def get_nbar_stats (self):
+ per_intf_stats = {}
+ for intf in self.if_mngr.get_if_list(if_type = IFType.Client):
+ response = self.cmd_link.run_single_command("show ip nbar protocol-discovery interface {interface} stats packet-count protocol".format( interface = intf.get_name() ), flush_first = True)
+ per_intf_stats[intf.get_name()] = CShowParser.parse_nbar_stats(response)
+ return per_intf_stats
+
+ def get_nbar_profiling_stats (self):
+ response = self.cmd_link.run_single_command("show platform hardware qfp active feature nbar profiling")
+ return CShowParser.parse_nbar_profiling_stats(response)
+
+ def get_drop_stats (self):
+
+ response = self.cmd_link.run_single_command('show platform hardware qfp active interface all statistics', flush_first = True)
+ # print response
+ # response = self.cmd_link.run_single_command('show platform hardware qfp active interface all statistics')
+ # print response
+ if_list_by_name = [x.get_name() for x in self.if_mngr.get_if_list()]
+ return CShowParser.parse_drop_stats(response, if_list_by_name )
+
+ def get_nat_stats (self):
+ response = self.cmd_link.run_single_command('show ip nat statistics')
+ return CShowParser.parse_nat_stats(response)
+
+ def get_nat_trans (self):
+ return self.cmd_link.run_single_command('show ip nat translation')
+
+ def get_cvla_memory_usage(self):
+ response = self.cmd_link.run_single_command('show platform hardware qfp active infrastructure cvla client handles')
+ # (res, res2) = CShowParser.parse_cvla_memory_usage(response)
+ return CShowParser.parse_cvla_memory_usage(response)
+
+
+ # clear methods
+ def clear_nat_translations(self):
+ pre_commit_cache = CCommandCache()
+ # prevent new NAT entries
+ # http://www.cisco.com/c/en/us/support/docs/ip/network-address-translation-nat/13779-clear-nat-comments.html
+ for dual_if in self.if_mngr.get_dual_if_list(is_duplicated = False):
+ pre_commit_cache.add('IF', "no ip nat inside", dual_if.client_if.get_name())
+ pre_commit_cache.add('IF', "no ip nat outside", dual_if.server_if.get_name())
+ self.cmd_link.run_single_command(pre_commit_cache)
+ time.sleep(0.5)
+ pre_commit_cache = CCommandCache()
+ # clear the translation
+ pre_commit_cache.add('EXEC', 'clear ip nat translation *')
+ self.cmd_link.run_single_command(pre_commit_cache)
+ time.sleep(0.5)
+
+ def clear_cft_counters (self):
+ """ clear_cft_counters(self) -> None
+
+ Clears the CFT counters on the platform
+ """
+ self.cmd_link.run_single_command('test platform hardware qfp active infrastructure cft datapath function cft-cpp-clear-instance-stats')
+
+ def clear_counters(self):
+ """ clear_counters(self) -> None
+
+ Clears the platform counters
+ """
+
+ pre_commit_cache = CCommandCache()
+ pre_commit_cache.add('EXEC', ['clear counters', '\r'] )
+ self.cmd_link.run_single_command( pre_commit_cache , read_until = ['#', '\[confirm\]'])
+
+ def clear_nbar_stats(self):
+ """ clear_nbar_stats(self) -> None
+
+ Clears the NBAR-PD classification stats
+ """
+ pre_commit_cache = CCommandCache()
+ pre_commit_cache.add('EXEC', ['clear ip nbar protocol-discovery','\r'] )
+ self.cmd_link.run_single_command( pre_commit_cache )
+
+ def clear_packet_drop_stats(self):
+ """ clear_packet_drop_stats(self) -> None
+
+ Clears packet-drop stats
+ """
+# command = "show platform hardware qfp active statistics drop clear"
+ self.cmd_link.run_single_command('show platform hardware qfp active interface all statistics clear_drop')
+
+ ###########################################
+ # file transfer and image loading methods #
+ ###########################################
+ def get_running_image_details (self):
+ """ get_running_image_details() -> dict
+
+ Check for the currently running image file on the platform.
+ Returns a dictionary, where 'drive' key is the drive in which the image is installed,
+ and 'image' key is the actual image file used.
+ """
+ response = self.cmd_link.run_single_command('show version | include System image')
+ parsed_info = CShowParser.parse_show_image_version(response)
+ self.running_image = parsed_info
+ return parsed_info
+
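+ # Illustrative return value of get_running_image_details (names are hypothetical):
+ # {'drive': 'harddisk', 'image': 'asr1000rp2-adventerprisek9.bin'}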
+
+ def check_image_existence (self, img_name):
+ """ check_image_existence(self, img_name) -> boolean
+
+ Parameters
+ ----------
+ img_name : str
+ a string representing the image name.
+
+ Check if the image file defined in the platform_config is already loaded onto the platform.
+ """
+ search_drives = ['bootflash', 'harddisk', self.running_image['drive']]
+ for search_drive in search_drives:
+ command = "dir {drive}: | include {image}".format(drive = search_drive, image = img_name)
+ response = self.cmd_link.run_single_command(command, timeout = 10)
+ if CShowParser.parse_image_existence(response, img_name):
+ self.needed_image_path = '%s:/%s' % (search_drive, img_name)
+ print('Found image in platform:', self.needed_image_path)
+ return True
+ return False
+
+ def config_tftp_server(self, device_cfg_obj, external_tftp_config = None, applyToPlatform = False):
+ """ configure_tftp_server(self, external_tftp_config, applyToPlatform) -> str
+
+ Parameters
+ ----------
+ external_tftp_config : dict (not in use)
+ A path to an external tftp config file to use instead of the one defined in the instance.
+ applyToPlatform : boolean
+ set to True in order to apply the config into the platform
+
+ Configures the tftp server on an interface of the platform.
+ """
+# tmp_tftp_config = external_tftp_config if external_tftp_config is not None else self.tftp_server_config
+ self.tftp_cfg = device_cfg_obj.get_tftp_info()
+ cache = CCommandCache()
+
+ command = "ip tftp source-interface {intf}".format( intf = device_cfg_obj.get_mgmt_interface() )
+ cache.add('CONF', command )
+ self.cmd_link.run_single_command(cache)
+ self.config_history['tftp_server_config'] = True
+
+ def load_platform_image(self, img_filename, external_tftp_config = None):
+ """ load_platform_image(self, img_filename, external_tftp_config) -> None
+
+ Parameters
+ ----------
+ external_tftp_config : dict
+ A path to an external tftp config file to use instead of the one defined in the instance.
+ img_filename : str
+ image name to be saved onto the platform's drive.
+
+ This method loads the configured image into the platform's harddisk (unless it is already loaded),
+ and sets that image to be the boot_image of the platform.
+ """
+ if not self.check_image_existence(img_filename): # check if this image isn't already saved in platform
+ #tmp_tftp_config = external_tftp_config if external_tftp_config is not None else self.tftp_cfg
+
+ if self.config_history['tftp_server_config']: # make sure a TFTP configuration has been loaded
+ cache = CCommandCache()
+ if self.running_image is None:
+ self.get_running_image_details()
+
+ command = "copy tftp://{tftp_ip}/{img_path}/{image} bootflash:".format(
+ tftp_ip = self.tftp_cfg['ip_address'],
+ img_path = self.tftp_cfg['images_path'],
+ image = img_filename)
+ cache.add('EXEC', [command, '\r', '\r'])
+
+ progress_thread = CProgressDisp.ProgressThread(notifyMessage = "Copying image via tftp, this may take a while...\n")
+ progress_thread.start()
+
+ response = self.cmd_link.run_single_command(cache, timeout = 900, read_until = ['\?', '#'])
+ print("RESPONSE:")
+ print(response)
+ progress_thread.join()
+ copy_ok = CShowParser.parse_file_copy(response)
+
+ if not copy_ok:
+ raise UserWarning('Image file loading failed. Please make sure the accessed image exists and has read privileges')
+ else:
+ raise UserWarning('TFTP configuration is not available. Please make sure a valid TFTP configuration has been provided')
+
+ def set_boot_image(self, boot_image):
+ """ set_boot_image(self, boot_image) -> None
+
+ Parameters
+ ----------
+ boot_image : str
+ An image file to be set as boot_image
+
+ Configures boot_image as the boot image of the platform into the running-config + config-register
+ """
+ cache = CCommandCache()
+ if self.needed_image_path is None:
+ if not self.check_image_existence(boot_image):
+ raise Exception("Trying to set boot image that's not found in router, please copy it first.")
+
+ boot_img_cmd = "boot system flash %s" % self.needed_image_path
+ config_register_cmd = "config-register 0x2021"
+ cache.add('CONF', ["no boot system", boot_img_cmd, config_register_cmd, '\r'])
+ response = self.cmd_link.run_single_command( cache )
+ print("RESPONSE:")
+ print(response)
+ self.save_config_to_startup_config()
+
+ def is_image_matches(self, needed_image):
+ """ set_boot_image(self, needed_image) -> boolean
+
+ Parameters
+ ----------
+ needed_image : str
+ An image file name to compare against the router's running image
+
+ Compares the image name to the router's running image and returns the match result.
+
+ """
+ if self.running_image is None:
+ self.get_running_image_details()
+ needed_image = needed_image.lower()
+ current_image = self.running_image['image'].lower()
+ if needed_image.find(current_image) != -1:
+ return True
+ if current_image.find(needed_image) != -1:
+ return True
+ return False
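+
+ # Example with hypothetical names: is_image_matches('ASR1k.BIN') returns True
+ # when the running image is 'asr1k.bin', since the comparison is case-insensitive
+ # and matches substrings in either direction.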
+
+ # misc class related methods
+
+ def load_platform_data_from_file (self, device_cfg_obj):
+ self.if_mngr.load_config(device_cfg_obj)
+
+ def launch_connection (self, device_cfg_obj):
+ self.running_image = None # clear the image name "cache"
+ self.cmd_link.launch_platform_connectivity(device_cfg_obj)
+
+ def reload_connection (self, device_cfg_obj):
+ self.cmd_link.close_platform_connection()
+ self.launch_connection(device_cfg_obj)
+
+ def save_config_to_startup_config (self):
+ """ save_config_to_startup_config(self) -> None
+
+ Copies running-config into startup-config.
+ """
+ cache = CCommandCache()
+ cache.add('EXEC', ['wr', '\r'] )
+ self.cmd_link.run_single_command(cache)
+
+ def reload_platform(self, device_cfg_obj):
+ """ reload_platform(self) -> None
+
+ Reloads the platform.
+ """
+ from subprocess import call
+ import os
+ i = 0
+ sleep_time = 30 # seconds
+
+ try:
+ cache = CCommandCache()
+
+ cache.add('EXEC', ['reload','n\r','\r'] )
+ self.cmd_link.run_single_command( cache )
+
+ progress_thread = CProgressDisp.ProgressThread(notifyMessage = "Reloading the platform, this may take a while...\n")
+ progress_thread.start()
+ time.sleep(60) # need delay for device to shut down before polling it
+ # poll the platform until ping response is received.
+ while True:
+ time.sleep(sleep_time)
+ try:
+ x = call(["ping", "-c 1", device_cfg_obj.get_ip_address()], stdout = open(os.devnull, 'wb'))
+ except:
+ x = 1
+ if x == 0:
+ break
+ elif i > 20:
+ raise TimeoutError('Platform failed to reload after reboot for over {minutes} minutes!'.format(minutes = round(1 + i * sleep_time / 60)))
+ else:
+ i += 1
+
+ time.sleep(30)
+ self.reload_connection(device_cfg_obj)
+ progress_thread.join()
+ except Exception as e:
+ print(e)
+
+ def get_if_manager(self):
+ return self.if_mngr
+
+ def dump_obj_config (self, object_name):
+ if object_name=='nat' and self.nat_config is not None:
+ self.nat_config.dump_config()
+ elif object_name=='static_route' and self.stat_route_config is not None:
+ self.stat_route_config.dump_config()
+ else:
+ raise UserWarning('No known configuration exists.')
+
+ def toggle_duplicated_intf(self, action = 'down'):
+
+ dup_ifs = self.if_mngr.get_duplicated_if()
+ self.__toggle_interfaces( dup_ifs, action = action )
+
+
+ def __toggle_interfaces (self, intf_list, action = 'up'):
+ cache = CCommandCache()
+ mode_str = 'no ' if action == 'up' else ''
+
+ for intf_obj in intf_list:
+ cache.add('IF', '{mode}shutdown'.format(mode = mode_str), intf_obj.get_name())
+
+ self.cmd_link.run_single_command( cache )
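+
+ # Illustrative effect: action='down' queues a plain "shutdown" under each
+ # interface, while action='up' queues "no shutdown".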
+
+
+class CStaticRouteConfig(object):
+
+ def __init__(self, static_route_dict):
+ self.clients_start = static_route_dict['clients_start']
+ self.servers_start = static_route_dict['servers_start']
+ self.net_increment = misc_methods.gen_increment_dict(static_route_dict['dual_port_mask'])
+ self.client_mask = static_route_dict['client_destination_mask']
+ self.server_mask = static_route_dict['server_destination_mask']
+ self.client_net_start = self.extract_net_addr(self.clients_start, self.client_mask)
+ self.server_net_start = self.extract_net_addr(self.servers_start, self.server_mask)
+ self.static_route_dict = static_route_dict
+
+ def extract_net_addr (self, ip_addr, ip_mask):
+ addr_lst = ip_addr.split('.')
+ mask_lst = ip_mask.split('.')
+ mask_lst = [str(int(x) & int(y)) for x, y in zip(addr_lst, mask_lst)]
+ return '.'.join(mask_lst)
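+
+ # Worked example: extract_net_addr('10.0.0.5', '255.255.255.0') ANDs the octet
+ # pairs (10&255, 0&255, 0&255, 5&0) and returns '10.0.0.0'.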
+
+ def dump_config (self):
+ import yaml
+ print(yaml.dump( self.static_route_dict , default_flow_style=False))
+
+
+class CNatConfig(object):
+ def __init__(self, nat_dict):
+ self.clients_net_start = nat_dict['clients_net_start']
+ self.client_acl_wildcard= nat_dict['client_acl_wildcard_mask']
+ self.net_increment = misc_methods.gen_increment_dict(nat_dict['dual_port_mask'])
+ self.nat_pool_start = nat_dict['pool_start']
+ self.nat_netmask = nat_dict['pool_netmask']
+ self.nat_dict = nat_dict
+
+ @staticmethod
+ def calc_pool_end (nat_pool_start, netmask):
+ pool_start_lst = [int(x) for x in nat_pool_start.split('.')]
+ pool_end_lst = list( pool_start_lst ) # create new list object, don't point to the original one
+ mask_lst = [int(x) for x in netmask.split('.')]
+ curr_octet = 3 # start with the LSB octet
+ inc_val = 1
+
+ while True:
+ tmp_masked = inc_val & mask_lst[curr_octet]
+ if tmp_masked == 0:
+ if (inc_val << 1) > 255:
+ inc_val = 1
+ pool_end_lst[curr_octet] = 255
+ curr_octet -= 1
+ else:
+ inc_val <<= 1
+ else:
+ pool_end_lst[curr_octet] += (inc_val - 1)
+ break
+ return '.'.join([str(x) for x in pool_end_lst])
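+
+ # Worked example: calc_pool_end('1.0.0.1', '255.255.255.0') walks the LSB octet
+ # up to the mask boundary and returns '1.0.0.255'; with netmask
+ # '255.255.255.128' it would return '1.0.0.128'.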
+
+ def dump_config (self):
+ import yaml
+ print(yaml.dump( self.nat_dict , default_flow_style=False))
+
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/CProgressDisp.py b/scripts/automation/regression/CProgressDisp.py
new file mode 100755
index 00000000..18df2f43
--- /dev/null
+++ b/scripts/automation/regression/CProgressDisp.py
@@ -0,0 +1,87 @@
+#!/router/bin/python
+from __future__ import print_function
+import threading
+import sys
+import time
+import outer_packages
+import termstyle
+import progressbar
+
+
+class ProgressThread(threading.Thread):
+ def __init__(self, notifyMessage = None):
+ super(ProgressThread, self).__init__()
+ self.stoprequest = threading.Event()
+ self.notifyMessage = notifyMessage
+
+ def run(self):
+ if self.notifyMessage is not None:
+ print(self.notifyMessage, end=' ')
+
+ while not self.stoprequest.is_set():
+ print("\b.", end=' ')
+ sys.stdout.flush()
+ time.sleep(5)
+
+ def join(self, timeout=None):
+ if self.notifyMessage is not None:
+ print(termstyle.green("Done!\n"), end=' ')
+ self.stoprequest.set()
+ super(ProgressThread, self).join(timeout)
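+
+# A minimal usage sketch (call site and message are illustrative): the thread
+# prints a dot every 5 seconds until join() is called.
+#
+#   progress = ProgressThread(notifyMessage = "Copying image via tftp...")
+#   progress.start()
+#   long_running_operation() # hypothetical long-running call
+#   progress.join()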
+
+
+class TimedProgressBar(threading.Thread):
+ def __init__(self, time_in_secs):
+ super(TimedProgressBar, self).__init__()
+ self.stoprequest = threading.Event()
+ self.stopFlag = False
+ self.time_in_secs = time_in_secs + 15 # add 15 seconds of extra grace time
+ widgets = ['Running TRex: ', progressbar.Percentage(), ' ',
+ progressbar.Bar(marker='>',left='[',right=']'),
+ ' ', progressbar.ETA()]
+ self.pbar = progressbar.ProgressBar(widgets=widgets, maxval=self.time_in_secs*2)
+
+
+ def run (self):
+ # global g_stop
+ print()
+ self.pbar.start()
+
+ try:
+ for i in range(0, self.time_in_secs*2 + 1):
+ if (self.stopFlag == True):
+ break
+ time.sleep(0.5)
+ self.pbar.update(i)
+ # self.pbar.finish()
+
+ except KeyboardInterrupt:
+ # self.pbar.finish()
+ print("\nInterrupted by user!!")
+ self.join()
+ finally:
+ print()
+
+ def join(self, isPlannedStop = True, timeout=None):
+ if isPlannedStop:
+ self.pbar.update(self.time_in_secs*2)
+ self.stopFlag = True
+ else:
+ self.stopFlag = True # Stop the progress bar in its current location
+ self.stoprequest.set()
+ super(TimedProgressBar, self).join(timeout)
+
+
+def timedProgressBar(time_in_secs):
+ widgets = ['Running TRex: ', progressbar.Percentage(), ' ',
+ progressbar.Bar(marker='>',left='[',right=']'),
+ ' ', progressbar.ETA()]
+ pbar = progressbar.ProgressBar(widgets=widgets, maxval=time_in_secs*2)
+ pbar.start()
+ for i in range(0, time_in_secs*2 + 1):
+ time.sleep(0.5)
+ pbar.update(i)
+ pbar.finish()
+ print()
+
+
diff --git a/scripts/automation/regression/CShowParser.py b/scripts/automation/regression/CShowParser.py
new file mode 100755
index 00000000..3445c70e
--- /dev/null
+++ b/scripts/automation/regression/CShowParser.py
@@ -0,0 +1,228 @@
+#!/router/bin/python-2.7.4
+
+import re
+import misc_methods
+
+class PlatformResponseMissmatch(Exception):
+ def __init__(self, message):
+ # Call the base class constructor with the parameters it needs
+ super(PlatformResponseMissmatch, self).__init__(message + ' is not available for the given platform state and data.\nPlease make sure the relevant features are turned on in the platform.')
+
+class PlatformResponseAmbiguity(Exception):
+ def __init__(self, message):
+ # Call the base class constructor with the parameters it needs
+ super(PlatformResponseAmbiguity, self).__init__(message + ' found more than one file matching the provided filename.\nPlease provide a more distinct filename.')
+
+
+class CShowParser(object):
+
+ @staticmethod
+ def parse_drop_stats (query_response, interfaces_list):
+ res = {'total_drops' : 0}
+ response_lst = query_response.split('\r\n')
+ mtch_found = 0
+
+ for line in response_lst:
+ mtch = re.match("^\s*(\w+/\d/\d)\s+(\d+)\s+(\d+)", line)
+ if mtch:
+ mtch_found += 1
+ if (mtch.group(1) in interfaces_list):
+ res[mtch.group(1)] = (int(mtch.group(2)) + int(mtch.group(3)))
+ res['total_drops'] += (int(mtch.group(2)) + int(mtch.group(3)))
+# if mtch_found == 0: # no matches found at all
+# raise PlatformResponseMissmatch('Drop stats')
+# else:
+# return res
+ return res
+
+ @staticmethod
+ def parse_nbar_stats (query_response):
+ response_lst = query_response.split('\r\n')
+ stats = {}
+ final_stats = {}
+ mtch_found = 0
+
+ for line in response_lst:
+ mtch = re.match("\s*([\w-]+)\s*(\d+)\s*(\d+)\s+", line)
+ if mtch:
+ mtch_found += 1
+ key = mtch.group(1)
+ pkt_in = int(mtch.group(2))
+ pkt_out = int(mtch.group(3))
+
+ avg_pkt_cnt = ( pkt_in + pkt_out )/2
+ if avg_pkt_cnt == 0.0:
+ # skip zero-count entries (avoids division by zero when normalizing below)
+ continue
+ if key in stats:
+ stats[key] += avg_pkt_cnt
+ else:
+ stats[key] = avg_pkt_cnt
+
+ # Normalize the results to percents
+ for protocol in stats:
+ protocol_norm_stat = int(stats[protocol]*10000/stats['Total'])/100.0 # round the result to x.xx format
+ if (protocol_norm_stat != 0.0):
+ final_stats[protocol] = protocol_norm_stat
+
+ if mtch_found == 0: # no matches found at all
+ raise PlatformResponseMissmatch('NBAR classification stats')
+ else:
+ return { 'percentage' : final_stats, 'packets' : stats }
+
+ @staticmethod
+ def parse_nat_stats (query_response):
+ response_lst = query_response.split('\r\n')
+ res = {}
+ mtch_found = 0
+
+ for line in response_lst:
+ mtch = re.match("Total (active translations):\s+(\d+).*(\d+)\s+static,\s+(\d+)\s+dynamic", line)
+ if mtch:
+ mtch_found += 1
+ res['total_active_trans'] = int(mtch.group(2))
+ res['static_active_trans'] = int(mtch.group(3))
+ res['dynamic_active_trans'] = int(mtch.group(4))
+ continue
+
+ mtch = re.match("(Hits):\s+(\d+)\s+(Misses):\s+(\d+)", line)
+ if mtch:
+ mtch_found += 1
+ res['num_of_hits'] = int(mtch.group(2))
+ res['num_of_misses'] = int(mtch.group(4))
+
+ if mtch_found == 0: # no matches found at all
+ raise PlatformResponseMissmatch('NAT translations stats')
+ else:
+ return res
+
+ @staticmethod
+ def parse_cpu_util_stats (query_response):
+ response_lst = query_response.split('\r\n')
+ res = { 'cpu0' : 0,
+ 'cpu1' : 0 }
+ mtch_found = 0
+ for line in response_lst:
+ mtch = re.match("\W*Processing: Load\D*(\d+)\D*(\d+)\D*(\d+)\D*(\d+)\D*", line)
+ if mtch:
+ mtch_found += 1
+ res['cpu0'] += float(mtch.group(1))
+ res['cpu1'] += float(mtch.group(2))
+
+ if mtch_found == 0: # no matches found at all
+ raise PlatformResponseMissmatch('CPU utilization processing')
+ else:
+ res['cpu0'] = res['cpu0']/mtch_found
+ res['cpu1'] = res['cpu1']/mtch_found
+ return res
+
+ @staticmethod
+ def parse_cft_stats (query_response):
+ response_lst = query_response.split('\r\n')
+ res = {}
+ mtch_found = 0
+ for line in response_lst:
+ mtch = re.match("\W*(\w+)\W*([:]|[=])\W*(\d+)", line)
+ if mtch:
+ mtch_found += 1
+ res[str(misc_methods.mix_string(mtch.group(1)))] = float(mtch.group(3))
+ if mtch_found == 0: # no matches found at all
+ raise PlatformResponseMissmatch('CFT counters stats')
+ else:
+ return res
+
+
+ @staticmethod
+ def parse_cvla_memory_usage(query_response):
+ response_lst = query_response.split('\r\n')
+ res = {}
+ res2 = {}
+ cnt = 0
+ state = 0
+ name = ''
+ number = 0.0
+
+ for line in response_lst:
+ if state == 0:
+ mtch = re.match("\W*Entity name:\W*(\w[^\r\n]+)", line)
+ if mtch:
+ name = misc_methods.mix_string(mtch.group(1))
+ state = 1
+ cnt += 1
+ elif state == 1:
+ mtch = re.match("\W*Handle:\W*(\d+)", line)
+ if mtch:
+ state = state + 1
+ else:
+ state = 0
+ elif state == 2:
+ mtch = re.match("\W*Number of allocations:\W*(\d+)", line)
+ if mtch:
+ state = state + 1
+ number=float(mtch.group(1))
+ else:
+ state = 0
+ elif state == 3:
+ mtch = re.match("\W*Memory allocated:\W*(\d+)", line)
+ if mtch:
+ state = 0
+ res[name] = float(mtch.group(1))
+ res2[name] = number
+ else:
+ state = 0
+ if cnt == 0:
+ raise PlatformResponseMissmatch('CVLA memory usage stats')
+
+ return (res,res2)
+
+
+ @staticmethod
+ def parse_show_image_version(query_response):
+ response_lst = query_response.split('\r\n')
+ res = {}
+
+ for line in response_lst:
+ mtch = re.match("System image file is \"(\w+):(.*/)?(.+)\"", line)
+ if mtch:
+ res['drive'] = mtch.group(1)
+ res['image'] = mtch.group(3)
+ return res
+
+ raise PlatformResponseMissmatch('Running image info')
+
+
+ @staticmethod
+ def parse_image_existence(query_response, img_name):
+ response_lst = query_response.split('\r\n')
+ cnt = 0
+
+ for line in response_lst:
+ regex = re.compile(".* (?!include) %s" % img_name )
+ mtch = regex.match(line)
+ if mtch:
+ cnt += 1
+ if cnt == 1:
+ return True
+ elif cnt > 1:
+ raise PlatformResponseAmbiguity('Image existence')
+ else:
+ return False
+
+ @staticmethod
+ def parse_file_copy (query_response):
+ rev_response_lst = reversed(query_response.split('\r\n'))
+ lines_parsed = 0
+
+ for line in rev_response_lst:
+ mtch = re.match("\[OK - (\d+) bytes\]", line)
+ if mtch:
+ return True
+ lines_parsed += 1
+
+ if lines_parsed > 5:
+ return False
+ return False
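+
+ # A successful IOS copy ends with a trailer such as "[OK - 48311236 bytes]"
+ # (byte count illustrative); only the last few lines of the response are scanned.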
+
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/CustomLogger.py b/scripts/automation/regression/CustomLogger.py
new file mode 100755
index 00000000..14ef1362
--- /dev/null
+++ b/scripts/automation/regression/CustomLogger.py
@@ -0,0 +1,36 @@
+
+import sys
+import os
+import logging
+
+
+# def setup_custom_logger(name, log_path = None):
+# logging.basicConfig(level = logging.INFO,
+# format = '%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s',
+# datefmt = '%m-%d %H:%M')
+
+
+def setup_custom_logger(name, log_path = None):
+ # first make sure the log path is available
+ if log_path is None:
+ log_path = os.getcwd()+'/trex_log.log'
+ else:
+ directory = os.path.dirname(log_path)
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+ logging.basicConfig(level = logging.DEBUG,
+ format = '%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s',
+ datefmt = '%m-%d %H:%M',
+ filename= log_path,
+ filemode= 'w')
+
+ # define a Handler which writes INFO messages or higher to the sys.stderr
+ consoleLogger = logging.StreamHandler()
+ consoleLogger.setLevel(logging.ERROR)
+ # set a format which is simpler for console use
+ formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
+ # tell the handler to use this format
+ consoleLogger.setFormatter(formatter)
+
+ # add the handler to the logger
+ logging.getLogger(name).addHandler(consoleLogger)
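+
+# A minimal usage sketch (logger name and path are illustrative):
+#   setup_custom_logger('TRexLogger', log_path = '/tmp/trex_log.log')
+#   logging.getLogger('TRexLogger').info('regression started')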
diff --git a/scripts/automation/regression/aggregate_results.py b/scripts/automation/regression/aggregate_results.py
new file mode 100755
index 00000000..c7c61ea6
--- /dev/null
+++ b/scripts/automation/regression/aggregate_results.py
@@ -0,0 +1,659 @@
+# -*- coding: utf-8 -*-
+import xml.etree.ElementTree as ET
+import outer_packages
+import argparse
+import glob
+from pprint import pprint
+import sys, os
+from collections import OrderedDict
+import copy
+import datetime, time
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+import subprocess, shlex
+from ansi2html import Ansi2HTMLConverter
+
+converter = Ansi2HTMLConverter(inline = True)
+convert = converter.convert
+
+def ansi2html(text):
+ return convert(text, full = False)
+
+FUNCTIONAL_CATEGORY = 'Functional' # how to display those categories
+ERROR_CATEGORY = 'Error'
+
+
+def pad_tag(text, tag):
+ return '<%s>%s</%s>' % (tag, text, tag)
+
+def mark_string(text, color, condition):
+ if condition:
+ return '<font color=%s><b>%s</b></font>' % (color, text)
+ return text
+
+
+def is_functional_test_name(testname):
+ #if testname.startswith(('platform_', 'misc_methods_', 'vm_', 'payload_gen_', 'pkt_builder_')):
+ # return True
+ #return False
+ if testname.startswith('functional_tests.'):
+ return True
+ return False
+
+def is_good_status(text):
+ return text in ('Successful', 'Fixed', 'Passed', 'True', 'Pass')
+
+# input: xml element with test result
+# output string: 'error', 'failure', 'skipped', 'passed'
+def get_test_result(test):
+ for child in test.getchildren():
+ if child.tag in ('error', 'failure', 'skipped'):
+ return child.tag
+ return 'passed'
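+
+# e.g. a test element containing a <failure> child yields 'failure'; one with no
+# error/failure/skipped children yields 'passed'.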
+
+# returns row of table with <th> and <td> columns - key: value
+def add_th_td(key, value):
+ return '<tr><th>%s</th><td>%s</td></tr>\n' % (key, value)
+
+# returns row of table with <td> and <td> columns - key: value
+def add_td_td(key, value):
+ return '<tr><td>%s</td><td>%s</td></tr>\n' % (key, value)
+
+# returns row of table with <th> and <th> columns - key: value
+def add_th_th(key, value):
+ return '<tr><th>%s</th><th>%s</th></tr>\n' % (key, value)
+
+# returns <div> with table of tests under given category.
+# category - string with name of category
+# tests - list of tests, derived from the aggregated xml report, tweaked a little to expose stdout etc. easily
+# tests_type - stateful or stateless
+# category_info_dir - folder to search for category info file
+# expanded - bool, false = outputs (stdout etc.) of tests are hidden by CSS
+# brief - bool, true = cut some part of tests outputs (useful for errors section with expanded flag)
+def add_category_of_tests(category, tests, tests_type = None, category_info_dir = None, expanded = False, brief = False):
+ is_actual_category = category not in (FUNCTIONAL_CATEGORY, ERROR_CATEGORY)
+ category_id = '_'.join([category, tests_type]) if tests_type else category
+ category_name = ' '.join([category, tests_type.capitalize()]) if tests_type else category
+ html_output = ''
+ if is_actual_category:
+ html_output += '<br><table class="reference">\n'
+
+ if category_info_dir:
+ category_info_file = '%s/report_%s.info' % (category_info_dir, category)
+ if os.path.exists(category_info_file):
+ with open(category_info_file) as f:
+ for info_line in f.readlines():
+ key_value = info_line.split(':', 1)
+ if key_value[0].strip() in list(trex_info_dict.keys()) + ['User']: # always 'hhaim', no need to show
+ continue
+ html_output += add_th_td('%s:' % key_value[0], key_value[1])
+ else:
+ html_output += add_th_td('Info:', 'No info')
+ print('add_category_of_tests: no category info %s' % category_info_file)
+ if tests_type:
+ html_output += add_th_td('Tests type:', tests_type.capitalize())
+ if len(tests):
+ total_duration = 0.0
+ for test in tests:
+ total_duration += float(test.attrib['time'])
+ html_output += add_th_td('Tests duration:', datetime.timedelta(seconds = int(total_duration)))
+ html_output += '</table>\n'
+
+ if not len(tests):
+ return html_output + pad_tag('<br><font color=red>No tests!</font>', 'b')
+ html_output += '<br>\n<table class="reference" width="100%">\n<tr><th align="left">'
+
+ if category == ERROR_CATEGORY:
+ html_output += 'Setup</th><th align="left">Failed tests:'
+ else:
+ html_output += '%s tests:' % category_name
+ html_output += '</th><th align="center">Final Result</th>\n<th align="center">Time (s)</th>\n</tr>\n'
+ for test in tests:
+ functional_test = is_functional_test_name(test.attrib['name'])
+ if functional_test and is_actual_category:
+ continue
+ if category == ERROR_CATEGORY:
+ test_id = ('err_' + test.attrib['classname'] + test.attrib['name']).replace('.', '_')
+ else:
+ test_id = (category_id + test.attrib['name']).replace('.', '_')
+ if expanded:
+ html_output += '<tr>\n<th>'
+ else:
+ html_output += '<tr onclick=tgl_test("%s") class=linktr>\n<td class=linktext>' % test_id
+ if category == ERROR_CATEGORY:
+ html_output += FUNCTIONAL_CATEGORY if functional_test else test.attrib['classname']
+ if expanded:
+ html_output += '</th><td>'
+ else:
+ html_output += '</td><td class=linktext>'
+ html_output += '%s</td>\n<td align="center">' % test.attrib['name']
+ test_result = get_test_result(test)
+ if test_result == 'error':
+ html_output += '<font color="red"><b>ERROR</b></font></td>'
+ elif test_result == 'failure':
+ html_output += '<font color="red"><b>FAILED</b></font></td>'
+ elif test_result == 'skipped':
+ html_output += '<font color="blue"><b>SKIPPED</b></font></td>'
+ else:
+ html_output += '<font color="green"><b>PASSED</b></font></td>'
+ html_output += '<td align="center"> '+ test.attrib['time'] + '</td></tr>'
+
+ result, result_text = test.attrib.get('result', ('', ''))
+ if result_text:
+ start_index_errors_stl = result_text.find('STLError: \n******')
+ if start_index_errors_stl > 0:
+ result_text = result_text[start_index_errors_stl:].strip() # cut traceback
+ start_index_errors = result_text.find('Exception: The test is failed, reasons:')
+ if start_index_errors > 0:
+ result_text = result_text[start_index_errors + 10:].strip() # cut traceback
+ result_text = ansi2html(result_text)
+ result_text = '<b style="color:000080;">%s:</b><br>%s<br><br>' % (result.capitalize(), result_text.replace('\n', '<br>'))
+ stderr = '' if brief and result_text else test.get('stderr', '')
+ if stderr:
+ stderr = ansi2html(stderr)
+ stderr = '<b style="color:000080;"><text color=000080>Stderr</text>:</b><br>%s<br><br>\n' % stderr.replace('\n', '<br>')
+ stdout = '' if brief and result_text else test.get('stdout', '')
+ if stdout:
+ stdout = ansi2html(stdout)
+ if brief: # cut off server logs
+ stdout = stdout.split('>>>>>>>>>>>>>>>', 1)[0]
+ stdout = '<b style="color:000080;">Stdout:</b><br>%s<br><br>\n' % stdout.replace('\n', '<br>')
+
+ html_output += '<tr style="%scolor:603000;" id="%s"><td colspan=%s>' % ('' if expanded else 'display:none;', test_id, 4 if category == ERROR_CATEGORY else 3)
+ if result_text or stderr or stdout:
+ html_output += '%s%s%s</td></tr>' % (result_text, stderr, stdout)
+ else:
+ html_output += '<b style="color:000080;">No output</b></td></tr>'
+
+ html_output += '\n</table>'
+ return html_output
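+
+# A typical call (sketch, mirroring the mail-report path further below):
+#   html = add_category_of_tests(ERROR_CATEGORY, error_tests, expanded = True, brief = True)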
+
+style_css = """
+html {overflow-y:scroll;}
+
+body {
+ font-size:12px;
+ color:#000000;
+ background-color:#ffffff;
+ margin:0px;
+ font-family:verdana,helvetica,arial,sans-serif;
+}
+
+div {width:100%;}
+
+table,th,td,input,textarea {
+ font-size:100%;
+}
+
+table.reference, table.reference_fail {
+ background-color:#ffffff;
+ border:1px solid #c3c3c3;
+ border-collapse:collapse;
+ vertical-align:middle;
+}
+
+table.reference th {
+ background-color:#e5eecc;
+ border:1px solid #c3c3c3;
+ padding:3px;
+}
+
+table.reference_fail th {
+ background-color:#ffcccc;
+ border:1px solid #c3c3c3;
+ padding:3px;
+}
+
+
+table.reference td, table.reference_fail td {
+ border:1px solid #c3c3c3;
+ padding:3px;
+}
+
+a.example {font-weight:bold}
+
+#a:link,a:visited {color:#900B09; background-color:transparent}
+#a:hover,a:active {color:#FF0000; background-color:transparent}
+
+.linktr {
+ cursor: pointer;
+}
+
+.linktext {
+ color:#0000FF;
+ text-decoration: underline;
+}
+"""
+
+
+# main
+if __name__ == '__main__':
+
+ # deal with input args
+ argparser = argparse.ArgumentParser(description='Aggregates test results from the ./reports dir; produces xml, html and mail reports.')
+ argparser.add_argument('--input_dir', default='./reports',
+ help='Directory with xmls/setups info. Filenames: report_<setup name>.xml/report_<setup name>.info')
+ argparser.add_argument('--output_xml', default='./reports/aggregated_tests.xml',
+ dest = 'output_xmlfile', help='Name of output xml file with aggregated results.')
+ argparser.add_argument('--output_html', default='./reports/aggregated_tests.html',
+ dest = 'output_htmlfile', help='Name of output html file with aggregated results.')
+ argparser.add_argument('--output_mail', default='./reports/aggregated_tests_mail.html',
+ dest = 'output_mailfile', help='Name of output html file with aggregated results for mail.')
+ argparser.add_argument('--output_title', default='./reports/aggregated_tests_title.txt',
+ dest = 'output_titlefile', help='Name of output file to contain title of mail.')
+ argparser.add_argument('--build_status_file', default='./reports/build_status',
+ dest = 'build_status_file', help='Name of output file to save scenario build results (should not be wiped).')
+ argparser.add_argument('--last_passed_commit', default='./reports/last_passed_commit',
+ dest = 'last_passed_commit', help='Name of output file to save last passed commit (should not be wiped).')
+ args = argparser.parse_args()
+
+
+##### get input variables/TRex commit info
+
+ scenario = os.environ.get('SCENARIO')
+ build_url = os.environ.get('BUILD_URL')
+ build_id = os.environ.get('BUILD_ID')
+ trex_repo = os.environ.get('TREX_CORE_REPO')
+ python_ver = os.environ.get('PYTHON_VER')
+ if not scenario:
+ print('Warning: no environment variable SCENARIO, using default')
+ scenario = 'TRex regression'
+ if not build_url:
+ print('Warning: no environment variable BUILD_URL')
+ if not build_id:
+ print('Warning: no environment variable BUILD_ID')
+ if not python_ver:
+ print('Warning: no environment variable PYTHON_VER')
+
+ trex_info_dict = OrderedDict()
+ for file in glob.glob('%s/report_*.info' % args.input_dir):
+ with open(file) as f:
+ file_lines = f.readlines()
+ if not len(file_lines):
+ continue # to next file
+ for info_line in file_lines:
+ key_value = info_line.split(':', 1)
+ not_trex_keys = ['Server', 'Router', 'User']
+ if key_value[0].strip() in not_trex_keys:
+ continue # to next parameters
+ trex_info_dict[key_value[0].strip()] = key_value[1].strip()
+ break
+
+ trex_last_commit_info = ''
+ trex_last_commit_hash = trex_info_dict.get('Git SHA')
+ if trex_last_commit_hash and trex_repo:
+ try:
+ print('Getting TRex commit with hash %s' % trex_last_commit_hash)
+ command = 'git --git-dir %s show %s --quiet' % (trex_repo, trex_last_commit_hash)
+ print('Executing: %s' % command)
+ proc = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (trex_last_commit_info, stderr) = proc.communicate()
+ print('Stdout:\n\t' + trex_last_commit_info.replace('\n', '\n\t'))
+ print('Stderr:', stderr)
+ print('Return code:', proc.returncode)
+ trex_last_commit_info = trex_last_commit_info.replace('\n', '<br>')
+ except Exception as e:
+ print('Error getting last commit: %s' % e)
+
+##### get xmls: report_<setup name>.xml
+
+ err = []
+ jobs_list = []
+ jobs_file = '%s/jobs_list.info' % args.input_dir
+ if os.path.exists(jobs_file):
+ with open(jobs_file) as f:
+ for line in f.readlines():
+ line = line.strip()
+ if line:
+ jobs_list.append(line)
+ else:
+ message = '%s does not exist!' % jobs_file
+ print(message)
+ err.append(message)
+
+##### aggregate results to 1 single tree
+ aggregated_root = ET.Element('testsuite')
+ test_types = ('functional', 'stateful', 'stateless')
+ setups = {}
+ for job in jobs_list:
+ setups[job] = {}
+ for test_type in test_types:
+ xml_file = '%s/report_%s_%s.xml' % (args.input_dir, job, test_type)
+ if not os.path.exists(xml_file):
+ continue
+ if os.path.basename(xml_file) == os.path.basename(args.output_xmlfile):
+ continue
+ setups[job][test_type] = []
+ print('Processing report: %s.%s' % (job, test_type))
+ tree = ET.parse(xml_file)
+ root = tree.getroot()
+ for key, value in root.attrib.items():
+ if key in aggregated_root.attrib and value.isdigit(): # sum total number of failed tests etc.
+ aggregated_root.attrib[key] = str(int(value) + int(aggregated_root.attrib[key]))
+ else:
+ aggregated_root.attrib[key] = value
+ tests = root.getchildren()
+ if not len(tests): # there should be tests
+ message = 'No tests in xml %s' % xml_file
+ print(message)
+ #err.append(message)
+ for test in tests:
+ setups[job][test_type].append(test)
+ test.attrib['name'] = test.attrib['classname'] + '.' + test.attrib['name']
+ test.attrib['classname'] = job
+ aggregated_root.append(test)
+ if not sum([len(x) for x in setups[job].values()]):
+ message = 'No reports from setup %s!' % job
+ print(message)
+ err.append(message)
+ continue
+
+ total_tests_count = int(aggregated_root.attrib.get('tests', 0))
+ error_tests_count = int(aggregated_root.attrib.get('errors', 0))
+ failure_tests_count = int(aggregated_root.attrib.get('failures', 0))
+ skipped_tests_count = int(aggregated_root.attrib.get('skip', 0))
+ passed_tests_count = total_tests_count - error_tests_count - failure_tests_count - skipped_tests_count
+
+ tests_count_string = mark_string('Total: %s' % total_tests_count, 'red', total_tests_count == 0) + ', '
+ tests_count_string += mark_string('Passed: %s' % passed_tests_count, 'red', error_tests_count + failure_tests_count > 0) + ', '
+ tests_count_string += mark_string('Error: %s' % error_tests_count, 'red', error_tests_count > 0) + ', '
+ tests_count_string += mark_string('Failure: %s' % failure_tests_count, 'red', failure_tests_count > 0) + ', '
+ tests_count_string += 'Skipped: %s' % skipped_tests_count
+
+##### save output xml
+
+ print('Writing output file: %s' % args.output_xmlfile)
+ ET.ElementTree(aggregated_root).write(args.output_xmlfile)
+
+
+##### build output html
+ error_tests = []
+ functional_tests = OrderedDict()
+ # categorize and get output of each test
+ for test in aggregated_root.getchildren(): # each test in xml
+ if is_functional_test_name(test.attrib['name']):
+ functional_tests[test.attrib['name']] = test
+ result_tuple = None
+ for child in test.getchildren(): # <system-out>, <system-err> (<failure>, <error>, <skipped> other: passed)
+# if child.tag in ('failure', 'error'):
+ #temp = copy.deepcopy(test)
+ #print temp._children
+ #print test._children
+# error_tests.append(test)
+ if child.tag == 'failure':
+ error_tests.append(test)
+ result_tuple = ('failure', child.text)
+ elif child.tag == 'error':
+ error_tests.append(test)
+ result_tuple = ('error', child.text)
+ elif child.tag == 'skipped':
+ result_tuple = ('skipped', child.text)
+ elif child.tag == 'system-out':
+ test.attrib['stdout'] = child.text
+ elif child.tag == 'system-err':
+ test.attrib['stderr'] = child.text
+ if result_tuple:
+ test.attrib['result'] = result_tuple
+
+ html_output = '''\
+<html>
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<style type="text/css">
+'''
+ html_output += style_css
+ html_output +='''
+</style>
+</head>
+
+<body>
+<table class="reference">
+'''
+ if scenario:
+ html_output += add_th_td('Scenario:', scenario.capitalize())
+ if python_ver:
+ html_output += add_th_td('Python:', python_ver)
+ start_time_file = '%s/start_time.info' % args.input_dir
+ if os.path.exists(start_time_file):
+ with open(start_time_file) as f:
+ start_time = int(f.read())
+ total_time = int(time.time()) - start_time
+ html_output += add_th_td('Regression start:', datetime.datetime.fromtimestamp(start_time).strftime('%d/%m/%Y %H:%M'))
+ html_output += add_th_td('Regression duration:', datetime.timedelta(seconds = total_time))
+ html_output += add_th_td('Tests count:', tests_count_string)
+ for key in trex_info_dict:
+ if key == 'Git SHA':
+ continue
+ html_output += add_th_td(key, trex_info_dict[key])
+ if trex_last_commit_info:
+ html_output += add_th_td('Last commit:', trex_last_commit_info)
+ html_output += '</table><br>\n'
+ if err:
+ html_output += '<font color=red>%s<font><br><br>\n' % '\n<br>'.join(err)
+
+#<table style="width:100%;">
+# <tr>
+# <td>Summary:</td>\
+#'''
+ #passed_quantity = len(result_types['passed'])
+ #failed_quantity = len(result_types['failed'])
+ #error_quantity = len(result_types['error'])
+ #skipped_quantity = len(result_types['skipped'])
+
+ #html_output += '<td>Passed: %s</td>' % passed_quantity
+ #html_output += '<td>Failed: %s</td>' % (pad_tag(failed_quantity, 'b') if failed_quantity else '0')
+ #html_output += '<td>Error: %s</td>' % (pad_tag(error_quantity, 'b') if error_quantity else '0')
+ #html_output += '<td>Skipped: %s</td>' % (pad_tag(skipped_quantity, 'b') if skipped_quantity else '0')
+# html_output += '''
+# </tr>
+#</table>'''
+
+ category_arr = [FUNCTIONAL_CATEGORY, ERROR_CATEGORY]
+
+# Adding buttons
+ # Error button
+ if len(error_tests):
+ html_output += '\n<button onclick=tgl_cat("cat_tglr_{error}")>{error}</button>'.format(error = ERROR_CATEGORY)
+ # Setups buttons
+ for category in sorted(setups.keys()):
+ category_arr.append(category)
+ html_output += '\n<button onclick=tgl_cat("cat_tglr_%s")>%s</button>' % (category_arr[-1], category)
+ # Functional buttons
+ if len(functional_tests):
+ html_output += '\n<button onclick=tgl_cat("cat_tglr_%s")>%s</button>' % (FUNCTIONAL_CATEGORY, FUNCTIONAL_CATEGORY)
+
+# Adding tests
+ # Error tests
+ if len(error_tests):
+ html_output += '<div style="display:block;" id="cat_tglr_%s">' % ERROR_CATEGORY
+ html_output += add_category_of_tests(ERROR_CATEGORY, error_tests)
+ html_output += '</div>'
+ # Setups tests
+ for category, tests in setups.items():
+ html_output += '<div style="display:none;" id="cat_tglr_%s">' % category
+ if 'stateful' in tests:
+ html_output += add_category_of_tests(category, tests['stateful'], 'stateful', category_info_dir=args.input_dir)
+ if 'stateless' in tests:
+ html_output += add_category_of_tests(category, tests['stateless'], 'stateless', category_info_dir=(None if 'stateful' in tests else args.input_dir))
+ html_output += '</div>'
+ # Functional tests
+ if len(functional_tests):
+ html_output += '<div style="display:none;" id="cat_tglr_%s">' % FUNCTIONAL_CATEGORY
+ html_output += add_category_of_tests(FUNCTIONAL_CATEGORY, functional_tests.values())
+ html_output += '</div>'
+
+ html_output += '\n\n<script type="text/javascript">\n var category_arr = %s\n' % ['cat_tglr_%s' % x for x in category_arr]
+ html_output += '''
+ function tgl_cat(id)
+ {
+ for(var i=0; i<category_arr.length; i++)
+ {
+ var e = document.getElementById(category_arr[i]);
+ if (id == category_arr[i])
+ {
+ if(e.style.display == 'block')
+ e.style.display = 'none';
+ else
+ e.style.display = 'block';
+ }
+ else
+ {
+ if (e) e.style.display = 'none';
+ }
+ }
+ }
+ function tgl_test(id)
+ {
+ var e = document.getElementById(id);
+ if(e.style.display == 'table-row')
+ e.style.display = 'none';
+ else
+ e.style.display = 'table-row';
+ }
+</script>
+</body>
+</html>\
+'''
+
+# save html
+ with open(args.output_htmlfile, 'w') as f:
+ print('Writing output file: %s' % args.output_htmlfile)
+ f.write(html_output)
+ html_output = None
+
+# mail report (only error tests, expanded)
+
+ mail_output = '''\
+<html>
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<style type="text/css">
+'''
+ mail_output += style_css
+ mail_output +='''
+</style>
+</head>
+
+<body>
+<table class="reference">
+'''
+ if scenario:
+ mail_output += add_th_td('Scenario:', scenario.capitalize())
+ if python_ver:
+ mail_output += add_th_td('Python:', python_ver)
+ if build_url:
+ mail_output += add_th_td('Full HTML report:', '<a class="example" href="%s/HTML_Report">link</a>' % build_url)
+ start_time_file = '%s/start_time.info' % args.input_dir
+ if os.path.exists(start_time_file):
+ with open(start_time_file) as f:
+ start_time = int(f.read())
+ total_time = int(time.time()) - start_time
+ mail_output += add_th_td('Regression start:', datetime.datetime.fromtimestamp(start_time).strftime('%d/%m/%Y %H:%M'))
+ mail_output += add_th_td('Regression duration:', datetime.timedelta(seconds = total_time))
+ mail_output += add_th_td('Tests count:', tests_count_string)
+ for key in trex_info_dict:
+ if key == 'Git SHA':
+ continue
+ mail_output += add_th_td(key, trex_info_dict[key])
+
+ if trex_last_commit_info:
+ mail_output += add_th_td('Last commit:', trex_last_commit_info)
+ mail_output += '</table><br>\n<table width=100%><tr><td>\n'
+
+ for category in setups.keys():
+ failing_category = False
+ for test in error_tests:
+ if test.attrib['classname'] == category:
+ failing_category = True
+ if failing_category or not len(setups[category]) or not sum([len(x) for x in setups[category].values()]):
+ mail_output += '<table class="reference_fail" align=left style="Margin-bottom:10;Margin-right:10;">\n'
+ else:
+ mail_output += '<table class="reference" align=left style="Margin-bottom:10;Margin-right:10;">\n'
+ mail_output += add_th_th('Setup:', pad_tag(category.replace('.', '/'), 'b'))
+ category_info_file = '%s/report_%s.info' % (args.input_dir, category.replace('.', '_'))
+ if os.path.exists(category_info_file):
+ with open(category_info_file) as f:
+ for info_line in f.readlines():
+ key_value = info_line.split(':', 1)
+ if key_value[0].strip() in list(trex_info_dict.keys()) + ['User']: # always 'hhaim', no need to show
+ continue
+ mail_output += add_th_td('%s:' % key_value[0].strip(), key_value[1].strip())
+ else:
+ mail_output += add_th_td('Info:', 'No info')
+ mail_output += '</table>\n'
+ mail_output += '</td></tr></table>\n'
+
+ # Error tests
+ if len(error_tests) or err:
+ if err:
+ mail_output += '<font color=red>%s<font>' % '\n<br>'.join(err)
+ if len(error_tests) > 5:
+ mail_output += '\n<font color=red>More than 5 failed tests, showing brief output.<font>\n<br>'
+ # show only brief version (cut some info)
+ mail_output += add_category_of_tests(ERROR_CATEGORY, error_tests, expanded=True, brief=True)
+ else:
+ mail_output += add_category_of_tests(ERROR_CATEGORY, error_tests, expanded=True)
+ else:
+ mail_output += '<table><tr style="font-size:120;color:green;font-family:arial"><td>☺</td><td style="font-size:20">All passed.</td></tr></table>\n'
+ mail_output += '\n</body>\n</html>'
+
+##### save outputs
+
+
+# mail content
+ with open(args.output_mailfile, 'w') as f:
+ print('Writing output file: %s' % args.output_mailfile)
+ f.write(mail_output)
+
+# build status
+ category_dict_status = {}
+ if os.path.exists(args.build_status_file):
+ print('Reading: %s' % args.build_status_file)
+ with open(args.build_status_file, 'rb') as f:
+ try:
+ category_dict_status = pickle.load(f)
+ except Exception as e:
+ print('Error during pickle load: %s' % e)
+ if type(category_dict_status) is not dict:
+ print('%s is corrupt, truncating' % args.build_status_file)
+ category_dict_status = {}
+
+ last_status = category_dict_status.get(scenario, 'Successful') # assume last is passed if no history
+ if err or len(error_tests): # has fails
+ exit_status = 1
+ if is_good_status(last_status):
+ current_status = 'Failure'
+ else:
+ current_status = 'Still Failing'
+ else:
+ exit_status = 0
+ if is_good_status(last_status):
+ current_status = 'Successful'
+ else:
+ current_status = 'Fixed'
+ category_dict_status[scenario] = current_status
+
+ with open(args.build_status_file, 'wb') as f:
+ print('Writing output file: %s' % args.build_status_file)
+ pickle.dump(category_dict_status, f)
+
+# last successful commit
+ if (current_status in ('Successful', 'Fixed')) and trex_last_commit_hash and len(jobs_list) > 0 and scenario == 'nightly':
+ with open(args.last_passed_commit, 'w') as f:
+ print('Writing output file: %s' % args.last_passed_commit)
+ f.write(trex_last_commit_hash)
+
+# mail title
+ mailtitle_output = scenario.capitalize()
+ if build_id:
+ mailtitle_output += ' - Build #%s' % build_id
+ mailtitle_output += ' - %s!' % current_status
+
+ with open(args.output_titlefile, 'w') as f:
+ print('Writing output file: %s' % args.output_titlefile)
+ f.write(mailtitle_output)
+
+# exit
+ sys.exit(exit_status)
diff --git a/scripts/automation/regression/functional_tests/config.yaml b/scripts/automation/regression/functional_tests/config.yaml
new file mode 100644
index 00000000..e1bc2016
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/config.yaml
@@ -0,0 +1,74 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+
+### Router configuration:
+# hostname - the router hostname as it appears in the ______# cli prefix
+# ip_address - the router's ip, used to communicate with it
+# image - the desired image to be loaded as the router's running image
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interface configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+# The "clean" configuration is a mandatory configuration the router loads in order to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : hostname
+ password : root password
+ version_path : not used
+ cores : 1
+
+router:
+ model : device model
+ hostname : device hostname
+ ip_address : device ip
+ image : device image name
+ line_password : telnet pass
+ en_password : enable pass
+ mgmt_interface : GigabitEthernet0/0/0
+ clean_config : path to clean_config file
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : GigabitEthernet0/0/1
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.1000.0000
+ server :
+ name : GigabitEthernet0/0/2
+ src_mac_addr : 0000.0002.0000
+ dest_mac_addr : 0000.2000.0000
+ vrf_name : null
+ - client :
+ name : GigabitEthernet0/0/3
+ src_mac_addr : 0000.0003.0000
+ dest_mac_addr : 0000.3000.0000
+ server :
+ name : GigabitEthernet0/0/4
+ src_mac_addr : 0000.0004.0000
+ dest_mac_addr : 0000.4000.0000
+ vrf_name : dup
+
+
+tftp:
+ hostname : tftp hostname
+ ip_address : tftp ip
+ root_dir : tftp root dir
+ images_path : path related to root dir
diff --git a/scripts/automation/regression/functional_tests/cpp_gtests_test.py b/scripts/automation/regression/functional_tests/cpp_gtests_test.py
new file mode 100644
index 00000000..6535da84
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/cpp_gtests_test.py
@@ -0,0 +1,46 @@
+import outer_packages
+from nose.plugins.attrib import attr
+import functional_general_test
+from trex import CTRexScenario
+import os, sys
+from subprocess import Popen, STDOUT
+import shlex
+import time
+import errno
+import tempfile
+
+# runs a command and returns a (returncode, output) tuple
+def run_command(command, timeout = 15, poll_rate = 0.1, cwd = None):
+ # pipes might get stuck, even with a timeout
+ with tempfile.TemporaryFile() as stdout_file:
+ proc = Popen(shlex.split(command), stdout = stdout_file, stderr = STDOUT, cwd = cwd, close_fds = True, universal_newlines = True)
+ if timeout > 0:
+ for i in range(int(timeout/poll_rate)):
+ time.sleep(poll_rate)
+ if proc.poll() is not None: # process stopped
+ break
+ if proc.poll() is None:
+ proc.kill() # timeout
+ stdout_file.seek(0)
+ return (errno.ETIME, '%s\n\n...Timeout of %s second(s) is reached!' % (stdout_file.read().decode(errors = 'replace'), timeout))
+ else:
+ proc.wait()
+ stdout_file.seek(0)
+ return (proc.returncode, stdout_file.read().decode(errors = 'replace'))
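+
+# Usage sketch (the command string is illustrative):
+#   ret, out = run_command('./bp-sim-64 --ut', timeout = 60)
+#   if ret:
+#       print('failed with code %s:\n%s' % (ret, out))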
+
+@attr('run_on_trex')
+class CPP_Test(functional_general_test.CGeneralFunctional_Test):
+ def test_gtests_all(self):
+ print('')
+ bp_sim = os.path.join(CTRexScenario.scripts_path, 'bp-sim-64')
+ ret, out = run_command('%s --ut' % bp_sim, cwd = CTRexScenario.scripts_path)
+ print('Output:\n%s' % out)
+ if ret:
+ raise Exception('Non zero return status of gtests (%s)' % ret)
+
+ def test_gtests_valgrind(self):
+ print('')
+ ret, out = run_command(os.path.join(CTRexScenario.scripts_path, 'run-gtest-clean'), cwd = CTRexScenario.scripts_path)
+ print('Output:\n%s' % out)
+ if ret:
+ raise Exception('Non zero return status of Valgrind gtests (%s)' % ret)
diff --git a/scripts/automation/regression/functional_tests/filters_test.py b/scripts/automation/regression/functional_tests/filters_test.py
new file mode 100644
index 00000000..fbb8a126
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/filters_test.py
@@ -0,0 +1,100 @@
+#!/router/bin/python
+
+import functional_general_test
+from trex_stl_lib.utils import filters
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import assert_raises
+from nose.tools import assert_true, assert_false
+from nose.tools import raises
+
+
+class ToggleFilter_Test(functional_general_test.CGeneralFunctional_Test):
+
+ def setUp(self):
+ self.list_db = [1, 2, 3, 4, 5]
+ self.set_db = {1, 2, 3, 4, 5}
+ self.tuple_db = (1, 2, 3, 4, 5)
+ self.dict_db = {str(x): x**2 for x in range(5)}
+
+ def test_init_with_dict(self):
+ toggle_filter = filters.ToggleFilter(self.dict_db)
+ assert_equal(toggle_filter._toggle_db, set(self.dict_db.keys()))
+ assert_equal(toggle_filter.filter_items(), self.dict_db)
+
+
+ def test_init_with_list(self):
+ toggle_filter = filters.ToggleFilter(self.list_db)
+ assert_equal(toggle_filter._toggle_db, set(self.list_db))
+ assert_equal(toggle_filter.filter_items(), self.list_db)
+
+ def test_init_with_set(self):
+ toggle_filter = filters.ToggleFilter(self.set_db)
+ assert_equal(toggle_filter._toggle_db, self.set_db)
+ assert_equal(toggle_filter.filter_items(), self.set_db)
+
+ def test_init_with_tuple(self):
+ toggle_filter = filters.ToggleFilter(self.tuple_db)
+ assert_equal(toggle_filter._toggle_db, set(self.tuple_db))
+ assert_equal(toggle_filter.filter_items(), self.tuple_db)
+
+ @raises(TypeError)
+ def test_init_with_non_iterable(self):
+ toggle_filter = filters.ToggleFilter(15)
+
+ def test_dict_toggling(self):
+ toggle_filter = filters.ToggleFilter(self.dict_db)
+ assert_false(toggle_filter.toggle_item("3"))
+ assert_equal(toggle_filter._toggle_db, {'0', '1', '2', '4'})
+ assert_true(toggle_filter.toggle_item("3"))
+ assert_equal(toggle_filter._toggle_db, {'0', '1', '2', '3', '4'})
+ assert_false(toggle_filter.toggle_item("2"))
+ assert_false(toggle_filter.toggle_item("4"))
+ self.dict_db.update({'5': 25, '6': 36})
+ assert_true(toggle_filter.toggle_item("6"))
+
+ assert_equal(toggle_filter.filter_items(), {'0': 0, '1': 1, '3': 9, '6': 36})
+
+ del self.dict_db['1']
+ assert_equal(toggle_filter.filter_items(), {'0': 0, '3': 9, '6': 36})
+
+ def test_dict_toggling_negative(self):
+ toggle_filter = filters.ToggleFilter(self.dict_db)
+ assert_raises(KeyError, toggle_filter.toggle_item, "100")
+
+ def test_list_toggling(self):
+ toggle_filter = filters.ToggleFilter(self.list_db)
+ assert_false(toggle_filter.toggle_item(3))
+ assert_equal(toggle_filter._toggle_db, {1, 2, 4, 5})
+ assert_true(toggle_filter.toggle_item(3))
+ assert_equal(toggle_filter._toggle_db, {1, 2, 3, 4, 5})
+ assert_false(toggle_filter.toggle_item(2))
+ assert_false(toggle_filter.toggle_item(4))
+ self.list_db.extend([6, 7])
+ assert_true(toggle_filter.toggle_item(6))
+
+ assert_equal(toggle_filter.filter_items(), [1, 3, 5, 6])
+
+ self.list_db.remove(1)
+ assert_equal(toggle_filter.filter_items(), [3, 5, 6])
+
+ def test_list_toggling_negative(self):
+ toggle_filter = filters.ToggleFilter(self.list_db)
+ assert_raises(KeyError, toggle_filter.toggle_item, 10)
+
+ def test_toggle_multiple_items(self):
+ toggle_filter = filters.ToggleFilter(self.list_db)
+ assert_false(toggle_filter.toggle_items(1, 3, 5))
+ assert_equal(toggle_filter._toggle_db, {2, 4})
+ assert_true(toggle_filter.toggle_items(1, 5))
+ assert_equal(toggle_filter._toggle_db, {1, 2, 4, 5})
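+        # (toggle_items() presumably reports the final visibility state of the
+        # last item toggled, mirroring toggle_item() above)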
+
+ def test_dont_show_after_init(self):
+ toggle_filter = filters.ToggleFilter(self.list_db, show_by_default = False)
+ assert_equal(toggle_filter._toggle_db, set())
+ assert_equal(toggle_filter.filter_items(), [])
+
+
+ def tearDown(self):
+ pass
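+
+# Usage sketch, inferred from the assertions above: ToggleFilter keeps a live
+# reference to the db, so later mutations of the db are reflected by
+# filter_items(). Hypothetical standalone example:
+#
+#   db = {'a': 1, 'b': 2}
+#   f = filters.ToggleFilter(db)   # everything shown by default
+#   f.toggle_item('a')             # hide 'a' -> returns False
+#   f.filter_items()               # {'b': 2}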
diff --git a/scripts/automation/regression/functional_tests/functional_general_test.py b/scripts/automation/regression/functional_tests/functional_general_test.py
new file mode 100755
index 00000000..525b58d2
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/functional_general_test.py
@@ -0,0 +1,22 @@
+#!/router/bin/python
+
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import assert_raises
+from nose.tools import raises
+
+
+class CGeneralFunctional_Test(object):
+ def __init__(self):
+ pass
+
+
+ def setUp(self):
+ pass
+
+
+ def tearDown(self):
+ pass
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/functional_tests/golden/basic_imix_golden.cap b/scripts/automation/regression/functional_tests/golden/basic_imix_golden.cap
new file mode 100644
index 00000000..6ca32299
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/golden/basic_imix_golden.cap
Binary files differ
diff --git a/scripts/automation/regression/functional_tests/golden/basic_imix_vm_golden.cap b/scripts/automation/regression/functional_tests/golden/basic_imix_vm_golden.cap
new file mode 100644
index 00000000..43ae2368
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/golden/basic_imix_vm_golden.cap
Binary files differ
diff --git a/scripts/automation/regression/functional_tests/golden/basic_tuple_gen_golden.cap b/scripts/automation/regression/functional_tests/golden/basic_tuple_gen_golden.cap
new file mode 100644
index 00000000..7d5e7ec2
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/golden/basic_tuple_gen_golden.cap
Binary files differ
diff --git a/scripts/automation/regression/functional_tests/golden/udp_590.cap b/scripts/automation/regression/functional_tests/golden/udp_590.cap
new file mode 100644
index 00000000..29302f22
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/golden/udp_590.cap
Binary files differ
diff --git a/scripts/automation/regression/functional_tests/hltapi_stream_builder_test.py b/scripts/automation/regression/functional_tests/hltapi_stream_builder_test.py
new file mode 100755
index 00000000..c6b477aa
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/hltapi_stream_builder_test.py
@@ -0,0 +1,629 @@
+#!/router/bin/python
+
+import os
+import unittest
+from trex_stl_lib.trex_stl_hltapi import STLHltStream
+from trex_stl_lib.trex_stl_types import validate_type
+from nose.plugins.attrib import attr
+from nose.tools import nottest
+
+def compare_yamls(yaml1, yaml2):
+ validate_type('yaml1', yaml1, str)
+ validate_type('yaml2', yaml2, str)
+    for i, (line1, line2) in enumerate(zip(yaml1.strip().split('\n'), yaml2.strip().split('\n')), start=1):
+        assert line1 == line2, 'yamls are not equal starting from line %s:\n%s\n Golden <-> Generated\n%s' % (i, line1.strip(), line2.strip())
+
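+# Note: zip() stops at the shorter document, so extra trailing lines in either
+# YAML are not flagged. A stricter variant would compare line counts first;
+# a minimal sketch (not used by the tests below):
+def compare_yamls_strict(yaml1, yaml2):
+    lines1 = yaml1.strip().split('\n')
+    lines2 = yaml2.strip().split('\n')
+    assert len(lines1) == len(lines2), 'yamls differ in line count: %s vs %s' % (len(lines1), len(lines2))
+    compare_yamls(yaml1, yaml2)
+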
+# TODO: move the tests to compare pcaps, not yamls
+@nottest
+class CTRexHltApi_Test(unittest.TestCase):
+ ''' Checks correct HLTAPI creation of packet/VM '''
+
+ def setUp(self):
+ self.golden_yaml = None
+ self.test_yaml = None
+
+ def tearDown(self):
+ compare_yamls(self.golden_yaml, self.test_yaml)
+
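+    # each test below builds a stream, dumps it to YAML into self.test_yaml and
+    # assigns the expected self.golden_yaml; tearDown() does the comparison
+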
+ # Eth/IP/TCP, all values default, no VM instructions + test MACs correction
+ def test_hlt_basic(self):
+ STLHltStream(mac_src = 'a0:00:01:::01', mac_dst = '0d 00 01 00 00 01',
+ mac_src2 = '{00 b0 01 00 00 01}', mac_dst2 = 'd0.00.01.00.00.01')
+ with self.assertRaises(Exception):
+ STLHltStream(mac_src2 = '00:00:00:00:00:0k')
+ with self.assertRaises(Exception):
+ STLHltStream(mac_dst2 = '100:00:00:00:00:00')
+ # wrong encap
+ with self.assertRaises(Exception):
+ STLHltStream(l2_encap = 'ethernet_sdfgsdfg')
+ # all default values
+ test_stream = STLHltStream()
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ percentage: 10.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABCABFAAAyAAAAAEAGusUAAAAAwAAAAQQAAFAAAAABAAAAAVAAD+U1/QAAISEhISEhISEhIQ==
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions: []
+ split_by_var: ''
+'''
+
+    # Eth/IP/TCP, test MAC fields VM; pending support for masking of MAC variables
+ @nottest
+ def test_macs_vm(self):
+ test_stream = STLHltStream(name = 'stream-0', )
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+TBD
+'''
+
+
+ # Eth/IP/TCP, ip src and dest is changed by VM
+    # Eth/IP/TCP, ip src and dst are changed by VM
+ # running on single core not implemented yet
+ with self.assertRaises(Exception):
+ test_stream = STLHltStream(split_by_cores = 'single',
+ ip_src_addr = '192.168.1.1',
+ ip_src_mode = 'increment',
+ ip_src_count = 5,)
+ # wrong type
+ with self.assertRaises(Exception):
+ test_stream = STLHltStream(split_by_cores = 12345,
+ ip_src_addr = '192.168.1.1',
+ ip_src_mode = 'increment',
+ ip_src_count = 5,)
+
+ test_stream = STLHltStream(split_by_cores = 'duplicate',
+ ip_src_addr = '192.168.1.1',
+ ip_src_mode = 'increment',
+ ip_src_count = 5,
+ ip_dst_addr = '5.5.5.5',
+ ip_dst_count = 2,
+ ip_dst_mode = 'random',
+ name = 'test_ip_ranges',
+ rate_pps = 1)
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_ip_ranges
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ pps: 1.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABCABFAAAyAAAAAEAGrxPAqAEBBQUFBQQAAFAAAAABAAAAAVAAD+UqSwAAISEhISEhISEhIQ==
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions:
+ - init_value: 0
+ max_value: 4
+ min_value: 0
+ name: inc_4_4_1
+ op: inc
+ size: 4
+ step: 1
+ type: flow_var
+ - add_value: 3232235777
+ is_big_endian: true
+ name: inc_4_4_1
+ pkt_offset: 26
+ type: write_flow_var
+ - init_value: 0
+ max_value: 4294967295
+ min_value: 0
+ name: ip_dst_random
+ op: random
+ size: 4
+ step: 1
+ type: flow_var
+ - add_value: 0
+ is_big_endian: true
+ name: ip_dst_random
+ pkt_offset: 30
+ type: write_flow_var
+ - pkt_offset: 14
+ type: fix_checksum_ipv4
+ split_by_var: ''
+'''
+
+ # Eth / IP / TCP, tcp ports are changed by VM
+ def test_tcp_ranges(self):
+ test_stream = STLHltStream(tcp_src_port_mode = 'decrement',
+ tcp_src_port_count = 10,
+ tcp_dst_port_mode = 'random',
+ tcp_dst_port_count = 10,
+ tcp_dst_port = 1234,
+ name = 'test_tcp_ranges',
+ rate_pps = '2')
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_tcp_ranges
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ pps: 2.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABCABFAAAyAAAAAEAGusUAAAAAwAAAAQQABNIAAAABAAAAAVAAD+UxewAAISEhISEhISEhIQ==
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions:
+ - init_value: 9
+ max_value: 9
+ min_value: 0
+ name: dec_2_9_1
+ op: dec
+ size: 2
+ step: 1
+ type: flow_var
+ - add_value: 1015
+ is_big_endian: true
+ name: dec_2_9_1
+ pkt_offset: 34
+ type: write_flow_var
+ - init_value: 0
+ max_value: 65535
+ min_value: 0
+ name: tcp_dst_random
+ op: random
+ size: 2
+ step: 1
+ type: flow_var
+ - add_value: 0
+ is_big_endian: true
+ name: tcp_dst_random
+ pkt_offset: 36
+ type: write_flow_var
+ - pkt_offset: 14
+ type: fix_checksum_ipv4
+ split_by_var: dec_2_9_1
+'''
+
+ # Eth / IP / UDP, udp ports are changed by VM
+ def test_udp_ranges(self):
+        # UDP is not set, so the wrong UDP arguments are expected to be ignored
+ STLHltStream(udp_src_port_mode = 'qwerqwer',
+ udp_src_port_count = 'weqwer',
+ udp_src_port = 'qwerqwer',
+ udp_dst_port_mode = 'qwerqwe',
+ udp_dst_port_count = 'sfgsdfg',
+ udp_dst_port = 'sdfgsdfg')
+        # UDP is set, so the wrong UDP arguments are expected to fail
+ with self.assertRaises(Exception):
+ STLHltStream(l4_protocol = 'udp',
+ udp_src_port_mode = 'qwerqwer',
+ udp_src_port_count = 'weqwer',
+ udp_src_port = 'qwerqwer',
+ udp_dst_port_mode = 'qwerqwe',
+ udp_dst_port_count = 'sfgsdfg',
+ udp_dst_port = 'sdfgsdfg')
+        # now generate the stream with correct arguments
+ test_stream = STLHltStream(l4_protocol = 'udp',
+ udp_src_port_mode = 'decrement',
+ udp_src_port_count = 10,
+ udp_src_port = 1234,
+ udp_dst_port_mode = 'increment',
+ udp_dst_port_count = 10,
+ udp_dst_port = 1234,
+ name = 'test_udp_ranges',
+ rate_percent = 20,)
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_udp_ranges
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ percentage: 20.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABCABFAAAyAAAAAEARuroAAAAAwAAAAQTSBNIAHsmgISEhISEhISEhISEhISEhISEhISEhIQ==
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions:
+ - init_value: 9
+ max_value: 9
+ min_value: 0
+ name: dec_2_9_1
+ op: dec
+ size: 2
+ step: 1
+ type: flow_var
+ - add_value: 1225
+ is_big_endian: true
+ name: dec_2_9_1
+ pkt_offset: 34
+ type: write_flow_var
+ - init_value: 0
+ max_value: 9
+ min_value: 0
+ name: inc_2_9_1
+ op: inc
+ size: 2
+ step: 1
+ type: flow_var
+ - add_value: 1234
+ is_big_endian: true
+ name: inc_2_9_1
+ pkt_offset: 36
+ type: write_flow_var
+ - pkt_offset: 14
+ type: fix_checksum_ipv4
+ split_by_var: dec_2_9_1
+'''
+
+ # Eth/IP/TCP, packet length is changed in VM by frame_size
+ def test_pkt_len_by_framesize(self):
+        # just check for errors, no comparison against the golden
+ STLHltStream(length_mode = 'increment',
+ frame_size_min = 100,
+ frame_size_max = 3000)
+ test_stream = STLHltStream(length_mode = 'decrement',
+ frame_size_min = 100,
+ frame_size_max = 3000,
+ name = 'test_pkt_len_by_framesize',
+ rate_bps = 1000)
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_pkt_len_by_framesize
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ bps_L2: 1000.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABCABFAAuqAAAAAEAGr00AAAAAwAAAAQQAAFAAAAABAAAAAVAAD+UwiwAAISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhI
SEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEh
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions:
+ - init_value: 3000
+ max_value: 3000
+ min_value: 100
+ name: pkt_len
+ op: dec
+ size: 2
+ step: 1
+ type: flow_var
+ - name: pkt_len
+ type: trim_pkt_size
+ - add_value: -14
+ is_big_endian: true
+ name: pkt_len
+ pkt_offset: 16
+ type: write_flow_var
+ - pkt_offset: 14
+ type: fix_checksum_ipv4
+ split_by_var: pkt_len
+'''
+
+ # Eth/IP/UDP, packet length is changed in VM by l3_length
+ def test_pkt_len_by_l3length(self):
+ test_stream = STLHltStream(l4_protocol = 'udp',
+ length_mode = 'random',
+ l3_length_min = 100,
+ l3_length_max = 400,
+ name = 'test_pkt_len_by_l3length')
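+        # the pkt_len flow var in the golden below ranges 114..414, i.e.
+        # l3_length_min/max plus 14 bytes of Ethernet header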
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_pkt_len_by_l3length
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ percentage: 10.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABCABFAAGQAAAAAEARuVwAAAAAwAAAAQQAAFABfCaTISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEh
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions:
+ - init_value: 114
+ max_value: 414
+ min_value: 114
+ name: pkt_len
+ op: random
+ size: 2
+ step: 1
+ type: flow_var
+ - name: pkt_len
+ type: trim_pkt_size
+ - add_value: -14
+ is_big_endian: true
+ name: pkt_len
+ pkt_offset: 16
+ type: write_flow_var
+ - add_value: -34
+ is_big_endian: true
+ name: pkt_len
+ pkt_offset: 38
+ type: write_flow_var
+ - pkt_offset: 14
+ type: fix_checksum_ipv4
+ split_by_var: ''
+'''
+
+ # Eth/IP/TCP, with vlan, no VM
+ def test_vlan_basic(self):
+ with self.assertRaises(Exception):
+ STLHltStream(l2_encap = 'ethernet_ii',
+ vlan_id = 'sdfgsdgf')
+ test_stream = STLHltStream(l2_encap = 'ethernet_ii')
+ assert ':802.1Q:' not in test_stream.get_pkt_type(), 'Default packet should not include dot1q'
+
+ test_stream = STLHltStream(name = 'test_vlan_basic', l2_encap = 'ethernet_ii_vlan')
+ assert ':802.1Q:' in test_stream.get_pkt_type(), 'No dot1q in packet with encap ethernet_ii_vlan'
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_vlan_basic
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ percentage: 10.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABgQAwAAgARQAALgAAAABABrrJAAAAAMAAAAEEAABQAAAAAQAAAAFQAA/leEMAACEhISEhIQ==
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions: []
+ split_by_var: ''
+'''
+
+ # Eth/IP/TCP, with 4 vlan
+ def test_vlan_multiple(self):
+        # the default frame size should not be enough
+ with self.assertRaises(Exception):
+ STLHltStream(vlan_id = [1, 2, 3, 4])
+ test_stream = STLHltStream(name = 'test_vlan_multiple', frame_size = 100,
+ vlan_id = [1, 2, 3, 4], # can be either array or string separated by spaces
+ vlan_protocol_tag_id = '8100 0x8100')
+ pkt_layers = test_stream.get_pkt_type()
+ assert '802.1Q:' * 4 in pkt_layers, 'No four dot1q layers in packet: %s' % pkt_layers
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_vlan_multiple
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ percentage: 10.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABgQAwAYEAMAKBADADgQAwBAgARQAARgAAAABABrqxAAAAAMAAAAEEAABQAAAAAQAAAAFQAA/l6p0AACEhISEhISEhISEhISEhISEhISEhISEhISEhISEhIQ==
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions: []
+ split_by_var: ''
+'''
+
+ # Eth/IP/TCP, with 5 vlans and VMs on vlan_id
+ def test_vlan_vm(self):
+ test_stream = STLHltStream(name = 'test_vlan_vm', frame_size = 100,
+ vlan_id = '1 2 1000 4 5', # 5 vlans
+                                   vlan_id_mode = 'increment fixed decrement random', # the 5th vlan defaults to fixed
+                                   vlan_id_step = 2, # step of the 1st vlan is 2, the others default to 1
+                                   vlan_id_count = [4, 1, 10], # the 4th (random) does not depend on count, the 5th stays fixed
+ )
+ pkt_layers = test_stream.get_pkt_type()
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ assert '802.1Q:' * 5 in pkt_layers, 'No five dot1q layers in packet: %s' % pkt_layers
+ self.golden_yaml = '''
+- name: test_vlan_vm
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ percentage: 10.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABgQAwAYEAMAKBADPogQAwBIEAMAUIAEUAAEIAAAAAQAa6tQAAAADAAAABBAAAUAAAAAEAAAABUAAP5SzkAAAhISEhISEhISEhISEhISEhISEhISEhISEhIQ==
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions:
+ - init_value: 0
+ max_value: 6
+ min_value: 0
+ name: dec_2_3_2
+ op: inc
+ size: 2
+ step: 2
+ type: flow_var
+ - add_value: 1
+ is_big_endian: true
+ mask: 4095
+ name: dec_2_3_2
+ pkt_cast_size: 2
+ pkt_offset: 14
+ shift: 0
+ type: write_mask_flow_var
+ - init_value: 9
+ max_value: 9
+ min_value: 0
+ name: dec_2_9_1
+ op: dec
+ size: 2
+ step: 1
+ type: flow_var
+ - add_value: 991
+ is_big_endian: true
+ mask: 4095
+ name: dec_2_9_1
+ pkt_cast_size: 2
+ pkt_offset: 22
+ shift: 0
+ type: write_mask_flow_var
+ - init_value: 0
+ max_value: 65535
+ min_value: 0
+ name: vlan_id_random
+ op: random
+ size: 2
+ step: 1
+ type: flow_var
+ - add_value: 0
+ is_big_endian: true
+ mask: 4095
+ name: vlan_id_random
+ pkt_cast_size: 2
+ pkt_offset: 26
+ shift: 0
+ type: write_mask_flow_var
+ split_by_var: dec_2_9_1
+'''
+
+
+ # Eth/IPv6/TCP, no VM
+ def test_ipv6_basic(self):
+        # the default frame size should not be enough
+ with self.assertRaises(Exception):
+ STLHltStream(l3_protocol = 'ipv6')
+        # a bad IPv6 address should be ignored while l3_protocol is not ipv6
+ STLHltStream(ipv6_src_addr = 'asdfasdfasgasdf')
+        # with l3_protocol set to ipv6, the bad address should raise
+ with self.assertRaises(Exception):
+ STLHltStream(l3_protocol = 'ipv6', ipv6_src_addr = 'asdfasdfasgasdf')
+ test_stream = STLHltStream(name = 'test_ipv6_basic', l3_protocol = 'ipv6', length_mode = 'fixed', l3_length = 150, )
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_ipv6_basic
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ percentage: 10.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABht1gAAAAAG4GQP6AAAAAAAAAAAAAAAAAABL+gAAAAAAAAAAAAAAAAAAiBAAAUAAAAAEAAAABUAAP5ctLAAAhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISE=
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions: []
+ split_by_var: ''
+'''
+
+ # Eth/IPv6/UDP, VM on ipv6 fields
+ def test_ipv6_src_dst_ranges(self):
+ test_stream = STLHltStream(name = 'test_ipv6_src_dst_ranges', l3_protocol = 'ipv6', l3_length = 150, l4_protocol = 'udp',
+ ipv6_src_addr = '1111:2222:3333:4444:5555:6666:7777:8888',
+ ipv6_dst_addr = '1111:1111:1111:1111:1111:1111:1111:1111',
+ ipv6_src_mode = 'increment', ipv6_src_step = 5, ipv6_src_count = 10,
+ ipv6_dst_mode = 'decrement', ipv6_dst_step = '1111:1111:1111:1111:1111:1111:0000:0011', ipv6_dst_count = 150,
+ )
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_ipv6_src_dst_ranges
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ percentage: 10.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABht1gAAAAAG4RQBERIiIzM0REVVVmZnd3iIgRERERERERERERERERERERBAAAUABucjohISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISE=
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions:
+ - init_value: 0
+ max_value: 45
+ min_value: 0
+ name: inc_4_9_5
+ op: inc
+ size: 4
+ step: 5
+ type: flow_var
+ - add_value: 2004322440
+ is_big_endian: true
+ name: inc_4_9_5
+ pkt_offset: 34
+ type: write_flow_var
+ - init_value: 2533
+ max_value: 2533
+ min_value: 0
+ name: dec_4_149_17
+ op: dec
+ size: 4
+ step: 17
+ type: flow_var
+ - add_value: 286328620
+ is_big_endian: true
+ name: dec_4_149_17
+ pkt_offset: 50
+ type: write_flow_var
+ split_by_var: dec_4_149_17
+'''
+
+
+
+
+
+ def yaml_save_location(self):
+        # return os.devnull
+        # for debug/development: keep the line above commented to save YAMLs to /tmp
+ return '/tmp/%s.yaml' % self._testMethodName
+
+
diff --git a/scripts/automation/regression/functional_tests/misc_methods_test.py b/scripts/automation/regression/functional_tests/misc_methods_test.py
new file mode 100755
index 00000000..096f86d8
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/misc_methods_test.py
@@ -0,0 +1,61 @@
+#!/router/bin/python
+
+import functional_general_test
+import misc_methods
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import assert_raises
+from nose.tools import raises
+
+
+class MiscMethods_Test(functional_general_test.CGeneralFunctional_Test):
+
+ def setUp(self):
+ self.ipv4_gen = misc_methods.get_network_addr()
+ self.ipv6_gen = misc_methods.get_network_addr(ip_type = 'ipv6')
+
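+    # get_network_addr() is presumably a bounded generator: for IPv4 it yields
+    # [network_address, netmask] pairs and for IPv6 plain address strings; the
+    # StopIteration tests below rely on it being finite
+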
+ def test_ipv4_gen(self):
+ for i in range(1, 255):
+ assert_equal( next(self.ipv4_gen), [".".join( map(str, [1, 1, i, 0])), '255.255.255.0'] )
+
+ def test_ipv6_gen(self):
+ tmp_ipv6_addr = ['2001', 'DB8', 0, '2222', 0, 0, 0, 0]
+ for i in range(0, 255):
+ tmp_ipv6_addr[2] = hex(i)[2:]
+ assert_equal( next(self.ipv6_gen), ":".join( map(str, tmp_ipv6_addr)) )
+
+ def test_get_ipv4_client_addr(self):
+ tmp_ipv4_addr = next(self.ipv4_gen)[0]
+ assert_equal ( misc_methods.get_single_net_client_addr(tmp_ipv4_addr), '1.1.1.1')
+ assert_raises (ValueError, misc_methods.get_single_net_client_addr, tmp_ipv4_addr, {'3' : 255} )
+
+ def test_get_ipv6_client_addr(self):
+ tmp_ipv6_addr = next(self.ipv6_gen)
+ assert_equal ( misc_methods.get_single_net_client_addr(tmp_ipv6_addr, {'7' : 1}, ip_type = 'ipv6'), '2001:DB8:0:2222:0:0:0:1')
+ assert_equal ( misc_methods.get_single_net_client_addr(tmp_ipv6_addr, {'7' : 2}, ip_type = 'ipv6'), '2001:DB8:0:2222:0:0:0:2')
+ assert_raises (ValueError, misc_methods.get_single_net_client_addr, tmp_ipv6_addr, {'7' : 70000} )
+
+
+ @raises(ValueError)
+ def test_ipv4_client_addr_exception(self):
+ tmp_ipv4_addr = next(self.ipv4_gen)[0]
+ misc_methods.get_single_net_client_addr(tmp_ipv4_addr, {'4' : 1})
+
+ @raises(ValueError)
+ def test_ipv6_client_addr_exception(self):
+ tmp_ipv6_addr = next(self.ipv6_gen)
+ misc_methods.get_single_net_client_addr(tmp_ipv6_addr, {'8' : 1}, ip_type = 'ipv6')
+
+ @raises(StopIteration)
+ def test_gen_ipv4_to_limit (self):
+ while(True):
+ next(self.ipv4_gen)
+
+ @raises(StopIteration)
+ def test_gen_ipv6_to_limit (self):
+ while(True):
+ next(self.ipv6_gen)
+
+ def tearDown(self):
+ pass
diff --git a/scripts/automation/regression/functional_tests/pkt_bld_general_test.py b/scripts/automation/regression/functional_tests/pkt_bld_general_test.py
new file mode 100755
index 00000000..9a1b708a
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/pkt_bld_general_test.py
@@ -0,0 +1,28 @@
+#!/router/bin/python
+
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import assert_raises
+from nose.tools import raises
+import sys
+import outer_packages
+
+
+class CGeneralPktBld_Test(object):
+ def __init__(self):
+ pass
+
+ @staticmethod
+ def print_packet(pkt_obj):
+ print("\nGenerated packet:\n{}".format(repr(pkt_obj)))
+
+
+ def setUp(self):
+ pass
+
+
+ def tearDown(self):
+ pass
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/functional_tests/platform_cmd_cache_test.py b/scripts/automation/regression/functional_tests/platform_cmd_cache_test.py
new file mode 100755
index 00000000..0be21280
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/platform_cmd_cache_test.py
@@ -0,0 +1,60 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
+class CCommandCache_Test(functional_general_test.CGeneralFunctional_Test):
+
+ def setUp(self):
+ self.cache = CCommandCache()
+ self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/1')
+ self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/2')
+ self.cache.add('conf', "arp 1.1.1.1 0000.0001.0000 arpa")
+ self.cache.add('conf', "arp 1.1.2.1 0000.0002.0000 arpa")
+ self.cache.add('exec', "show ip nbar protocol-discovery stats packet-count")
+
+ def test_add(self):
+ assert_equal(self.cache.cache['IF'],
+ {'GigabitEthernet0/0/1' : ['ip nbar protocol-discovery'],
+ 'GigabitEthernet0/0/2' : ['ip nbar protocol-discovery']
+ })
+ assert_equal(self.cache.cache['CONF'],
+ ["arp 1.1.1.1 0000.0001.0000 arpa",
+ "arp 1.1.2.1 0000.0002.0000 arpa"]
+ )
+ assert_equal(self.cache.cache['EXEC'],
+ ["show ip nbar protocol-discovery stats packet-count"])
+
+ def test_dump_config (self):
+ import sys
+ from io import StringIO, BytesIO
+ saved_stdout = sys.stdout
+ try:
+ out = BytesIO() if sys.version_info < (3,0) else StringIO()
+ sys.stdout = out
+ self.cache.dump_config()
+ output = out.getvalue().strip()
+ assert_equal(output,
+ "configure terminal\ninterface GigabitEthernet0/0/1\nip nbar protocol-discovery\ninterface GigabitEthernet0/0/2\nip nbar protocol-discovery\nexit\narp 1.1.1.1 0000.0001.0000 arpa\narp 1.1.2.1 0000.0002.0000 arpa\nexit\nshow ip nbar protocol-discovery stats packet-count"
+ )
+ finally:
+ sys.stdout = saved_stdout
+
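+    # on Python 3.4+ the same capture could use contextlib.redirect_stdout;
+    # the try/finally above is kept for Python 2 compatibility, e.g.:
+    #   from contextlib import redirect_stdout
+    #   out = StringIO()
+    #   with redirect_stdout(out):
+    #       self.cache.dump_config()
+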
+ def test_get_config_list (self):
+ assert_equal(self.cache.get_config_list(),
+ ["configure terminal", "interface GigabitEthernet0/0/1", "ip nbar protocol-discovery", "interface GigabitEthernet0/0/2", "ip nbar protocol-discovery", "exit", "arp 1.1.1.1 0000.0001.0000 arpa", "arp 1.1.2.1 0000.0002.0000 arpa", "exit", "show ip nbar protocol-discovery stats packet-count"]
+ )
+
+ def test_clear_cache (self):
+ self.cache.clear_cache()
+ assert_equal(self.cache.cache,
+ {"IF" : {},
+ "CONF" : [],
+ "EXEC" : []}
+ )
+
+ def tearDown(self):
+ self.cache.clear_cache()
diff --git a/scripts/automation/regression/functional_tests/platform_cmd_link_test.py b/scripts/automation/regression/functional_tests/platform_cmd_link_test.py
new file mode 100755
index 00000000..7a31815b
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/platform_cmd_link_test.py
@@ -0,0 +1,62 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
+class CCommandLink_Test(functional_general_test.CGeneralFunctional_Test):
+
+ def setUp(self):
+ self.cache = CCommandCache()
+ self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/1')
+ self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/2')
+ self.cache.add('conf', "arp 1.1.1.1 0000.0001.0000 arpa")
+ self.cache.add('conf', "arp 1.1.2.1 0000.0002.0000 arpa")
+ self.cache.add('exec', "show ip nbar protocol-discovery stats packet-count")
+ self.com_link = CCommandLink()
+
+ def test_transmit(self):
+        # placeholder for a future implementation of the platform physical link
+ pass
+
+ def test_run_cached_command (self):
+ self.com_link.run_command([self.cache])
+
+ assert_equal (self.com_link.get_history(),
+ ["configure terminal", "interface GigabitEthernet0/0/1", "ip nbar protocol-discovery", "interface GigabitEthernet0/0/2", "ip nbar protocol-discovery", "exit", "arp 1.1.1.1 0000.0001.0000 arpa", "arp 1.1.2.1 0000.0002.0000 arpa", "exit", "show ip nbar protocol-discovery stats packet-count"]
+ )
+
+ self.com_link.clear_history()
+ self.com_link.run_single_command(self.cache)
+ assert_equal (self.com_link.get_history(),
+ ["configure terminal", "interface GigabitEthernet0/0/1", "ip nbar protocol-discovery", "interface GigabitEthernet0/0/2", "ip nbar protocol-discovery", "exit", "arp 1.1.1.1 0000.0001.0000 arpa", "arp 1.1.2.1 0000.0002.0000 arpa", "exit", "show ip nbar protocol-discovery stats packet-count"]
+ )
+
+ def test_run_single_command(self):
+ self.com_link.run_single_command("show ip nbar protocol-discovery stats packet-count")
+ assert_equal (self.com_link.get_history(),
+ ["show ip nbar protocol-discovery stats packet-count"]
+ )
+
+ def test_run_mixed_commands (self):
+ self.com_link.run_single_command("show ip nbar protocol-discovery stats packet-count")
+ self.com_link.run_command([self.cache])
+ self.com_link.run_command(["show ip interface brief"])
+
+ assert_equal (self.com_link.get_history(),
+ ["show ip nbar protocol-discovery stats packet-count",
+ "configure terminal", "interface GigabitEthernet0/0/1", "ip nbar protocol-discovery", "interface GigabitEthernet0/0/2", "ip nbar protocol-discovery", "exit", "arp 1.1.1.1 0000.0001.0000 arpa", "arp 1.1.2.1 0000.0002.0000 arpa", "exit", "show ip nbar protocol-discovery stats packet-count",
+ "show ip interface brief"]
+ )
+
+ def test_clear_history (self):
+ self.com_link.run_command(["show ip interface brief"])
+ self.com_link.clear_history()
+ assert_equal (self.com_link.get_history(), [])
+
+ def tearDown(self):
+ self.cache.clear_cache()
+
+
diff --git a/scripts/automation/regression/functional_tests/platform_device_cfg_test.py b/scripts/automation/regression/functional_tests/platform_device_cfg_test.py
new file mode 100755
index 00000000..c60635fe
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/platform_device_cfg_test.py
@@ -0,0 +1,20 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
+class CDeviceCfg_Test(functional_general_test.CGeneralFunctional_Test):
+
+ def setUp(self):
+ self.dev_cfg = CDeviceCfg('./functional_tests/config.yaml')
+
+ def test_get_interfaces_cfg(self):
+ assert_equal (self.dev_cfg.get_interfaces_cfg(),
+ [{'client': {'src_mac_addr': '0000.0001.0000', 'name': 'GigabitEthernet0/0/1', 'dest_mac_addr': '0000.1000.0000'}, 'vrf_name': None, 'server': {'src_mac_addr': '0000.0002.0000', 'name': 'GigabitEthernet0/0/2', 'dest_mac_addr': '0000.2000.0000'}}, {'client': {'src_mac_addr': '0000.0003.0000', 'name': 'GigabitEthernet0/0/3', 'dest_mac_addr': '0000.3000.0000'}, 'vrf_name': 'dup', 'server': {'src_mac_addr': '0000.0004.0000', 'name': 'GigabitEthernet0/0/4', 'dest_mac_addr': '0000.4000.0000'}}]
+ )
+
+ def tearDown(self):
+ pass
diff --git a/scripts/automation/regression/functional_tests/platform_dual_if_obj_test.py b/scripts/automation/regression/functional_tests/platform_dual_if_obj_test.py
new file mode 100755
index 00000000..a97a3305
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/platform_dual_if_obj_test.py
@@ -0,0 +1,31 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
+class CDualIfObj_Test(functional_general_test.CGeneralFunctional_Test):
+
+ def setUp(self):
+ self.if_1 = CIfObj('gig0/0/1', '1.1.1.1', '2001:DB8:0:2222:0:0:0:1', '0000.0001.0000', '0000.0001.0000', 0, IFType.Client)
+ self.if_2 = CIfObj('gig0/0/2', '1.1.2.1', '2001:DB8:1:2222:0:0:0:1', '0000.0002.0000', '0000.0002.0000', 0, IFType.Server)
+ self.if_3 = CIfObj('gig0/0/3', '1.1.3.1', '2001:DB8:2:2222:0:0:0:1', '0000.0003.0000', '0000.0003.0000', 0, IFType.Client)
+ self.if_4 = CIfObj('gig0/0/4', '1.1.4.1', '2001:DB8:3:2222:0:0:0:1', '0000.0004.0000', '0000.0004.0000', 0, IFType.Server)
+ self.dual_1 = CDualIfObj(None, self.if_1, self.if_2)
+ self.dual_2 = CDualIfObj('dup', self.if_3, self.if_4)
+
+ def test_id_allocation(self):
+ assert (self.dual_1.get_id() < self.dual_2.get_id() < CDualIfObj._obj_id)
+
+ def test_get_vrf_name (self):
+ assert_equal ( self.dual_1.get_vrf_name() , None )
+ assert_equal ( self.dual_2.get_vrf_name() , 'dup' )
+
+ def test_is_duplicated (self):
+ assert_equal ( self.dual_1.is_duplicated() , False )
+ assert_equal ( self.dual_2.is_duplicated() , True )
+
+ def tearDown(self):
+ pass
diff --git a/scripts/automation/regression/functional_tests/platform_if_manager_test.py b/scripts/automation/regression/functional_tests/platform_if_manager_test.py
new file mode 100755
index 00000000..72015f55
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/platform_if_manager_test.py
@@ -0,0 +1,40 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
+class CIfManager_Test(functional_general_test.CGeneralFunctional_Test):
+
+ def setUp(self):
+ self.dev_cfg = CDeviceCfg('./functional_tests/config.yaml')
+ self.if_mng = CIfManager()
+
+ # main testing method to check the entire class
+ def test_load_config (self):
+ self.if_mng.load_config(self.dev_cfg)
+
+        # check the number of items in each query
+ assert_equal( len(self.if_mng.get_if_list()), 4 )
+ assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Client)), 2 )
+ assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Client, is_duplicated = True)), 1 )
+ assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Client, is_duplicated = False)), 1 )
+ assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Server)), 2 )
+ assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Server, is_duplicated = True)), 1 )
+ assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Server, is_duplicated = False)), 1 )
+ assert_equal( len(self.if_mng.get_duplicated_if()), 2 )
+ assert_equal( len(self.if_mng.get_dual_if_list()), 2 )
+
+        # check the classification by interface name
+ assert_equal( list(map(CIfObj.get_name, self.if_mng.get_if_list()) ), ['GigabitEthernet0/0/1','GigabitEthernet0/0/2','GigabitEthernet0/0/3','GigabitEthernet0/0/4'] )
+ assert_equal( list(map(CIfObj.get_name, self.if_mng.get_if_list(is_duplicated = True)) ), ['GigabitEthernet0/0/3','GigabitEthernet0/0/4'] )
+ assert_equal( list(map(CIfObj.get_name, self.if_mng.get_if_list(is_duplicated = False)) ), ['GigabitEthernet0/0/1','GigabitEthernet0/0/2'] )
+ assert_equal( list(map(CIfObj.get_name, self.if_mng.get_duplicated_if()) ), ['GigabitEthernet0/0/3', 'GigabitEthernet0/0/4'] )
+
+        # check the classification by VRF name
+ assert_equal( list(map(CDualIfObj.get_vrf_name, self.if_mng.get_dual_if_list()) ), [None, 'dup'] )
+
+ def tearDown(self):
+ pass
diff --git a/scripts/automation/regression/functional_tests/platform_if_obj_test.py b/scripts/automation/regression/functional_tests/platform_if_obj_test.py
new file mode 100755
index 00000000..2412d3cc
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/platform_if_obj_test.py
@@ -0,0 +1,49 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
+class CIfObj_Test(functional_general_test.CGeneralFunctional_Test):
+ test_idx = 1
+
+ def setUp(self):
+ self.if_1 = CIfObj('gig0/0/1', '1.1.1.1', '2001:DB8:0:2222:0:0:0:1', '0000.0001.0000', '0000.0001.0000', 0, IFType.Client)
+ self.if_2 = CIfObj('TenGig0/0/0', '1.1.2.1', '2001:DB8:1:2222:0:0:0:1', '0000.0002.0000', '0000.0002.0000', 0, IFType.Server)
+ CIfObj_Test.test_idx += 1
+
+ def test_id_allocation(self):
+ assert (self.if_1.get_id() < self.if_2.get_id() < CIfObj._obj_id)
+
+ def test_isClient(self):
+ assert_equal (self.if_1.is_client(), True)
+
+ def test_isServer(self):
+ assert_equal (self.if_2.is_server(), True)
+
+ def test_get_name (self):
+ assert_equal (self.if_1.get_name(), 'gig0/0/1')
+ assert_equal (self.if_2.get_name(), 'TenGig0/0/0')
+
+ def test_get_src_mac_addr (self):
+ assert_equal (self.if_1.get_src_mac_addr(), '0000.0001.0000')
+
+ def test_get_dest_mac (self):
+ assert_equal (self.if_2.get_dest_mac(), '0000.0002.0000')
+
+ def test_get_ipv4_addr (self):
+ assert_equal (self.if_1.get_ipv4_addr(), '1.1.1.1' )
+ assert_equal (self.if_2.get_ipv4_addr(), '1.1.2.1' )
+
+ def test_get_ipv6_addr (self):
+ assert_equal (self.if_1.get_ipv6_addr(), '2001:DB8:0:2222:0:0:0:1' )
+ assert_equal (self.if_2.get_ipv6_addr(), '2001:DB8:1:2222:0:0:0:1' )
+
+ def test_get_type (self):
+ assert_equal (self.if_1.get_if_type(), IFType.Client)
+ assert_equal (self.if_2.get_if_type(), IFType.Server)
+
+ def tearDown(self):
+ pass
diff --git a/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py b/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py
new file mode 100644
index 00000000..5d34e5df
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py
@@ -0,0 +1,369 @@
+#!/router/bin/python
+
+import pkt_bld_general_test
+
+#HACK FIX ME START
+import sys
+import os
+
+CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CURRENT_PATH, '../../../trex_control_plane/stl/'))
+#HACK FIX ME END
+from trex_stl_lib.trex_stl_packet_builder_scapy import *
+
+from scapy.all import *
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import assert_raises
+from nose.tools import raises
+import random
+import pprint
+
+class CTRexPktBuilderSanitySCapy_Test(pkt_bld_general_test.CGeneralPktBld_Test):
+
+ def setUp(self):
+ pass
+
+ def test_simple_vm1(self):
+ raw1 = STLScVmRaw( [ STLVmFlowVar(name="a",min_value="16.0.0.1",max_value="16.0.0.10",init_value="16.0.0.1",size=4,op="inc"),
+ STLVmWrFlowVar(fv_name="a",pkt_offset= "IP.src"),
+ STLVmFixIpv4(offset = "IP")]
+ );
+
+ pkt_builder = STLPktBuilder();
+
+ py='5'*128
+ pkt=Ether()/ \
+ IP(src="16.0.0.1",dst="48.0.0.1")/ \
+ UDP(dport=12,sport=1025)/IP()/py
+
+ # set packet
+ pkt_builder.set_packet(pkt);
+ pkt_builder.add_command ( raw1 )
+ pkt_builder.compile();
+
+ pkt_builder.dump_scripts ()
+
+ print(pkt_builder.get_vm_data())
+
+ assert_equal( pkt_builder.get_vm_data(), {'split_by_var': '', 'instructions': [{'name': 'a', 'max_value': 268435466, 'min_value': 268435457, 'init_value': 268435457, 'size': 4, 'type': 'flow_var', 'step':1,'op': 'inc'}, {'is_big_endian': True, 'pkt_offset': 26, 'type': 'write_flow_var', 'name': 'a', 'add_value': 0}, {'pkt_offset': 14, 'type': 'fix_checksum_ipv4'}]} )
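+        # note: 268435457 == 0x10000001, i.e. '16.0.0.1' packed as a 32-bit
+        # big-endian integer, and 268435466 is '16.0.0.10'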
+
+
+
+ def test_simple_no_vm1(self):
+
+ pkt_builder = STLPktBuilder();
+
+ py='5'*128
+ pkt=Ether()/ \
+ IP(src="16.0.0.1",dst="48.0.0.1")/ \
+ UDP(dport=12,sport=1025)/IP()/py
+
+ # set packet
+ pkt_builder.set_packet(pkt);
+
+ pkt_builder.compile();
+
+ pkt_builder.dump_scripts ()
+
+ assert_equal( pkt_builder.get_vm_data(),
+ { 'instructions': [ ],
+ 'split_by_var': ''}
+ )
+
+
+ def test_simple_mac_default(self):
+
+ pkt = Ether()/IP()/UDP()
+
+
+ pkt_builder = STLPktBuilder(pkt = pkt);
+
+ assert_equal( pkt_builder.is_default_src_mac () ,True)
+ assert_equal( pkt_builder.is_default_dst_mac () ,True)
+
+ pkt = Ether(src="00:00:00:00:00:01")/IP()/UDP()
+
+ pkt_builder = STLPktBuilder(pkt = pkt);
+
+ assert_equal( pkt_builder.is_default_src_mac (), False)
+ assert_equal( pkt_builder.is_default_dst_mac (), True)
+
+ pkt = Ether(dst="00:00:00:00:00:01")/IP()/UDP()
+
+ pkt_builder = STLPktBuilder(pkt = pkt);
+
+ assert_equal( pkt_builder.is_default_src_mac (),True)
+ assert_equal( pkt_builder.is_default_dst_mac (),False)
+
+
+
+
+ def test_simple_teredo(self):
+
+ pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=3797,sport=3544)/IPv6(src="2001:0:4137:9350:8000:f12a:b9c8:2815",dst="2001:4860:0:2001::68")/UDP(dport=12,sport=1025)/ICMPv6Unknown()
+
+ pkt.build();
+ p_utl=CTRexScapyPktUtl(pkt);
+
+ assert_equal( p_utl.get_field_offet_by_str("IPv6.src"), (50,16) )
+ assert_equal( p_utl.get_field_offet_by_str("IPv6.dst"), (66,16) )
+
+
+
+
+ def test_simple_scapy_vlan(self):
+
+ py='5'*(9)
+ p1=Ether(src="00:00:00:01:00:00",dst="00:00:00:01:00:00")/ \
+ Dot1Q(vlan=12)/ \
+ Dot1Q(vlan=17)/ \
+ IP(src="10.0.0.10",dst="48.0.0.1")/ \
+ UDP(dport=12,sport=1025)/py
+
+ p1.build();
+ p1.dump_layers_offset()
+ p1.show2();
+ hexdump(p1);
+ #wrpcap("ipv4_udp_9k.pcap", p1);
+
+ p_utl=CTRexScapyPktUtl(p1);
+
+ assert_equal(p_utl.get_pkt_layers(),"Ethernet:802.1Q:802.1Q:IP:UDP:Raw")
+ assert_equal(p_utl.layer_offset("802.1Q",0),14);
+ assert_equal(p_utl.layer_offset("802.1Q",1),18);
+ assert_equal(p_utl.get_field_offet_by_str("802|1Q.vlan"),(14,0));
+ assert_equal(p_utl.get_field_offet_by_str("802|1Q:1.vlan"),(18,0));
+ assert_equal(p_utl.get_field_offet_by_str("IP.src"),(34,4));
+
+ def test_simple_scapy_128_udp(self):
+ """
+ build 128 byte packet with 0x35 as pyld
+ """
+
+
+ pkt_size =128
+ p1=Ether(src="00:00:00:01:00:00",dst="00:00:00:01:00:00")/ \
+ IP(src="16.0.0.1",dst="48.0.0.1")/ \
+ UDP(dport=12,sport=1025)
+ pyld_size=pkt_size-len(p1);
+
+ pkt=p1/('5'*(pyld_size))
+
+ pkt.show2();
+ hexdump(pkt);
+ assert_equal(len(pkt),128)
+
+ def test_simple_scapy_9k_ip_len(self):
+ """
+ build 9k ipv4 len packet
+ """
+
+
+ ip_pkt_size =9*1024
+ p_l2=Ether(src="00:00:00:01:00:00",dst="00:00:00:01:00:00");
+ p_l3= IP(src="16.0.0.1",dst="48.0.0.1")/ \
+ UDP(dport=12,sport=1025)
+ pyld_size = ip_pkt_size-len(p_l3);
+
+ pkt=p_l2/p_l3/('\x55'*(pyld_size))
+
+ #pkt.show2();
+ #hexdump(pkt);
+ assert_equal(len(pkt),9*1024+14)
+
+ def test_simple_scapy_ipv6_1(self):
+ """
+ build ipv6 packet
+ """
+
+ print("start ")
+ py='\x55'*(64)
+
+ p=Ether()/IPv6()/UDP(dport=12,sport=1025)/py
+ #p.build();
+ #p.dump_layers_offset()
+ hexdump(p);
+ p.show2();
+
+ p_utl=CTRexScapyPktUtl(p);
+
+ assert_equal(p_utl.get_field_offet_by_str("IPv6.src"),(22,16));
+
+
+ def test_simple_vm2(self):
+ raw1 = STLScVmRaw( [ STLVmFlowVar(name="my_valn",min_value=0,max_value=10,init_value=2,size=1,op="inc"),
+                             STLVmWrFlowVar (fv_name="my_valn",pkt_offset= "802|1Q.vlan" ,offset_fixup=3) # fix the offset, as vlan is a bitfield and not supported right now
+ ]
+ );
+
+ pkt_builder = STLPktBuilder();
+
+ py='5'*128
+ pkt=Ether()/ \
+ Dot1Q(vlan=12)/ \
+ IP(src="16.0.0.1",dst="48.0.0.1")/ \
+ UDP(dport=12,sport=1025)/IP()/py
+
+ # set packet
+ pkt_builder.set_packet(pkt);
+ pkt_builder.add_command ( raw1 )
+ pkt_builder.compile();
+
+
+ d= pkt_builder.get_vm_data()
+ assert_equal(d['instructions'][1]['pkt_offset'],17)
+
+ def test_simple_vm3(self):
+ try:
+ raw1 = STLScVmRaw( [ STLVmFlowVar(name="my_valn",min_value=0,max_value=10,init_value=2,size=1,op="inc"),
+                                 STLVmWrFlowVar(fv_name="my_valn_err",pkt_offset= "802|1Q.vlan" ,offset_fixup=3) # fix the offset, as vlan is a bitfield and not supported right now
+ ]
+ );
+
+ pkt_builder = STLPktBuilder();
+
+ py='5'*128
+ pkt=Ether()/ \
+ Dot1Q(vlan=12)/ \
+ IP(src="16.0.0.1",dst="48.0.0.1")/ \
+ UDP(dport=12,sport=1025)/IP()/py
+
+ # set packet
+ pkt_builder.set_packet(pkt);
+ pkt_builder.add_command ( raw1 )
+ pkt_builder.compile();
+
+
+ d= pkt_builder.get_vm_data()
+ except CTRexPacketBuildException as e:
+ error=str(e)
+ assert_equal(error.find("[errcode:-11]"),0);
+
+ def test_simple_tuple_gen(self):
+ vm = STLScVmRaw( [ STLVmTupleGen(name="tuple"), # define tuple gen
+ STLVmWrFlowVar(fv_name="tuple.ip", pkt_offset= "IP.src" ), # write ip to packet IP.src
+ STLVmFixIpv4(offset = "IP"), # fix checksum
+ STLVmWrFlowVar (fv_name="tuple.port", pkt_offset= "UDP.sport" ) #write udp.port
+ ]
+ );
+ pkt_builder = STLPktBuilder();
+
+ py='5'*128
+ pkt=Ether()/ \
+ Dot1Q(vlan=12)/ \
+ IP(src="16.0.0.1",dst="48.0.0.1")/ \
+ UDP(dport=12,sport=1025)/IP()/py
+
+ # set packet
+ pkt_builder.set_packet(pkt);
+ pkt_builder.add_command ( vm )
+ pkt_builder.compile();
+ d= pkt_builder.get_vm_data()
+ pkt_builder.dump_vm_data_as_yaml()
+
+ assert_equal(d['instructions'][1]['pkt_offset'],30)
+ assert_equal(d['instructions'][3]['pkt_offset'],38)
+
+ def test_simple_random_pkt_size(self):
+
+ ip_pkt_size = 9*1024
+ p_l2 = Ether();
+ p_l3 = IP(src="16.0.0.1",dst="48.0.0.1")
+ p_l4 = UDP(dport=12,sport=1025)
+ pyld_size = ip_pkt_size-len(p_l3/p_l4);
+
+ pkt =p_l2/p_l3/p_l4/('\x55'*(pyld_size))
+
+ l3_len_fix =-(len(p_l2));
+ l4_len_fix =-(len(p_l2/p_l3));
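+        # l3_len_fix is -14 (strip the Ethernet header from IP.len) and
+        # l4_len_fix is -34 (strip the Ethernet + IP headers from UDP.len)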
+
+ vm = STLScVmRaw( [ STLVmFlowVar(name="fv_rand", min_value=64, max_value=len(pkt), size=2, op="random"),
+ STLVmTrimPktSize("fv_rand"), # total packet size
+ STLVmWrFlowVar(fv_name="fv_rand", pkt_offset= "IP.len", add_val=l3_len_fix),
+ STLVmFixIpv4(offset = "IP"), # fix checksum
+ STLVmWrFlowVar(fv_name="fv_rand", pkt_offset= "UDP.len", add_val=l4_len_fix)
+ ]
+ )
+ pkt_builder = STLPktBuilder();
+
+ # set packet
+ pkt_builder.set_packet(pkt);
+ pkt_builder.add_command ( vm )
+ pkt_builder.compile();
+ d= pkt_builder.get_vm_data()
+ pkt_builder.dump_vm_data_as_yaml()
+
+ assert_equal(d['instructions'][0]['max_value'],9230)
+ assert_equal(d['instructions'][2]['pkt_offset'],16)
+ assert_equal(d['instructions'][4]['pkt_offset'],38)
+
+ def test_simple_pkt_loader(self):
+ p=RawPcapReader("functional_tests/golden/basic_imix_golden.cap")
+ print("")
+ for pkt in p:
+ print(pkt[1])
+ print(hexdump(str(pkt[0])))
+ break;
+
+ def test_simple_pkt_loader1(self):
+
+ pkt_builder = STLPktBuilder(pkt = "functional_tests/golden/udp_590.cap", build_raw = False);
+ print("")
+ pkt_builder.dump_as_hex()
+ r = pkt_builder.pkt_raw
+ assert_equal(safe_ord(r[1]),0x50)
+ assert_equal(safe_ord(r[0]),0x00)
+ assert_equal(safe_ord(r[0x240]),0x16)
+ assert_equal(safe_ord(r[0x24d]),0x79)
+ assert_equal(len(r),590)
+
+ print(len(r))
+
+ def test_simple_pkt_loader2(self):
+
+ pkt_builder = STLPktBuilder(pkt = "functional_tests/golden/basic_imix_golden.cap");
+ assert_equal(pkt_builder.pkt_layers_desc (), "Ethernet:IP:UDP:Raw");
+
+ def test_simple_pkt_loader3(self):
+
+ #pkt_builder = STLPktBuilder(pkt = "stl/golden/basic_imix_golden.cap");
+ #r = pkt_builder.pkt_raw
+ #print ""
+ #hexdump(str(r))
+
+
+ #print pkt_builder.pkt_layers_desc ()
+
+
+ #pkt_builder.set_packet(pkt);
+
+ py='\x55'*(64)
+
+ p=Ether()/IP()/UDP(dport=12,sport=1025)/py
+ pkt_str = bytes(p);
+ print("")
+ hexdump(pkt_str);
+ scapy_pkt = Ether(pkt_str);
+ scapy_pkt.show2();
+
+ def tearDown(self):
+ pass
+
+
+class CTRexPktBuilderScapy_Test(pkt_bld_general_test.CGeneralPktBld_Test):
+
+ def setUp(self):
+ pass;
+ #self.pkt_bld = CTRexPktBuilder()
+ #self.pkt_bld.add_pkt_layer("l2", dpkt.ethernet.Ethernet())
+ #self.pp = pprint.PrettyPrinter(indent=4)
+
+ def tearDown(self):
+ pass
+
+
+if __name__ == "__main__":
+ pass
+
diff --git a/scripts/automation/regression/functional_tests/stl_basic_tests.py b/scripts/automation/regression/functional_tests/stl_basic_tests.py
new file mode 100644
index 00000000..bc5bc4d5
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/stl_basic_tests.py
@@ -0,0 +1,367 @@
+
+import outer_packages
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import nottest
+from nose.plugins.attrib import attr
+from trex import CTRexScenario
+from trex_stl_lib import trex_stl_sim
+from trex_stl_lib.trex_stl_streams import STLProfile
+from trex_stl_lib.trex_stl_packet_builder_scapy import RawPcapReader, RawPcapWriter, Ether
+from trex_stl_lib.utils.text_opts import *
+
+import sys
+
+if sys.version_info > (3,0):
+ from io import StringIO
+else:
+ from cStringIO import StringIO
+
+import os
+import subprocess
+import shlex
+from threading import Thread
+from collections import defaultdict
+
+@attr('run_on_trex')
+class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
+ def setUp (self):
+ self.test_path = os.path.abspath(os.getcwd())
+ self.scripts_path = CTRexScenario.scripts_path
+
+ self.verify_exists(os.path.join(self.scripts_path, "bp-sim-64-debug"))
+
+ self.stl_sim = os.path.join(self.scripts_path, "stl-sim")
+
+ self.verify_exists(self.stl_sim)
+
+ self.profiles_path = os.path.join(self.scripts_path, "stl/yaml/")
+
+ self.profiles = {}
+ self.profiles['imix_3pkt'] = os.path.join(self.profiles_path, "imix_3pkt.yaml")
+ self.profiles['imix_3pkt_vm'] = os.path.join(self.profiles_path, "imix_3pkt_vm.yaml")
+ self.profiles['random_size_9k'] = os.path.join(self.profiles_path, "../udp_rand_len_9k.py")
+ self.profiles['imix_tuple_gen'] = os.path.join(self.profiles_path, "imix_1pkt_tuple_gen.yaml")
+
+ for k, v in self.profiles.items():
+ self.verify_exists(v)
+
+ self.valgrind_profiles = [ self.profiles['imix_3pkt_vm'],
+ self.profiles['random_size_9k'],
+ self.profiles['imix_tuple_gen'] ]
+
+ self.golden_path = os.path.join(self.test_path,"stl/golden/")
+
+ os.chdir(self.scripts_path)
+
+
+ def tearDown (self):
+ os.chdir(self.test_path)
+
+
+
+ def get_golden (self, name):
+ golden = os.path.join(self.golden_path, name)
+ self.verify_exists(golden)
+ return golden
+
+
+ def verify_exists (self, name):
+ if not os.path.exists(name):
+ raise Exception("cannot find '{0}'".format(name))
+
+
+ def scapy_pkt_show_to_str (self, scapy_pkt):
+ capture = StringIO()
+ save_stdout = sys.stdout
+ sys.stdout = capture
+ scapy_pkt.show()
+ sys.stdout = save_stdout
+ return capture.getvalue()
+
+
+ def compare_caps (self, output, golden, max_diff_sec = 0.01):
+ pkts1 = []
+ pkts2 = []
+ pkts_ts_buckets = defaultdict(list)
+
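+        # RawPcapReader yields (raw_bytes, (sec, usec, wirelen)) tuples; bucket
+        # packets by timestamp in usec so that reordering within the same tick
+        # does not fail the byte-wise comparison below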
+ for pkt in RawPcapReader(output):
+ ts = pkt[1][0] * 1e6 + pkt[1][1]
+ pkts_ts_buckets[ts].append(pkt)
+        # don't take the last ts bucket; it can be cut in the middle and packets inside it might differ
+ #for ts in sorted(pkts_ts_buckets.keys())[:-1]:
+ for ts in sorted(pkts_ts_buckets.keys()):
+ pkts1.extend(sorted(pkts_ts_buckets[ts]))
+ pkts_ts_buckets.clear()
+
+ for pkt in RawPcapReader(golden):
+ ts = pkt[1][0] * 1e6 + pkt[1][1]
+ pkts_ts_buckets[ts].append(pkt)
+        # don't take the last ts bucket; it can be cut in the middle and packets inside it might differ
+ #for ts in sorted(pkts_ts_buckets.keys())[:-1]:
+ for ts in sorted(pkts_ts_buckets.keys()):
+ pkts2.extend(sorted(pkts_ts_buckets[ts]))
+
+ assert_equal(len(pkts1), len(pkts2), 'Lengths of generated pcap (%s) and golden (%s) are different' % (output, golden))
+
+        for pkt1, pkt2, i in zip(pkts1, pkts2, range(1, len(pkts1) + 1)):
+ ts1 = float(pkt1[1][0]) + (float(pkt1[1][1]) / 1e6)
+ ts2 = float(pkt2[1][0]) + (float(pkt2[1][1]) / 1e6)
+
+            if abs(ts1-ts2) > 0.000005: # 5 usec
+ raise AssertionError("TS error: cap files '{0}', '{1}' differ in cap #{2} - '{3}' vs. '{4}'".format(output, golden, i, ts1, ts2))
+
+ if pkt1[0] != pkt2[0]:
+ errmsg = "RAW error: output file '{0}', differs from golden '{1}' in cap #{2}".format(output, golden, i)
+ print(errmsg)
+
+ print(format_text("\ndifferent fields for packet #{0}:".format(i), 'underline'))
+
+ scapy_pkt1_info = self.scapy_pkt_show_to_str(Ether(pkt1[0])).split('\n')
+ scapy_pkt2_info = self.scapy_pkt_show_to_str(Ether(pkt2[0])).split('\n')
+
+ print(format_text("\nGot:\n", 'bold', 'underline'))
+ for line, ref in zip(scapy_pkt1_info, scapy_pkt2_info):
+ if line != ref:
+ print(format_text(line, 'bold'))
+
+ print(format_text("\nExpected:\n", 'bold', 'underline'))
+ for line, ref in zip(scapy_pkt2_info, scapy_pkt1_info):
+ if line != ref:
+ print(format_text(line, 'bold'))
+
+ print("\n")
+ raise AssertionError(errmsg)
+
+
+ def run_sim (self, yaml, output, options = "", silent = False, obj = None, tunables = None):
+ if output:
+ user_cmd = "-f {0} -o {1} {2} -p {3}".format(yaml, output, options, self.scripts_path)
+ else:
+ user_cmd = "-f {0} {1} -p {2}".format(yaml, options, self.scripts_path)
+
+ if silent:
+ user_cmd += " --silent"
+
+ if tunables:
+ user_cmd += " -t"
+ for k, v in tunables.items():
+ user_cmd += " {0}={1}".format(k, v)
+
+ rc = trex_stl_sim.main(args = shlex.split(user_cmd))
+ if obj:
+ obj['rc'] = (rc == 0)
+
+ return (rc == 0)
+
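+    # example of the resulting argument string handed to trex_stl_sim.main()
+    # (illustrative values):
+    #   -f stl/yaml/imix_3pkt.yaml -o a.pcap -m 1 -l 100 -p <scripts_path> --silent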
+
+
+ def run_py_profile_path (self,
+ profile,
+ options,
+ silent = False,
+ do_no_remove = False,
+ compare = True,
+ test_generated = True,
+ do_no_remove_generated = False,
+ tunables = None):
+
+ print('Testing profile: %s' % profile)
+ output_cap = "a.pcap"
+ input_file = os.path.join('stl/', profile)
+ golden_file = os.path.join('exp',os.path.basename(profile).split('.')[0]+'.pcap');
+ if os.path.exists(output_cap):
+ os.unlink(output_cap)
+ try:
+ rc = self.run_sim(yaml = input_file,
+ output = output_cap,
+ options = options,
+ silent = silent,
+ tunables = tunables)
+ assert_equal(rc, True, 'Simulation on profile %s failed.' % profile)
+ #s='cp '+output_cap+' '+golden_file;
+ #print s
+ #os.system(s)
+
+ if compare:
+ self.compare_caps(output_cap, golden_file)
+ finally:
+ if not do_no_remove:
+ os.unlink(output_cap)
+
+ if test_generated:
+ try:
+ generated_filename = input_file.replace('.py', '_GENERATED.py').replace('.yaml', '_GENERATED.py')
+ if input_file.endswith('.py'):
+ profile = STLProfile.load_py(input_file, **(tunables if tunables else {}))
+ elif input_file.endswith('.yaml'):
+ profile = STLProfile.load_yaml(input_file)
+
+ profile.dump_to_code(generated_filename)
+
+ rc = self.run_sim(yaml = generated_filename,
+ output = output_cap,
+ options = options,
+ silent = silent)
+ assert_equal(rc, True, 'Simulation on profile %s (generated) failed.' % profile)
+
+ if compare:
+ self.compare_caps(output_cap, golden_file)
+
+
+ finally:
+ if not do_no_remove_generated:
+ os.unlink(generated_filename)
+ # python 3 does not generate PYC under the same dir
+ if os.path.exists(generated_filename + 'c'):
+ os.unlink(generated_filename + 'c')
+ if not do_no_remove:
+ os.unlink(output_cap)
+
+
+ def test_stl_profiles (self):
+ p = [
+ ["udp_1pkt_1mac_override.py","-m 1 -l 50",True],
+ ["syn_attack.py","-m 1 -l 50",True],
+ ["udp_1pkt_1mac.py","-m 1 -l 50",True],
+ ["udp_1pkt_mac.py","-m 1 -l 50",True],
+ ["udp_1pkt.py","-m 1 -l 50",True],
+ ["udp_1pkt_tuple_gen.py","-m 1 -l 50",True],
+ ["udp_rand_len_9k.py","-m 1 -l 50",True],
+ ["udp_1pkt_mpls.py","-m 1 -l 50",True],
+ ["udp_1pkt_mpls_vm.py","-m 1 ",True],
+ ["imix.py","-m 1 -l 100",True],
+ ["udp_inc_len_9k.py","-m 1 -l 100",True],
+ ["udp_1pkt_range_clients.py","-m 1 -l 100",True],
+ ["multi_burst_2st_1000pkt.py","-m 1 -l 100",True],
+ ["pcap.py", "-m 1", True, False],
+ ["pcap_with_vm.py", "-m 1", True, False],
+ ["flow_stats.py", "-m 1 -l 1", True],
+ ["flow_stats_latency.py", "-m 1 -l 1", True],
+
+ # YAML test
+ ["yaml/burst_1000_pkt.yaml","-m 1 -l 100",True],
+ ["yaml/burst_1pkt_1burst.yaml","-m 1 -l 100",True],
+ ["yaml/burst_1pkt_vm.yaml","-m 1 -l 100",True],
+ ["yaml/imix_1pkt.yaml","-m 1 -l 100",True],
+ ["yaml/imix_1pkt_2.yaml","-m 1 -l 100",True],
+ ["yaml/imix_1pkt_tuple_gen.yaml","-m 1 -l 100",True],
+ ["yaml/imix_1pkt_vm.yaml","-m 1 -l 100",True],
+ ["udp_1pkt_pcap.py","-m 1 -l 10",True, False],
+ ["udp_3pkt_pcap.py","-m 1 -l 10",True, False],
+ #["udp_1pkt_simple.py","-m 1 -l 3",True],
+ ["udp_1pkt_pcap_relative_path.py","-m 1 -l 3",True, False],
+ ["udp_1pkt_tuple_gen_split.py","-m 1 -l 100",True],
+ ["udp_1pkt_range_clients_split.py","-m 1 -l 100",True],
+ ["udp_1pkt_vxlan.py","-m 1 -l 17",True, False], # can't generate: no VXLAN in Scapy, only in profile
+ ["udp_1pkt_ipv6_in_ipv4.py","-m 1 -l 17",True],
+ ["yaml/imix_3pkt.yaml","-m 50kpps --limit 20",True],
+ ["yaml/imix_3pkt_vm.yaml","-m 50kpps --limit 20",True],
+ ["udp_1pkt_simple_mac_dst.py","-m 1 -l 1 ",True],
+ ["udp_1pkt_simple_mac_src.py","-m 1 -l 1 ",True],
+ ["udp_1pkt_simple_mac_dst_src.py","-m 1 -l 1 ",True],
+ ["burst_3st_loop_x_times.py","-m 1 -l 20 ",True],
+ ["udp_1pkt_mac_step.py","-m 1 -l 20 ",True],
+ ["udp_1pkt_mac_mask1.py","-m 1 -l 20 ",True] ,
+ ["udp_1pkt_mac_mask2.py","-m 1 -l 20 ",True],
+ ["udp_1pkt_mac_mask3.py","-m 1 -l 20 ",True],
+ ["udp_1pkt_simple_test2.py","-m 1 -l 10 ",True, False], # test split of packet with ip option
+ ["udp_1pkt_simple_test.py","-m 1 -l 10 ",True, False],
+ ["udp_1pkt_mac_mask5.py","-m 1 -l 30 ",True],
+ ["udp_1pkt_range_clients_split_garp.py","-m 1 -l 50",True],
+ ["udp_1pkt_src_ip_split.py","-m 1 -l 50",True],
+ ["udp_1pkt_repeat_random.py","-m 1 -l 50",True],
+            ]
+
+ p1 = [ ["udp_1pkt_repeat_random.py","-m 1 -l 50",True] ];
+
+ for obj in p:
+ try:
+ test_generated = obj[3]
+            except IndexError: # default: test the generated profile unless stated otherwise
+ test_generated = True
+            self.run_py_profile_path(obj[0], obj[1], compare = obj[2], test_generated = test_generated, do_no_remove = True, do_no_remove_generated = False)
+
+
+ def test_hlt_profiles (self):
+ p = (
+ ['hlt/hlt_udp_inc_dec_len_9k.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_imix_default.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_imix_4rates.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_david1.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_david2.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_david3.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_david4.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_wentong1.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_wentong2.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_tcp_ranges.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_udp_ports.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_udp_random_ports.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_ip_ranges.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_framesize_vm.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_l3_length_vm.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_vlan_default.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_4vlans.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_vlans_vm.py', '-m 1 -l 20', True, {'random_seed': 1}],
+ ['hlt/hlt_ipv6_default.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_ipv6_ranges.py', '-m 1 -l 20', True, None],
+ ['hlt/hlt_mac_ranges.py', '-m 1 -l 20', True, None],
+ )
+
+ for obj in p:
+        self.run_py_profile_path(obj[0], obj[1], compare = obj[2], do_no_remove = True, do_no_remove_generated = False, tunables = obj[3])
+
+    # valgrind tests - these run multi-threaded, which is safe here (no output)
+ def test_valgrind_various_profiles (self):
+ print("\n")
+ threads = []
+ for profile in self.valgrind_profiles:
+ print("\n*** VALGRIND: testing profile '{0}' ***\n".format(profile))
+ obj = {'t': None, 'rc': None}
+ t = Thread(target = self.run_sim,
+ kwargs = {'obj': obj, 'yaml': profile, 'output':None, 'options': "--cores 8 --limit 20 --valgrind", 'silent': True})
+ obj['t'] = t
+
+ threads.append(obj)
+ t.start()
+
+ for obj in threads:
+ obj['t'].join()
+
+ for obj in threads:
+ assert_equal(obj['rc'], True)
+
+
+
+ def test_multicore_scheduling (self):
+
+ seed = time.time()
+
+ # test with simple vars
+ print(format_text("\nTesting multiple flow vars for multicore\n", 'underline'))
+ rc = self.run_sim('stl/tests/multi_core_test.py', output = None, options = '--test_multi_core --limit=840 -t test_type=plain#seed={0} -m 27kpps'.format(seed), silent = True)
+ assert_equal(rc, True)
+
+
+ # test with tuple
+ print(format_text("\nTesting multiple tuple generators for multicore\n", 'underline'))
+ rc = self.run_sim('stl/tests/multi_core_test.py', output = None, options = '--test_multi_core --limit=840 -t test_type=tuple#seed={0} -m 27kpps'.format(seed), silent = True)
+ assert_equal(rc, True)
+
+ # some tests
+ mc_tests = [
+ 'stl/tests/single_cont.py',
+ 'stl/tests/single_burst.py',
+ 'stl/tests/multi_burst.py',
+ ]
+
+ for mc_test in mc_tests:
+ print(format_text("\ntesting {0} for multicore...\n".format(mc_test), 'underline'))
+ rc = self.run_sim(mc_test, output = None, options = '--test_multi_core --limit=840 -m 27kpps', silent = True)
+ assert_equal(rc, True)
+
+ return
+
+
diff --git a/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py b/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py
new file mode 100755
index 00000000..5ff6b318
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py
@@ -0,0 +1,698 @@
+#!/usr/bin/python
+
+import sys
+import copy
+from collections import OrderedDict
+from trex import CTRexScenario
+sys.path.append(CTRexScenario.scripts_path)
+from dpdk_setup_ports import ConfigCreator, DpdkSetup
+sys.path.remove(CTRexScenario.scripts_path)
+from nose.tools import assert_raises
+import yaml
+
+class CompareLinesDiff(Exception): pass
+class CompareLinesNumDiff(Exception): pass
+class CompareTypeErr(Exception): pass
+
+def compare_lines(golden, output):
+ if type(golden) is not str:
+ raise CompareTypeErr('Type of golden should be str, got: %s' % type(golden))
+ if type(output) is not str:
+ raise CompareTypeErr('Type of output should be str, got: %s' % type(output))
+ golden_lines = golden.strip().splitlines()
+ output_lines = output.strip().splitlines()
+ if len(golden_lines) != len(output_lines):
+        raise CompareLinesNumDiff('Number of lines in golden is: %s, in output: %s\nGolden:\n%s\nGenerated:\n%s\n' % (len(golden_lines), len(output_lines), golden, output))
+ for line_num, (golden_line, output_line) in enumerate(zip(golden_lines, output_lines)):
+ if golden_line != output_line:
+            raise CompareLinesDiff('Produced YAML differs from golden at line %s. Golden: %s <-> Output: %s' % (line_num + 1, golden_line, output_line))
+
+def create_config(cpu_topology, interfaces, *args, **kwargs):
+ config = ConfigCreator(cpu_topology, interfaces, *args, **kwargs)
+ return config.create_config()
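+
+# Note: the helper returns the generated config as a YAML string, so the tests
+# below can both parse it (yaml.safe_load) and diff it line-by-line against goldens.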
+
+def verify_master_core0(output):
+ output_yaml = yaml.safe_load(output)
+ assert type(output_yaml) is list, 'Generated YAML should be list'
+    assert len(output_yaml) == 1, 'Generated YAML should be list with 1 element'
+ output_yaml = output_yaml[0]
+ assert 'platform' in output_yaml, 'Generated YAML has no platform section:\n%s' % output
+ assert 'master_thread_id' in output_yaml['platform'], 'Generated YAML does not specify master thread id:\n%s' % output
+    assert output_yaml['platform']['master_thread_id'] == 0, 'Master thread id should be 0 in generated YAML, got: %s' % output_yaml['platform']['master_thread_id']
+
+class TRexCfgCreator_Test:
+
+ def test_vm_cfg(self):
+ cpu_topology = {0: OrderedDict([i, [i]] for i in range(5))}
+ interfaces = [{'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 1968,
+ 'Device_str': 'VMXNET3 Ethernet Controller',
+ 'Driver_str': 'vmxnet3',
+ 'Interface': 'ens192',
+ 'Interface_argv': '0b:00.0',
+ 'Module_str': 'igb_uio,vfio-pci,uio_pci_generic',
+ 'NUMA': -1,
+ 'PhySlot': '192',
+ 'PhySlot_str': '192',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '07b0',
+ 'SDevice_str': 'VMXNET3 Ethernet Controller',
+ 'SVendor': '15ad',
+ 'SVendor_str': 'VMware',
+ 'Slot': '0000:0b:00.0',
+ 'Slot_str': '0b:00.0',
+ 'Vendor': 5549,
+ 'Vendor_str': 'VMware',
+ 'dest_mac': '00:0c:29:92:f1:ca',
+ 'src_mac': '00:0c:29:92:f1:d4',
+ 'loopback_dest': True},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 1968,
+ 'Device_str': 'VMXNET3 Ethernet Controller',
+ 'Driver_str': 'vmxnet3',
+ 'Interface': 'ens160',
+ 'Interface_argv': '03:00.0',
+ 'Module_str': 'igb_uio,vfio-pci,uio_pci_generic',
+ 'NUMA': -1,
+ 'PhySlot': '160',
+ 'PhySlot_str': '160',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '07b0',
+ 'SDevice_str': 'VMXNET3 Ethernet Controller',
+ 'SVendor': '15ad',
+ 'SVendor_str': 'VMware',
+ 'Slot': '0000:03:00.0',
+ 'Slot_str': '03:00.0',
+ 'Vendor': 5549,
+ 'Vendor_str': 'VMware',
+ 'dest_mac': '00:0c:29:92:f1:d4',
+ 'src_mac': '00:0c:29:92:f1:ca'}]
+ golden = '''
+### Config file generated by dpdk_setup_ports.py ###
+
+- port_limit: 2
+ version: 2
+ interfaces: ['0b:00.0', '03:00.0']
+ port_info:
+ - dest_mac: 00:0c:29:92:f1:ca # MAC OF LOOPBACK TO IT'S DUAL INTERFACE
+ src_mac: 00:0c:29:92:f1:d4
+ - dest_mac: 00:0c:29:92:f1:d4
+ src_mac: 00:0c:29:92:f1:ca
+
+ platform:
+ master_thread_id: 0
+ latency_thread_id: 1
+ dual_if:
+ - socket: 0
+ threads: [2]
+'''
+ output = create_config(cpu_topology, interfaces)
+ verify_master_core0(output)
+ compare_lines(golden, output)
+ with assert_raises(CompareLinesNumDiff):
+ compare_lines('1' + golden, output)
+ output = create_config(cpu_topology, interfaces, exclude_lcores = [0])
+ with assert_raises(AssertionError):
+ verify_master_core0(output)
+ output = create_config(cpu_topology, interfaces, include_lcores = [1,2,3,4])
+ with assert_raises(AssertionError):
+ verify_master_core0(output)
+ output = create_config(cpu_topology, interfaces, include_lcores = [0,2,3,4])
+ verify_master_core0(output)
+ output = create_config(cpu_topology, interfaces, include_lcores = [0,2,3,4], exclude_lcores = [0])
+ with assert_raises(AssertionError):
+ verify_master_core0(output)
+
+ def test_trex08_cfg(self):
+ cpu_topology = OrderedDict([(0, OrderedDict([(0, [0, 16]), (1, [1, 17]), (2, [2, 18]), (3, [3, 19]), (4, [4, 20]), (5, [5, 21]), (6, [6, 22]), (7, [7, 23])])), (1, OrderedDict([(0, [8, 24]), (1, [9, 25]), (2, [10, 26]), (3, [11, 27]), (4, [12, 28]), (5, [13, 29]), (6, [14, 30]), (7, [15, 31])]))])
+ interfaces = [{'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:02:00.0',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-1',
+ 'PhySlot_str': '0-1',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0002',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:02:00.0',
+ 'Slot_str': '02:00.0',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '02:00:02:00:00:00',
+ 'src_mac': '01:00:01:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:02:00.1',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-1',
+ 'PhySlot_str': '0-1',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:02:00.1',
+ 'Slot_str': '02:00.1',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '01:00:01:00:00:00',
+ 'src_mac': '02:00:02:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:84:00.0',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 1,
+ 'PhySlot': '0-8',
+ 'PhySlot_str': '0-8',
+ 'ProgIf': '20',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0002',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:84:00.0',
+ 'Slot_str': '84:00.0',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '04:00:04:00:00:00',
+ 'src_mac': '03:00:03:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:84:00.1',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 1,
+ 'PhySlot': '0-8',
+ 'PhySlot_str': '0-8',
+ 'ProgIf': '20',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:84:00.1',
+ 'Slot_str': '84:00.1',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '03:00:03:00:00:00',
+ 'src_mac': '04:00:04:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '05:00.0',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-3',
+ 'PhySlot_str': '0-3',
+ 'ProgIf': '01',
+ 'Rev': '02',
+ 'Rev_str': '02',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:05:00.0',
+ 'Slot_str': '05:00.0',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '06:00:06:00:00:00',
+ 'src_mac': '05:00:05:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '05:00.1',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-3',
+ 'PhySlot_str': '0-3',
+ 'ProgIf': '01',
+ 'Rev': '02',
+ 'Rev_str': '02',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:05:00.1',
+ 'Slot_str': '05:00.1',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '05:00:05:00:00:00',
+ 'src_mac': '06:00:06:00:00:00'}]
+ golden = '''
+### Config file generated by dpdk_setup_ports.py ###
+
+- port_limit: 6
+ version: 2
+ interfaces: ['02:00.0', '02:00.1', '84:00.0', '84:00.1', '05:00.0', '05:00.1']
+ port_bandwidth_gb: 40
+ port_info:
+ - dest_mac: 02:00:02:00:00:00
+ src_mac: 01:00:01:00:00:00
+ - dest_mac: 01:00:01:00:00:00
+ src_mac: 02:00:02:00:00:00
+
+ - dest_mac: 04:00:04:00:00:00
+ src_mac: 03:00:03:00:00:00
+ - dest_mac: 03:00:03:00:00:00
+ src_mac: 04:00:04:00:00:00
+
+ - dest_mac: 06:00:06:00:00:00
+ src_mac: 05:00:05:00:00:00
+ - dest_mac: 05:00:05:00:00:00
+ src_mac: 06:00:06:00:00:00
+
+ platform:
+ master_thread_id: 0
+ latency_thread_id: 16
+ dual_if:
+ - socket: 0
+ threads: [1,17,2,18,3,19,4]
+
+ - socket: 1
+ threads: [8,24,9,25,10,26,11]
+
+ - socket: 0
+ threads: [20,5,21,6,22,7,23]
+'''
+ output = create_config(cpu_topology, interfaces)
+ verify_master_core0(output)
+ compare_lines(golden, output)
+
+ interfaces = [{'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:02:00.0',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-1',
+ 'PhySlot_str': '0-1',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0002',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:02:00.0',
+ 'Slot_str': '02:00.0',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '02:00:02:00:00:00',
+ 'src_mac': '01:00:01:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:02:00.1',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-1',
+ 'PhySlot_str': '0-1',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:02:00.1',
+ 'Slot_str': '02:00.1',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '01:00:01:00:00:00',
+ 'src_mac': '02:00:02:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:84:00.0',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 1,
+ 'PhySlot': '0-8',
+ 'PhySlot_str': '0-8',
+ 'ProgIf': '20',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0002',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:84:00.0',
+ 'Slot_str': '84:00.0',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '04:00:04:00:00:00',
+ 'src_mac': '03:00:03:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:84:00.1',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 1,
+ 'PhySlot': '0-8',
+ 'PhySlot_str': '0-8',
+ 'ProgIf': '20',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:84:00.1',
+ 'Slot_str': '84:00.1',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '03:00:03:00:00:00',
+ 'src_mac': '04:00:04:00:00:00'}]
+ golden = '''
+### Config file generated by dpdk_setup_ports.py ###
+
+- port_limit: 4
+ version: 2
+ interfaces: ['02:00.0', '02:00.1', '84:00.0', '84:00.1']
+ port_bandwidth_gb: 40
+ port_info:
+ - dest_mac: 02:00:02:00:00:00
+ src_mac: 01:00:01:00:00:00
+ - dest_mac: 01:00:01:00:00:00
+ src_mac: 02:00:02:00:00:00
+
+ - dest_mac: 04:00:04:00:00:00
+ src_mac: 03:00:03:00:00:00
+ - dest_mac: 03:00:03:00:00:00
+ src_mac: 04:00:04:00:00:00
+
+ platform:
+ master_thread_id: 0
+ latency_thread_id: 31
+ dual_if:
+ - socket: 0
+ threads: [1,17,2,18,3,19,4,20,5,21,6,22,7,23,16]
+
+ - socket: 1
+ threads: [8,24,9,25,10,26,11,27,12,28,13,29,14,30,15]
+'''
+ output = create_config(cpu_topology, interfaces)
+ verify_master_core0(output)
+ compare_lines(golden, output)
+
+ interfaces = [{'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:02:00.0',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-1',
+ 'PhySlot_str': '0-1',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0002',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:02:00.0',
+ 'Slot_str': '02:00.0',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '02:00:02:00:00:00',
+ 'src_mac': '01:00:01:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:02:00.1',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-1',
+ 'PhySlot_str': '0-1',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:02:00.1',
+ 'Slot_str': '02:00.1',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '01:00:01:00:00:00',
+ 'src_mac': '02:00:02:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '05:00.0',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-3',
+ 'PhySlot_str': '0-3',
+ 'ProgIf': '01',
+ 'Rev': '02',
+ 'Rev_str': '02',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:05:00.0',
+ 'Slot_str': '05:00.0',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '04:00:04:00:00:00',
+ 'src_mac': '03:00:03:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '05:00.1',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-3',
+ 'PhySlot_str': '0-3',
+ 'ProgIf': '01',
+ 'Rev': '02',
+ 'Rev_str': '02',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:05:00.1',
+ 'Slot_str': '05:00.1',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '03:00:03:00:00:00',
+ 'src_mac': '04:00:04:00:00:00'}]
+ golden = '''
+### Config file generated by dpdk_setup_ports.py ###
+
+- port_limit: 4
+ version: 2
+ interfaces: ['02:00.0', '02:00.1', '05:00.0', '05:00.1']
+ port_bandwidth_gb: 40
+ port_info:
+ - dest_mac: 02:00:02:00:00:00
+ src_mac: 01:00:01:00:00:00
+ - dest_mac: 01:00:01:00:00:00
+ src_mac: 02:00:02:00:00:00
+
+ - dest_mac: 04:00:04:00:00:00
+ src_mac: 03:00:03:00:00:00
+ - dest_mac: 03:00:03:00:00:00
+ src_mac: 04:00:04:00:00:00
+
+ platform:
+ master_thread_id: 0
+ latency_thread_id: 16
+ dual_if:
+ - socket: 0
+ threads: [1,17,2,18,3,19,4]
+
+ - socket: 0
+ threads: [20,5,21,6,22,7,23]
+'''
+ output = create_config(cpu_topology, interfaces)
+ verify_master_core0(output)
+ compare_lines(golden, output)
+
+ def test_cfg_negative(self):
+ cpu_topology = OrderedDict([(0, OrderedDict([(0, [0, 16]), (1, [1, 17]), (2, [2, 18]), (3, [3, 19]), (4, [4, 20]), (5, [5, 21]), (6, [6, 22]), (7, [7, 23])])), (1, OrderedDict([(0, [8, 24]), (1, [9, 25]), (2, [10, 26]), (3, [11, 27]), (4, [12, 28]), (5, [13, 29]), (6, [14, 30]), (7, [15, 31])]))])
+ interfaces = [{'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:02:00.0',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-1',
+ 'PhySlot_str': '0-1',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0002',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:02:00.0',
+ 'Slot_str': '02:00.0',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '02:00:02:00:00:00',
+ 'src_mac': '01:00:01:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:02:00.1',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-1',
+ 'PhySlot_str': '0-1',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:02:00.1',
+ 'Slot_str': '02:00.1',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '01:00:01:00:00:00',
+ 'src_mac': '02:00:02:00:00:00'}]
+ # types errors
+ with assert_raises(AssertionError):
+ create_config(None, None)
+ with assert_raises(AssertionError):
+ create_config(cpu_topology, None)
+ with assert_raises(AssertionError):
+ create_config(None, interfaces)
+ with assert_raises(AssertionError):
+ create_config(cpu_topology, [])
+ with assert_raises(AssertionError):
+ create_config({}, interfaces)
+ with assert_raises(AssertionError):
+ create_config({}, [])
+ # not enough cores at NUMA 0
+ with assert_raises(DpdkSetup):
+ create_config({0:{0:[]}, 1:{0:[1,2,3,4,5,6,7]}}, interfaces)
+ with assert_raises(DpdkSetup):
+ create_config({0:{0:[1]}, 1:{0:[3]}}, interfaces)
+ with assert_raises(DpdkSetup):
+ create_config({0:{0:[1,2]}}, interfaces)
+ # no NUMA 0 info, NICs at NUMA 0
+ cpu_topo1 = copy.deepcopy(cpu_topology)
+ del cpu_topo1[0]
+ with assert_raises(KeyError):
+ create_config(cpu_topo1, interfaces)
+ int1 = copy.deepcopy(interfaces)
+ for interface in int1:
+ interface['NUMA'] = 1
+ # now should work, as interfaces use NUMA 1
+ create_config(cpu_topo1, int1)
+ int2 = copy.deepcopy(interfaces)
+ int2[1]['NUMA'] = 1
+ # interfaces on different NUMAs
+ with assert_raises(DpdkSetup):
+ create_config(cpu_topology, int2)
+
+
+ def test_inner_comparator(self):
+ compare_lines('', '')
+ compare_lines('one\ntwo', 'one\ntwo')
+ with assert_raises(CompareLinesNumDiff):
+ compare_lines('one\ntwo', 'one\ntwo\nthree')
+ with assert_raises(CompareLinesDiff):
+ compare_lines('one\ntwo', 'one\ntwo1')
+ with assert_raises(CompareLinesDiff):
+ compare_lines('one\ntwo', 'one\nthree')
+ with assert_raises(CompareTypeErr):
+ compare_lines(None, 'one\nthree')
+ with assert_raises(CompareTypeErr):
+ compare_lines('one\ntwo', None)
+ with assert_raises(CompareTypeErr):
+ compare_lines(None, None)
+
+    @classmethod
+    def tearDownClass(cls):
+        if CTRexScenario.scripts_path in sys.path: # the module-level remove above may have run already
+            sys.path.remove(CTRexScenario.scripts_path)
+        del sys.modules['dpdk_setup_ports']
diff --git a/scripts/automation/regression/hltapi_playground.py b/scripts/automation/regression/hltapi_playground.py
new file mode 100755
index 00000000..b790fe25
--- /dev/null
+++ b/scripts/automation/regression/hltapi_playground.py
@@ -0,0 +1,193 @@
+#!/router/bin/python
+
+import outer_packages
+#from trex_stl_lib.trex_stl_hltapi import CTRexHltApi, CStreamsPerPort
+from trex_stl_lib.trex_stl_hltapi import *
+import traceback
+import sys, time
+from pprint import pprint
+import argparse
+
+def error(err = None):
+ if not err:
+        raise Exception('Unknown error, see traceback')
+ if type(err) is str and not err.startswith('[ERR]'):
+ err = '[ERR] ' + err
+    print(err)
+ sys.exit(1)
+
+def check_res(res):
+ if res['status'] == 0:
+ error('Encountered error:\n%s' % res['log'])
+ return res
+
+def print_brief_stats(res):
+ title_str = ' '*3
+ tx_str = 'TX:'
+ rx_str = 'RX:'
+    for port_id, stat in res.items():
+ if type(port_id) is not int:
+ continue
+ title_str += ' '*10 + 'Port%s' % port_id
+ tx_str += '%15s' % res[port_id]['aggregate']['tx']['total_pkts']
+ rx_str += '%15s' % res[port_id]['aggregate']['rx']['total_pkts']
+ print(title_str)
+ print(tx_str)
+ print(rx_str)
+
+def wait_with_progress(seconds):
+ for i in range(0, seconds):
+ time.sleep(1)
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ print('')
+
+if __name__ == "__main__":
+ try:
+ parser = argparse.ArgumentParser(description='Example of using stateless TRex via HLT API.', formatter_class=argparse.RawTextHelpFormatter)
+ parser.add_argument('-v', dest = 'verbose', default = 0, help='Stateless API verbosity:\n0: No prints\n1: Commands and their status\n2: Same as 1 + ZMQ in&out')
+ parser.add_argument('--device', dest = 'device', default = 'localhost', help='Address of TRex server')
+ args = parser.parse_args()
+ hlt_client = CTRexHltApi(verbose = int(args.verbose))
+
+ print('Connecting to %s...' % args.device)
+ res = check_res(hlt_client.connect(device = args.device, port_list = [0, 1], username = 'danklei', break_locks = True, reset = True))
+ port_handle = res['port_handle']
+ print('Connected, got port handles %s' % port_handle)
+ ports_streams_dict = CStreamsPerPort()
+        print(hlt_client.traffic_control(action = 'poll'))
+
+        print(hlt_client.traffic_config(mode = 'create', l2_encap = 'ethernet_ii_vlan', rate_pps = 1,
+                            l3_protocol = 'ipv4',
+                            #length_mode = 'imix', l3_length = 200,
+                            ipv6_dst_mode = 'decrement', ipv6_dst_count = 300, ipv6_dst_addr = 'fe80:0:0:0:0:0:0:000f',
+                            port_handle = port_handle, port_handle2 = port_handle[1],
+                            #save_to_yaml = '/tmp/d1.yaml',
+                            #stream_id = 1,
+                            ))
+        print(hlt_client.traffic_control(action = 'poll'))
+        print(hlt_client.traffic_control(action = 'run'))
+        print(hlt_client.traffic_control(action = 'poll'))
+        wait_with_progress(2)
+        print(hlt_client.traffic_control(action = 'poll'))
+        print(hlt_client.traffic_control(action = 'stop'))
+        print(hlt_client.traffic_control(action = 'poll'))
+        print(hlt_client.traffic_stats(mode = 'aggregate'))
+        print(hlt_client.traffic_control(action = 'clear_stats'))
+        wait_with_progress(1)
+        print(hlt_client.traffic_stats(mode = 'aggregate'))
+
+        wait_with_progress(1)
+        print(hlt_client.traffic_stats(mode = 'aggregate'))
+        wait_with_progress(1)
+        print(hlt_client.traffic_stats(mode = 'aggregate'))
+        wait_with_progress(1)
+        print(hlt_client.traffic_stats(mode = 'aggregate'))
+ #print res
+ #print hlt_client._streams_history
+ #print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
+ #print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[1])
+ #ports_streams_dict.add_streams_from_res(res)
+ sys.exit(0)
+ res = check_res(hlt_client.traffic_config(mode = 'create', l2_encap = 'ethernet_ii_vlan', rate_pps = 1,
+ port_handle = port_handle[0], port_handle2 = port_handle[1], save_to_yaml = '/tmp/d1.yaml',
+ l4_protocol = 'udp',
+ #udp_src_port_mode = 'decrement',
+ #udp_src_port_count = 10, udp_src_port = 5,
+ ))
+ ports_streams_dict.add_streams_from_res(res)
+ sys.exit(0)
+ #print ports_streams_dict
+ #print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
+ res = check_res(hlt_client.traffic_config(mode = 'modify', port_handle = port_handle[0], stream_id = ports_streams_dict[0][0],
+ mac_src = '1-2-3:4:5:6', l4_protocol = 'udp', save_to_yaml = '/tmp/d2.yaml'))
+ #print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
+ #print hlt_client._streams_history
+ res = check_res(hlt_client.traffic_config(mode = 'modify', port_handle = port_handle[0], stream_id = ports_streams_dict[0][0],
+ mac_dst = '{ 7 7 7-7:7:7}', save_to_yaml = '/tmp/d3.yaml'))
+ #print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
+ check_res(hlt_client.traffic_config(mode = 'reset', port_handle = port_handle))
+
+ res = check_res(hlt_client.traffic_config(mode = 'create', bidirectional = True, length_mode = 'fixed',
+ port_handle = port_handle[0], port_handle2 = port_handle[1],
+ transmit_mode = 'single_burst', pkts_per_burst = 100, rate_pps = 100,
+ mac_src = '1-2-3-4-5-6',
+ mac_dst = '6:5:4:4:5:6',
+ save_to_yaml = '/tmp/imix.yaml'))
+ ports_streams_dict.add_streams_from_res(res)
+
+ print('Create single_burst 100 packets rate_pps=100 on port 0')
+ res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[0], transmit_mode = 'single_burst',
+ pkts_per_burst = 100, rate_pps = 100))
+ ports_streams_dict.add_streams_from_res(res)
+
+ # playground - creating various streams on port 1
+ res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt2.yaml',
+ tcp_src_port_mode = 'decrement',
+ tcp_src_port_count = 10, tcp_dst_port_count = 10, tcp_dst_port_mode = 'random'))
+ ports_streams_dict.add_streams_from_res(res)
+
+ res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt3.yaml',
+ l4_protocol = 'udp',
+ udp_src_port_mode = 'decrement',
+ udp_src_port_count = 10, udp_dst_port_count = 10, udp_dst_port_mode = 'random'))
+ ports_streams_dict.add_streams_from_res(res)
+
+ res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt4.yaml',
+ length_mode = 'increment',
+ #ip_src_addr = '192.168.1.1', ip_src_mode = 'increment', ip_src_count = 5,
+ ip_dst_addr = '5.5.5.5', ip_dst_mode = 'random', ip_dst_count = 2))
+ ports_streams_dict.add_streams_from_res(res)
+
+ res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt5.yaml',
+ length_mode = 'decrement', frame_size_min = 100, frame_size_max = 3000,
+ #ip_src_addr = '192.168.1.1', ip_src_mode = 'increment', ip_src_count = 5,
+ #ip_dst_addr = '5.5.5.5', ip_dst_mode = 'random', ip_dst_count = 2
+ ))
+ ports_streams_dict.add_streams_from_res(res)
+
+ # remove the playground
+ check_res(hlt_client.traffic_config(mode = 'reset', port_handle = port_handle[1]))
+
+ print('Create continuous stream for port 1, rate_pps = 1')
+ res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt1.yaml',
+ #length_mode = 'increment', l3_length_min = 200,
+ ip_src_addr = '192.168.1.1', ip_src_mode = 'increment', ip_src_count = 5,
+ ip_dst_addr = '5.5.5.5', ip_dst_mode = 'random', ip_dst_count = 2))
+
+ check_res(hlt_client.traffic_control(action = 'run', port_handle = port_handle))
+ wait_with_progress(1)
+        print('Sample after 1 second (only packets count)')
+ res = check_res(hlt_client.traffic_stats(mode = 'all', port_handle = port_handle))
+ print_brief_stats(res)
+        print('')
+
+        print('Port 0 has finished the burst; replacing it with a continuous stream at rate 1000. Other ports keep running.')
+ check_res(hlt_client.traffic_control(action = 'stop', port_handle = port_handle[0]))
+ check_res(hlt_client.traffic_config(mode = 'reset', port_handle = port_handle[0]))
+ res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[0], rate_pps = 1000))
+ ports_streams_dict.add_streams_from_res(res)
+ check_res(hlt_client.traffic_control(action = 'run', port_handle = port_handle[0]))
+ wait_with_progress(5)
+ print('Sample after another 5 seconds (only packets count)')
+ res = check_res(hlt_client.traffic_stats(mode = 'aggregate', port_handle = port_handle))
+ print_brief_stats(res)
+        print('')
+
+ print('Stop traffic at port 1')
+ res = check_res(hlt_client.traffic_control(action = 'stop', port_handle = port_handle[1]))
+ wait_with_progress(5)
+ print('Sample after another %s seconds (only packets count)' % 5)
+ res = check_res(hlt_client.traffic_stats(mode = 'aggregate', port_handle = port_handle))
+ print_brief_stats(res)
+        print('')
+ print('Full HLT stats:')
+ pprint(res)
+
+ check_res(hlt_client.cleanup_session())
+ except Exception as e:
+ print(traceback.print_exc())
+ print(e)
+ raise
+ finally:
+ print('Done.')
diff --git a/scripts/automation/regression/interactive_platform b/scripts/automation/regression/interactive_platform
new file mode 100755
index 00000000..5c5e920e
--- /dev/null
+++ b/scripts/automation/regression/interactive_platform
@@ -0,0 +1,4 @@
+#!/bin/bash
+/router/bin/python-2.7.4 interactive_platform.py "$@"
+sts=$?
+exit $sts
\ No newline at end of file
diff --git a/scripts/automation/regression/interactive_platform.py b/scripts/automation/regression/interactive_platform.py
new file mode 100755
index 00000000..10e89910
--- /dev/null
+++ b/scripts/automation/regression/interactive_platform.py
@@ -0,0 +1,338 @@
+#!/router/bin/python-2.7.4
+
+from CPlatform import *
+import cmd
+import outer_packages
+import termstyle
+import os
+import misc_methods
+from misc_methods import load_object_config_file
+from optparse import OptionParser
+from CShowParser import PlatformResponseMissmatch, PlatformResponseAmbiguity
+
+class InteractivePlatform(cmd.Cmd):
+
+ intro = termstyle.green("\nInteractive shell to control a remote Cisco IOS platform.\nType help to view available pre-defined configurations\n(c) All rights reserved.\n")
+ prompt = '> '
+
+ def __init__(self, cfg_yaml_path = None, silent_mode = False, virtual_mode = False ):
+# super(InteractivePlatform, self).__init__()
+ cmd.Cmd.__init__(self)
+ self.virtual_mode = virtual_mode
+ self.platform = CPlatform(silent_mode)
+ if cfg_yaml_path is None:
+ try:
+ cfg_yaml_path = raw_input(termstyle.cyan("Please enter a readable .yaml configuration file path: "))
+ cfg_yaml_path = os.path.abspath(cfg_yaml_path)
+ except KeyboardInterrupt:
+ exit(-1)
+ try:
+ self.device_cfg = CDeviceCfg(cfg_yaml_path)
+ self.platform.load_platform_data_from_file(self.device_cfg)
+ if not virtual_mode:
+                # if not in virtual mode, try to establish a physical connection to the platform
+ self.platform.launch_connection(self.device_cfg)
+
+ except Exception as inst:
+ print(termstyle.magenta(inst))
+ exit(-1)
+
+ def do_show_cfg (self, line):
+ """Outputs the loaded interface configuration"""
+ self.platform.get_if_manager().dump_if_config()
+ print(termstyle.green("*** End of interface configuration ***"))
+
+ def do_show_nat_cfg (self, line):
+ """Outputs the loaded nat provided configuration"""
+ try:
+ self.platform.dump_obj_config('nat')
+ print(termstyle.green("*** End of nat configuration ***"))
+ except UserWarning as inst:
+ print(termstyle.magenta(inst))
+
+
+ def do_show_static_route_cfg (self, line):
+ """Outputs the loaded static route configuration"""
+ try:
+ self.platform.dump_obj_config('static_route')
+ print(termstyle.green("*** End of static route configuration ***"))
+ except UserWarning as inst:
+ print(termstyle.magenta(inst))
+
+ def do_switch_cfg (self, cfg_file_path):
+ """Switch the current platform interface configuration with another one"""
+ if cfg_file_path:
+ cfg_yaml_path = os.path.abspath(cfg_file_path)
+ self.device_cfg = CDeviceCfg(cfg_yaml_path)
+ self.platform.load_platform_data_from_file(self.device_cfg)
+ if not self.virtual_mode:
+ self.platform.reload_connection(self.device_cfg)
+ print(termstyle.green("Configuration switching completed successfully."))
+ else:
+ print(termstyle.magenta("Configuration file is missing. Please try again."))
+
+ def do_load_clean (self, arg):
+ """Loads a clean configuration file onto the platform
+        If no arguments are specified, loads the 'clean_config.cfg' file from the bootflash disk.
+        First argument is the clean config filename.
+        Second argument is the platform disk holding the file."""
+ if arg:
+ in_val = arg.split(' ')
+ if len(in_val)==2:
+ self.platform.load_clean_config(in_val[0], in_val[1])
+ else:
+ print(termstyle.magenta("One of the config inputs is missing."))
+ else:
+ self.platform.load_clean_config()
+# print termstyle.magenta("Configuration file definition is missing. use 'help load_clean' for further info.")
+
+ def do_basic_if_config(self, line):
+ """Apply basic interfaces configuartion to all platform interfaces"""
+ self.platform.configure_basic_interfaces()
+ print(termstyle.green("Basic interfaces configuration applied successfully."))
+
+ def do_pbr(self, line):
+ """Apply IPv4 PBR configuration on all interfaces"""
+ self.platform.config_pbr()
+ print(termstyle.green("IPv4 PBR configuration applied successfully."))
+
+ def do_no_pbr(self, line):
+ """Removes IPv4 PBR configuration from all interfaces"""
+ self.platform.config_no_pbr()
+ print(termstyle.green("IPv4 PBR configuration removed successfully."))
+
+ def do_nbar(self, line):
+ """Apply NBAR PD configuration on all interfaces"""
+ self.platform.config_nbar_pd()
+ print(termstyle.green("NBAR configuration applied successfully."))
+
+ def do_no_nbar(self, line):
+ """Removes NBAR PD configuration from all interfaces"""
+ self.platform.config_no_nbar_pd()
+ print(termstyle.green("NBAR configuration removed successfully."))
+
+ def do_static_route(self, arg):
+ """Apply IPv4 static routing configuration on all interfaces
+        If no arguments are specified, applies static routing with the following config:
+ 1. clients_start - 16.0.0.1
+ 2. servers_start - 48.0.0.1
+ 3. dual_port_mask - 1.0.0.0
+ 4. client_destination_mask - 255.0.0.0
+ 5. server_destination_mask - 255.0.0.0
+ """
+ if arg:
+ stat_route_dict = load_object_config_file(arg)
+# else:
+# print termstyle.magenta("Unknown configutaion option requested. use 'help static_route' for further info.")
+ else:
+ stat_route_dict = { 'clients_start' : '16.0.0.1',
+ 'servers_start' : '48.0.0.1',
+ 'dual_port_mask': '1.0.0.0',
+ 'client_destination_mask' : '255.0.0.0',
+ 'server_destination_mask' : '255.0.0.0' }
+ stat_route_obj = CStaticRouteConfig(stat_route_dict)
+ self.platform.config_static_routing(stat_route_obj)
+ print(termstyle.green("IPv4 static routing configuration applied successfully."))
+# print termstyle.magenta("Specific configutaion is missing. use 'help static_route' for further info.")
+
+ def do_no_static_route(self, line):
+ """Removes IPv4 static route configuration from all non-duplicated interfaces"""
+ try:
+ self.platform.config_no_static_routing()
+ print(termstyle.green("IPv4 static routing configuration removed successfully."))
+ except UserWarning as inst:
+ print(termstyle.magenta(inst))
+
+ def do_nat(self, arg):
+ """Apply NAT configuration on all non-duplicated interfaces
+        If no arguments are specified, applies NAT with the following config:
+ 1. clients_net_start - 16.0.0.0
+ 2. client_acl_wildcard_mask - 0.0.0.255
+ 3. dual_port_mask - 1.0.0.0
+ 4. pool_start - 200.0.0.0
+ 5. pool_netmask - 255.255.255.0
+ """
+ if arg:
+ nat_dict = load_object_config_file(arg)
+# else:
+# print termstyle.magenta("Unknown nat configutaion option requested. use 'help nat' for further info.")
+ else:
+# print termstyle.magenta("Specific nat configutaion is missing. use 'help nat' for further info.")
+ nat_dict = { 'clients_net_start' : '16.0.0.0',
+ 'client_acl_wildcard_mask' : '0.0.0.255',
+ 'dual_port_mask' : '1.0.0.0',
+ 'pool_start' : '200.0.0.0',
+ 'pool_netmask' : '255.255.255.0' }
+ nat_obj = CNatConfig(nat_dict)
+ self.platform.config_nat(nat_obj)
+ print(termstyle.green("NAT configuration applied successfully."))
+
+ def do_no_nat(self, arg):
+ """Removes NAT configuration from all non-duplicated interfaces"""
+ try:
+ self.platform.config_no_nat()
+ print(termstyle.green("NAT configuration removed successfully."))
+ except UserWarning as inst:
+ print(termstyle.magenta(inst))
+
+
+ def do_ipv6_pbr(self, line):
+ """Apply IPv6 PBR configuration on all interfaces"""
+ self.platform.config_ipv6_pbr()
+ print(termstyle.green("IPv6 PBR configuration applied successfully."))
+
+ def do_no_ipv6_pbr(self, line):
+ """Removes IPv6 PBR configuration from all interfaces"""
+ self.platform.config_no_ipv6_pbr()
+ print(termstyle.green("IPv6 PBR configuration removed successfully."))
+
+ def do_zbf(self, line):
+ """Apply Zone-Based policy Firewall configuration on all interfaces"""
+ self.platform.config_zbf()
+ print(termstyle.green("Zone-Based policy Firewall configuration applied successfully."))
+
+ def do_no_zbf(self, line):
+ """Removes Zone-Based policy Firewall configuration from all interfaces"""
+ self.platform.config_no_zbf()
+ print(termstyle.green("Zone-Based policy Firewall configuration removed successfully."))
+
+ def do_show_cpu_util(self, line):
+ """Fetches CPU utilization stats from the platform"""
+ try:
+ print(self.platform.get_cpu_util())
+ print(termstyle.green("*** End of show_cpu_util output ***"))
+ except PlatformResponseMissmatch as inst:
+ print(termstyle.magenta(inst))
+
+ def do_show_drop_stats(self, line):
+ """Fetches packet drop stats from the platform.\nDrop are summed and presented for both input and output traffic of each interface"""
+ print(self.platform.get_drop_stats())
+ print(termstyle.green("*** End of show_drop_stats output ***"))
+
+ def do_show_nbar_stats(self, line):
+ """Fetches NBAR classification stats from the platform.\nStats are available both as raw data and as percentage data."""
+ try:
+ print(self.platform.get_nbar_stats())
+ print(termstyle.green("*** End of show_nbar_stats output ***"))
+ except PlatformResponseMissmatch as inst:
+ print(termstyle.magenta(inst))
+
+ def do_show_nat_stats(self, line):
+ """Fetches NAT translations stats from the platform"""
+ print(self.platform.get_nat_stats())
+ print(termstyle.green("*** End of show_nat_stats output ***"))
+
+ def do_show_cft_stats(self, line):
+ """Fetches CFT stats from the platform"""
+ print(self.platform.get_cft_stats())
+ print(termstyle.green("*** End of show_sft_stats output ***"))
+
+ def do_show_cvla_memory_usage(self, line):
+ """Fetches CVLA memory usage stats from the platform"""
+ (res, res2) = self.platform.get_cvla_memory_usage()
+ print(res)
+ print(res2)
+ print(termstyle.green("*** End of show_cvla_memory_usage output ***"))
+
+ def do_clear_counters(self, line):
+ """Clears interfaces counters"""
+ self.platform.clear_counters()
+ print(termstyle.green("*** clear counters completed ***"))
+
+ def do_clear_nbar_stats(self, line):
+ """Clears interfaces counters"""
+ self.platform.clear_nbar_stats()
+ print(termstyle.green("*** clear nbar stats completed ***"))
+
+ def do_clear_cft_counters(self, line):
+ """Clears interfaces counters"""
+ self.platform.clear_cft_counters()
+ print(termstyle.green("*** clear cft counters completed ***"))
+
+ def do_clear_drop_stats(self, line):
+ """Clears interfaces counters"""
+ self.platform.clear_packet_drop_stats()
+ print(termstyle.green("*** clear packet drop stats completed ***"))
+
+ def do_clear_nat_translations(self, line):
+ """Clears nat translations"""
+ self.platform.clear_nat_translations()
+ print(termstyle.green("*** clear nat translations completed ***"))
+
+ def do_set_tftp_server (self, line):
+ """Configures TFTP access on platform"""
+ self.platform.config_tftp_server(self.device_cfg)
+ print(termstyle.green("*** TFTP config deployment completed ***"))
+
+ def do_show_running_image (self, line):
+ """Fetches currently loaded image of the platform"""
+ res = self.platform.get_running_image_details()
+ print(res)
+ print(termstyle.green("*** Show running image completed ***"))
+
+ def do_check_image_existence(self, arg):
+ """Check if specific image file (usually *.bin) is already stored in platform drive"""
+ if arg:
+ try:
+ res = self.platform.check_image_existence(arg.split(' ')[0])
+ print(res)
+ print(termstyle.green("*** Check image existence completed ***"))
+ except PlatformResponseAmbiguity as inst:
+ print(termstyle.magenta(inst))
+ else:
+ print(termstyle.magenta("Please provide an image name in order to check for existance."))
+
+ def do_load_image (self, arg):
+ """Loads a given image filename from tftp server (if not available on disk) and sets it as the boot image on the platform"""
+ if arg:
+ try:
+                self.platform.load_platform_image(arg.split(' ')[0])
+ except UserWarning as inst:
+ print(termstyle.magenta(inst))
+ else:
+ print(termstyle.magenta("Image filename is missing."))
+
+ def do_reload (self, line):
+ """Reloads the platform"""
+
+ ans = misc_methods.query_yes_no('This will reload the platform. Are you sure?', default = None)
+ if ans:
+            # user confirmed the platform reload
+ self.platform.reload_platform(self.device_cfg)
+ print(termstyle.green("*** Platform reload completed ***"))
+ else:
+ print(termstyle.green("*** Platform reload aborted ***"))
+
+ def do_quit(self, arg):
+ """Quits the application"""
+ return True
+
+ def do_exit(self, arg):
+ """Quits the application"""
+ return True
+
+ def do_all(self, arg):
+ """Configures bundle of commands to set PBR routing"""
+ self.do_load_clean('')
+ self.do_set_tftp_server('')
+ self.do_basic_if_config('')
+ self.do_pbr('')
+ self.do_ipv6_pbr('')
+
+
+
+if __name__ == "__main__":
+ parser = OptionParser(version="%prog 1.0 \t (C) Cisco Systems Inc.\n")
+ parser.add_option("-c", "--config-file", dest="cfg_yaml_path",
+ action="store", help="Define the interface configuration to load the applicatino with.", metavar="FILE_PATH")
+ parser.add_option("-s", "--silent", dest="silent_mode", default = False,
+ action="store_true", help="Silence the generated input when commands launched.")
+ parser.add_option("-v", "--virtual", dest="virtual_mode", default = False,
+ action="store_true", help="Interact with a virtual router, no actual link will apply. Show commands are NOT available in this mode.")
+ (options, args) = parser.parse_args()
+
+ try:
+ InteractivePlatform(**vars(options)).cmdloop()
+
+ except KeyboardInterrupt:
+ exit(-1)
+
diff --git a/scripts/automation/regression/interfaces_e.py b/scripts/automation/regression/interfaces_e.py
new file mode 100755
index 00000000..0c2ce5d2
--- /dev/null
+++ b/scripts/automation/regression/interfaces_e.py
@@ -0,0 +1,8 @@
+#!/router/bin/python
+
+import outer_packages
+from enum import Enum
+
+
+# define the interface types (by role) that a TRex port can serve during a test
+IFType = Enum('IFType', 'Client Server All')
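+
+# Illustrative usage (assumption: callers compare against the members directly):
+#   if if_type == IFType.All:
+#       ... # act on both client- and server-side interfaces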
diff --git a/scripts/automation/regression/misc_methods.py b/scripts/automation/regression/misc_methods.py
new file mode 100755
index 00000000..99071f81
--- /dev/null
+++ b/scripts/automation/regression/misc_methods.py
@@ -0,0 +1,284 @@
+#!/router/bin/python
+import sys
+if sys.version_info >= (3, 0):
+    import configparser
+else:
+    import ConfigParser
+    input = raw_input # Python 2 compatibility: read plain strings in query_yes_no()
+
+import outer_packages
+import yaml
+from collections import namedtuple
+import subprocess, shlex
+import os
+
+TRexConfig = namedtuple('TRexConfig', 'trex, router, tftp')
+
+# debug/development purpose, lists object's attributes and their values
+def print_r(obj):
+ for attr in dir(obj):
+ print('obj.%s %s' % (attr, getattr(obj, attr)))
+
+def mix_string (s):
+    """Convert a string to lowercase and replace spaces with the '_' char"""
+    return s.replace(' ', '_').lower()
+
+# executes given command, returns tuple (return_code, stdout, stderr)
+def run_command(cmd, background = False):
+ if background:
+ print('Running command in background: %s' % cmd)
+ with open(os.devnull, 'w') as tempf:
+ subprocess.Popen(shlex.split(cmd), stdin=tempf, stdout=tempf, stderr=tempf)
+ return (None,)*3
+ else:
+ print('Running command: %s' % cmd)
+ proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (stdout, stderr) = proc.communicate()
+ stdout = stdout.decode()
+ stderr = stderr.decode()
+ if stdout:
+ print('Stdout:\n%s' % stdout)
+ if proc.returncode:
+ if stderr:
+ print('Stderr:\n%s' % stderr)
+ print('Return code: %s' % proc.returncode)
+ return (proc.returncode, stdout, stderr)
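+
+# Illustrative usage sketch ('long_task.sh' is a hypothetical script name):
+#   rc, stdout, stderr = run_command('ls /tmp')      # blocking, returns decoded output
+#   run_command('./long_task.sh', background = True) # fire-and-forget, returns (None, None, None)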
+
+
+def run_remote_command(host, command_string, background = False, timeout = 20):
+ cmd = 'ssh -tt %s \'sudo%s sh -ec "%s"\'' % (host, (' timeout %s' % timeout) if (timeout and not background) else '', command_string)
+ return run_command(cmd, background)
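+
+# For example ('trex-host' is a placeholder hostname), run_remote_command('trex-host', 'ls /tmp', timeout = 5)
+# expands to: ssh -tt trex-host 'sudo timeout 5 sh -ec "ls /tmp"'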
+
+
+def generate_intf_lists (interfacesList):
+ retDict = {
+ 'relevant_intf' : [],
+ 'relevant_ip_addr' : [],
+ 'relevant_mac_addr' : [],
+ 'total_pairs' : None
+ }
+
+ for intf in interfacesList:
+ retDict['relevant_intf'].append(intf['client'])
+ retDict['relevant_ip_addr'].append(intf['client_config']['ip_addr'])
+ retDict['relevant_mac_addr'].append(intf['client_config']['mac_addr'])
+ retDict['relevant_intf'].append(intf['server'])
+ retDict['relevant_ip_addr'].append(intf['server_config']['ip_addr'])
+ retDict['relevant_mac_addr'].append(intf['server_config']['mac_addr'])
+
+ retDict['total_pairs'] = len(interfacesList)
+
+ return retDict
+
+def get_single_net_client_addr (ip_addr, octetListDict = {'3' : 1}, ip_type = 'ipv4'):
+ """ get_single_net_client_addr(ip_addr, octetListDict, ip_type) -> str
+
+ Parameters
+ ----------
+ ip_addr : str
+        a string representing an IP address (by default, of type A.B.C.D)
+    octetListDict : dict
+        a dictionary representing the octets on which to act, such that ip[octet_key] = ip[octet_key] + octet_value
+ ip_type : str
+ a string that defines the ip type to parse. possible inputs are 'ipv4', 'ipv6'
+
+ By default- Returns a new ip address - A.B.C.(D+1)
+ """
+ if ip_type == 'ipv4':
+ ip_lst = ip_addr.split('.')
+
+ for octet,increment in octetListDict.items():
+ int_octet = int(octet)
+ if ((int_octet < 0) or (int_octet > 3)):
+ raise ValueError('the provided octet is not legal in {0} format'.format(ip_type) )
+ else:
+ if (int(ip_lst[int_octet]) + increment) < 255:
+ ip_lst[int_octet] = str(int(ip_lst[int_octet]) + increment)
+ else:
+ raise ValueError('the requested increment exceeds 255 client address limit')
+
+ return '.'.join(ip_lst)
+
+    else: # this is an ipv6 address, handle accordingly
+ ip_lst = ip_addr.split(':')
+
+ for octet,increment in octetListDict.items():
+ int_octet = int(octet)
+ if ((int_octet < 0) or (int_octet > 7)):
+ raise ValueError('the provided octet is not legal in {0} format'.format(ip_type) )
+ else:
+                if (int(ip_lst[int_octet], 16) + increment) < 65535: # IPv6 groups are hex strings
+                    ip_lst[int_octet] = format( int(ip_lst[int_octet], 16) + increment, 'X')
+ else:
+ raise ValueError('the requested increment exceeds 65535 client address limit')
+
+ return ':'.join(ip_lst)
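+
+# Illustrative examples (full-form IPv6 notation is required, as the address is split on ':'):
+#   get_single_net_client_addr('1.1.1.1')                                  -> '1.1.1.2'
+#   get_single_net_client_addr('1.1.1.1', {'2': 1})                        -> '1.1.2.1'
+#   get_single_net_client_addr('fe80:0:0:0:0:0:0:000f', {'7': 1}, 'ipv6')  -> 'fe80:0:0:0:0:0:0:10'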
+
+
+def load_complete_config_file (filepath):
+ """load_complete_config_file(filepath) -> list
+
+ Loads a configuration file (.yaml) for both trex config and router config
+ Returns a list with a dictionary to each of the configurations
+ """
+
+ # create response dictionaries
+ trex_config = {}
+ rtr_config = {}
+ tftp_config = {}
+
+ try:
+ with open(filepath, 'r') as f:
+            config = yaml.safe_load(f)
+
+ # Handle TRex configuration
+ trex_config['trex_name'] = config["trex"]["hostname"]
+ trex_config['trex_password'] = config["trex"].get("password")
+ #trex_config['trex_is_dual'] = config["trex"]["is_dual"]
+ trex_config['trex_cores'] = int(config["trex"]["cores"])
+ #trex_config['trex_latency'] = int(config["trex"]["latency"])
+# trex_config['trex_version_path'] = config["trex"]["version_path"]
+ trex_config['modes'] = config['trex'].get('modes', [])
+
+ if 'loopback' not in trex_config['modes']:
+ trex_config['router_interface'] = config["router"]["ip_address"]
+
+ # Handle Router configuration
+ rtr_config['model'] = config["router"]["model"]
+ rtr_config['hostname'] = config["router"]["hostname"]
+ rtr_config['ip_address'] = config["router"]["ip_address"]
+ rtr_config['image'] = config["router"]["image"]
+ rtr_config['line_pswd'] = config["router"]["line_password"]
+ rtr_config['en_pswd'] = config["router"]["en_password"]
+ rtr_config['interfaces'] = config["router"]["interfaces"]
+ rtr_config['clean_config'] = config["router"]["clean_config"]
+ rtr_config['intf_masking'] = config["router"]["intf_masking"]
+ rtr_config['ipv6_mask'] = config["router"]["ipv6_mask"]
+ rtr_config['mgmt_interface'] = config["router"]["mgmt_interface"]
+
+ # Handle TFTP configuration
+ tftp_config['hostname'] = config["tftp"]["hostname"]
+ tftp_config['ip_address'] = config["tftp"]["ip_address"]
+ tftp_config['images_path'] = config["tftp"]["images_path"]
+
+ if rtr_config['clean_config'] is None:
+            raise ValueError("A clean router configuration wasn't provided.")
+
+ except ValueError:
+ print("")
+ raise
+
+ except Exception as inst:
+ print("\nBad configuration file provided: '{0}'\n".format(filepath))
+ raise inst
+
+ return TRexConfig(trex_config, rtr_config, tftp_config)
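+
+# Illustrative usage sketch ('setup.yaml' is a placeholder path):
+#   trex_cfg, rtr_cfg, tftp_cfg = load_complete_config_file('setup.yaml')
+#   print(trex_cfg['trex_name'])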
+
+def load_object_config_file (filepath):
+ try:
+ with open(filepath, 'r') as f:
+            config = yaml.safe_load(f)
+ return config
+ except Exception as inst:
+ print("\nBad configuration file provided: '{0}'\n".format(filepath))
+ print(inst)
+ exit(-1)
+
+
+def query_yes_no(question, default="yes"):
+ """Ask a yes/no question via raw_input() and return their answer.
+
+ "question" is a string that is presented to the user.
+ "default" is the presumed answer if the user just hits <Enter>.
+ It must be "yes" (the default), "no" or None (meaning
+ an answer is required of the user).
+
+ The "answer" return value is True for "yes" or False for "no".
+ """
+ valid = { "yes": True, "y": True, "ye": True,
+ "no": False, "n": False }
+ if default is None:
+ prompt = " [y/n] "
+ elif default == "yes":
+ prompt = " [Y/n] "
+ elif default == "no":
+ prompt = " [y/N] "
+ else:
+ raise ValueError("invalid default answer: '%s'" % default)
+
+ while True:
+ sys.stdout.write(question + prompt)
+ choice = input().lower()
+ if default is not None and choice == '':
+ return valid[default]
+ elif choice in valid:
+ return valid[choice]
+ else:
+ sys.stdout.write("Please respond with 'yes' or 'no' "
+ "(or 'y' or 'n').\n")
+
+
+def load_benchmark_config_file (filepath):
+ """load_benchmark_config_file(filepath) -> list
+
+ Loads a configuration file (.yaml) for both trex config and router config
+ Returns a list with a dictionary to each of the configurations
+ """
+
+ # create response dictionary
+ benchmark_config = {}
+
+ try:
+ with open(filepath, 'r') as f:
+ benchmark_config = yaml.safe_load(f)
+
+ except Exception as inst:
+ print("\nBad configuration file provided: '{0}'\n".format(filepath))
+ print(inst)
+ exit(-1)
+
+ return benchmark_config
+
+
+def get_benchmark_param (benchmark_path, test_name, param, sub_param = None):
+
+ config = load_benchmark_config_file(benchmark_path)
+ if sub_param is None:
+ return config[test_name][param]
+ else:
+ return config[test_name][param][sub_param]
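+
+# Example (sketch, with a setup's benchmark file such as setups/dave/benchmark.yaml):
+# get_benchmark_param('setups/dave/benchmark.yaml', 'test_routing_imix', 'multiplier') # -> 70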
+
+def gen_increment_dict (dual_port_mask):
+ addr_lst = dual_port_mask.split('.')
+ result = {}
+ for idx, octet_increment in enumerate(addr_lst):
+ octet_int = int(octet_increment)
+ if octet_int > 0:
+ result[str(idx)] = octet_int
+
+ return result
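+
+# Example (sketch): gen_increment_dict('1.0.0.0') -> {'0': 1}, i.e. only the first
+# octet is incremented between the dual ports.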
+
+
+def get_network_addr (ip_type = 'ipv4'):
+ ipv4_addr = [1, 1, 1, 0] # base ipv4 address to start generating from - 1.1.1.0
+ ipv6_addr = ['2001', 'DB8', 0, '2222', 0, 0, 0, 0] # base ipv6 address to start generating from - 2001:DB8:0:2222:0:0:0:0
+ while True:
+ if ip_type == 'ipv4':
+ if (ipv4_addr[2] < 255):
+ yield [".".join( map(str, ipv4_addr) ), '255.255.255.0']
+ ipv4_addr[2] += 1
+ else: # reached the defined maximum limit of address allocation
+ return
+ else: # handling ipv6 addressing
+ if (ipv6_addr[2] < 4369): # 4369 == 0x1111, the defined maximum limit of address allocation
+ tmp_ipv6_addr = list(ipv6_addr)
+ tmp_ipv6_addr[2] = hex(tmp_ipv6_addr[2])[2:]
+ yield ":".join( map(str, tmp_ipv6_addr) )
+ ipv6_addr[2] += 1
+ else: # reached the defined maximum limit of address allocation
+ return
+
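+# Illustrative sketch of the generator above:
+# gen = get_network_addr()
+# next(gen) # -> ['1.1.1.0', '255.255.255.0']
+# next(gen) # -> ['1.1.2.0', '255.255.255.0']
+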
+
+
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/outer_packages.py b/scripts/automation/regression/outer_packages.py
new file mode 100755
index 00000000..61ddc5cd
--- /dev/null
+++ b/scripts/automation/regression/outer_packages.py
@@ -0,0 +1,71 @@
+#!/router/bin/python
+
+import sys, site
+import platform, os
+
+CURRENT_PATH = os.path.dirname(os.path.realpath(__file__)) # alternate use with: os.getcwd()
+TREX_PATH = os.getenv('TREX_UNDER_TEST') # path to the <trex-core>/scripts directory; the TREX_UNDER_TEST env. variable overrides the default computed below.
+if not TREX_PATH or not os.path.isfile('%s/trex_daemon_server' % TREX_PATH):
+ TREX_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir, os.pardir))
+PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(TREX_PATH, 'external_libs'))
+PATH_TO_CTRL_PLANE = os.path.abspath(os.path.join(TREX_PATH, 'automation', 'trex_control_plane'))
+PATH_STF_API = os.path.abspath(os.path.join(PATH_TO_CTRL_PLANE, 'stf'))
+PATH_STL_API = os.path.abspath(os.path.join(PATH_TO_CTRL_PLANE, 'stl'))
+
+
+NIGHTLY_MODULES = [ {'name': 'ansi2html'},
+ {'name': 'enum34-1.0.4'},
+ {'name': 'rednose-0.4.1'},
+ {'name': 'progressbar-2.2'},
+ {'name': 'termstyle'},
+ {'name': 'pyyaml-3.11', 'py-dep': True},
+ {'name': 'nose-1.3.4', 'py-dep': True}
+ ]
+
+
+def generate_module_path (module, is_python3, is_64bit, is_cel):
+ platform_path = [module['name']]
+
+ if module.get('py-dep'):
+ platform_path.append('python3' if is_python3 else 'python2')
+
+ if module.get('arch-dep'):
+ platform_path.append('cel59' if is_cel else 'fedora18')
+ platform_path.append('64bit' if is_64bit else '32bit')
+
+ return os.path.normcase(os.path.join(PATH_TO_PYTHON_LIB, *platform_path))
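+
+# Example (sketch): for {'name': 'pyyaml-3.11', 'py-dep': True} under Python 3 this
+# resolves to <external_libs>/pyyaml-3.11/python3 (normcased for the local OS).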
+
+
+def import_module_list(modules_list):
+
+ # platform data
+ is_64bit = platform.architecture()[0] == '64bit'
+ is_python3 = (sys.version_info >= (3, 0))
+ is_cel = os.path.exists('/etc/system-profile')
+
+ # regular modules
+ for p in modules_list:
+ full_path = generate_module_path(p, is_python3, is_64bit, is_cel)
+
+ if not os.path.exists(full_path):
+ print("Unable to find required module library: '{0}'".format(p['name']))
+ print("Please provide the correct path using PATH_TO_PYTHON_LIB variable")
+ print("current path used: '{0}'".format(full_path))
+ exit(0)
+
+ sys.path.insert(1, full_path)
+
+
+def import_nightly_modules ():
+ sys.path.append(TREX_PATH)
+ #sys.path.append(PATH_TO_CTRL_PLANE)
+ sys.path.append(PATH_STL_API)
+ sys.path.append(PATH_STF_API)
+ import_module_list(NIGHTLY_MODULES)
+
+
+import_nightly_modules()
+
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/platform_cmd_link.py b/scripts/automation/regression/platform_cmd_link.py
new file mode 100755
index 00000000..275da656
--- /dev/null
+++ b/scripts/automation/regression/platform_cmd_link.py
@@ -0,0 +1,488 @@
+#!/router/bin/python
+
+from interfaces_e import IFType
+import CustomLogger
+import misc_methods
+import telnetlib
+import socket
+import time
+from collections import OrderedDict
+
+class CCommandCache(object):
+ def __init__(self):
+ self.__gen_clean_data_structure()
+
+ def __gen_clean_data_structure (self):
+ self.cache = {"IF" : OrderedDict(),
+ "CONF" : [],
+ "EXEC" : []}
+
+ def __list_append (self, dest_list, cmd):
+ if isinstance(cmd, list):
+ dest_list.extend( cmd )
+ else:
+ dest_list.append( cmd )
+
+ def add (self, cmd_type, cmd, interface = None):
+
+ if interface is not None: # this is an interface ("IF") config command
+ if interface in self.cache['IF']:
+ # commands for this interface already exist
+ self.__list_append(self.cache['IF'][interface], cmd)
+ else:
+ # no cached commands for this interface
+ self.cache['IF'][interface] = []
+ self.__list_append(self.cache['IF'][interface], cmd)
+ else: # this is either a CONF or EXEC command
+ self.__list_append(self.cache[cmd_type.upper()], cmd)
+
+ def dump_config (self):
+ # dump IF config:
+ print("configure terminal")
+ for intf, intf_cmd_list in self.cache['IF'].items():
+ print("interface {if_name}".format( if_name = intf ))
+ print('\n'.join(intf_cmd_list))
+
+ if self.cache['IF']:
+ # add 'exit' only if interface config actually took place
+ print('exit') # exit to global config mode
+
+ # dump global config
+ if self.cache['CONF']:
+ print('\n'.join(self.cache['CONF']))
+
+ # exit back to en mode
+ print("exit")
+
+ # dump exec config
+ if self.cache['EXEC']:
+ print('\n'.join(self.cache['EXEC']))
+
+ def get_config_list (self):
+ conf_list = []
+
+ conf_list.append("configure terminal")
+ for intf, intf_cmd_list in self.cache['IF'].items():
+ conf_list.append( "interface {if_name}".format( if_name = intf ) )
+ conf_list.extend( intf_cmd_list )
+ if len(conf_list) > 1:
+ # add 'exit' only if interface config actually took place
+ conf_list.append("exit")
+
+ conf_list.extend( self.cache['CONF'] )
+ conf_list.append("exit")
+ conf_list.extend( self.cache['EXEC'] )
+
+ return conf_list
+
+ def clear_cache (self):
+ # clear all pointers to cache data (erase the data structure)
+ self.cache.clear()
+ # Re-initialize the cache
+ self.__gen_clean_data_structure()
+
+
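+# Illustrative usage (a sketch - build a cached config and print it):
+# cache = CCommandCache()
+# cache.add('IF', 'ip address 1.1.1.1 255.255.255.0', interface = 'GigabitEthernet0/0/1')
+# cache.add('CONF', 'ip routing')
+# cache.dump_config()
+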
+
+class CCommandLink(object):
+ def __init__(self, silent_mode = False, debug_mode = False):
+ self.history = []
+ self.virtual_mode = True
+ self.silent_mode = silent_mode
+ self.telnet_con = None
+ self.debug_mode = debug_mode
+
+
+ def __transmit (self, cmd_list, **kwargs):
+ self.history.extend(cmd_list)
+ if not self.silent_mode:
+ print('\n'.join(cmd_list)) # echo the pushed platform commands
+ if not self.virtual_mode:
+ # transmit the command to platform.
+ return self.telnet_con.write_ios_cmd(cmd_list, debug_mode = self.debug_mode, **kwargs)
+
+ def run_command (self, cmd_list, **kwargs):
+ response = ''
+ for cmd in cmd_list:
+
+ # check which type of cmd we handle
+ if isinstance(cmd, CCommandCache):
+ tmp_response = self.__transmit( cmd.get_config_list(), **kwargs ) # expand the cache into its ordered command list
+ else:
+ tmp_response = self.__transmit([cmd], **kwargs)
+ if not self.virtual_mode:
+ response += tmp_response
+ return response
+
+ def run_single_command (self, cmd, **kwargs):
+ return self.run_command([cmd], **kwargs)
+
+ def get_history (self, as_string = False):
+ if as_string:
+ return '\n'.join(self.history)
+ else:
+ return self.history
+
+ def clear_history (self):
+ # clear all pointers to history data (erase the data structure)
+ del self.history[:]
+ # re-initialize the history with a clean one
+ self.history = []
+
+ def launch_platform_connectivity (self, device_config_obj):
+ connection_info = device_config_obj.get_platform_connection_data()
+ self.telnet_con = CIosTelnet( **connection_info )
+ self.virtual_mode = False # if physical connectivity was successful, toggle virtual mode off
+
+ def close_platform_connection(self):
+ if self.telnet_con is not None:
+ self.telnet_con.close()
+
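+# Illustrative usage (a sketch; in the default virtual mode commands are only
+# printed and recorded, not transmitted):
+# link = CCommandLink()
+# link.run_single_command('show version')
+# print(link.get_history(as_string = True))
+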
+
+
+class CDeviceCfg(object):
+ def __init__(self, cfg_yaml_path = None):
+ if cfg_yaml_path is not None:
+ (self.platform_cfg, self.tftp_cfg) = misc_methods.load_complete_config_file(cfg_yaml_path)[1:3]
+
+ self.interfaces_cfg = self.platform_cfg['interfaces'] # extract only the router interface configuration
+
+ def set_platform_config(self, config_dict):
+ self.platform_cfg = config_dict
+ self.interfaces_cfg = self.platform_cfg['interfaces']
+
+ def set_tftp_config(self, tftp_cfg):
+ self.tftp_cfg = tftp_cfg
+
+ def get_interfaces_cfg (self):
+ return self.interfaces_cfg
+
+ def get_ip_address (self):
+ return self.__get_attr('ip_address')
+
+ def get_line_password (self):
+ return self.__get_attr('line_pswd')
+
+ def get_en_password (self):
+ return self.__get_attr('en_pswd')
+
+ def get_mgmt_interface (self):
+ return self.__get_attr('mgmt_interface')
+
+ def get_platform_connection_data (self):
+ return { 'host' : self.get_ip_address(), 'line_pass' : self.get_line_password(), 'en_pass' : self.get_en_password() }
+
+ def get_tftp_info (self):
+ return self.tftp_cfg
+
+ def get_image_name (self):
+ return self.__get_attr('image')
+
+ def __get_attr (self, attr):
+ return self.platform_cfg[attr]
+
+ def dump_config (self):
+ import yaml
+ print(yaml.dump(self.interfaces_cfg, default_flow_style=False))
+
+class CIfObj(object):
+ _obj_id = 0
+
+ def __init__(self, if_name, ipv4_addr, ipv6_addr, src_mac_addr, dest_mac_addr, dest_ipv6_mac_addr, if_type):
+ self.__get_and_increment_id()
+ self.if_name = if_name
+ self.if_type = if_type
+ self.src_mac_addr = src_mac_addr
+ self.dest_mac_addr = dest_mac_addr
+ self.dest_ipv6_mac_addr = dest_ipv6_mac_addr
+ self.ipv4_addr = ipv4_addr
+ self.ipv6_addr = ipv6_addr
+ self.pair_parent = None # a pointer to CDualIfObj which holds this interface and its pair-complement
+
+ def __get_and_increment_id (self):
+ self._obj_id = CIfObj._obj_id
+ CIfObj._obj_id += 1
+
+ def get_name (self):
+ return self.if_name
+
+ def get_src_mac_addr (self):
+ return self.src_mac_addr
+
+ def get_dest_mac (self):
+ return self.dest_mac_addr
+
+ def get_ipv6_dest_mac (self):
+ if self.dest_mac_addr != 0:
+ return self.dest_mac_addr
+ else:
+ return self.dest_ipv6_mac_addr
+
+ def get_id (self):
+ return self._obj_id
+
+ def get_if_type (self):
+ return self.if_type
+
+ def get_ipv4_addr (self):
+ return self.ipv4_addr
+
+ def get_ipv6_addr (self):
+ return self.ipv6_addr
+
+ def set_ipv4_addr (self, addr):
+ self.ipv4_addr = addr
+
+ def set_ipv6_addr (self, addr):
+ self.ipv6_addr = addr
+
+ def set_pair_parent (self, dual_if_obj):
+ self.pair_parent = dual_if_obj
+
+ def get_pair_parent (self):
+ return self.pair_parent
+
+ def is_client (self):
+ return (self.if_type == IFType.Client)
+
+ def is_server (self):
+ return (self.if_type == IFType.Server)
+
+
+
+class CDualIfObj(object):
+ _obj_id = 0
+
+ def __init__(self, vrf_name, client_if_obj, server_if_obj):
+ self.__get_and_increment_id()
+ self.vrf_name = vrf_name
+ self.client_if = client_if_obj
+ self.server_if = server_if_obj
+
+ # link if_objects to its parent dual_if
+ self.client_if.set_pair_parent(self)
+ self.server_if.set_pair_parent(self)
+
+ def __get_and_increment_id (self):
+ self._obj_id = CDualIfObj._obj_id
+ CDualIfObj._obj_id += 1
+
+ def get_id (self):
+ return self._obj_id
+
+ def get_vrf_name (self):
+ return self.vrf_name
+
+ def is_duplicated (self):
+ return self.vrf_name is not None
+
+class CIfManager(object):
+ _ipv4_gen = misc_methods.get_network_addr()
+ _ipv6_gen = misc_methods.get_network_addr(ip_type = 'ipv6')
+
+ def __init__(self):
+ self.interfaces = OrderedDict() # maps an interface name to its CIfObj
+ self.dual_intf = []
+ self.full_device_cfg = None
+
+ def __add_if_to_manager (self, if_obj):
+ self.interfaces[if_obj.get_name()] = if_obj
+
+ def __add_dual_if_to_manager (self, dual_if_obj):
+ self.dual_intf.append(dual_if_obj)
+
+ def __get_ipv4_net_client_addr(self, ipv4_addr):
+ return misc_methods.get_single_net_client_addr (ipv4_addr)
+
+ def __get_ipv6_net_client_addr(self, ipv6_addr):
+ return misc_methods.get_single_net_client_addr (ipv6_addr, {'7' : 1}, ip_type = 'ipv6')
+
+ def load_config (self, device_config_obj):
+ self.full_device_cfg = device_config_obj
+ # first, erase all current config
+ self.interfaces.clear()
+ del self.dual_intf[:]
+
+ # then, load the configuration
+ intf_config = device_config_obj.get_interfaces_cfg()
+
+ # finally, parse the information into data-structures
+ for intf_pair in intf_config:
+ # generate network addresses for client side, and initialize client if object
+ tmp_ipv4_addr = self.__get_ipv4_net_client_addr (next(CIfManager._ipv4_gen)[0])
+ tmp_ipv6_addr = self.__get_ipv6_net_client_addr (next(CIfManager._ipv6_gen))
+
+ if 'dest_mac_addr' in intf_pair['client']:
+ client_dest_mac = intf_pair['client']['dest_mac_addr']
+ else:
+ client_dest_mac = 0
+ if 'dest_ipv6_mac_addr' in intf_pair['client']:
+ client_dest_ipv6_mac = intf_pair['client']['dest_ipv6_mac_addr']
+ else:
+ client_dest_ipv6_mac = 0
+ client_obj = CIfObj(if_name = intf_pair['client']['name'],
+ ipv4_addr = tmp_ipv4_addr,
+ ipv6_addr = tmp_ipv6_addr,
+ src_mac_addr = intf_pair['client']['src_mac_addr'],
+ dest_mac_addr = client_dest_mac,
+ dest_ipv6_mac_addr = client_dest_ipv6_mac,
+ if_type = IFType.Client)
+
+ # generate network addresses for server side, and initialize server if object
+ tmp_ipv4_addr = self.__get_ipv4_net_client_addr (next(CIfManager._ipv4_gen)[0])
+ tmp_ipv6_addr = self.__get_ipv6_net_client_addr (next(CIfManager._ipv6_gen))
+
+ if 'dest_mac_addr' in intf_pair['server']:
+ server_dest_mac = intf_pair['server']['dest_mac_addr']
+ else:
+ server_dest_mac = 0
+ if 'dest_ipv6_mac_addr' in intf_pair['server']:
+ server_dest_ipv6_mac = intf_pair['server']['dest_ipv6_mac_addr']
+ else:
+ server_dest_ipv6_mac = 0
+ server_obj = CIfObj(if_name = intf_pair['server']['name'],
+ ipv4_addr = tmp_ipv4_addr,
+ ipv6_addr = tmp_ipv6_addr,
+ src_mac_addr = intf_pair['server']['src_mac_addr'],
+ dest_mac_addr = server_dest_mac,
+ dest_ipv6_mac_addr = server_dest_ipv6_mac,
+ if_type = IFType.Server)
+
+ dual_intf_obj = CDualIfObj(vrf_name = intf_pair['vrf_name'],
+ client_if_obj = client_obj,
+ server_if_obj = server_obj)
+
+ # update single interfaces pointers
+ client_obj.set_pair_parent(dual_intf_obj)
+ server_obj.set_pair_parent(dual_intf_obj)
+
+ # finally, update the data-structures with generated objects
+ self.__add_if_to_manager(client_obj)
+ self.__add_if_to_manager(server_obj)
+ self.__add_dual_if_to_manager(dual_intf_obj)
+
+
+ def get_if_list (self, if_type = IFType.All, is_duplicated = None):
+ result = []
+ for if_name, if_obj in self.interfaces.items():
+ if (if_type == IFType.All) or ( if_obj.get_if_type() == if_type) :
+ if (is_duplicated is None) or (if_obj.get_pair_parent().is_duplicated() == is_duplicated):
+ # append this if_obj only if matches both IFType and is_duplicated conditions
+ result.append(if_obj)
+ return result
+
+ def get_duplicated_if (self):
+ result = []
+ for dual_if_obj in self.dual_intf:
+ if dual_if_obj.get_vrf_name() is not None :
+ result.extend( (dual_if_obj.client_if, dual_if_obj.server_if) )
+ return result
+
+ def get_dual_if_list (self, is_duplicated = None):
+ result = []
+ for dual_if in self.dual_intf:
+ if (is_duplicated is None) or (dual_if.is_duplicated() == is_duplicated):
+ result.append(dual_if)
+ return result
+
+ def dump_if_config (self):
+ if self.full_device_cfg is None:
+ print("Device configuration isn't loaded.\nPlease load config and try again.")
+ else:
+ self.full_device_cfg.dump_config()
+
+
+class AuthError(Exception):
+ pass
+
+class CIosTelnet(telnetlib.Telnet):
+ AuthError = AuthError
+
+ # wrapper for compatibility with Python2/3, convert input to bytes
+ def str_to_bytes_wrapper(self, func, text, *args, **kwargs):
+ if type(text) in (list, tuple):
+ text = [elem.encode('ascii') if type(elem) is str else elem for elem in text]
+ res = func(self, text.encode('ascii') if type(text) is str else text, *args, **kwargs)
+ return res.decode() if type(res) is bytes else res
+
+ def read_until(self, *args, **kwargs):
+ return self.str_to_bytes_wrapper(telnetlib.Telnet.read_until, *args, **kwargs)
+
+ def write(self, *args, **kwargs):
+ return self.str_to_bytes_wrapper(telnetlib.Telnet.write, *args, **kwargs)
+
+ def expect(self, *args, **kwargs):
+ res = self.str_to_bytes_wrapper(telnetlib.Telnet.expect, *args, **kwargs)
+ return [elem.decode() if type(elem) is bytes else elem for elem in res]
+
+ def __init__ (self, host, line_pass, en_pass, port = 23, str_wait = "#"):
+ telnetlib.Telnet.__init__(self)
+ self.host = host
+ self.port = port
+ self.line_passwd = line_pass
+ self.enable_passwd = en_pass
+ self.pr = str_wait
+# self.set_debuglevel (1)
+ try:
+ self.open(self.host,self.port, timeout = 5)
+ self.read_until("word:",1)
+ self.write("{line_pass}\n".format(line_pass = self.line_passwd) )
+ res = self.read_until(">",1)
+ if 'Password' in res:
+ raise AuthError('Invalid line password was provided')
+ self.write("enable 15\n")
+ self.read_until("d:",1)
+ self.write("{en_pass}\n".format(en_pass = self.enable_passwd) )
+ res = self.read_until(self.pr,1)
+ if 'Password' in res:
+ raise AuthError('Invalid en password was provided')
+ self.write_ios_cmd(['terminal length 0'])
+
+ except socket.timeout:
+ raise socket.timeout('A timeout error has occurred.\nCheck the platform connectivity or the hostname defined in the config file')
+ except Exception as inst:
+ raise
+
+ def write_ios_cmd (self, cmd_list, result_from = 0, timeout = 60, **kwargs):
+ assert isinstance(cmd_list, list)
+ self.read_until(self.pr, timeout = 1)
+
+ res = ''
+ if 'read_until' in kwargs:
+ wf = kwargs['read_until']
+ else:
+ wf = self.pr
+
+ for idx, cmd in enumerate(cmd_list):
+ start_time = time.time()
+ self.write(cmd+'\r\n')
+ if kwargs.get('debug_mode'):
+ print('-->\n%s' % cmd)
+ if type(wf) is list:
+ output = self.expect(wf, timeout)[2]
+ else:
+ output = self.read_until(wf, timeout)
+ if idx >= result_from:
+ res += output
+ if kwargs.get('debug_mode'):
+ print('<-- (%ss)\n%s' % (round(time.time() - start_time, 2), output))
+ if time.time() - start_time > timeout - 1:
+ raise Exception('Timeout while performing telnet command: %s' % cmd)
+ if 'Invalid' in res:
+ print('Warning: telnet command probably failed.\nCommand: %s\nResponse: %s' % (cmd_list, res))
+# return res.split('\r\n')
+ return res # return the received response as a string; lines are separated by '\r\n'.
+
+
+if __name__ == "__main__":
+# dev_cfg = CDeviceCfg('config/config.yaml')
+# print dev_cfg.get_platform_connection_data()
+# telnet = CIosTelnet( **(dev_cfg.get_platform_connection_data() ) )
+
+# if_mng = CIfManager()
+# if_mng.load_config(dev_cfg)
+# if_mng.dump_config()
+ pass
diff --git a/scripts/automation/regression/reports/.keep b/scripts/automation/regression/reports/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/automation/regression/reports/.keep
diff --git a/scripts/automation/regression/setups/dave/benchmark.yaml b/scripts/automation/regression/setups/dave/benchmark.yaml
new file mode 100755
index 00000000..aac2d805
--- /dev/null
+++ b/scripts/automation/regression/setups/dave/benchmark.yaml
@@ -0,0 +1,118 @@
+################################################################
+#### TRex benchmark configuration file ####
+################################################################
+
+test_nbar_simple :
+ multiplier : 0.5
+ cores : 4
+ exp_gbps : 0.5
+ cpu_to_core_ratio : 37270000
+ cpu2core_custom_dev: YES
+ cpu2core_dev : 0.07
+ exp_max_latency : 1000
+
+ nbar_classification:
+ http : 29.95
+ rtp_audio : 20.75
+ oracle_sqlnet : 11.09
+ rtp : 10.9
+ exchange : 8.16
+ citrix : 5.54
+ rtsp : 2.85
+ sctp : 3.83
+ ssl : 2.41
+ sip : 0.09
+ dns : 1.92
+ smtp : 0.56
+ pop3 : 0.36
+ unknown : 3.15
+
+test_rx_check :
+ multiplier : 25
+ cores : 4
+ rx_sample_rate : 128
+ exp_gbps : 0.5
+ cpu_to_core_ratio : 37270000
+ exp_bw : 1
+ exp_latency : 1
+
+test_nat_simple :
+ stat_route_dict :
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+ nat_dict :
+ clients_net_start : 16.0.0.0
+ client_acl_wildcard_mask : 0.0.0.255
+ dual_port_mask : 1.0.0.0
+ pool_start : 200.0.0.0
+ pool_netmask : 255.255.255.0
+ multiplier : 400
+ cpu_to_core_ratio : 37270000
+ cores : 4
+ exp_bw : 1
+ exp_latency : 1
+ allow_timeout_dev : YES
+
+test_nat_learning :
+ stat_route_dict :
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+ multiplier : 400
+ cores : 4
+ nat_opened : 100000
+ cpu_to_core_ratio : 37270000
+ exp_bw : 1
+ exp_latency : 1
+ allow_timeout_dev : YES
+
+test_routing_imix_64 :
+ multiplier : 2500
+ cores : 4
+ cpu_to_core_ratio : 8900
+ exp_latency : 1
+
+test_routing_imix :
+ multiplier : 70
+ cores : 2
+ cpu_to_core_ratio : 8900
+ exp_latency : 1
+
+test_static_routing_imix :
+ stat_route_dict :
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+ multiplier : 70
+ cores : 2
+ cpu_to_core_ratio : 3766666
+ exp_latency : 1
+
+test_static_routing_imix_asymmetric:
+ stat_route_dict :
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+ multiplier : 36
+ cores : 1
+ cpu_to_core_ratio : 3766666
+ exp_latency : 1
+
+test_ipv6_simple :
+ multiplier : 36
+ cores : 4
+ cpu_to_core_ratio : 30070000
+ cpu2core_custom_dev: YES
+ cpu2core_dev : 0.07
+
+
+
diff --git a/scripts/automation/regression/setups/dave/config.yaml b/scripts/automation/regression/setups/dave/config.yaml
new file mode 100755
index 00000000..8aa763bc
--- /dev/null
+++ b/scripts/automation/regression/setups/dave/config.yaml
@@ -0,0 +1,94 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - DNS name or IP address of the TRex machine, used to ssh into the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback, virtual etc.)
+# * loopback - TRex works via loopback. Router and TFTP configurations may be skipped.
+# * virtual - virtual OS (accept low CPU utilization in tests)
+
+### Router configuration:
+# hostname - the router hostname, as appears in the ______# CLI prefix
+# ip_address - the router's IP address, used to communicate with it
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the router's interface configurations
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+# The "clean" configuration is a mandatory configuration the router loads to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : cpp-rtp-trex-01
+ cores : 4
+
+router:
+ model : ESP100
+ hostname : cpp-rtp-ts-15
+ ip_address : 172.18.4.34
+ port : 2054
+ image : trex_regression_v155_315.bin
+ line_password : cisco
+ en_password : cisco
+ mgmt_interface : dummy
+ clean_config : dummy
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : TenGigabitEthernet0/0/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet0/1/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name :
+ - client :
+ name : TenGigabitEthernet0/2/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet0/3/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name :
+ - client :
+ name : TenGigabitEthernet1/0/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet1/1/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name :
+ - client :
+ name : TenGigabitEthernet1/2/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet1/3/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name :
+
+
+tftp:
+ hostname : ats-asr-srv-1
+ ip_address : 10.56.128.23
+ root_dir : /auto/avc-devtest/images/
+ images_path : /images/RP1/
diff --git a/scripts/automation/regression/setups/dummy/config.yaml b/scripts/automation/regression/setups/dummy/config.yaml
new file mode 100644
index 00000000..16e3b0cc
--- /dev/null
+++ b/scripts/automation/regression/setups/dummy/config.yaml
@@ -0,0 +1,11 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+# dummy setup; all TRex tests are expected to be skipped
+
+trex:
+ hostname : csi-trex-04
+ cores : 2
+ modes : [loopback, virtual, dummy_mode]
diff --git a/scripts/automation/regression/setups/kiwi02/benchmark.yaml b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
new file mode 100644
index 00000000..41688906
--- /dev/null
+++ b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
@@ -0,0 +1,298 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+#### common templates ###
+
+stat_route_dict: &stat_route_dict
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+
+nat_dict: &nat_dict
+ clients_net_start : 16.0.0.0
+ client_acl_wildcard_mask : 0.0.0.255
+ dual_port_mask : 1.0.0.0
+ pool_start : 200.0.0.0
+ pool_netmask : 255.255.255.0
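+
+# Note: the templates above rely on standard YAML anchors - '&name' defines a reusable
+# node, '*name' references it, and '<< : *name' merges its keys into the current
+# mapping, letting individual keys be overridden per test.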
+
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 55
+ cores : 1
+ bw_per_core : 647.305
+
+
+test_routing_imix:
+ multiplier : 32
+ cores : 2
+ bw_per_core : 39.131
+
+
+test_routing_imix_64:
+ multiplier : 2500
+ cores : 4
+ bw_per_core : 7.427
+
+
+test_static_routing_imix:
+ stat_route_dict : *stat_route_dict
+ multiplier : 32
+ cores : 2
+ bw_per_core : 39.039
+
+
+test_static_routing_imix_asymmetric:
+ stat_route_dict : *stat_route_dict
+ multiplier : 16
+ cores : 1
+ bw_per_core : 38.796
+
+
+test_ipv6_simple:
+ multiplier : 32
+ cores : 4
+ bw_per_core : 19.283
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 10000
+ cores : 1
+ allow_timeout_dev : True
+ bw_per_core : 45.304
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning:
+ << : *test_nat_simple
+ nat_opened : 100000
+
+
+test_nbar_simple:
+ multiplier : 20
+ cores : 2
+ bw_per_core : 18.243
+ nbar_classification:
+ http : 30.41
+ rtp_audio : 21.22
+ rtp : 11.4
+ oracle_sqlnet : 11.3
+ exchange : 10.95
+ citrix : 5.65
+ rtsp : 2.67
+ dns : 1.95
+ smtp : 0.57
+ pop3 : 0.36
+ sctp : 0.09
+ sip : 0.09
+ ssl : 0.06
+ unknown : 3.2
+
+
+test_rx_check_http: &rx_http
+ multiplier : 40000
+ cores : 2
+ rx_sample_rate : 32
+ error_tolerance : 0.01
+ bw_per_core : 38.071
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 46.733
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 25
+ cores : 4
+ rx_sample_rate : 32
+ error_tolerance : 0.01
+ bw_per_core : 16.915
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 20.323
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
+
+test_performance_vm_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 11.5
+ max: 13.1
+
+
+test_performance_vm_single_cpu_cached:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 22.0
+ max: 25.0
+
+
+
+test_performance_syn_attack_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 9.5
+ max: 11.5
+
+test_performance_vm_multi_cpus:
+ cfg:
+ core_count : 4
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 9.7
+ max: 12.5
+
+
+test_performance_vm_multi_cpus_cached:
+ cfg:
+ core_count : 4
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 19.0
+ max: 22.0
+
+test_performance_syn_attack_multi_cpus:
+ cfg:
+ core_count : 4
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 8.5
+ max: 10.5
+
diff --git a/scripts/automation/regression/setups/kiwi02/config.yaml b/scripts/automation/regression/setups/kiwi02/config.yaml
new file mode 100644
index 00000000..d6c13a22
--- /dev/null
+++ b/scripts/automation/regression/setups/kiwi02/config.yaml
@@ -0,0 +1,95 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - DNS name or IP address of the TRex machine, used to ssh into the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback, virtual etc.)
+# * loopback - TRex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname, as appears in the ______# CLI prefix
+# ip_address - the router's IP address, used to communicate with it
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the router's interface configurations
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+# The "clean" configuration is a mandatory configuration the router loads to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : 10.56.217.210 #10.56.192.189
+ cores : 4
+
+router:
+ model : ESP100
+ hostname : csi-mcp-asr1k-40
+ ip_address : 10.56.192.57
+ image : BLD_V155_2_S_XE315_THROTTLE_LATEST_20150424_100040-std.bin # resides on the router's hard disk
+ #image : asr1000rp2-adventerprisek9.2014-11-10_18.33_etis.bin
+ line_password : cisco
+ en_password : cisco
+ mgmt_interface : GigabitEthernet0
+ clean_config : /tmp/asr1001_TRex_clean_config.cfg
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : TenGigabitEthernet0/0/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet0/1/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name : duplicate
+ - client :
+ name : TenGigabitEthernet0/2/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet0/3/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name : duplicate
+ - client :
+ name : TenGigabitEthernet1/0/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet1/1/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name :
+ - client :
+ name : TenGigabitEthernet1/2/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet1/3/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name :
+
+
+tftp:
+ hostname : kiwi02_tftp_server
+ ip_address : 10.56.217.7
+ root_dir : /scratch/tftp/
+ images_path : hhaim/
diff --git a/scripts/automation/regression/setups/trex-dan/benchmark.yaml b/scripts/automation/regression/setups/trex-dan/benchmark.yaml
new file mode 100644
index 00000000..de56089b
--- /dev/null
+++ b/scripts/automation/regression/setups/trex-dan/benchmark.yaml
@@ -0,0 +1,253 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+#### common templates ###
+
+stat_route_dict: &stat_route_dict
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+
+nat_dict: &nat_dict
+ clients_net_start : 16.0.0.0
+ client_acl_wildcard_mask : 0.0.0.255
+ dual_port_mask : 1.0.0.0
+ pool_start : 200.0.0.0
+ pool_netmask : 255.255.255.0
+
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 2.8
+ cores : 1
+ bw_per_core : 67.030
+
+
+test_routing_imix:
+ multiplier : 1
+ cores : 1
+ bw_per_core : 3.979
+
+
+test_routing_imix_64:
+ multiplier : 150
+ cores : 4
+ bw_per_core : .681
+
+
+test_static_routing_imix:
+ stat_route_dict : *stat_route_dict
+ multiplier : 0.7
+ cores : 1
+ bw_per_core : 3.837
+
+
+test_static_routing_imix_asymmetric:
+ stat_route_dict : *stat_route_dict
+ multiplier : 0.8
+ cores : 1
+ bw_per_core : 3.939
+
+
+test_ipv6_simple:
+ multiplier : 1.5
+ cores : 2
+ bw_per_core : 4.719
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 550
+ cores : 1
+ allow_timeout_dev : True
+ bw_per_core : 7.465
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning:
+ << : *test_nat_simple
+ bw_per_core : 7.377
+ nat_opened : 40000
+
+
+test_nbar_simple:
+ multiplier : 1.5
+ cores : 2
+ bw_per_core : 4.465
+ nbar_classification:
+ http : 30.3
+ rtp_audio : 21.06
+ oracle_sqlnet : 11.25
+ rtp : 11.1
+ exchange : 10.16
+ citrix : 5.6
+ rtsp : 2.84
+ sctp : 0.65
+ ssl : 0.8
+ sip : 0.09
+ dns : 1.95
+ smtp : 0.57
+ pop3 : 0.36
+ unknown : 3.19
+
+
+test_rx_check_http: &rx_http
+ multiplier : 2200
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 8.142
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 8.591
+
+test_rx_check_http_negative:
+ << : *rx_http
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ bw_per_core : 8.037
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 1.7
+ cores : 2
+ rx_sample_rate : 16
+ bw_per_core : 4.473
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 4.773
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex-dan/config.yaml b/scripts/automation/regression/setups/trex-dan/config.yaml
new file mode 100644
index 00000000..fbed3cb7
--- /dev/null
+++ b/scripts/automation/regression/setups/trex-dan/config.yaml
@@ -0,0 +1,68 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - DNS name or IP address of the TRex machine, used to ssh into the box
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback, virtual etc.)
+# * loopback - TRex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname, as appears in the ______# CLI prefix
+# ip_address - the router's IP address, used to communicate with it
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the router's interface configurations
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+# The "clean" configuration is a mandatory configuration the router loads to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : trex-dan
+ cores : 2
+ modes : [VM]
+
+router:
+ model : 1RU
+ hostname : ASR1001_T-Rex
+ ip_address : 10.56.199.247
+ image : asr1001-universalk9.BLD_V155_1_S_XE314_THROTTLE_LATEST_20141112_090734-std.bin
+ #image : asr1001-universalk9.BLD_V155_2_S_XE315_THROTTLE_LATEST_20150121_110036-std.bin
+ #image : asr1001-universalk9.BLD_V155_2_S_XE315_THROTTLE_LATEST_20150324_100047-std.bin
+ line_password : lab
+ en_password : lab
+ mgmt_interface : GigabitEthernet0/0/0
+ clean_config : /Configurations/danklei/asr1001_TRex_clean_config.cfg
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : GigabitEthernet0/0/1
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f20.e6ce
+ server :
+ name : GigabitEthernet0/0/2
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f20.e6cf
+ vrf_name : null
+
+tftp:
+ hostname : ats-asr-srv-1
+ ip_address : 10.56.128.23
+ root_dir : /auto/avc-devtest/
+ images_path : /images/1RU/
diff --git a/scripts/automation/regression/setups/trex04/benchmark.yaml b/scripts/automation/regression/setups/trex04/benchmark.yaml
new file mode 100644
index 00000000..b366b3fb
--- /dev/null
+++ b/scripts/automation/regression/setups/trex04/benchmark.yaml
@@ -0,0 +1,155 @@
+################################################################
+#### TRex benchmark configuration file ####
+################################################################
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 2.8
+ cores : 1
+ bw_per_core : 106.652
+
+
+test_routing_imix:
+ multiplier : 0.5
+ cores : 1
+ bw_per_core : 11.577
+
+
+test_routing_imix_64:
+ multiplier : 28
+ cores : 1
+ bw_per_core : 2.030
+
+
+test_static_routing_imix_asymmetric:
+ multiplier : 0.8
+ cores : 1
+ bw_per_core : 13.742
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 4, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex04/config.yaml b/scripts/automation/regression/setups/trex04/config.yaml
new file mode 100644
index 00000000..bf1c68e6
--- /dev/null
+++ b/scripts/automation/regression/setups/trex04/config.yaml
@@ -0,0 +1,39 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - DNS name or IP address of the TRex machine, used to ssh into the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback, virtual etc.)
+# * loopback - TRex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc. have their limitations in tests)
+
+### Router configuration:
+# hostname - the router hostname, as appears in the ______# CLI prefix
+# ip_address - the router's IP address, used to communicate with it
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the router's interface configurations
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+# The "clean" configuration is a mandatory configuration the router loads to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : csi-trex-04
+ cores : 1
+ modes : [loopback, virt_nics, VM]
diff --git a/scripts/automation/regression/setups/trex07/benchmark.yaml b/scripts/automation/regression/setups/trex07/benchmark.yaml
new file mode 100644
index 00000000..0dc340b0
--- /dev/null
+++ b/scripts/automation/regression/setups/trex07/benchmark.yaml
@@ -0,0 +1,244 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+#### common templates ###
+
+stat_route_dict: &stat_route_dict
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+
+nat_dict: &nat_dict
+ clients_net_start : 16.0.0.0
+ client_acl_wildcard_mask : 0.0.0.255
+ dual_port_mask : 1.0.0.0
+ pool_start : 200.0.0.0
+ pool_netmask : 255.255.255.0
+
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 17
+ cores : 1
+ bw_per_core : 543.232
+
+
+test_routing_imix:
+ multiplier : 10
+ cores : 1
+ bw_per_core : 34.128
+
+
+test_routing_imix_64:
+ multiplier : 430
+ cores : 1
+ bw_per_core : 5.893
+
+
+test_static_routing_imix: &test_static_routing_imix
+ stat_route_dict : *stat_route_dict
+ multiplier : 8
+ cores : 1
+ bw_per_core : 34.339
+
+test_static_routing_imix_asymmetric: *test_static_routing_imix
+
+
+test_ipv6_simple:
+ multiplier : 9
+ cores : 2
+ bw_per_core : 19.064
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 6000
+ cores : 1
+ nat_opened : 500000
+ allow_timeout_dev : True
+ bw_per_core : 44.445
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning: *test_nat_simple
+
+
+test_nbar_simple:
+ multiplier : 7.5
+ cores : 2
+ bw_per_core : 17.174
+ nbar_classification:
+ rtp : 32.57
+ http : 30.25
+ oracle_sqlnet : 11.23
+ exchange : 10.80
+ citrix : 5.62
+ rtsp : 2.84
+ dns : 1.95
+ smtp : 0.57
+ pop3 : 0.36
+ ssl : 0.17
+ sctp : 0.13
+ sip : 0.09
+ unknown : 3.41
+
+
+test_rx_check_http: &rx_http
+ multiplier : 15000
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 39.560
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 49.237
+
+test_rx_check_http_negative_disabled:
+ << : *rx_http
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 10
+ cores : 3
+ rx_sample_rate : 16
+ bw_per_core : 16.082
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 19.198
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex07/config.yaml b/scripts/automation/regression/setups/trex07/config.yaml
new file mode 100644
index 00000000..db6e9bf8
--- /dev/null
+++ b/scripts/automation/regression/setups/trex07/config.yaml
@@ -0,0 +1,66 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - DNS name or IP address of the TRex machine, used to ssh into the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback etc.)
+# * loopback - TRex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname, as appears in the ______# CLI prefix
+# ip_address - the router's IP address, used to communicate with it
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the router's interface configurations
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+# The "clean" configuration is a mandatory configuration the router loads to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : csi-trex-07
+ cores : 4
+
+router:
+ model : ASR1001x
+ hostname : csi-asr-01
+ ip_address : 10.56.216.120
+ image : asr1001x-universalk9.03.13.02.S.154-3.S2-ext.SPA.bin
+ line_password : cisco
+ en_password : cisco
+ mgmt_interface : GigabitEthernet0
+ clean_config : clean_config.cfg
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : Te0/0/0
+ src_mac_addr : 0000.0001.0002
+ dest_mac_addr : 0000.0001.0001
+ server :
+ name : Te0/0/1
+ src_mac_addr : 0000.0002.0002
+ dest_mac_addr : 0000.0002.0001
+ vrf_name : null
+
+tftp:
+ hostname : ats-asr-srv-1
+ ip_address : 10.56.217.7
+ root_dir : /scratch/tftp/
+ images_path : /asr1001x/
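A setup's config.yaml is plain YAML and can be read directly; a minimal sketch assuming PyYAML (the path and variable names are illustrative):

    import yaml

    with open('scripts/automation/regression/setups/trex07/config.yaml') as f:
        cfg = yaml.safe_load(f)

    trex_host = cfg['trex']['hostname']          # 'csi-trex-07'
    cores     = cfg['trex']['cores']             # 4
    router    = cfg.get('router')                # may be absent on loopback setups
    if router:
        client_if = router['interfaces'][0]['client']['name']   # 'Te0/0/0'
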
diff --git a/scripts/automation/regression/setups/trex08/benchmark.yaml b/scripts/automation/regression/setups/trex08/benchmark.yaml
new file mode 100644
index 00000000..8f83e8f9
--- /dev/null
+++ b/scripts/automation/regression/setups/trex08/benchmark.yaml
@@ -0,0 +1,181 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 150
+ cores : 2
+ bw_per_core : 962.464
+
+
+test_routing_imix:
+ multiplier : 80
+ cores : 4
+ bw_per_core : 55.130
+
+
+test_routing_imix_64:
+ multiplier : 8000
+ cores : 7
+ bw_per_core : 11.699
+
+
+test_static_routing_imix_asymmetric:
+ multiplier : 70
+ cores : 3
+ bw_per_core : 50.561
+
+
+test_ipv6_simple:
+ multiplier : 80
+ cores : 7
+ bw_per_core : 25.948
+
+
+test_rx_check_http: &rx_http
+ multiplier : 99000
+ cores : 3
+ rx_sample_rate : 128
+ bw_per_core : 49.464
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 49.237
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 80
+ cores : 7
+ rx_sample_rate : 128
+ bw_per_core : 20.871
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 19.198
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory; queue full if memory is increased
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
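The &rx_http / << : *rx_http pattern in the file above is the standard YAML merge key: the aliasing mapping inherits every field of the anchored one and may override individual values. A self-contained demonstration, assuming PyYAML (whose SafeLoader resolves merge keys):

    import yaml

    doc = """
    test_rx_check_http: &rx_http
      multiplier  : 99000
      cores       : 3
      bw_per_core : 49.464

    test_rx_check_http_ipv6:
      << : *rx_http
      bw_per_core : 49.237
    """
    data = yaml.safe_load(doc)
    # The IPv6 variant inherits multiplier and cores, overriding only bw_per_core:
    assert data['test_rx_check_http_ipv6'] == {
        'multiplier': 99000, 'cores': 3, 'bw_per_core': 49.237}
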
diff --git a/scripts/automation/regression/setups/trex08/config.yaml b/scripts/automation/regression/setups/trex08/config.yaml
new file mode 100644
index 00000000..affe9bc9
--- /dev/null
+++ b/scripts/automation/regression/setups/trex08/config.yaml
@@ -0,0 +1,40 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - DNS name or IP of the TRex machine, used to ssh to the box
+# password - root password for the TRex machine
+# is_dual - should TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by TRex
+# modes - list of modes (tags) of this setup (loopback etc.)
+# * loopback - TRex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - virtual OS (tolerate low CPU utilization in tests; latency can spike)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname as it appears in the ______# CLI prefix
+# ip_address - the router's IP address, used to communicate with it
+# image - the image to be loaded as the router's running config
+# line_password - router password when accessing via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interface configurations of the router
+# configurations - an array of configurations that may be loaded into the router during the test.
+# The "clean" configuration is mandatory; the router loads it to run the basic test bench.
+
+### TFTP configuration:
+# hostname - the TFTP server hostname
+# ip_address - the TFTP server's IP address
+# images_path - the relative path on the TFTP server where the router images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) expected from the test
+
+trex:
+ hostname : csi-trex-08
+ cores : 7
+ modes : ['loopback']
+
diff --git a/scripts/automation/regression/setups/trex09/benchmark.yaml b/scripts/automation/regression/setups/trex09/benchmark.yaml
new file mode 100644
index 00000000..d1f5f56c
--- /dev/null
+++ b/scripts/automation/regression/setups/trex09/benchmark.yaml
@@ -0,0 +1,234 @@
+################################################################
+#### TRex benchmark configuration file ####
+################################################################
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 110
+ cores : 1
+ bw_per_core : 767.198
+
+
+test_routing_imix:
+ multiplier : 64
+ cores : 2
+ bw_per_core : 35.889
+
+
+test_routing_imix_64:
+ multiplier : 5000
+ cores : 2
+ bw_per_core : 10.672
+
+
+test_static_routing_imix_asymmetric:
+ multiplier : 32
+ cores : 1
+ bw_per_core : 52.738
+
+
+test_ipv6_simple:
+ multiplier : 64
+ cores : 3
+ bw_per_core : 22.808
+
+
+test_rx_check_http: &rx_http
+ multiplier : 90000
+ cores : 2
+ rx_sample_rate : 32
+ bw_per_core : 46.075
+
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 49.237
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 50
+ cores : 3
+ rx_sample_rate : 32
+ bw_per_core : 20.469
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 19.198
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory; queue full if memory is increased
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
+# performance tests
+
+test_performance_vm_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 16.2
+ max: 17.3
+
+
+test_performance_vm_single_cpu_cached:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 29.5
+ max: 31.2
+
+
+
+test_performance_syn_attack_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 12.9
+ max: 14.5
+
+test_performance_vm_multi_cpus:
+ cfg:
+ core_count : 2
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 15.2
+ max: 16.3
+
+
+test_performance_vm_multi_cpus_cached:
+ cfg:
+ core_count : 2
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 28.8
+ max: 29.5
+
+test_performance_syn_attack_multi_cpus:
+ cfg:
+ core_count : 2
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 13.0
+ max: 13.8
+
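The performance tests above state their goldens as a min/max window on MPPS per core; validating a run is a plain range check. Sketch, assuming PyYAML (the test name and measured value below are examples):

    import yaml

    def within_golden(benchmark_path, test_name, measured_mpps_per_core):
        # True if the measured per-core rate falls inside the golden window.
        with open(benchmark_path) as f:
            bench = yaml.safe_load(f)
        golden = bench[test_name]['cfg']['mpps_per_core_golden']
        return golden['min'] <= measured_mpps_per_core <= golden['max']

    # e.g. within_golden('benchmark.yaml', 'test_performance_vm_single_cpu', 16.8)
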
diff --git a/scripts/automation/regression/setups/trex09/config.yaml b/scripts/automation/regression/setups/trex09/config.yaml
new file mode 100644
index 00000000..724de6e7
--- /dev/null
+++ b/scripts/automation/regression/setups/trex09/config.yaml
@@ -0,0 +1,38 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - DNS name or IP of the TRex machine, used to ssh to the box
+# password - root password for the TRex machine
+# is_dual - should TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by TRex
+# modes - list of modes (tags) of this setup (loopback, virtual etc.)
+# * loopback - TRex works via loopback. Router and TFTP configurations may be skipped.
+# * virtual - virtual OS (tolerate low CPU utilization in tests)
+
+### Router configuration:
+# hostname - the router hostname as it appears in the ______# CLI prefix
+# ip_address - the router's IP address, used to communicate with it
+# image - the image to be loaded as the router's running config
+# line_password - router password when accessing via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interface configurations of the router
+# configurations - an array of configurations that may be loaded into the router during the test.
+# The "clean" configuration is mandatory; the router loads it to run the basic test bench.
+
+### TFTP configuration:
+# hostname - the TFTP server hostname
+# ip_address - the TFTP server's IP address
+# images_path - the relative path on the TFTP server where the router images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) expected from the test
+
+trex:
+ hostname : csi-trex-09
+ cores : 2
+ modes : ['loopback']
diff --git a/scripts/automation/regression/setups/trex10/benchmark.yaml b/scripts/automation/regression/setups/trex10/benchmark.yaml
new file mode 100644
index 00000000..fb900cbb
--- /dev/null
+++ b/scripts/automation/regression/setups/trex10/benchmark.yaml
@@ -0,0 +1,60 @@
+################################################################
+#### TRex benchmark configuration file ####
+################################################################
+
+
+test_rx_check :
+ multiplier : 0.8
+ cores : 1
+ rx_sample_rate : 128
+ exp_gbps : 0.5
+ cpu_to_core_ratio : 37270000
+ exp_bw : 1
+ exp_latency : 1
+
+
+test_routing_imix_64 :
+ multiplier : 37
+ cores : 1
+ cpu_to_core_ratio : 280
+ exp_latency : 1
+
+test_routing_imix :
+ multiplier : 0.8
+ cores : 1
+ cpu_to_core_ratio : 1800
+ exp_latency : 1
+
+test_static_routing_imix :
+ stat_route_dict :
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+ multiplier : 0.8
+ cores : 1
+ cpu_to_core_ratio : 1800
+ exp_latency : 1
+
+test_static_routing_imix_asymmetric:
+ stat_route_dict :
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+ multiplier : 0.8
+ cores : 1
+ cpu_to_core_ratio : 1800
+ exp_latency : 1
+
+test_ipv6_simple :
+ multiplier : 0.5
+ cores : 1
+ cpu_to_core_ratio : 30070000
+ cpu2core_custom_dev: YES
+ cpu2core_dev : 0.07
+
+
+
diff --git a/scripts/automation/regression/setups/trex10/config.yaml b/scripts/automation/regression/setups/trex10/config.yaml
new file mode 100644
index 00000000..8b031c88
--- /dev/null
+++ b/scripts/automation/regression/setups/trex10/config.yaml
@@ -0,0 +1,38 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - DNS name or IP of the TRex machine, used to ssh to the box
+# password - root password for the TRex machine
+# is_dual - should TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by TRex
+# modes - list of modes (tags) of this setup (loopback, virtual etc.)
+# * loopback - TRex works via loopback. Router and TFTP configurations may be skipped.
+# * virtual - virtual OS (tolerate low CPU utilization in tests)
+
+### Router configuration:
+# hostname - the router hostname as it appears in the ______# CLI prefix
+# ip_address - the router's IP address, used to communicate with it
+# image - the image to be loaded as the router's running config
+# line_password - router password when accessing via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interface configurations of the router
+# configurations - an array of configurations that may be loaded into the router during the test.
+# The "clean" configuration is mandatory; the router loads it to run the basic test bench.
+
+### TFTP configuration:
+# hostname - the TFTP server hostname
+# ip_address - the TFTP server's IP address
+# images_path - the relative path on the TFTP server where the router images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) expected from the test
+
+trex:
+ hostname : csi-trex-10
+ cores : 2
+ modes : [loopback, virtual]
diff --git a/scripts/automation/regression/setups/trex11/benchmark.yaml b/scripts/automation/regression/setups/trex11/benchmark.yaml
new file mode 100644
index 00000000..b366b3fb
--- /dev/null
+++ b/scripts/automation/regression/setups/trex11/benchmark.yaml
@@ -0,0 +1,155 @@
+################################################################
+#### TRex benchmark configuration file ####
+################################################################
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 2.8
+ cores : 1
+ bw_per_core : 106.652
+
+
+test_routing_imix:
+ multiplier : 0.5
+ cores : 1
+ bw_per_core : 11.577
+
+
+test_routing_imix_64:
+ multiplier : 28
+ cores : 1
+ bw_per_core : 2.030
+
+
+test_static_routing_imix_asymmetric:
+ multiplier : 0.8
+ cores : 1
+ bw_per_core : 13.742
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory; queue full if memory is increased
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 4, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex11/config.yaml b/scripts/automation/regression/setups/trex11/config.yaml
new file mode 100644
index 00000000..782b7542
--- /dev/null
+++ b/scripts/automation/regression/setups/trex11/config.yaml
@@ -0,0 +1,38 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - DNS name or IP of the TRex machine, used to ssh to the box
+# password - root password for the TRex machine
+# is_dual - should TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by TRex
+# modes - list of modes (tags) of this setup (loopback, virtual etc.)
+# * loopback - TRex works via loopback. Router and TFTP configurations may be skipped.
+# * virtual - virtual OS (tolerate low CPU utilization in tests)
+
+### Router configuration:
+# hostname - the router hostname as it appears in the ______# CLI prefix
+# ip_address - the router's IP address, used to communicate with it
+# image - the image to be loaded as the router's running config
+# line_password - router password when accessing via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interface configurations of the router
+# configurations - an array of configurations that may be loaded into the router during the test.
+# The "clean" configuration is mandatory; the router loads it to run the basic test bench.
+
+### TFTP configuration:
+# hostname - the TFTP server hostname
+# ip_address - the TFTP server's IP address
+# images_path - the relative path on the TFTP server where the router images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) expected from the test
+
+trex:
+ hostname : csi-trex-11
+ cores : 1
+ modes : ['loopback', 'VM', 'virt_nics']
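The modes tags above are what drives test gating: a setup tagged 'loopback' skips the router- and TFTP-dependent tests entirely. A rough sketch of that check, assuming PyYAML (the helper name is illustrative, not the actual harness API):

    import yaml

    def runs_router_tests(config_path):
        # Router-dependent tests only apply when the setup is not loopback.
        with open(config_path) as f:
            cfg = yaml.safe_load(f)
        return 'loopback' not in cfg['trex'].get('modes', [])
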
diff --git a/scripts/automation/regression/setups/trex12/benchmark.yaml b/scripts/automation/regression/setups/trex12/benchmark.yaml
new file mode 100644
index 00000000..87bd3114
--- /dev/null
+++ b/scripts/automation/regression/setups/trex12/benchmark.yaml
@@ -0,0 +1,182 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 14
+ cores : 1
+ bw_per_core : 689.664
+
+
+test_routing_imix:
+ multiplier : 8
+ cores : 1
+ bw_per_core : 45.422
+
+
+test_routing_imix_64:
+ multiplier : 2200
+ cores : 1
+ bw_per_core : 11.655
+
+
+test_static_routing_imix_asymmetric:
+ multiplier : 4
+ cores : 1
+ bw_per_core : 45.294
+
+
+test_ipv6_simple:
+ multiplier : 8
+ cores : 1
+ bw_per_core : 29.332
+
+
+test_rx_check_http: &rx_http
+ multiplier : 11000
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 47.813
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 55.607
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 8
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 24.203
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 28.867
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory; queue full if memory is increased
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex12/config.yaml b/scripts/automation/regression/setups/trex12/config.yaml
new file mode 100644
index 00000000..f8c37c6b
--- /dev/null
+++ b/scripts/automation/regression/setups/trex12/config.yaml
@@ -0,0 +1,40 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - DNS name or IP of the TRex machine, used to ssh to the box
+# password - root password for the TRex machine
+# is_dual - should TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by TRex
+# modes - list of modes (tags) of this setup (loopback etc.)
+# * loopback - TRex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - virtual OS (tolerate low CPU utilization in tests; latency can spike)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname as it appears in the ______# CLI prefix
+# ip_address - the router's IP address, used to communicate with it
+# image - the image to be loaded as the router's running config
+# line_password - router password when accessing via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interface configurations of the router
+# configurations - an array of configurations that may be loaded into the router during the test.
+# The "clean" configuration is mandatory; the router loads it to run the basic test bench.
+
+### TFTP configuration:
+# hostname - the TFTP server hostname
+# ip_address - the TFTP server's IP address
+# images_path - the relative path on the TFTP server where the router images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) expected from the test
+
+trex:
+ hostname : csi-trex-12
+ cores : 1
+ modes : ['loopback', '1G', 'VM']
+
diff --git a/scripts/automation/regression/setups/trex14/benchmark.yaml b/scripts/automation/regression/setups/trex14/benchmark.yaml
new file mode 100644
index 00000000..04f13e79
--- /dev/null
+++ b/scripts/automation/regression/setups/trex14/benchmark.yaml
@@ -0,0 +1,245 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+### common templates ###
+
+stat_route_dict: &stat_route_dict
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+
+nat_dict: &nat_dict
+ clients_net_start : 16.0.0.0
+ client_acl_wildcard_mask : 0.0.0.255
+ dual_port_mask : 1.0.0.0
+ pool_start : 200.0.0.0
+ pool_netmask : 255.255.255.0
+
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 17
+ cores : 1
+ bw_per_core : 543.232
+
+
+test_routing_imix:
+ multiplier : 10
+ cores : 1
+ bw_per_core : 34.128
+
+
+test_routing_imix_64:
+ multiplier : 430
+ cores : 1
+ bw_per_core : 5.893
+
+
+test_static_routing_imix: &test_static_routing_imix
+ stat_route_dict : *stat_route_dict
+ multiplier : 8
+ cores : 1
+ bw_per_core : 34.339
+
+test_static_routing_imix_asymmetric: *test_static_routing_imix
+
+
+test_ipv6_simple:
+ multiplier : 9
+ cores : 2
+ bw_per_core : 19.064
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 6000
+ cores : 1
+ nat_opened : 500000
+ allow_timeout_dev : True
+ bw_per_core : 44.445
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning: *test_nat_simple
+
+
+test_nbar_simple:
+ multiplier : 7.5
+ cores : 2
+ bw_per_core : 17.174
+ nbar_classification:
+ http : 32.58
+ rtp-audio : 21.21
+ oracle_sqlnet : 11.41
+ exchange : 11.22
+ rtp : 11.2
+ citrix : 5.65
+ rtsp : 2.87
+ dns : 1.96
+ smtp : 0.57
+ pop3 : 0.37
+ ssl : 0.28
+ sctp : 0.13
+ sip : 0.09
+ unknown : 0.45
+
+
+test_rx_check_http: &rx_http
+ multiplier : 15000
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 39.560
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 49.237
+
+test_rx_check_http_negative_disabled:
+ << : *rx_http
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 10
+ cores : 3
+ rx_sample_rate : 16
+ bw_per_core : 16.082
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 19.198
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory; queue full if memory is increased
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
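Besides << merges, this file reuses entire test definitions by plain alias (test_nat_simple_mode2: *test_nat_simple and friends); a plain alias shares the anchored node itself, so the aliased tests carry exactly the same parameters with no overrides. Demonstration under PyYAML:

    import yaml

    doc = """
    test_nat_simple_mode1: &nat
      multiplier : 6000
      cores      : 1

    test_nat_simple_mode2: *nat
    """
    data = yaml.safe_load(doc)
    # A plain alias resolves to the same constructed object, not a copy:
    assert data['test_nat_simple_mode2'] is data['test_nat_simple_mode1']
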
diff --git a/scripts/automation/regression/setups/trex14/config.yaml b/scripts/automation/regression/setups/trex14/config.yaml
new file mode 100644
index 00000000..0fd6b70e
--- /dev/null
+++ b/scripts/automation/regression/setups/trex14/config.yaml
@@ -0,0 +1,67 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - DNS name or IP of the TRex machine, used to ssh to the box
+# password - root password for the TRex machine
+# is_dual - should TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by TRex
+# modes - list of modes (tags) of this setup (loopback etc.)
+# * loopback - TRex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - virtual OS (tolerate low CPU utilization in tests; latency can spike)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname as it appears in the ______# CLI prefix
+# ip_address - the router's IP address, used to communicate with it
+# image - the image to be loaded as the router's running config
+# line_password - router password when accessing via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interface configurations of the router
+# configurations - an array of configurations that may be loaded into the router during the test.
+# The "clean" configuration is mandatory; the router loads it to run the basic test bench.
+
+### TFTP configuration:
+# hostname - the TFTP server hostname
+# ip_address - the TFTP server's IP address
+# images_path - the relative path on the TFTP server where the router images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) expected from the test
+
+trex:
+ hostname : csi-trex-14
+ cores : 4
+ modes : []
+
+router:
+ model : ASR1001x
+ hostname : csi-asr-01
+ ip_address : 10.56.216.103
+ image : asr1001x-universalk9.03.17.00.S.156-1.S-std.SPA.bin
+ line_password : cisco
+ en_password : cisco
+ mgmt_interface : GigabitEthernet0
+ clean_config : /Configurations/danklei/asr1001_TRex_clean_config.cfg
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : Te0/0/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : Te0/0/1
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name : null
+
+tftp:
+ hostname : ats-asr-srv-1
+ ip_address : 10.56.217.7
+ root_dir : /scratch/tftp/
+ images_path : /asr1001x/
diff --git a/scripts/automation/regression/setups/trex15/benchmark.yaml b/scripts/automation/regression/setups/trex15/benchmark.yaml
new file mode 100644
index 00000000..b366b3fb
--- /dev/null
+++ b/scripts/automation/regression/setups/trex15/benchmark.yaml
@@ -0,0 +1,155 @@
+################################################################
+#### TRex benchmark configuration file ####
+################################################################
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 2.8
+ cores : 1
+ bw_per_core : 106.652
+
+
+test_routing_imix:
+ multiplier : 0.5
+ cores : 1
+ bw_per_core : 11.577
+
+
+test_routing_imix_64:
+ multiplier : 28
+ cores : 1
+ bw_per_core : 2.030
+
+
+test_static_routing_imix_asymmetric:
+ multiplier : 0.8
+ cores : 1
+ bw_per_core : 13.742
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory; queue full if memory is increased
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 4, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex15/config.yaml b/scripts/automation/regression/setups/trex15/config.yaml
new file mode 100644
index 00000000..c5fc3b22
--- /dev/null
+++ b/scripts/automation/regression/setups/trex15/config.yaml
@@ -0,0 +1,39 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - DNS name or IP of the TRex machine, used to ssh to the box
+# password - root password for the TRex machine
+# is_dual - should TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by TRex
+# modes - list of modes (tags) of this setup (loopback, virtual etc.)
+# * loopback - TRex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - virtual OS (tolerate low CPU utilization in tests; latency can spike)
+# * virt_nics - NICs are virtual (VMXNET3 etc.; they have limitations in tests)
+
+### Router configuration:
+# hostname - the router hostname as it appears in the ______# CLI prefix
+# ip_address - the router's IP address, used to communicate with it
+# image - the image to be loaded as the router's running config
+# line_password - router password when accessing via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interface configurations of the router
+# configurations - an array of configurations that may be loaded into the router during the test.
+# The "clean" configuration is mandatory; the router loads it to run the basic test bench.
+
+### TFTP configuration:
+# hostname - the TFTP server hostname
+# ip_address - the TFTP server's IP address
+# images_path - the relative path on the TFTP server where the router images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) expected from the test
+
+trex:
+ hostname : csi-trex-15
+ cores : 1
+ modes : [loopback, virt_nics, VM]
diff --git a/scripts/automation/regression/setups/trex17/benchmark.yaml b/scripts/automation/regression/setups/trex17/benchmark.yaml
new file mode 100644
index 00000000..8bc9d29c
--- /dev/null
+++ b/scripts/automation/regression/setups/trex17/benchmark.yaml
@@ -0,0 +1,155 @@
+################################################################
+#### TRex benchmark configuration file ####
+################################################################
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 2.8
+ cores : 1
+ bw_per_core : 66.489
+
+
+test_routing_imix:
+ multiplier : 0.5
+ cores : 1
+ bw_per_core : 5.530
+
+
+test_routing_imix_64:
+ multiplier : 28
+ cores : 1
+ bw_per_core : 0.859
+
+
+test_static_routing_imix_asymmetric:
+ multiplier : 0.5
+ cores : 1
+ bw_per_core : 9.635
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory; queue full if memory is increased
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex17/config.yaml b/scripts/automation/regression/setups/trex17/config.yaml
new file mode 100644
index 00000000..7ad6a20a
--- /dev/null
+++ b/scripts/automation/regression/setups/trex17/config.yaml
@@ -0,0 +1,39 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - DNS name or IP of the TRex machine, used to ssh to the box
+# password - root password for the TRex machine
+# is_dual - should TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by TRex
+# modes - list of modes (tags) of this setup (loopback, virtual etc.)
+# * loopback - TRex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - virtual OS (tolerate low CPU utilization in tests; latency can spike)
+# * virt_nics - NICs are virtual (VMXNET3 etc.; they have limitations in tests)
+
+### Router configuration:
+# hostname - the router hostname as it appears in the ______# CLI prefix
+# ip_address - the router's IP address, used to communicate with it
+# image - the image to be loaded as the router's running config
+# line_password - router password when accessing via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interface configurations of the router
+# configurations - an array of configurations that may be loaded into the router during the test.
+# The "clean" configuration is mandatory; the router loads it to run the basic test bench.
+
+### TFTP configuration:
+# hostname - the TFTP server hostname
+# ip_address - the TFTP server's IP address
+# images_path - the relative path on the TFTP server where the router images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) expected from the test
+
+trex:
+ hostname : csi-trex-17
+ cores : 1
+ modes : [loopback, virt_nics, VM]
diff --git a/scripts/automation/regression/setups/trex24/benchmark.yaml b/scripts/automation/regression/setups/trex24/benchmark.yaml
new file mode 100644
index 00000000..ddedd844
--- /dev/null
+++ b/scripts/automation/regression/setups/trex24/benchmark.yaml
@@ -0,0 +1,155 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 2.8
+ cores : 1
+ bw_per_core : 67.030
+
+
+test_routing_imix:
+ multiplier : 1
+ cores : 1
+ bw_per_core : 3.979
+
+
+test_routing_imix_64:
+ multiplier : 50
+ cores : 1
+  bw_per_core       : 0.681
+
+
+test_static_routing_imix_asymmetric:
+ multiplier : 0.5
+ cores : 1
+ bw_per_core : 13.742
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory; queue full if memory is increased
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 5, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex24/config.yaml b/scripts/automation/regression/setups/trex24/config.yaml
new file mode 100644
index 00000000..f4eecdf9
--- /dev/null
+++ b/scripts/automation/regression/setups/trex24/config.yaml
@@ -0,0 +1,39 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - DNS name or IP of the TRex machine, used to ssh to the box
+# is_dual - should TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by TRex
+# modes - list of modes (tags) of this setup (loopback, virtual etc.)
+# * loopback - TRex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - virtual OS (tolerate low CPU utilization in tests; latency can spike)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname as it appears in the ______# CLI prefix
+# ip_address - the router's IP address, used to communicate with it
+# image - the image to be loaded as the router's running config
+# line_password - router password when accessing via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interface configurations of the router
+# configurations - an array of configurations that may be loaded into the router during the test.
+# The "clean" configuration is mandatory; the router loads it to run the basic test bench.
+
+### TFTP configuration:
+# hostname - the TFTP server hostname
+# ip_address - the TFTP server's IP address
+# images_path - the relative path on the TFTP server where the router images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) expected from the test
+
+trex:
+ hostname : csi-trex-24
+ cores : 1
+ modes : [VM, virt_nics, loopback]
+
diff --git a/scripts/automation/regression/setups/trex25/benchmark.yaml b/scripts/automation/regression/setups/trex25/benchmark.yaml
new file mode 100644
index 00000000..ccbdf6f5
--- /dev/null
+++ b/scripts/automation/regression/setups/trex25/benchmark.yaml
@@ -0,0 +1,254 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+### common templates ###
+
+stat_route_dict: &stat_route_dict
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+
+nat_dict: &nat_dict
+ clients_net_start : 16.0.0.0
+ client_acl_wildcard_mask : 0.0.0.255
+ dual_port_mask : 1.0.0.0
+ pool_start : 200.0.0.0
+ pool_netmask : 255.255.255.0
+
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 6
+ cores : 1
+ bw_per_core : 443.970
+
+
+test_routing_imix:
+ multiplier : 4
+ cores : 1
+ bw_per_core : 26.509
+
+
+test_routing_imix_64:
+ multiplier : 600
+ cores : 1
+ bw_per_core : 6.391
+
+
+test_static_routing_imix:
+ stat_route_dict : *stat_route_dict
+ multiplier : 2.8
+ cores : 1
+ bw_per_core : 24.510
+
+
+
+test_static_routing_imix_asymmetric:
+ stat_route_dict : *stat_route_dict
+ multiplier : 3.2
+ cores : 1
+ bw_per_core : 28.229
+
+
+test_ipv6_simple:
+ multiplier : 6
+ cores : 1
+ bw_per_core : 19.185
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 2200
+ cores : 1
+ allow_timeout_dev : True
+ bw_per_core : 32.171
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning:
+ << : *test_nat_simple
+ nat_opened : 40000
+
+
+test_nbar_simple:
+ multiplier : 6
+ cores : 1
+ bw_per_core : 16.645
+ nbar_classification:
+ http : 24.55
+ rtp : 19.15
+ sqlnet : 10.38
+ secure-http : 5.11
+ citrix : 4.68
+ mapi : 4.04
+ dns : 1.56
+ sctp : 0.66
+ smtp : 0.48
+ pop3 : 0.30
+ novadigm : 0.09
+ sip : 0.08
+ h323 : 0.05
+ rtsp : 0.04
+ unknown : 28.52
+
+
+test_rx_check_http: &rx_http
+ multiplier : 8800
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 31.389
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 37.114
+
+test_rx_check_http_negative:
+ << : *rx_http
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 6.8
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 16.063
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 19.663
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+# problem stabilizing CPU utilization on this setup
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 10}
+# cpu_util : 1
+# bw_per_core : 1
+
+# problem stabilizing CPU utilization on this setup
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 100}
+# cpu_util : 1
+# bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex25/config.yaml b/scripts/automation/regression/setups/trex25/config.yaml
new file mode 100644
index 00000000..c8190636
--- /dev/null
+++ b/scripts/automation/regression/setups/trex25/config.yaml
@@ -0,0 +1,93 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - DNS name or IP of the TRex machine, used to ssh into the box
+# is_dual - should TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by TRex
+# modes - list of modes (tags) of this setup (loopback, virtual etc.)
+# * loopback - TRex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - virtual OS (expect low CPU utilization in tests; latency can spike)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname, as appears in the ______# CLI prefix
+# ip_address - the router's IP, used to communicate with it
+# image - the image to be loaded as the router's running image
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the router's interface configurations
+# configurations - an array of configurations that may be loaded into the router during the test.
+# The "clean" configuration is mandatory; the router loads it to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) expected from the test
+
+trex:
+ hostname : csi-trex-25
+ cores : 2
+ modes : ['1G']
+
+router:
+ model : ASR1004(RP2)
+ hostname : csi-mcp-asr1k-4ru-12
+ ip_address : 10.56.217.181
+ image : asr1000rp2-adventerprisek9.BLD_V151_1_S_XE32_THROTTLE_LATEST_20100926_034325_2.bin
+ line_password : cisco
+ en_password : cisco
+ mgmt_interface : GigabitEthernet0/0/0
+ clean_config : clean_config.cfg
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : GigabitEthernet0/1/0
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f4d.6a3c
+ server :
+ name : GigabitEthernet0/1/1
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f4d.6a3d
+ vrf_name :
+ - client :
+ name : GigabitEthernet0/1/2
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f4d.6a3e
+ server :
+ name : GigabitEthernet0/1/4
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f4d.6a3f
+ vrf_name :
+ - client :
+ name : GigabitEthernet0/1/5
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f4d.6b78
+ server :
+ name : GigabitEthernet0/1/3
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f4d.6b79
+ vrf_name :
+ - client :
+ name : GigabitEthernet0/1/6
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f4d.6b7a
+ server :
+ name : GigabitEthernet0/1/7
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f4d.6b7b
+ vrf_name :
+
+tftp:
+ hostname : ats-asr-srv-1
+ ip_address : 10.56.128.23
+ root_dir : /auto/avc-devtest/
+ images_path : /images/1RU/
diff --git a/scripts/automation/regression/sshpass.exp b/scripts/automation/regression/sshpass.exp
new file mode 100755
index 00000000..2262290f
--- /dev/null
+++ b/scripts/automation/regression/sshpass.exp
@@ -0,0 +1,17 @@
+#!/usr/cisco/bin/expect -f
+# sample command: ./sshpass.exp password 192.168.1.11 id *
+set pass [lrange $argv 0 0]
+set server [lrange $argv 1 1]
+set name [lrange $argv 2 2]
+set cmd [lrange $argv 3 10]
+
+set cmd_str [join $cmd]
+
+spawn ssh -t $name@$server $cmd_str
+match_max 100000
+expect "*?assword:*"
+send -- "$pass\r"
+send -- "\r"
+expect eof
+wait
+#interact
diff --git a/scripts/automation/regression/stateful_tests/__init__.py b/scripts/automation/regression/stateful_tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/__init__.py
diff --git a/scripts/automation/regression/stateful_tests/tests_exceptions.py b/scripts/automation/regression/stateful_tests/tests_exceptions.py
new file mode 100755
index 00000000..360f44a5
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/tests_exceptions.py
@@ -0,0 +1,37 @@
+#!/router/bin/python
+
+class TRexInUseError(Exception):
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
+
+class TRexRunFailedError(Exception):
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
+
+class TRexIncompleteRunError(Exception):
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
+
+class TRexLowCpuUtilError(Exception):
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
+
+class AbnormalResultError(Exception):
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
+
+class ClassificationMissmatchError(Exception):
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
diff --git a/scripts/automation/regression/stateful_tests/trex_client_pkg_test.py b/scripts/automation/regression/stateful_tests/trex_client_pkg_test.py
new file mode 100755
index 00000000..892be966
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/trex_client_pkg_test.py
@@ -0,0 +1,34 @@
+#!/router/bin/python
+from .trex_general_test import CTRexGeneral_Test, CTRexScenario
+from misc_methods import run_command
+from nose.plugins.attrib import attr
+
+
+@attr('client_package')
+class CTRexClientPKG_Test(CTRexGeneral_Test):
+ """This class tests TRex client package"""
+
+ def setUp(self):
+ CTRexGeneral_Test.setUp(self)
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = 'config')
+ self.unzip_client_package()
+
+ def run_client_package_stf_example(self, python_version):
+ commands = [
+ 'cd %s' % CTRexScenario.scripts_path,
+ 'source find_python.sh --%s' % python_version,
+ 'which $PYTHON',
+ 'cd trex_client/stf/examples',
+ '$PYTHON stf_example.py -s %s' % self.configuration.trex['trex_name'],
+ ]
+ return_code, _, stderr = run_command("bash -ce '%s'" % '; '.join(commands))
+ if return_code:
+ self.fail('Error in running stf_example using %s: %s' % (python_version, stderr))
+
+ def test_client_python2(self):
+ self.run_client_package_stf_example(python_version = 'python2')
+
+ def test_client_python3(self):
+ self.run_client_package_stf_example(python_version = 'python3')
diff --git a/scripts/automation/regression/stateful_tests/trex_general_test.py b/scripts/automation/regression/stateful_tests/trex_general_test.py
new file mode 100755
index 00000000..e968d380
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/trex_general_test.py
@@ -0,0 +1,363 @@
+#!/router/bin/python
+
+__copyright__ = "Copyright 2014"
+
+"""
+Name:
+ trex_general_test.py
+
+
+Description:
+
+ This script implements functionality to test the performance of the TRex traffic generator.
+ The tested scenario is a TRex TG directly connected to a Cisco router.
+
+::
+
+ Topology:
+
+ ------- --------
+ | | Tx---1gig/10gig----Rx | |
+ | TRex | | router |
+ | | Rx---1gig/10gig----Tx | |
+ ------- --------
+
+"""
+from nose.plugins import Plugin
+from nose.plugins.skip import SkipTest
+import trex
+from trex import CTRexScenario
+import misc_methods
+import sys
+import os
+# from CPlatformUnderTest import *
+from CPlatform import *
+import termstyle
+import threading
+from .tests_exceptions import *
+from platform_cmd_link import *
+import unittest
+from glob import glob
+
+def setUpModule(module):
+ pass
+
+def tearDownModule(module):
+ pass
+
+class CTRexGeneral_Test(unittest.TestCase):
+ """This class defines the general stateful testcase of the TRex traffic generator"""
+ def __init__ (self, *args, **kwargs):
+ sys.stdout.flush()
+ unittest.TestCase.__init__(self, *args, **kwargs)
+ if CTRexScenario.is_test_list:
+ return
+ # Point test object to scenario global object
+ self.configuration = CTRexScenario.configuration
+ self.benchmark = CTRexScenario.benchmark
+ self.trex = CTRexScenario.trex
+ self.stl_trex = CTRexScenario.stl_trex
+ self.trex_crashed = CTRexScenario.trex_crashed
+ self.modes = CTRexScenario.modes
+ self.GAManager = CTRexScenario.GAManager
+ self.no_daemon = CTRexScenario.no_daemon
+ self.skipping = False
+ self.fail_reasons = []
+ if not hasattr(self, 'unsupported_modes'):
+ self.unsupported_modes = []
+ self.is_loopback = 'loopback' in self.modes
+ self.is_virt_nics = 'virt_nics' in self.modes
+ self.is_VM = 'VM' in self.modes
+
+ if not CTRexScenario.is_init:
+ if self.trex and not self.no_daemon: # stateful
+ CTRexScenario.trex_version = self.trex.get_trex_version()
+ if not self.is_loopback:
+ # initialize the scenario based on received configuration, once per entire testing session
+ CTRexScenario.router = CPlatform(CTRexScenario.router_cfg['silent_mode'])
+ device_cfg = CDeviceCfg()
+ device_cfg.set_platform_config(CTRexScenario.router_cfg['config_dict'])
+ device_cfg.set_tftp_config(CTRexScenario.router_cfg['tftp_config_dict'])
+ CTRexScenario.router.load_platform_data_from_file(device_cfg)
+ CTRexScenario.router.launch_connection(device_cfg)
+ if CTRexScenario.router_cfg['forceImageReload']:
+ running_image = CTRexScenario.router.get_running_image_details()['image']
+ print('Current router image: %s' % running_image)
+ needed_image = device_cfg.get_image_name()
+ if not CTRexScenario.router.is_image_matches(needed_image):
+ print('Setting router image: %s' % needed_image)
+ CTRexScenario.router.config_tftp_server(device_cfg)
+ CTRexScenario.router.load_platform_image(needed_image)
+ CTRexScenario.router.set_boot_image(needed_image)
+ CTRexScenario.router.reload_platform(device_cfg)
+ CTRexScenario.router.launch_connection(device_cfg)
+ running_image = CTRexScenario.router.get_running_image_details()['image'] # verify image
+ if not CTRexScenario.router.is_image_matches(needed_image):
+ self.fail('Unable to set router image: %s, current image is: %s' % (needed_image, running_image))
+ else:
+ print('Matches needed image: %s' % needed_image)
+ CTRexScenario.router_image = running_image
+
+ if self.modes:
+ print(termstyle.green('\t!!!\tRunning with modes: %s, unsuitable tests will be skipped.\t!!!' % list(self.modes)))
+
+ CTRexScenario.is_init = True
+ print(termstyle.green("Done instantiating TRex scenario!\n"))
+
+# raise RuntimeError('CTRexScenario class is not initialized!')
+ self.router = CTRexScenario.router
+
+
+
+# def assert_dict_eq (self, dict, key, val, error=''):
+# v1 = int(dict[key]))
+# self.assertEqual(v1, int(val), error)
+#
+# def assert_dict_gt (self, d, key, val, error=''):
+# v1 = int(dict[key])
+# self.assert_gt(v1, int(val), error)
+
+ def assertEqual(self, v1, v2, s):
+ if v1 != v2:
+ error = 'ERROR %s != %s %s' % (v1, v2, s)
+ self.fail(error)
+
+ def assert_gt(self, v1, v2, s):
+ if not v1 > v2:
+ error='ERROR {big} < {small} {str}'.format(big = v1, small = v2, str = s)
+ self.fail(error)
+
+ def check_results_eq (self,res,name,val):
+ if res is None:
+ self.fail('TRex results cannot be None!')
+ return
+
+ if name not in res:
+ self.fail('TRex results do not include key %s' % name)
+ return
+
+ if res[name] != float(val):
+ self.fail('TRex results[%s] == %f, not as expected %f' % (name, res[name], val))
+
+ def check_CPU_benchmark (self, trex_res, err = 25, minimal_cpu = 10, maximal_cpu = 85):
+ cpu_util = trex_res.get_avg_steady_state_value('trex-global.data.m_cpu_util_raw')
+ trex_tx_bps = trex_res.get_avg_steady_state_value('trex-global.data.m_tx_bps')
+ expected_norm_cpu = self.get_benchmark_param('bw_per_core')
+ cores = self.get_benchmark_param('cores')
+ ports_count = trex_res.get_ports_count()
+ if not (cpu_util and ports_count and cores):
+ print("Can't calculate CPU benchmark, need to divide by zero: cpu util: %s, ports: %s, cores: %s" % (cpu_util, ports_count, cores))
+ test_norm_cpu = -1
+ else:
+ test_norm_cpu = trex_tx_bps / (cpu_util * ports_count * cores * 2.5e6)
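+ # illustrative arithmetic (not from the source): with cpu_util = 100 (%),
+ # ports_count = 2, cores = 1 and trex_tx_bps = 5e9, this gives
+ # 5e9 / (100 * 2 * 1 * 2.5e6) = 10 Gb per core - the same units as the
+ # bw_per_core values in benchmark.yaml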
+
+ if '1G' in self.modes:
+ minimal_cpu /= 10.0
+
+ if not self.is_virt_nics:
+ if cpu_util > maximal_cpu:
+ self.fail("CPU is too high (%s%%), probably queue full." % cpu_util )
+ #if cpu_util < minimal_cpu:
+ # self.fail("CPU is too low (%s%%), can't verify performance in such low CPU%%." % cpu_util )
+
+ print("TRex CPU utilization: %g%%, norm_cpu is : %g Gb/core" % (round(cpu_util, 2), round(test_norm_cpu, 2)))
+ if test_norm_cpu < 0:
+ return
+
+ if not expected_norm_cpu:
+ expected_norm_cpu = 1
+
+ calc_error_percent = abs(100.0 * test_norm_cpu / expected_norm_cpu - 100)
+ print('Err percent: %s' % calc_error_percent)
+ #if calc_error_percent > err and cpu_util > 10:
+ # self.fail('Expected bw_per_core ratio: %s, got: %g' % (expected_norm_cpu, round(test_norm_cpu)))
+
+ # report benchmarks
+ if self.GAManager:
+ try:
+ pass
+ #setup_test = '%s.%s' % (CTRexScenario.setup_name, self.get_name())
+ #self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core', value = int(test_norm_cpu))
+ #self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core_exp', value = int(expected_norm_cpu))
+ #self.GAManager.emptyAndReportQ()
+ except Exception as e:
+ print('Sending GA failed: %s' % e)
+
+ def check_results_gt (self, res, name, val):
+ if res is None:
+ self.fail('TRex results cannot be None!')
+ return
+
+ if name not in res:
+ self.fail('TRex results do not include key %s' % name)
+ return
+
+ if res[name] < float(val):
+ self.fail('TRex results[%s] < %f, expected greater than %f' % (name, res[name], val))
+
+ def check_for_trex_crash(self):
+ pass
+
+ def get_benchmark_param (self, param, sub_param = None, test_name = None):
+ if not test_name:
+ test_name = self.get_name()
+ if test_name not in self.benchmark:
+ self.skip('No data in benchmark.yaml for test: %s, param: %s. Skipping.' % (test_name, param))
+ if sub_param:
+ return self.benchmark[test_name][param].get(sub_param)
+ else:
+ return self.benchmark[test_name].get(param)
+
+ def check_general_scenario_results (self, trex_res, check_latency = True):
+
+ try:
+ # check if test is valid
+ if not trex_res.is_done_warmup():
+ self.fail('TRex did not reach warm-up situation. Results are not valid.')
+
+ # check history size is enough
+ if len(trex_res._history) < 5:
+ self.fail('TRex results list is too short. Increase the test duration or check for unexpected stopping.')
+
+ # check TRex number of drops
+ trex_tx_pckt = trex_res.get_last_value("trex-global.data.m_total_tx_pkts")
+ trex_drops = trex_res.get_total_drops()
+ trex_drop_rate = trex_res.get_drop_rate()
+ if ( trex_drops > 0.001 * trex_tx_pckt) and (trex_drop_rate > 0.0): # deliberately mask kickoff drops when TRex is first initiated
+ self.fail('Number of packet drops larger than 0.1% of all traffic')
+
+ # check queue full, queue drop, allocation error
+ m_total_alloc_error = trex_res.get_last_value("trex-global.data.m_total_alloc_error")
+ m_total_queue_full = trex_res.get_last_value("trex-global.data.m_total_queue_full")
+ m_total_queue_drop = trex_res.get_last_value("trex-global.data.m_total_queue_drop")
+ self.assert_gt(1000, m_total_alloc_error, 'Got allocation errors (%s), please review multiplier and templates configuration.' % m_total_alloc_error)
+ self.assert_gt(1000, m_total_queue_drop, 'Too many queue drops (%s), please review multiplier.' % m_total_queue_drop)
+
+ if self.is_VM:
+ allowed_queue_full = 10000 + trex_tx_pckt / 100
+ else:
+ allowed_queue_full = 1000 + trex_tx_pckt / 1000
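+ # worked example (illustration only): at 1e8 TX packets the budget is
+ # 10000 + 1e6 = 1,010,000 queue-full events on a VM vs 1000 + 1e5 = 101,000 on bare metal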
+ self.assert_gt(allowed_queue_full, m_total_queue_full, 'Too many queue_full events (%s), please review multiplier.' % m_total_queue_full)
+
+ # # check TRex expected counters
+ #trex_exp_rate = trex_res.get_expected_tx_rate().get('m_tx_expected_bps')
+ #assert trex_exp_rate is not None
+ #trex_exp_gbps = trex_exp_rate/(10**9)
+
+ if check_latency:
+ # check that max latency does not exceed 1 msec
+ if self.configuration.trex['trex_name'] == '10.56.217.210': # temporary workaround for latency issue in kiwi02, remove it ASAP. http://trex-tgn.cisco.com/youtrack/issue/trex-194
+ allowed_latency = 8000
+ elif self.is_VM:
+ allowed_latency = 9999999
+ else: # no excuses, check 1ms
+ allowed_latency = 1000
+ if max(trex_res.get_max_latency().values()) > allowed_latency:
+ self.fail('LatencyError: Maximal latency exceeds %s (usec)' % allowed_latency)
+
+ # check that avg latency does not exceed 1 msec
+ if self.is_VM:
+ allowed_latency = 9999999
+ else: # no excuses, check 1ms
+ allowed_latency = 1000
+ if max(trex_res.get_avg_latency().values()) > allowed_latency:
+ self.fail('LatencyError: Average latency exceeds %s (usec)' % allowed_latency)
+
+ if not self.is_loopback:
+ # check router number of drops --> deliberately masked - needs to be figured out!
+ pkt_drop_stats = self.router.get_drop_stats()
+# assert pkt_drop_stats['total_drops'] < 20
+
+ # check for trex-router packet consistency
+ # TODO: check if it's ok
+ print('router drop stats: %s' % pkt_drop_stats)
+ print('TRex drop stats: %s' % trex_drops)
+ #self.assertEqual(pkt_drop_stats, trex_drops, "TRex's and router's drop stats don't match.")
+
+ except KeyError as e:
+ self.fail(e)
+ #assert False
+
+ # except AssertionError as e:
+ # e.args += ('TRex has crashed!')
+ # raise
+
+ @staticmethod
+ def unzip_client_package():
+ client_pkg_files = glob('%s/trex_client*.tar.gz' % CTRexScenario.scripts_path)
+ if not client_pkg_files:
+ raise Exception('Could not find client package')
+ if len(client_pkg_files) > 1:
+ raise Exception('Found more than one client package')
+ if not os.path.exists('%s/trex_client' % CTRexScenario.scripts_path):
+ print('\nUnzipping package')
+ return_code, _, stderr = misc_methods.run_command("tar -xzf %s -C %s" % (client_pkg_files[0], CTRexScenario.scripts_path))
+ if return_code:
+ raise Exception('Could not untar the client package: %s' % stderr)
+ else:
+ print('\nClient package is untarred')
+
+ # We encountered an error; don't fail the test immediately
+ def fail(self, reason = 'Unknown error'):
+ print('Error: %s' % reason)
+ self.fail_reasons.append(reason)
+
+ # skip running of the test, counts as 'passed' but prints 'skipped'
+ def skip(self, message = 'Unknown reason'):
+ print('Skip: %s' % message)
+ self.skipping = True
+ raise SkipTest(message)
+
+ # get name of currently running test
+ def get_name(self):
+ return self._testMethodName
+
+ def setUp(self):
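+ # set intersection: modes that are both active on this setup and listed as unsupported by the test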
+ test_setup_modes_conflict = self.modes & set(self.unsupported_modes)
+ if test_setup_modes_conflict:
+ self.skip("The test can't run with following modes of given setup: %s " % test_setup_modes_conflict)
+ if not self.stl_trex and not self.trex.is_idle():
+ print('Warning: TRex is not idle at setUp, trying to stop it.')
+ self.trex.force_kill(confirm = False)
+ if not self.is_loopback:
+ print('')
+ if not self.stl_trex: # stateful
+ self.router.load_clean_config()
+ self.router.clear_counters()
+ self.router.clear_packet_drop_stats()
+
+ ########################################################################
+ #### DO NOT ADD TESTS TO THIS FILE ####
+ #### Added tests here will run once for EVERY test sub-class ####
+ ########################################################################
+
+ # masked example of such a test; uncomment to see how it affects the run #
+# def test_isInitialized(self):
+# assert CTRexScenario.is_init == True
+ def tearDown(self):
+ if not self.stl_trex and not self.trex.is_idle():
+ print('Warning: TRex is not idle at tearDown, trying to stop it.')
+ self.trex.force_kill(confirm = False)
+ if not self.skipping:
+ # print server logs of test run
+ if self.trex and CTRexScenario.server_logs and not self.no_daemon:
+ try:
+ print(termstyle.green('\n>>>>>>>>>>>>>>> Daemon log <<<<<<<<<<<<<<<'))
+ daemon_log = self.trex.get_trex_daemon_log()
+ log_size = len(daemon_log)
+ print(''.join(daemon_log[CTRexScenario.daemon_log_lines:]))
+ CTRexScenario.daemon_log_lines = log_size
+ except Exception as e:
+ print("Can't get TRex daemon log:", e)
+ try:
+ print(termstyle.green('>>>>>>>>>>>>>>>> Trex log <<<<<<<<<<<<<<<<'))
+ print(''.join(self.trex.get_trex_log()))
+ except Exception as e:
+ print("Can't get TRex log:", e)
+ if self.fail_reasons:
+ sys.stdout.flush()
+ raise Exception('The test failed, reasons:\n%s' % '\n'.join(self.fail_reasons))
+ sys.stdout.flush()
+
diff --git a/scripts/automation/regression/stateful_tests/trex_imix_test.py b/scripts/automation/regression/stateful_tests/trex_imix_test.py
new file mode 100755
index 00000000..f8fe0ed1
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/trex_imix_test.py
@@ -0,0 +1,213 @@
+#!/router/bin/python
+from .trex_general_test import CTRexGeneral_Test
+from CPlatform import CStaticRouteConfig
+from .tests_exceptions import *
+#import sys
+import time
+from nose.tools import nottest
+
+class CTRexIMIX_Test(CTRexGeneral_Test):
+ """This class defines the IMIX testcase of the TRex traffic generator"""
+ def __init__(self, *args, **kwargs):
+ # super(CTRexIMIX_Test, self).__init__()
+ CTRexGeneral_Test.__init__(self, *args, **kwargs)
+
+ def setUp(self):
+ super(CTRexIMIX_Test, self).setUp() # launch super test class setUp process
+ # CTRexGeneral_Test.setUp(self) # launch super test class setUp process
+ # self.router.clear_counters()
+ pass
+
+ def test_routing_imix_64(self):
+ # test initialization
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+
+# self.trex.set_yaml_file('cap2/imix_64.yaml')
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+# trex_res = self.trex.run(multiplier = mult, cores = core, duration = 30, l = 1000, p = True)
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ d = 30,
+ f = 'cap2/imix_64.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance - it contains the summary of the test results
+ # you can see all result keys by calling 'print(trex_res.result)'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res)
+
+ # the name intentionally does not match nose's default pattern; to include the test, specify it explicitly
+ def dummy(self):
+ ret = self.trex.start_trex(
+ c = 1,
+ m = 1,
+ p = True,
+ nc = True,
+ d = 5,
+ f = 'cap2/imix_fast_1g.yaml',
+ l = 1000,
+ trex_development = True)
+
+ trex_res = self.trex.sample_to_run_finish()
+ print(trex_res)
+
+ def test_routing_imix (self):
+ # test initialization
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+
+# self.trex.set_yaml_file('cap2/imix_fast_1g.yaml')
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ d = 60,
+ f = 'cap2/imix_fast_1g.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance - it contains the summary of the test results
+ # you can see all result keys by calling 'print(trex_res.result)'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res)
+
+ self.check_CPU_benchmark(trex_res)
+
+
+ def test_static_routing_imix (self):
+ if self.is_loopback:
+ self.skip('In loopback mode this test is the same as test_routing_imix')
+ # test initialization
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+
+ # Configure static routing based on benchmark data input
+ stat_route_dict = self.get_benchmark_param('stat_route_dict')
+ stat_route_obj = CStaticRouteConfig(stat_route_dict)
+ self.router.config_static_routing(stat_route_obj, mode = "config")
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ d = 60,
+ f = 'cap2/imix_fast_1g.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance - it contains the summary of the test results
+ # you can see all result keys by calling 'print(trex_res.result)'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ print("\nLATEST DUMP:")
+ print(trex_res.get_latest_dump())
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res)
+
+
+ def test_static_routing_imix_asymmetric (self):
+ # test initialization
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+
+ # Configure static routing based on benchmark data input
+ stat_route_dict = self.get_benchmark_param('stat_route_dict')
+ stat_route_obj = CStaticRouteConfig(stat_route_dict)
+ self.router.config_static_routing(stat_route_obj, mode = "config")
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ nc = True,
+ d = 100,
+ f = 'cap2/imix_fast_1g.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance - it contains the summary of the test results
+ # you can see all result keys by calling 'print(trex_res.result)'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res)
+
+ self.check_CPU_benchmark(trex_res, minimal_cpu = 25)
+
+
+ def test_jumbo(self, duration = 100, **kwargs):
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces(mtu = 9216)
+ self.router.config_pbr(mode = "config")
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ d = duration,
+ f = 'cap2/imix_9k.yaml',
+ l = 1000,
+ **kwargs)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance - it contains the summary of the test results
+ # you can see all result keys by calling 'print(trex_res.result)'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res, minimal_cpu = 0, maximal_cpu = 10)
+
+ # don't include it to regular nose search
+ @nottest
+ def test_warm_up(self):
+ try:
+ self._testMethodName = 'test_jumbo'
+ self.test_jumbo(duration = 5, trex_development = True)
+ except Exception as e:
+ print('Ignoring this error: %s' % e)
+ if self.fail_reasons:
+ print('Ignoring these errors:\n%s' % '\n'.join(self.fail_reasons))
+ self.fail_reasons = []
+
+ def tearDown(self):
+ CTRexGeneral_Test.tearDown(self)
+ # remove nbar config here
+ pass
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/stateful_tests/trex_ipv6_test.py b/scripts/automation/regression/stateful_tests/trex_ipv6_test.py
new file mode 100755
index 00000000..4d6f7953
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/trex_ipv6_test.py
@@ -0,0 +1,103 @@
+#!/router/bin/python
+from .trex_general_test import CTRexGeneral_Test
+from .tests_exceptions import *
+import time
+from nose.tools import assert_equal
+
+class CTRexIPv6_Test(CTRexGeneral_Test):
+ """This class defines the IPv6 testcase of the TRex traffic generator"""
+ def __init__(self, *args, **kwargs):
+ super(CTRexIPv6_Test, self).__init__(*args, **kwargs)
+
+ def setUp(self):
+ super(CTRexIPv6_Test, self).setUp() # launch super test class setUp process
+# print " before sleep setup !!"
+# time.sleep(100000);
+# pass
+
+ def test_ipv6_simple(self):
+ if self.is_virt_nics:
+ self.skip('--ipv6 flag does not work correctly with virtual NICs') # TODO: fix
+ # test initialization
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+
+ self.router.config_pbr(mode = "config")
+ self.router.config_ipv6_pbr(mode = "config")
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex(
+ cfg = '/etc/trex_cfg_mac.yaml',
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ ipv6 = True,
+ d = 60,
+ f = 'avl/sfr_delay_10_1g.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance - it contains the summary of the test results
+ # you can see all result keys by calling 'print(trex_res.result)'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res)
+
+ self.check_CPU_benchmark (trex_res, 10.0)
+
+ assert True
+
+
+ def test_ipv6_negative (self):
+ if self.is_loopback:
+ self.skip('The test checks IPv6 drops by the device; not relevant in loopback setup')
+ # test initialization
+ self.router.configure_basic_interfaces()
+
+ # NOT CONFIGURING IPv6 INTENTIONALLY TO GET DROPS!
+ self.router.config_pbr(mode = "config")
+
+ # same params as test_ipv6_simple
+ mult = self.get_benchmark_param('multiplier', test_name = 'test_ipv6_simple')
+ core = self.get_benchmark_param('cores', test_name = 'test_ipv6_simple')
+
+ ret = self.trex.start_trex(
+ cfg = '/etc/trex_cfg_mac.yaml',
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ ipv6 = True,
+ d = 60,
+ f = 'avl/sfr_delay_10_1g.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance - it contains the summary of the test results
+ # you can see all result keys by calling 'print(trex_res.result)'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ trex_tx_pckt = float(trex_res.get_last_value("trex-global.data.m_total_tx_pkts"))
+ trex_drops = int(trex_res.get_total_drops())
+
+ trex_drop_rate = trex_res.get_drop_rate()
+
+ # make sure that at least 50% of the total transmitted packets were dropped
+ self.assert_gt((trex_drops/trex_tx_pckt), 0.5, 'packet drop ratio is not high enough')
+
+
+
+ def tearDown(self):
+ CTRexGeneral_Test.tearDown(self)
+ # remove config here
+ pass
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/stateful_tests/trex_nat_test.py b/scripts/automation/regression/stateful_tests/trex_nat_test.py
new file mode 100755
index 00000000..c23f67c4
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/trex_nat_test.py
@@ -0,0 +1,169 @@
+#!/router/bin/python
+from .trex_general_test import CTRexGeneral_Test
+from .tests_exceptions import *
+import time
+from CPlatform import CStaticRouteConfig, CNatConfig
+from nose.tools import assert_equal
+
+
+class CTRexNoNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
+ """This class defines the NAT testcase of the TRex traffic generator"""
+ def __init__(self, *args, **kwargs):
+ super(CTRexNoNat_Test, self).__init__(*args, **kwargs)
+ self.unsupported_modes = ['loopback'] # NAT requires device
+
+ def setUp(self):
+ super(CTRexNoNat_Test, self).setUp() # launch super test class setUp process
+
+ def check_nat_stats (self, nat_stats):
+ pass
+
+
+ def test_nat_learning(self):
+ # test initialization
+ self.router.configure_basic_interfaces()
+
+ stat_route_dict = self.get_benchmark_param('stat_route_dict')
+ stat_route_obj = CStaticRouteConfig(stat_route_dict)
+ self.router.config_static_routing(stat_route_obj, mode = "config")
+
+ self.router.config_nat_verify() # shutdown duplicate interfaces
+
+# self.trex.set_yaml_file('cap2/http_simple.yaml')
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+# trex_res = self.trex.run(multiplier = mult, cores = core, duration = 100, l = 1000, learn_verify = True)
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ learn_verify = True,
+ d = 100,
+ f = 'cap2/http_simple.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ print("\nLATEST DUMP:")
+ print(trex_res.get_latest_dump())
+
+
+ expected_nat_opened = self.get_benchmark_param('nat_opened')
+ learning_stats = trex_res.get_last_value("trex-global.data", ".*nat.*") # extract all nat data
+
+ if self.get_benchmark_param('allow_timeout_dev'):
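+ # worked example (illustrative): 100 timed-out sessions out of 40,000 opened
+ # gives a ratio of 0.0025, inside the 0.005 (0.5%) budget below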
+ nat_timeout_ratio = float(learning_stats['m_total_nat_time_out']) / learning_stats['m_total_nat_open']
+ if nat_timeout_ratio > 0.005:
+ self.fail('TRex nat_timeout ratio %f > 0.5%%' % nat_timeout_ratio)
+ else:
+ self.check_results_eq (learning_stats, 'm_total_nat_time_out', 0.0)
+ self.check_results_eq (learning_stats, 'm_total_nat_no_fid', 0.0)
+ self.check_results_gt (learning_stats, 'm_total_nat_learn_error', 0.0)
+#
+ self.check_results_gt (learning_stats, 'm_total_nat_open', expected_nat_opened)
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res, minimal_cpu = 10, maximal_cpu = 85)
+
+ def tearDown(self):
+ CTRexGeneral_Test.tearDown(self)
+ pass
+
+
+class CTRexNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
+ """This class defines the NAT testcase of the TRex traffic generator"""
+ def __init__(self, *args, **kwargs):
+ super(CTRexNat_Test, self).__init__(*args, **kwargs)
+ self.unsupported_modes = ['loopback'] # NAT requires device
+
+ def setUp(self):
+ super(CTRexNat_Test, self).setUp() # launch super test class setUp process
+ # config nat here
+
+
+ def check_nat_stats (self, nat_stats):
+ pass
+
+
+ def test_nat_simple_mode1(self):
+ self.nat_simple_helper(learn_mode=1)
+
+ def test_nat_simple_mode2(self):
+ self.nat_simple_helper(learn_mode=2)
+
+ def test_nat_simple_mode3(self):
+ self.nat_simple_helper(learn_mode=3)
+
+ def nat_simple_helper(self, learn_mode=1):
+ # test initialization
+ self.router.configure_basic_interfaces()
+
+
+ stat_route_dict = self.get_benchmark_param('stat_route_dict')
+ stat_route_obj = CStaticRouteConfig(stat_route_dict)
+ self.router.config_static_routing(stat_route_obj, mode = "config")
+
+ nat_dict = self.get_benchmark_param('nat_dict')
+ nat_obj = CNatConfig(nat_dict)
+ self.router.config_nat(nat_obj)
+
+# self.trex.set_yaml_file('cap2/http_simple.yaml')
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+# trex_res = self.trex.run(nc=False,multiplier = mult, cores = core, duration = 100, l = 1000, learn = True)
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ learn_mode = learn_mode,
+ d = 100,
+ f = 'cap2/http_simple.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ print("\nLATEST DUMP:")
+ print(trex_res.get_latest_dump())
+
+ trex_nat_stats = trex_res.get_last_value("trex-global.data", ".*nat.*") # extract all nat data
+ if self.get_benchmark_param('allow_timeout_dev'):
+ nat_timeout_ratio = float(trex_nat_stats['m_total_nat_time_out']) / trex_nat_stats['m_total_nat_open']
+ if nat_timeout_ratio > 0.005:
+ self.fail('TRex nat_timeout ratio %f > 0.5%%' % nat_timeout_ratio)
+ else:
+ self.check_results_eq (trex_nat_stats,'m_total_nat_time_out', 0.0)
+ self.check_results_eq (trex_nat_stats,'m_total_nat_no_fid', 0.0)
+ self.check_results_gt (trex_nat_stats,'m_total_nat_open', 6000)
+
+
+ self.check_general_scenario_results(trex_res, check_latency = False) # NAT can cause latency
+## test_norm_cpu = 2*(trex_res.result['total-tx']/(core*trex_res.result['cpu_utilization']))
+# trex_tx_pckt = trex_res.get_last_value("trex-global.data.m_total_tx_bps")
+# cpu_util = int(trex_res.get_last_value("trex-global.data.m_cpu_util"))
+# test_norm_cpu = 2*(trex_tx_pckt/(core*cpu_util))
+# print "test_norm_cpu is: ", test_norm_cpu
+
+ self.check_CPU_benchmark(trex_res, minimal_cpu = 10, maximal_cpu = 85)
+
+ #if ( abs((test_norm_cpu/self.get_benchmark_param('cpu_to_core_ratio')) - 1) > 0.03):
+ # raise AbnormalResultError('Normalized bandwidth to CPU utilization ratio exceeds 3%')
+
+ nat_stats = self.router.get_nat_stats()
+ print(nat_stats)
+
+ self.assert_gt(nat_stats['total_active_trans'], 5000, 'total active translations is not high enough')
+ self.assert_gt(nat_stats['dynamic_active_trans'], 5000, 'total dynamic active translations is not high enough')
+ self.assertEqual(nat_stats['static_active_trans'], 0, "NAT statistics nat_stats['static_active_trans'] should be zero")
+ self.assert_gt(nat_stats['num_of_hits'], 50000, 'total nat hits is not high enough')
+
+ def tearDown(self):
+ self.router.clear_nat_translations()
+ CTRexGeneral_Test.tearDown(self)
+
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/stateful_tests/trex_nbar_test.py b/scripts/automation/regression/stateful_tests/trex_nbar_test.py
new file mode 100755
index 00000000..6611ac96
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/trex_nbar_test.py
@@ -0,0 +1,123 @@
+#!/router/bin/python
+from .trex_general_test import CTRexGeneral_Test
+from .tests_exceptions import *
+from interfaces_e import IFType
+from nose.tools import nottest
+from misc_methods import print_r
+
+class CTRexNbar_Test(CTRexGeneral_Test):
+ """This class defines the NBAR testcase of the TRex traffic generator"""
+ def __init__(self, *args, **kwargs):
+ super(CTRexNbar_Test, self).__init__(*args, **kwargs)
+ self.unsupported_modes = ['loopback'] # obviously no NBar in loopback
+
+ def setUp(self):
+ super(CTRexNbar_Test, self).setUp() # launch super test class setUp process
+# self.router.kill_nbar_flows()
+ self.router.clear_cft_counters()
+ self.router.clear_nbar_stats()
+
+ def match_classification (self):
+ nbar_benchmark = self.get_benchmark_param("nbar_classification")
+ test_classification = self.router.get_nbar_stats()
+ print("TEST CLASSIFICATION:")
+ print(test_classification)
+ mismatchFlag = False
+ mismatchMsg = "NBAR classification contains a mismatch on the following protocols:"
+ fmt = '\n\t{0:15} | Expected: {1:>3.2f}%, Got: {2:>3.2f}%'
+ noise_level = 0.045
+
+ for cl_intf in self.router.get_if_manager().get_if_list(if_type = IFType.Client):
+ client_intf = cl_intf.get_name()
+
+ for protocol, bench in nbar_benchmark.items():
+ if protocol != 'total':
+ try:
+ bench = float(bench)
+ protocol = protocol.replace('_','-')
+ protocol_test_res = test_classification[client_intf]['percentage'][protocol]
+ deviation = 100 * abs(bench/protocol_test_res - 1) # percent
+ difference = abs(bench - protocol_test_res)
+ if (deviation > 10 and difference > noise_level): # allowing 10% deviation and 'noise_level'% difference
+ mismatchFlag = True
+ mismatchMsg += fmt.format(protocol, bench, protocol_test_res)
+ except KeyError as e:
+ mismatchFlag = True
+ print(e)
+ print("Setting mismatchFlag to True. ", "\n\tProtocol {0} isn't part of classification results on interface {intf}".format( protocol, intf = client_intf ))
+ mismatchMsg += "\n\tProtocol {0} isn't part of classification results on interface {intf}".format( protocol, intf = client_intf )
+ except ZeroDivisionError as e:
+ print("ZeroDivisionError: %s" % protocol)
+ pass
+ if mismatchFlag:
+ self.fail(mismatchMsg)
+
+
+ def test_nbar_simple(self):
+ # test initialization
+ deviation_compare_value = 0.03 # default value of deviation - 3%
+ self.router.configure_basic_interfaces()
+
+ self.router.config_pbr(mode = "config")
+ self.router.config_nbar_pd()
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ d = 100,
+ f = 'avl/sfr_delay_10_1g.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance - it contains the summary of the test results
+ # you can see all result keys by calling 'print(trex_res.result)'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ print("\nLATEST DUMP:")
+ print(trex_res.get_latest_dump())
+
+ self.check_general_scenario_results(trex_res, check_latency = False) # NBAR can cause latency
+ self.check_CPU_benchmark(trex_res)
+ self.match_classification()
+
+
+ # the name intentionally does not match nose's default pattern; to include the test, specify it explicitly
+ def NBarLong(self):
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+ self.router.config_nbar_pd()
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ d = 18000, # 5 hours
+ f = 'avl/sfr_delay_10_1g.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance - it contains the summary of the test results
+ # you can see all result keys by calling 'print(trex_res.result)'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res, check_latency = False)
+
+
+ def tearDown(self):
+ CTRexGeneral_Test.tearDown(self)
+ pass
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/stateful_tests/trex_rx_test.py b/scripts/automation/regression/stateful_tests/trex_rx_test.py
new file mode 100755
index 00000000..161856b1
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/trex_rx_test.py
@@ -0,0 +1,280 @@
+#!/router/bin/python
+from .trex_general_test import CTRexGeneral_Test
+from CPlatform import CStaticRouteConfig, CNatConfig
+from .tests_exceptions import *
+#import sys
+import time
+import copy
+from nose.tools import nottest
+import traceback
+
+class CTRexRx_Test(CTRexGeneral_Test):
+ """This class defines the rx testcase of the TRex traffic generator"""
+ def __init__(self, *args, **kwargs):
+ CTRexGeneral_Test.__init__(self, *args, **kwargs)
+ self.unsupported_modes = ['virt_nics'] # TODO: fix
+
+ def setUp(self):
+ CTRexGeneral_Test.setUp(self)
+
+
+ def check_rx_errors(self, trex_res, allow_error_tolerance = True):
+ try:
+ # counters to check
+
+ latency_counters_display = {'m_unsup_prot': 0, 'm_no_magic': 0, 'm_no_id': 0, 'm_seq_error': 0, 'm_length_error': 0, 'm_no_ipv4_option': 0, 'm_tx_pkt_err': 0}
+ rx_counters = {'m_err_drop': 0, 'm_err_aged': 0, 'm_err_no_magic': 0, 'm_err_wrong_pkt_id': 0, 'm_err_fif_seen_twice': 0, 'm_err_open_with_no_fif_pkt': 0, 'm_err_oo_dup': 0, 'm_err_oo_early': 0, 'm_err_oo_late': 0, 'm_err_flow_length_changed': 0}
+
+ # get relevant TRex results
+
+ try:
+ ports_names = trex_res.get_last_value('trex-latecny-v2.data', 'port\-\d+')
+ if not ports_names:
+ raise AbnormalResultError('Could not find ports info in TRex results, path: trex-latecny-v2.data.port-*')
+ for port_name in ports_names:
+ path = 'trex-latecny-v2.data.%s.stats' % port_name
+ port_result = trex_res.get_last_value(path)
+ if not port_result:
+ raise AbnormalResultError('Could not find port stats in TRex results, path: %s' % path)
+ for key in latency_counters_display:
+ latency_counters_display[key] += port_result[key]
+
+ # using -k flag in TRex produces 1 error per port in latency counter m_seq_error; allow it until the issue is resolved. For comparison, use a dict with a reduced m_seq_error number.
+ latency_counters_compare = copy.deepcopy(latency_counters_display)
+ latency_counters_compare['m_seq_error'] = max(0, latency_counters_compare['m_seq_error'] - len(ports_names))
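+ # e.g. (illustration only): with 2 ports and 5 observed m_seq_error events,
+ # the compare dict keeps max(0, 5 - 2) = 3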
+
+ path = 'rx-check.data.stats'
+ rx_check_results = trex_res.get_last_value(path)
+ if not rx_check_results:
+ raise AbnormalResultError('No TRex results by path: %s' % path)
+ for key in rx_counters:
+ rx_counters[key] = rx_check_results[key]
+
+ path = 'rx-check.data.stats.m_total_rx'
+ total_rx = trex_res.get_last_value(path)
+ if total_rx is None:
+ raise AbnormalResultError('No TRex results by path: %s' % path)
+ elif not total_rx:
+ raise AbnormalResultError('Total rx_check (%s) packets is zero.' % path)
+
+ print('Total packets checked: %s' % total_rx)
+ print('Latency counters: %s' % latency_counters_display)
+ print('rx_check counters: %s' % rx_counters)
+
+ except KeyError as e:
+ self.fail('Expected key in TRex result was not found.\n%s' % traceback.format_exc())
+
+ # the check: in loopback expect 0 problems; otherwise allow errors up to <error_tolerance>% of total_rx
+
+ total_errors = sum(rx_counters.values()) + sum(latency_counters_compare.values())
+ error_tolerance = self.get_benchmark_param('error_tolerance')
+ if not error_tolerance:
+ if not allow_error_tolerance:
+ error_tolerance = 0
+ else:
+ error_tolerance = 0.1
+ error_percentage = total_errors * 100.0 / total_rx
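+ # numeric illustration (not from the source): 50 errors out of 1,000,000
+ # checked packets -> 0.005%, well under the default 0.1% tolerance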
+
+ if total_errors > 0:
+ if error_percentage > error_tolerance:
+ self.fail('Too many errors in rx_check (~%s%% of traffic)' % error_percentage)
+ else:
+ print('There are errors in rx_check (%f%%), not exceeding allowed limit (%s%%)' % (error_percentage, error_tolerance))
+ else:
+ print('No errors in rx_check.')
+ except Exception as e:
+ print(traceback.format_exc())
+ self.fail('Errors in rx_check: %s' % e)
+
+ def test_rx_check_sfr(self):
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = 'config')
+
+ core = self.get_benchmark_param('cores')
+ mult = self.get_benchmark_param('multiplier')
+ sample_rate = self.get_benchmark_param('rx_sample_rate')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ rx_check = sample_rate,
+ d = 100,
+ f = 'avl/sfr_delay_10_1g_no_bundeling.yaml',
+ l = 1000,
+ k = 10,
+ learn_verify = True,
+ l_pkt_mode = 2)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ #print ("\nLATEST DUMP:")
+ #print trex_res.get_latest_dump()
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res)
+ self.check_rx_errors(trex_res)
+
+
+ def test_rx_check_http(self):
+ if not self.is_loopback:
+ # TODO: skip as test_rx_check_http_negative will cover it
+ #self.skip('This test is covered by test_rx_check_http_negative')
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+
+ core = self.get_benchmark_param('cores')
+ mult = self.get_benchmark_param('multiplier')
+ sample_rate = self.get_benchmark_param('rx_sample_rate')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ rx_check = sample_rate,
+ d = 100,
+ f = 'cap2/http_simple.yaml',
+ l = 1000,
+ k = 10,
+ learn_verify = True,
+ l_pkt_mode = 2)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res)
+ self.check_rx_errors(trex_res)
+
+
+ def test_rx_check_sfr_ipv6(self):
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = 'config')
+ self.router.config_ipv6_pbr(mode = "config")
+
+ core = self.get_benchmark_param('cores')
+ mult = self.get_benchmark_param('multiplier')
+ sample_rate = self.get_benchmark_param('rx_sample_rate')
+
+ ret = self.trex.start_trex(
+ cfg = '/etc/trex_cfg_mac.yaml',
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ rx_check = sample_rate,
+ d = 100,
+ f = 'avl/sfr_delay_10_1g_no_bundeling.yaml',
+ l = 1000,
+ k = 10,
+ ipv6 = True)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ #print ("\nLATEST DUMP:")
+ #print trex_res.get_latest_dump()
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res)
+ self.check_rx_errors(trex_res)
+
+
+ def test_rx_check_http_ipv6(self):
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+ self.router.config_ipv6_pbr(mode = "config")
+
+ core = self.get_benchmark_param('cores')
+ mult = self.get_benchmark_param('multiplier')
+ sample_rate = self.get_benchmark_param('rx_sample_rate')
+
+ ret = self.trex.start_trex(
+ cfg = '/etc/trex_cfg_mac.yaml',
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ rx_check = sample_rate,
+ d = 100,
+ f = 'cap2/http_simple.yaml',
+ l = 1000,
+ k = 10,
+ ipv6 = True)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res)
+ self.check_rx_errors(trex_res)
+
+ #@nottest
+ def test_rx_check_http_negative(self):
+ if self.is_loopback:
+ self.skip('This test uses NAT, not relevant for loopback')
+
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+
+ core = self.get_benchmark_param('cores')
+ mult = self.get_benchmark_param('multiplier')
+ sample_rate = self.get_benchmark_param('rx_sample_rate')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ rx_check = sample_rate,
+ d = 60,
+ f = 'cap2/http_simple.yaml',
+ l = 1000,
+ k = 10,
+ learn_verify = True,
+ l_pkt_mode = 2)
+
+ print('Run for 40 seconds, expect no errors')
+ trex_res = self.trex.sample_x_seconds(40)
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res)
+ self.check_rx_errors(trex_res)
+
+ print('Run until finish, expect errors')
+ old_errors = copy.deepcopy(self.fail_reasons)
+ nat_dict = self.get_benchmark_param('nat_dict', test_name = 'test_nat_simple_mode1')
+ nat_obj = CNatConfig(nat_dict)
+ self.router.config_nat(nat_obj)
+ self.router.config_zbf()
+ trex_res = self.trex.sample_to_run_finish()
+ self.router.config_no_zbf()
+ self.router.config_no_nat(nat_obj)
+ #self.router.clear_nat_translations()
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ self.check_rx_errors(trex_res, allow_error_tolerance = False)
+ if self.fail_reasons == old_errors:
+ self.fail('Expected errors here, got none.')
+ else:
+ print('Got errors as expected.')
+ self.fail_reasons = old_errors
+
+ def tearDown(self):
+ CTRexGeneral_Test.tearDown(self)
+ pass
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/stateless_tests/__init__.py b/scripts/automation/regression/stateless_tests/__init__.py
new file mode 100755
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/__init__.py
diff --git a/scripts/automation/regression/stateless_tests/stl_benchmark_test.py b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
new file mode 100755
index 00000000..6940efd3
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
@@ -0,0 +1,75 @@
+#!/router/bin/python
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+from trex_stl_lib.api import *
+import os, sys
+from collections import deque
+from time import time, sleep
+
+class STLBenchmark_Test(CStlGeneral_Test):
+ """Benchark stateless performance"""
+
+ def test_CPU_benchmark(self):
+ critical_test = CTRexScenario.setup_name in ('kiwi02', 'trex08', 'trex09') # temporary patch, this test needs to be fixed
+ timeout = 60 # max time to wait for stabilization
+ stabilize = 5 # ensure stabilization over this period
+ print('')
+
+ for profile_bench in self.get_benchmark_param('profiles'):
+ cpu_utils = deque([0] * stabilize, maxlen = stabilize)
+ bws_per_core = deque([0] * stabilize, maxlen = stabilize)
+ kwargs = profile_bench.get('kwargs', {})
+ print('Testing profile %s, kwargs: %s' % (profile_bench['name'], kwargs))
+ profile = STLProfile.load(os.path.join(CTRexScenario.scripts_path, profile_bench['name']), **kwargs)
+
+ self.stl_trex.reset()
+ self.stl_trex.clear_stats()
+ sleep(1)
+ self.stl_trex.add_streams(profile)
+ mult = '1%' if self.is_virt_nics else '10%'
+ self.stl_trex.start(mult = mult)
+ start_time = time()
+
+ for i in range(timeout + 1):
+ stats = self.stl_trex.get_stats()
+ cpu_utils.append(stats['global']['cpu_util'])
+ bws_per_core.append(stats['global']['bw_per_core'])
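+ # stability criterion: the smallest CPU reading in the window is within 5%
+ # of the largest, i.e. the readings have stopped drifting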
+ if i > stabilize and min(cpu_utils) > max(cpu_utils) * 0.95:
+ break
+ sleep(0.5)
+
+ avg_cpu_util = sum(cpu_utils) / stabilize
+ avg_bw_per_core = sum(bws_per_core) / stabilize
+
+ if critical_test and i == timeout and avg_cpu_util > 10:
+ raise Exception('Timeout on waiting for stabilization, last CPU util values: %s' % list(cpu_utils))
+ if stats[0]['opackets'] < 300 or stats[1]['opackets'] < 300:
+ raise Exception('Too few opackets, port0: %s, port1: %s' % (stats[0]['opackets'], stats[1]['opackets']))
+ if stats['global']['queue_full'] > 100000:
+ raise Exception('Too many queue_full events: %s' % stats['global']['queue_full'])
+ if not cpu_utils[-1]:
+ raise Exception('CPU util is zero, last values: %s' % list(cpu_utils))
+ print('Done (%ss), CPU util: %4g, bw_per_core: %6sGb/core' % (int(time() - start_time), avg_cpu_util, round(avg_bw_per_core, 2)))
+ # TODO: add check of benchmark based on results from regression
+
+ # report benchmarks
+ if self.GAManager:
+ try:
+ pass
+ #profile_repr = '%s.%s %s' % (CTRexScenario.setup_name,
+ # os.path.basename(profile_bench['name']),
+ # repr(kwargs).replace("'", ''))
+ #self.GAManager.gaAddAction(Event = 'stateless_test', action = profile_repr,
+ # label = 'bw_per_core', value = int(avg_bw_per_core))
+ # TODO: report expected once acquired
+ #self.GAManager.gaAddAction(Event = 'stateless_test', action = profile_repr,
+ # label = 'bw_per_core_exp', value = int(expected_norm_cpu))
+ #self.GAManager.emptyAndReportQ()
+ except Exception as e:
+ print('Sending GA failed: %s' % e)
+
+ def tearDown(self):
+ self.stl_trex.reset()
+ self.stl_trex.clear_stats()
+ sleep(1)
+ CStlGeneral_Test.tearDown(self)
+
diff --git a/scripts/automation/regression/stateless_tests/stl_client_test.py b/scripts/automation/regression/stateless_tests/stl_client_test.py
new file mode 100644
index 00000000..36ac0ee1
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_client_test.py
@@ -0,0 +1,350 @@
+#!/router/bin/python
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+from trex_stl_lib.api import *
+import os, sys
+import glob
+
+
+def get_error_in_percentage (golden, value):
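+ # note: despite its name this returns a fraction (0.05 == 5%), which callers
+ # compare against thresholds like 0.05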
+ return abs(golden - value) / float(golden)
+
+def get_stl_profiles ():
+ profiles_path = os.path.join(CTRexScenario.scripts_path, 'stl/')
+ py_profiles = glob.glob(profiles_path + "/*.py")
+ yaml_profiles = glob.glob(profiles_path + "yaml/*.yaml")
+ return py_profiles + yaml_profiles
+
+
+class STLClient_Test(CStlGeneral_Test):
+ """Tests for stateless client"""
+
+ def setUp(self):
+ CStlGeneral_Test.setUp(self)
+
+ if self.is_virt_nics:
+ self.percentage = 5
+ self.pps = 500
+ else:
+ self.percentage = 50
+ self.pps = 50000
+
+ # strict mode is only for 'wire only' connection
+ self.strict = self.is_loopback and not self.is_virt_nics
+
+ assert 'bi' in CTRexScenario.stl_ports_map
+
+ self.c = CTRexScenario.stl_trex
+
+ self.tx_port, self.rx_port = CTRexScenario.stl_ports_map['bi'][0]
+
+ self.c.connect()
+ self.c.reset(ports = [self.tx_port, self.rx_port])
+
+ self.pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()/'a_payload_example')
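+ # base packet template reused by the stream tests below: Ether/IP/UDP with an
+ # inner IP header and a short ASCII payload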
+ self.profiles = get_stl_profiles()
+
+
+ @classmethod
+ def tearDownClass(cls):
+ if CTRexScenario.stl_init_error:
+ return
+ # connect back at end of tests
+ if not cls.is_connected():
+ CTRexScenario.stl_trex.connect()
+
+
+ def verify (self, expected, got):
+ if self.strict:
+ assert expected == got
+ else:
+ assert get_error_in_percentage(expected, got) < 0.05
+
+
+ def test_basic_connect_disconnect (self):
+ try:
+ self.c.connect()
+ assert self.c.is_connected(), 'client should be connected'
+ self.c.disconnect()
+ assert not self.c.is_connected(), 'client should be disconnected'
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ def test_basic_single_burst (self):
+
+ try:
+ b1 = STLStream(name = 'burst',
+ packet = self.pkt,
+ mode = STLTXSingleBurst(total_pkts = 100,
+ percentage = self.percentage)
+ )
+
+ for i in range(0, 5):
+ self.c.add_streams([b1], ports = [self.tx_port, self.rx_port])
+
+ self.c.clear_stats()
+ self.c.start(ports = [self.tx_port, self.rx_port])
+
+ self.c.wait_on_traffic(ports = [self.tx_port, self.rx_port])
+ stats = self.c.get_stats()
+
+ assert self.tx_port in stats
+ assert self.rx_port in stats
+
+ self.verify(100, stats[self.tx_port]['opackets'])
+ self.verify(100, stats[self.rx_port]['ipackets'])
+
+ self.verify(100, stats[self.rx_port]['opackets'])
+ self.verify(100, stats[self.tx_port]['ipackets'])
+
+
+ self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
+
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ #
+ def test_basic_multi_burst (self):
+ try:
+ b1 = STLStream(name = 'burst',
+ packet = self.pkt,
+ mode = STLTXMultiBurst(pkts_per_burst = 10,
+ count = 20,
+ percentage = self.percentage)
+ )
+
+ for i in range(0, 5):
+ self.c.add_streams([b1], ports = [self.tx_port, self.rx_port])
+
+ self.c.clear_stats()
+ self.c.start(ports = [self.tx_port, self.rx_port])
+
+ self.c.wait_on_traffic(ports = [self.tx_port, self.rx_port])
+ stats = self.c.get_stats()
+
+ assert self.tx_port in stats
+ assert self.rx_port in stats
+
+ self.verify(200, stats[self.tx_port]['opackets'])
+ self.verify(200, stats[self.rx_port]['ipackets'])
+
+ self.verify(200, stats[self.rx_port]['opackets'])
+ self.verify(200, stats[self.tx_port]['ipackets'])
+
+ self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
+
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ #
+ def test_basic_cont (self):
+ pps = self.pps
+ duration = 0.1
+ golden = pps * duration
+
+ try:
+ b1 = STLStream(name = 'burst',
+ packet = self.pkt,
+ mode = STLTXCont(pps = pps)
+ )
+
+ for i in range(0, 5):
+ self.c.add_streams([b1], ports = [self.tx_port, self.rx_port])
+
+ self.c.clear_stats()
+ self.c.start(ports = [self.tx_port, self.rx_port], duration = duration)
+
+ assert self.c.ports[self.tx_port].is_transmitting(), 'port should be active'
+ assert self.c.ports[self.rx_port].is_transmitting(), 'port should be active'
+
+ self.c.wait_on_traffic(ports = [self.tx_port, self.rx_port])
+ stats = self.c.get_stats()
+
+ assert self.tx_port in stats
+ assert self.rx_port in stats
+
+ # cont. with duration should be quite precise - 5% error is relaxed enough
+
+ assert get_error_in_percentage(stats[self.tx_port]['opackets'], golden) < 0.05
+ assert get_error_in_percentage(stats[self.rx_port]['ipackets'], golden) < 0.05
+
+ assert get_error_in_percentage(stats[self.rx_port]['opackets'], golden) < 0.05
+ assert get_error_in_percentage(stats[self.tx_port]['ipackets'], golden) < 0.05
+
+
+ self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
+
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ def test_stress_connect_disconnect (self):
+ try:
+ for i in range(0, 100):
+ self.c.connect()
+ assert self.c.is_connected(), 'client should be connected'
+ self.c.disconnect()
+ assert not self.c.is_connected(), 'client should be disconnected'
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+
+ def test_stress_tx (self):
+ try:
+ s1 = STLStream(name = 'stress',
+ packet = self.pkt,
+ mode = STLTXCont(percentage = self.percentage))
+
+ # add both streams to ports
+ self.c.add_streams([s1], ports = [self.tx_port, self.rx_port])
+ for i in range(0, 100):
+
+ self.c.start(ports = [self.tx_port, self.rx_port])
+
+ assert self.c.ports[self.tx_port].is_transmitting(), 'port should be active'
+ assert self.c.ports[self.rx_port].is_transmitting(), 'port should be active'
+
+ self.c.pause(ports = [self.tx_port, self.rx_port])
+
+ assert self.c.ports[self.tx_port].is_paused(), 'port should be paused'
+ assert self.c.ports[self.rx_port].is_paused(), 'port should be paused'
+
+ self.c.resume(ports = [self.tx_port, self.rx_port])
+
+ assert self.c.ports[self.tx_port].is_transmitting(), 'port should be active'
+ assert self.c.ports[self.rx_port].is_transmitting(), 'port should be active'
+
+ self.c.stop(ports = [self.tx_port, self.rx_port])
+
+ assert not self.c.ports[self.tx_port].is_active(), 'port should be idle'
+ assert not self.c.ports[self.rx_port].is_active(), 'port should be idle'
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ def test_all_profiles (self):
+ if self.is_virt_nics or not self.is_loopback:
+ self.skip('skipping profile tests for virtual / non loopback')
+ return
+
+ try:
+
+ for profile in self.profiles:
+
+ print("now testing profile {0}...\n".format(profile))
+
+ p1 = STLProfile.load(profile, port_id = self.tx_port)
+ p2 = STLProfile.load(profile, port_id = self.rx_port)
+
+ # if the profile contains custom MAC addresses we need promiscuous mode,
+ # but virtual NICs do not support promiscuous mode
+ self.c.set_port_attr(ports = [self.tx_port, self.rx_port], promiscuous = False)
+
+ if p1.has_custom_mac_addr():
+ if not self.is_virt_nics:
+ self.c.set_port_attr(ports = [self.tx_port, self.rx_port], promiscuous = True)
+ else:
+ print("\n*** profile needs promiscuous mode but running on virtual NICs - skipping... ***\n")
+ continue
+
+ if p1.has_flow_stats():
+ print("\n*** profile needs RX caps - skipping... ***\n")
+ continue
+
+ self.c.add_streams(p1, ports = self.tx_port)
+ self.c.add_streams(p2, ports = self.rx_port)
+
+ self.c.clear_stats()
+
+ self.c.start(ports = [self.tx_port, self.rx_port], mult = "30%")
+ time.sleep(100 / 1000.0)
+
+ if p1.is_pauseable() and p2.is_pauseable():
+ self.c.pause(ports = [self.tx_port, self.rx_port])
+ time.sleep(100 / 1000.0)
+
+ self.c.resume(ports = [self.tx_port, self.rx_port])
+ time.sleep(100 / 1000.0)
+
+ self.c.stop(ports = [self.tx_port, self.rx_port])
+
+ stats = self.c.get_stats()
+
+ assert self.tx_port in stats, '{0} - no stats for TX port'.format(profile)
+ assert self.rx_port in stats, '{0} - no stats for RX port'.format(profile)
+
+ self.verify(stats[self.tx_port]['opackets'], stats[self.rx_port]['ipackets'])
+ self.verify(stats[self.rx_port]['opackets'], stats[self.tx_port]['ipackets'])
+
+ self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ finally:
+ self.c.set_port_attr(ports = [self.tx_port, self.rx_port], promiscuous = False)
+
+
+ # see https://trex-tgn.cisco.com/youtrack/issue/trex-226
+ def test_latency_pause_resume (self):
+
+ try:
+
+ s1 = STLStream(name = 'latency',
+ packet = self.pkt,
+ mode = STLTXCont(percentage = self.percentage),
+ flow_stats = STLFlowLatencyStats(pg_id = 1))
+
+ self.c.add_streams([s1], ports = self.tx_port)
+
+ self.c.clear_stats()
+
+ self.c.start(ports = self.tx_port)
+
+ for i in range(100):
+ self.c.pause()
+ self.c.resume()
+
+ self.c.stop()
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ def test_pcap_remote (self):
+ try:
+ pcap_file = os.path.join(CTRexScenario.scripts_path, 'automation/regression/test_pcaps/pcap_dual_test.erf')
+
+ master = self.tx_port
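+ # assumes dual ports are adjacent (0-1, 2-3, ...), so flipping the lowest bit yields the peer port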
+ slave = master ^ 0x1
+
+ self.c.reset(ports = [master, slave])
+ self.c.clear_stats()
+ self.c.push_remote(pcap_file,
+ ports = [master],
+ ipg_usec = 100,
+ is_dual = True)
+ self.c.wait_on_traffic(ports = [master])
+
+ stats = self.c.get_stats()
+
+ self.verify(stats[master]['opackets'], 52)
+ self.verify(stats[slave]['opackets'], 48)
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
diff --git a/scripts/automation/regression/stateless_tests/stl_examples_test.py b/scripts/automation/regression/stateless_tests/stl_examples_test.py
new file mode 100755
index 00000000..71fc3287
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_examples_test.py
@@ -0,0 +1,31 @@
+#!/router/bin/python
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+import os, sys
+from misc_methods import run_command
+
+
+class STLExamples_Test(CStlGeneral_Test):
+ """This class defines the IMIX testcase of the TRex traffic generator"""
+
+ def explicitSetUp(self):
+ # examples connect on their own
+ if self.is_connected():
+ CTRexScenario.stl_trex.disconnect()
+
+ def explicitTearDown(self):
+ # connect back at end of tests
+ if not self.is_connected():
+ self.stl_trex.connect()
+
+ def test_stl_examples(self):
+ examples_dir = '../trex_control_plane/stl/examples'
+ examples_to_test = [
+ 'stl_imix.py',
+ ]
+
+ for example in examples_to_test:
+ self.explicitSetUp()
+ return_code, stdout, stderr = run_command("sh -c 'cd %s; %s %s -s %s'" % (examples_dir, sys.executable, example, CTRexScenario.configuration.trex['trex_name']))
+ self.explicitTearDown()
+ assert return_code == 0, 'example %s failed with code %s.\nstdout: %s\nstderr: %s' % (example, return_code, stdout, stderr)
+
diff --git a/scripts/automation/regression/stateless_tests/stl_general_test.py b/scripts/automation/regression/stateless_tests/stl_general_test.py
new file mode 100644
index 00000000..590733ba
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_general_test.py
@@ -0,0 +1,113 @@
+import os, sys
+import unittest
+from trex import CTRexScenario
+from stateful_tests.trex_general_test import CTRexGeneral_Test
+from trex_stl_lib.api import *
+import time
+from nose.tools import nottest
+
+class CStlGeneral_Test(CTRexGeneral_Test):
+ """This class defines the general stateless testcase of the TRex traffic generator"""
+
+ def setUp(self):
+ self.stl_trex = CTRexScenario.stl_trex if CTRexScenario.stl_trex else 'mock'
+ CTRexGeneral_Test.setUp(self)
+ # basic requirements should have been verified in test_connectivity; here we only skip the test on init error
+ if CTRexScenario.stl_init_error:
+ self.skip(CTRexScenario.stl_init_error)
+
+ def connect(self, timeout = 100):
+ # retry with delays because the TRex process might still be starting
+ sys.stdout.write('Connecting')
+ for i in range(timeout):
+ try:
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ self.stl_trex.connect()
+ print('')
+ return True
+ except:
+ time.sleep(0.1)
+ print('')
+ return False
+
+ def map_ports(self, timeout = 100):
+ sys.stdout.write('Mapping ports')
+ for i in range(timeout):
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ CTRexScenario.stl_ports_map = stl_map_ports(self.stl_trex)
+ if self.verify_bidirectional(CTRexScenario.stl_ports_map):
+ print('')
+ return True
+ time.sleep(0.1)
+ print('')
+ return False
+
+ # verify all the ports are bidirectional
+ @staticmethod
+ def verify_bidirectional(mapping_dict):
+ if len(mapping_dict['unknown']):
+ return False
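+ # each bidirectional pair contributes two one-directional entries to 'map'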
+ if len(mapping_dict['bi']) * 2 == len(mapping_dict['map']):
+ return True
+ return False
+
+ @staticmethod
+ def get_port_count():
+ return CTRexScenario.stl_trex.get_port_count()
+
+ @staticmethod
+ def is_connected():
+ return CTRexScenario.stl_trex.is_connected()
+
+class STLBasic_Test(CStlGeneral_Test):
+ # will run it first explicitly, check connectivity and configure routing
+ @nottest
+ def test_connectivity(self):
+ if not self.is_loopback:
+ try:
+ sys.stdout.flush()
+ sys.stdout.write('Configuring DUT... ')
+ start_time = time.time()
+ if CTRexScenario.router_cfg['forceCleanConfig']:
+ CTRexScenario.router.load_clean_config()
+ CTRexScenario.router.configure_basic_interfaces()
+ CTRexScenario.router.config_pbr(mode = "config")
+ CTRexScenario.router.config_ipv6_pbr(mode = "config")
+ sys.stdout.write('done. (%ss)\n' % int(time.time() - start_time))
+ except Exception as e:
+ print('')
+ CTRexScenario.stl_init_error = 'Could not configure device, err: %s' % e
+ self.fail(CTRexScenario.stl_init_error)
+ return
+
+ try:
+ sys.stdout.write('Starting TRex... ')
+ start_time = time.time()
+ cores = self.configuration.trex.get('trex_cores', 1)
+ if self.is_virt_nics and cores > 1:
+ raise Exception('Number of cores should be 1 with virtual NICs')
+ if not CTRexScenario.no_daemon:
+ self.trex.start_stateless(c = cores)
+ self.stl_trex = STLClient(username = 'TRexRegression',
+ server = self.configuration.trex['trex_name'],
+ verbose_level = CTRexScenario.json_verbose)
+ CTRexScenario.stl_trex = self.stl_trex
+ sys.stdout.write('done. (%ss)\n' % int(time.time() - start_time))
+ except Exception as e:
+ print('')
+ CTRexScenario.stl_init_error = 'Could not start stateless TRex, err: %s' % e
+ self.fail(CTRexScenario.stl_init_error)
+ return
+
+ if not self.connect():
+ CTRexScenario.stl_init_error = 'Client could not connect'
+ self.fail(CTRexScenario.stl_init_error)
+ return
+ print('Connected')
+ if not self.map_ports():
+ CTRexScenario.stl_init_error = 'Client could not map ports'
+ self.fail(CTRexScenario.stl_init_error)
+ return
+ print('Got ports mapping: %s' % CTRexScenario.stl_ports_map)
diff --git a/scripts/automation/regression/stateless_tests/stl_performance_test.py b/scripts/automation/regression/stateless_tests/stl_performance_test.py
new file mode 100644
index 00000000..a556daf3
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_performance_test.py
@@ -0,0 +1,351 @@
+import os
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+from trex_stl_lib.api import *
+
+def avg (values):
+ return (sum(values) / float(len(values)))
+
+# performance report object
+class PerformanceReport(object):
+ GOLDEN_NORMAL = 1
+ GOLDEN_FAIL = 2
+ GOLDEN_BETTER = 3
+
+ def __init__ (self,
+ scenario,
+ machine_name,
+ core_count,
+ avg_cpu,
+ avg_gbps,
+ avg_mpps,
+ avg_gbps_per_core,
+ avg_mpps_per_core,
+ ):
+
+ self.scenario = scenario
+ self.machine_name = machine_name
+ self.core_count = core_count
+ self.avg_cpu = avg_cpu
+ self.avg_gbps = avg_gbps
+ self.avg_mpps = avg_mpps
+ self.avg_gbps_per_core = avg_gbps_per_core
+ self.avg_mpps_per_core = avg_mpps_per_core
+
+ def show (self):
+
+ print("\n")
+ print("scenario: {0}".format(self.scenario))
+ print("machine name: {0}".format(self.machine_name))
+ print("DP core count: {0}".format(self.core_count))
+ print("average CPU: {0}".format(self.avg_cpu))
+ print("average Gbps: {0}".format(self.avg_gbps))
+ print("average Mpps: {0}".format(self.avg_mpps))
+ print("average pkt size (bytes): {0}".format( (self.avg_gbps * 1000 / 8) / self.avg_mpps))
+ print("average Gbps per core (at 100% CPU): {0}".format(self.avg_gbps_per_core))
+ print("average Mpps per core (at 100% CPU): {0}".format(self.avg_mpps_per_core))
+
+
+ def check_golden (self, golden_mpps):
+ if self.avg_mpps_per_core < golden_mpps['min']:
+ return self.GOLDEN_FAIL
+
+ if self.avg_mpps_per_core > golden_mpps['max']:
+ return self.GOLDEN_BETTER
+
+ return self.GOLDEN_NORMAL
+
+ def report_to_analytics(self, ga, golden_mpps):
+ print("\n* Reporting to GA *\n")
+ ga.gaAddTestQuery(TestName = self.scenario,
+ TRexMode = 'stl',
+ SetupName = self.machine_name,
+ TestType = 'performance',
+ Mppspc = self.avg_mpps_per_core,
+ ActionNumber = os.getenv("BUILD_ID","n/a"),
+ GoldenMin = golden_mpps['min'],
+ GoldenMax = golden_mpps['max'])
+
+ ga.emptyAndReportQ()
+
+
+class STLPerformance_Test(CStlGeneral_Test):
+ """Tests for stateless client"""
+
+ def setUp(self):
+
+ CStlGeneral_Test.setUp(self)
+
+ self.c = CTRexScenario.stl_trex
+ self.c.connect()
+ self.c.reset()
+
+
+
+ def tearDown (self):
+ CStlGeneral_Test.tearDown(self)
+
+
+ def build_perf_profile_vm (self, pkt_size, cache_size = None):
+ size = pkt_size - 4; # HW will add 4 bytes ethernet FCS
+ src_ip = '16.0.0.1'
+ dst_ip = '48.0.0.1'
+
+ base_pkt = Ether()/IP(src=src_ip,dst=dst_ip)/UDP(dport=12,sport=1025)
+ pad = max(0, size - len(base_pkt)) * 'x'
+
+ vm = STLScVmRaw( [ STLVmFlowVar ( "ip_src", min_value="10.0.0.1", max_value="10.0.0.255", size=4, step=1,op="inc"),
+ STLVmWrFlowVar (fv_name="ip_src", pkt_offset= "IP.src" ),
+ STLVmFixIpv4(offset = "IP")
+ ],
+ cache_size = cache_size
+ );
+
+ pkt = STLPktBuilder(pkt = base_pkt/pad, vm = vm)
+ return STLStream(packet = pkt, mode = STLTXCont())
+
+
+ def build_perf_profile_syn_attack (self, pkt_size):
+ size = pkt_size - 4; # HW will add 4 bytes ethernet FCS
+
+ # TCP SYN
+ base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S")
+ pad = max(0, size - len(base_pkt)) * 'x'
+
+ # vm
+ vm = STLScVmRaw( [ STLVmFlowVar(name="ip_src",
+ min_value="16.0.0.0",
+ max_value="18.0.0.254",
+ size=4, op="random"),
+
+ STLVmFlowVar(name="src_port",
+ min_value=1025,
+ max_value=65000,
+ size=2, op="random"),
+
+ STLVmWrFlowVar(fv_name="ip_src", pkt_offset= "IP.src" ),
+
+ STLVmFixIpv4(offset = "IP"), # fix checksum
+
+ STLVmWrFlowVar(fv_name="src_port",
+ pkt_offset= "TCP.sport") # fix udp len
+
+ ]
+ )
+
+ pkt = STLPktBuilder(pkt = base_pkt,
+ vm = vm)
+
+ return STLStream(packet = pkt,
+ random_seed = 0x1234,# can be remove. will give the same random value any run
+ mode = STLTXCont())
+
+
+
+ # single CPU, VM, no cache, 64 bytes
+ def test_performance_vm_single_cpu (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "VM - 64 bytes, single CPU"
+ scenario_cfg['streams'] = self.build_perf_profile_vm(64)
+ scenario_cfg['core_count'] = 1
+
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+ # single CPU, VM, cached, 64 bytes
+ def test_performance_vm_single_cpu_cached (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "VM - 64 bytes, single CPU, cache size 1024"
+ scenario_cfg['streams'] = self.build_perf_profile_vm(64, cache_size = 1024)
+ scenario_cfg['core_count'] = 1
+
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+ # single CPU, syn attack, 64 bytes
+ def test_performance_syn_attack_single_cpu (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "syn attack - 64 bytes, single CPU"
+ scenario_cfg['streams'] = self.build_perf_profile_syn_attack(64)
+ scenario_cfg['core_count'] = 1
+
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+ # two CPUs, VM, no cache, 64 bytes
+ def test_performance_vm_multi_cpus (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "VM - 64 bytes, multi CPUs"
+ scenario_cfg['streams'] = self.build_perf_profile_vm(64)
+
+ scenario_cfg['core_count'] = setup_cfg['core_count']
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+
+ # multi CPUs, VM, cached, 64 bytes
+ def test_performance_vm_multi_cpus_cached (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "VM - 64 bytes, multi CPU, cache size 1024"
+ scenario_cfg['streams'] = self.build_perf_profile_vm(64, cache_size = 1024)
+
+
+ scenario_cfg['core_count'] = setup_cfg['core_count']
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+ # multi CPUs, syn attack, 64 bytes
+ def test_performance_syn_attack_multi_cpus (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "syn attack - 64 bytes, multi CPUs"
+ scenario_cfg['streams'] = self.build_perf_profile_syn_attack(64)
+
+ scenario_cfg['core_count'] = setup_cfg['core_count']
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+
+############################################# test infra functions ###########################################
+
+ def execute_single_scenario (self, scenario_cfg, iterations = 4):
+ golden = scenario_cfg['mpps_per_core_golden']
+
+
+ for i in range(iterations, -1, -1):
+ report = self.execute_single_scenario_iteration(scenario_cfg)
+ rc = report.check_golden(golden)
+
+ if (rc == PerformanceReport.GOLDEN_NORMAL) or (rc == PerformanceReport.GOLDEN_BETTER):
+ if self.GAManager:
+ report.report_to_analytics(self.GAManager, golden)
+
+ return
+
+ print("\n*** Measured Mpps per core '{0}' is lower than expected golden '{1} - re-running scenario...{2} attempts left".format(report.avg_mpps_per_core, scenario_cfg['mpps_per_core_golden'], i))
+
+ assert 0, "performance failure"
+
+
+
+
+ def execute_single_scenario_iteration (self, scenario_cfg):
+
+ print("\nExecuting performance scenario: '{0}'\n".format(scenario_cfg['name']))
+
+ self.c.reset(ports = [0])
+ self.c.add_streams(ports = [0], streams = scenario_cfg['streams'])
+
+ # verify the setup has enough DP cores per port for this scenario
+ cores_per_port = self.c.system_info.get('dp_core_count_per_port', 0)
+ if cores_per_port < scenario_cfg['core_count']:
+ assert 0, "test configuration requires {0} cores but only {1} per port are available".format(scenario_cfg['core_count'], cores_per_port)
+
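+ # bit-mask with the lowest core_count bits set (e.g. 3 cores -> 0b111) selects the DP cores to use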
+ core_mask = (2 ** scenario_cfg['core_count']) - 1
+ self.c.start(ports = [0], mult = scenario_cfg['mult'], core_mask = [core_mask])
+
+ # stabilize
+ print("Step 1 - waiting for stabilization... (10 seconds)")
+ for _ in range(10):
+ time.sleep(1)
+ sys.stdout.write('.')
+ sys.stdout.flush()
+
+ print("\n")
+
+ samples = {'cpu' : [], 'bps': [], 'pps': []}
+
+ # let the server gather samples
+ print("Step 2 - Waiting for samples... (60 seconds)")
+
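+ # 3 rounds x 20 one-second samples = ~60 seconds of measurement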
+ for i in range(0, 3):
+
+ # sample bps/pps
+ for _ in range(0, 20):
+ stats = self.c.get_stats(ports = 0)
+ samples['bps'].append(stats[0]['tx_bps'])
+ samples['pps'].append(stats[0]['tx_pps'])
+ time.sleep(1)
+ sys.stdout.write('.')
+ sys.stdout.flush()
+
+ # sample CPU per core
+ rc = self.c._transmit('get_utilization')
+ if not rc:
+ raise Exception(rc)
+
+ data = rc.data()['cpu']
+ # filter
+ data = [s for s in data if s['ports'][0] == 0]
+
+ assert len(data) == scenario_cfg['core_count'] , "sampling info does not match core count"
+
+ for s in data:
+ samples['cpu'] += s['history']
+
+
+ stats = self.c.get_stats(ports = 0)
+ self.c.stop(ports = [0])
+
+
+
+ avg_values = {k:avg(v) for k, v in samples.items()}
+ avg_cpu = avg_values['cpu'] * scenario_cfg['core_count']
+ avg_gbps = avg_values['bps'] / 1e9
+ avg_mpps = avg_values['pps'] / 1e6
+
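+ # normalize throughput to a single fully-utilized core by scaling with 100/avg_cpu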
+ avg_gbps_per_core = avg_gbps * (100.0 / avg_cpu)
+ avg_mpps_per_core = avg_mpps * (100.0 / avg_cpu)
+
+ report = PerformanceReport(scenario = scenario_cfg['name'],
+ machine_name = CTRexScenario.setup_name,
+ core_count = scenario_cfg['core_count'],
+ avg_cpu = avg_cpu,
+ avg_gbps = avg_gbps,
+ avg_mpps = avg_mpps,
+ avg_gbps_per_core = avg_gbps_per_core,
+ avg_mpps_per_core = avg_mpps_per_core)
+
+
+ report.show()
+
+ print("")
+ golden = scenario_cfg['mpps_per_core_golden']
+ print("golden Mpps per core (at 100% CPU): min: {0}, max {1}".format(golden['min'], golden['max']))
+
+
+ return report
+
diff --git a/scripts/automation/regression/stateless_tests/stl_rx_test.py b/scripts/automation/regression/stateless_tests/stl_rx_test.py
new file mode 100644
index 00000000..524ad4bf
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_rx_test.py
@@ -0,0 +1,568 @@
+#!/router/bin/python
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+from trex_stl_lib.api import *
+import os, sys
+
+ERROR_LATENCY_TOO_HIGH = 1
+
+class STLRX_Test(CStlGeneral_Test):
+ """Tests for RX feature"""
+
+ def setUp(self):
+ per_driver_params = {
+ 'rte_vmxnet3_pmd': {
+ 'rate_percent': 1,
+ 'total_pkts': 50,
+ 'rate_latency': 1,
+ 'latency_9k_enable': False,
+ },
+ 'rte_ixgbe_pmd': {
+ 'rate_percent': 30,
+ 'total_pkts': 1000,
+ 'rate_latency': 1,
+ 'latency_9k_enable': True,
+ 'latency_9k_max_average': 300,
+ 'latency_9k_max_latency': 400,
+ },
+ 'rte_i40e_pmd': {
+ 'rate_percent': 80,
+ 'total_pkts': 1000,
+ 'rate_latency': 1,
+ 'latency_9k_enable': True,
+ 'latency_9k_max_average': 100,
+ 'latency_9k_max_latency': 250,
+ },
+ 'rte_igb_pmd': {
+ 'rate_percent': 80,
+ 'total_pkts': 500,
+ 'rate_latency': 1,
+ 'latency_9k_enable': False,
+ },
+ 'rte_em_pmd': {
+ 'rate_percent': 1,
+ 'total_pkts': 50,
+ 'rate_latency': 1,
+ 'latency_9k_enable': False,
+ },
+ 'rte_virtio_pmd': {
+ 'rate_percent': 1,
+ 'total_pkts': 50,
+ 'rate_latency': 1,
+ 'latency_9k_enable': False,
+ 'allow_packets_drop_num': 1, # allow 1 pkt drop
+ },
+ }
+
+ CStlGeneral_Test.setUp(self)
+ assert 'bi' in CTRexScenario.stl_ports_map
+
+ self.c = CTRexScenario.stl_trex
+
+ self.tx_port, self.rx_port = CTRexScenario.stl_ports_map['bi'][0]
+
+ port_info = self.c.get_port_info(ports = self.rx_port)[0]
+ self.speed = port_info['speed']
+
+
+ cap = port_info['rx']['caps']
+ if "flow_stats" not in cap or "latency" not in cap:
+ self.skip('port {0} does not support RX'.format(self.rx_port))
+ self.cap = cap
+
+ drv_name = port_info['driver']
+ if drv_name == 'rte_ixgbe_pmd':
+ self.ipv6_support = False
+ else:
+ self.ipv6_support = True
+ self.rate_percent = per_driver_params[drv_name]['rate_percent']
+ self.total_pkts = per_driver_params[drv_name]['total_pkts']
+ self.rate_lat = per_driver_params[drv_name].get('rate_latency', self.rate_percent)
+ self.latency_9k_enable = per_driver_params[drv_name]['latency_9k_enable']
+ self.latency_9k_max_average = per_driver_params[drv_name].get('latency_9k_max_average')
+ self.latency_9k_max_latency = per_driver_params[drv_name].get('latency_9k_max_latency')
+ self.allow_drop = per_driver_params[drv_name].get('allow_packets_drop_num', 0)
+
+ self.lat_pps = 1000
+ self.drops_expected = False
+ self.c.reset(ports = [self.tx_port, self.rx_port])
+
+ vm = STLScVmRaw( [ STLVmFlowVar ( "ip_src", min_value="10.0.0.1",
+ max_value="10.0.0.255", size=4, step=1,op="inc"),
+ STLVmWrFlowVar (fv_name="ip_src", pkt_offset= "IP.src" ), # write ip to packet IP.src
+ STLVmFixIpv4(offset = "IP") # fix checksum
+ ]
+ # Latency is bound to one core. We test that this option is not causing trouble
+ ,split_by_field = "ip_src"
+ ,cache_size =255 # Cache is ignored by latency flows. Need to test it is not crashing.
+ );
+
+ self.pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('Your_payload_comes_here'))
+ self.ipv6pkt = STLPktBuilder(pkt = Ether()/IPv6(dst="2001:0:4137:9350:8000:f12a:b9c8:2815",src="2001:4860:0:2001::68")
+ /UDP(dport=12,sport=1025)/('Your_payload_comes_here'))
+ self.large_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*1000))
+ self.pkt_9k = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*9000))
+ self.vm_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")
+ / UDP(dport=12,sport=1025)/('Your_payload_comes_here')
+ , vm = vm)
+ self.vm_large_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*1000)
+ , vm = vm)
+ self.vm_9k_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*9000)
+ ,vm = vm)
+
+
+ @classmethod
+ def tearDownClass(cls):
+ if CTRexScenario.stl_init_error:
+ return
+ # connect back at end of tests
+ if not cls.is_connected():
+ CTRexScenario.stl_trex.connect()
+
+
+ def __verify_latency (self, latency_stats, max_latency, max_average):
+
+ error = 0
+ err_latency = latency_stats['err_cntrs']
+ latency = latency_stats['latency']
+
+ for key in err_latency:
+ error += err_latency[key]
+ if error != 0:
+ pprint.pprint(err_latency)
+ tmp = 'RX pkts ERROR - at least one latency error counter is non-zero'
+ print(tmp)
+ assert False, tmp
+
+ if latency['average'] > max_average:
+ pprint.pprint(latency_stats)
+ tmp = 'Average latency is too high {0} (limit: {1})'.format(latency['average'], max_average)
+ print(tmp)
+ return ERROR_LATENCY_TOO_HIGH
+
+ if latency['total_max'] > max_latency:
+ pprint.pprint(latency_stats)
+ tmp = 'Max latency is too high {0} (limit: {1})'.format(latency['total_max'], max_latency)
+ print(tmp)
+ return ERROR_LATENCY_TOO_HIGH
+
+ return 0
+
+
+
+ def __verify_flow (self, pg_id, total_pkts, pkt_len, stats):
+ flow_stats = stats['flow_stats'].get(pg_id)
+ latency_stats = stats['latency'].get(pg_id)
+
+ if not flow_stats:
+ assert False, "no flow stats available"
+
+ tx_pkts = flow_stats['tx_pkts'].get(self.tx_port, 0)
+ tx_bytes = flow_stats['tx_bytes'].get(self.tx_port, 0)
+ rx_pkts = flow_stats['rx_pkts'].get(self.rx_port, 0)
+ if latency_stats is not None:
+ drops = latency_stats['err_cntrs']['dropped']
+ ooo = latency_stats['err_cntrs']['out_of_order']
+ dup = latency_stats['err_cntrs']['dup']
+ sth = latency_stats['err_cntrs']['seq_too_high']
+ stl = latency_stats['err_cntrs']['seq_too_low']
+ lat = latency_stats['latency']
+ if ooo != 0 or dup != 0 or stl != 0:
+ pprint.pprint(latency_stats)
+ tmp='Error packets - dropped:{0}, ooo:{1} dup:{2} seq too high:{3} seq too low:{4}'.format(drops, ooo, dup, sth, stl)
+ assert False, tmp
+
+ if (drops > self.allow_drop or sth != 0) and not self.drops_expected:
+ pprint.pprint(latency_stats)
+ tmp='Error packets - dropped:{0}, ooo:{1} dup:{2} seq too high:{3} seq too low:{4}'.format(drops, ooo, dup, sth, stl)
+ assert False, tmp
+
+ if tx_pkts != total_pkts:
+ pprint.pprint(flow_stats)
+ tmp = 'TX pkts mismatch - got: {0}, expected: {1}'.format(tx_pkts, total_pkts)
+ assert False, tmp
+
+ if tx_bytes != (total_pkts * pkt_len):
+ pprint.pprint(flow_stats)
+ tmp = 'TX bytes mismatch - got: {0}, expected: {1}'.format(tx_bytes, (total_pkts * pkt_len))
+ assert False, tmp
+
+ if abs(total_pkts - rx_pkts) > self.allow_drop and not self.drops_expected:
+ pprint.pprint(flow_stats)
+ tmp = 'RX pkts mismatch - got: {0}, expected: {1}'.format(rx_pkts, total_pkts)
+ assert False, tmp
+
+ if "rx_bytes" in self.cap:
+ rx_bytes = flow_stats['rx_bytes'].get(self.rx_port, 0)
+ if abs(rx_bytes / pkt_len - total_pkts ) > self.allow_drop and not self.drops_expected:
+ pprint.pprint(flow_stats)
+ tmp = 'RX bytes mismatch - got: {0}, expected: {1}'.format(rx_bytes, (total_pkts * pkt_len))
+ assert False, tmp
+
+
+ # RX iteration
+ def __rx_iteration (self, exp_list):
+
+ self.c.clear_stats()
+
+ self.c.start(ports = [self.tx_port])
+ self.c.wait_on_traffic(ports = [self.tx_port])
+ stats = self.c.get_stats()
+
+ for exp in exp_list:
+ self.__verify_flow(exp['pg_id'], exp['total_pkts'], exp['pkt_len'], stats)
+
+
+ # one stream on TX --> RX
+ def test_one_stream(self):
+ total_pkts = self.total_pkts
+
+ try:
+ s1 = STLStream(name = 'rx',
+ packet = self.pkt,
+ flow_stats = STLFlowLatencyStats(pg_id = 5),
+ mode = STLTXSingleBurst(total_pkts = total_pkts,
+ percentage = self.rate_lat
+ ))
+
+ # add both streams to ports
+ self.c.add_streams([s1], ports = [self.tx_port])
+
+ print("\ninjecting {0} packets on port {1}\n".format(total_pkts, self.tx_port))
+
+ exp = {'pg_id': 5, 'total_pkts': total_pkts, 'pkt_len': s1.get_pkt_len()}
+
+ self.__rx_iteration( [exp] )
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ def test_multiple_streams(self):
+ if self.is_virt_nics:
+ self.skip('Skip this for virtual NICs')
+
+ num_latency_streams = 128
+ num_flow_stat_streams = 127
+ total_pkts = int(self.total_pkts / (num_latency_streams + num_flow_stat_streams))
+ if total_pkts == 0:
+ total_pkts = 1
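+ # split the rate evenly across all latency and flow-stat streams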
+ percent = float(self.rate_lat) / (num_latency_streams + num_flow_stat_streams)
+
+ try:
+ streams = []
+ exp = []
+ # latency streams, identical except for pg_id and packet count
+ for pg_id in range(1, num_latency_streams):
+
+ streams.append(STLStream(name = 'rx {0}'.format(pg_id),
+ packet = self.pkt,
+ flow_stats = STLFlowLatencyStats(pg_id = pg_id),
+ mode = STLTXSingleBurst(total_pkts = total_pkts+pg_id, percentage = percent)))
+
+ exp.append({'pg_id': pg_id, 'total_pkts': total_pkts+pg_id, 'pkt_len': streams[-1].get_pkt_len()})
+
+ for pg_id in range(num_latency_streams + 1, num_latency_streams + num_flow_stat_streams):
+
+ streams.append(STLStream(name = 'rx {0}'.format(pg_id),
+ packet = self.pkt,
+ flow_stats = STLFlowStats(pg_id = pg_id),
+ mode = STLTXSingleBurst(total_pkts = total_pkts+pg_id, percentage = percent)))
+
+ exp.append({'pg_id': pg_id, 'total_pkts': total_pkts+pg_id, 'pkt_len': streams[-1].get_pkt_len()})
+
+ # add both streams to ports
+ self.c.add_streams(streams, ports = [self.tx_port])
+
+ self.__rx_iteration(exp)
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+ def test_1_stream_many_iterations (self):
+ total_pkts = self.total_pkts
+
+ try:
+ streams_data = [
+ {'name': 'Flow stat. No latency', 'pkt': self.pkt, 'lat': False},
+ {'name': 'Latency, no field engine', 'pkt': self.pkt, 'lat': True},
+ {'name': 'Latency, short packet with field engine', 'pkt': self.vm_pkt, 'lat': True},
+ {'name': 'Latency, large packet field engine', 'pkt': self.vm_large_pkt, 'lat': True}
+ ]
+ if self.latency_9k_enable:
+ streams_data.append({'name': 'Latency, 9k packet with field engine', 'pkt': self.vm_9k_pkt, 'lat': True})
+
+ if self.ipv6_support:
+ streams_data.append({'name': 'IPv6 flow stat. No latency', 'pkt': self.ipv6pkt, 'lat': False})
+ streams_data.append({'name': 'IPv6 latency, no field engine', 'pkt': self.ipv6pkt, 'lat': True})
+
+ streams = []
+ for data in streams_data:
+ if data['lat']:
+ flow_stats = STLFlowLatencyStats(pg_id = 5)
+ mode = STLTXSingleBurst(total_pkts = total_pkts, percentage = self.rate_percent)
+ else:
+ flow_stats = STLFlowStats(pg_id = 5)
+ mode = STLTXSingleBurst(total_pkts = total_pkts, pps = self.lat_pps)
+
+ s = STLStream(name = data['name'],
+ packet = data['pkt'],
+ flow_stats = flow_stats,
+ mode = mode
+ )
+ streams.append(s)
+
+ print("\ninjecting {0} packets on port {1}".format(total_pkts, self.tx_port))
+ exp = {'pg_id': 5, 'total_pkts': total_pkts}
+
+ for stream in streams:
+ self.c.add_streams([stream], ports = [self.tx_port])
+ print("Stream: {0}".format(stream.name))
+ exp['pkt_len'] = stream.get_pkt_len()
+ for i in range(0, 10):
+ print("Iteration {0}".format(i))
+ self.__rx_iteration( [exp] )
+ self.c.remove_all_streams(ports = [self.tx_port])
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+
+ def __9k_stream(self, pgid, ports, percent, max_latency, avg_latency, duration, pkt_size):
+ my_pg_id = pgid
+ s_ports = ports
+ all_ports = list(CTRexScenario.stl_ports_map['map'].keys())
+ if ports is None:
+ s_ports = all_ports
+ assert type(s_ports) == list
+
+ stream_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*pkt_size))
+
+ try:
+ # reset all ports
+ self.c.reset(ports = all_ports)
+
+
+ for pid in s_ports:
+ s1 = STLStream(name = 'rx',
+ packet = self.pkt,
+ flow_stats = STLFlowLatencyStats(pg_id = my_pg_id+pid),
+ mode = STLTXCont(pps = 1000))
+
+ s2 = STLStream(name = 'bulk',
+ packet = stream_pkt,
+ mode = STLTXCont(percentage = percent))
+
+
+ # add both streams to ports
+ self.c.add_streams([s1,s2], ports = [pid])
+
+ self.c.clear_stats()
+
+ self.c.start(ports = s_ports,duration = duration)
+ self.c.wait_on_traffic(ports = s_ports,timeout = duration+10,rx_delay_ms = 100)
+ stats = self.c.get_stats()
+
+ for pid in s_ports:
+ latency_stats = stats['latency'].get(my_pg_id+pid)
+ #pprint.pprint(latency_stats)
+ if self.__verify_latency(latency_stats, max_latency, avg_latency) != 0:
+ return ERROR_LATENCY_TOO_HIGH
+
+ return 0
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+
+
+
+ # verify latency stays low while a continuous stream of large (up to 9K byte) packets is running
+ def test_9k_stream(self):
+ if self.is_virt_nics:
+ self.skip('Skip this for virtual NICs')
+
+ if not self.latency_9k_enable:
+ print("SKIP")
+ return
+
+ for i in range(0, 5):
+ print("Iteration {0}".format(i))
+ duration = random.randint(10, 70)
+ pgid = random.randint(1, 65000)
+ pkt_size = random.randint(1000, 9000)
+ all_ports = list(CTRexScenario.stl_ports_map['map'].keys())
+
+ s_port = random.sample(all_ports, random.randint(1, len(all_ports)))
+ s_port = sorted(s_port)
+ if self.speed == 40:
+ # the NIC cannot sustain full rate when both ports of a pair transmit, so keep only even ports
+ s_port = list(filter(lambda x: x % 2 == 0, s_port))
+ if len(s_port) == 0:
+ s_port = [0]
+
+ error = 1
+ for j in range(0, 5):
+ print(" {4} - duration {0} pgid {1} pkt_size {2} s_port {3} ".format(duration, pgid, pkt_size, s_port, j))
+ if self.__9k_stream(pgid,
+ s_port, 90,
+ self.latency_9k_max_latency,
+ self.latency_9k_max_average,
+ duration,
+ pkt_size) == 0:
+ error = 0
+ break
+
+ if error:
+ assert False, "Latency too high"
+ else:
+ print("===>Iteration {0} PASS {1}".format(i, j))
+
+
+ def check_stats (self,stats,a,b,err):
+ if a != b:
+ tmp = 'ERROR field : {0}, read : {1} != expected : {2} '.format(err,a,b)
+ pprint.pprint(stats)
+ assert False,tmp
+
+
+
+ def send_1_burst(self,from_port,is_latency,pkts):
+
+ pid = from_port
+ base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+
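+ # pad the frame to 60 bytes; the 4-byte FCS added by the NIC makes it 64 bytes on the wire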
+ pad = (60 - len(base_pkt)) * 'x'
+
+ stream_pkt = STLPktBuilder(pkt = base_pkt/pad)
+
+ all_ports=list(CTRexScenario.stl_ports_map['map'].keys());
+
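+ # dpid is the peer port that receives pid's traffic, per the discovered port mapping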
+ dpid = CTRexScenario.stl_ports_map['map'][pid]
+
+ s_ports =[pid]
+
+ try:
+ # reset all ports
+ self.c.reset(ports = all_ports)
+
+
+ for pid in s_ports:
+ if is_latency:
+ s1 = STLStream(name = 'rx',
+ packet = stream_pkt,
+ flow_stats = STLFlowLatencyStats(pg_id = 5 + pid),
+ mode = STLTXSingleBurst(total_pkts = pkts,pps = 1000))
+ else:
+ s1 = STLStream(name = 'rx',
+ packet = stream_pkt,
+ mode = STLTXSingleBurst(total_pkts = pkts,pps = 1000))
+
+
+ # add both streams to ports
+ self.c.add_streams(s1, ports = [pid])
+
+ self.c.clear_stats()
+
+ self.c.start(ports = s_ports)
+ self.c.wait_on_traffic(ports = s_ports)
+
+ stats = self.c.get_stats()
+
+ ips = stats[dpid]
+ ops = stats[pid]
+ tps = stats['total']
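+ # each frame is 60 bytes + 4-byte FCS, so counters should report 64 bytes per packet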
+ tbytes = pkts*64
+
+ self.check_stats (stats,ops["obytes"], tbytes,"ops[obytes]")
+ self.check_stats (stats,ops["opackets"], pkts,"ops[opackets]")
+
+ self.check_stats (stats,ips["ibytes"], tbytes,"ips[ibytes]")
+ self.check_stats (stats,ips["ipackets"], pkts,"ips[ipackets]")
+
+ self.check_stats (stats,tps['ibytes'], tbytes,"tps[ibytes]")
+ self.check_stats (stats,tps['obytes'], tbytes,"tps[obytes]")
+ self.check_stats (stats,tps['ipackets'], pkts,"tps[ipackets]")
+ self.check_stats (stats,tps['opackets'], pkts,"tps[opackets]")
+
+ if is_latency:
+ ls=stats['flow_stats'][5+ pid]
+ self.check_stats (stats,ls['rx_pkts']['total'], pkts,"ls['rx_pkts']['total']")
+ self.check_stats (stats,ls['rx_pkts'][dpid], pkts,"ls['rx_pkts'][dpid]")
+
+ self.check_stats (stats,ls['tx_pkts']['total'], pkts,"ls['tx_pkts']['total']")
+ self.check_stats (stats,ls['tx_pkts'][pid], pkts,"ls['tx_pkts'][pid]")
+
+ self.check_stats (stats,ls['tx_bytes']['total'], tbytes,"ls['tx_bytes']['total']")
+ self.check_stats (stats,ls['tx_bytes'][pid], tbytes,"ls['tx_bytes'][pid]")
+
+
+ return 0
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+
+ def test_fcs_stream(self):
+ """ send bursts of 64-byte packets (with and without latency stats) and check that all byte counters report 64 bytes per packet """
+
+ if self.is_virt_nics:
+ self.skip('Skip this for virtual NICs')
+
+ all_ports = list(CTRexScenario.stl_ports_map['map'].keys())
+ for port in all_ports:
+ for l in [True, False]:
+ print(" test port {0} latency : {1} ".format(port, l))
+ self.send_1_burst(port, l, 100)
+
+
+ # this test adds more and more latency streams, re-testing with each increment
+ def test_incremental_latency_streams (self):
+ if self.is_virt_nics:
+ self.skip('Skip this for virtual NICs')
+
+ total_pkts = self.total_pkts
+ percent = 0.5
+
+ try:
+ # We run up to the maximum number of allowed streams. At some point drops are expected, because the rate is too high.
+ # Then we run with fewer streams again, to see that the system is still working.
+ for num_iter in [128, 5]:
+ exp = []
+ for i in range(1, num_iter):
+ # mix small and large packets
+ if i % 2 != 0:
+ my_pkt = self.pkt
+ else:
+ my_pkt = self.large_pkt
+ s1 = STLStream(name = 'rx',
+ packet = my_pkt,
+ flow_stats = STLFlowLatencyStats(pg_id = i),
+ mode = STLTXSingleBurst(total_pkts = total_pkts,
+ percentage = percent
+ ))
+
+ # add both streams to ports
+ self.c.add_streams([s1], ports = [self.tx_port])
+ total_percent = i * percent
+ if total_percent > self.rate_lat:
+ self.drops_expected = True
+ else:
+ self.drops_expected = False
+
+ print("port {0} : {1} streams at {2}% of line rate\n".format(self.tx_port, i, total_percent))
+
+ exp.append({'pg_id': i, 'total_pkts': total_pkts, 'pkt_len': s1.get_pkt_len()})
+
+ self.__rx_iteration( exp )
+
+ self.c.remove_all_streams(ports = [self.tx_port])
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
diff --git a/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py b/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py
new file mode 100755
index 00000000..14ef36f7
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py
@@ -0,0 +1,39 @@
+#!/router/bin/python
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+from misc_methods import run_command
+from nose.plugins.attrib import attr
+
+@attr('client_package')
+class CTRexClientPKG_Test(CStlGeneral_Test):
+ """This class tests TRex client package"""
+
+ def setUp(self):
+ CStlGeneral_Test.setUp(self)
+ # examples connect on their own
+ if CTRexScenario.stl_trex.is_connected():
+ CTRexScenario.stl_trex.disconnect()
+ CStlGeneral_Test.unzip_client_package()
+
+ def tearDown(self):
+ # connect back at end of tests
+ if not CTRexScenario.stl_trex.is_connected():
+ CTRexScenario.stl_trex.connect()
+ CStlGeneral_Test.tearDown(self)
+
+ def run_client_package_stl_example(self, python_version):
+ commands = [
+ 'cd %s' % CTRexScenario.scripts_path,
+ 'source find_python.sh --%s' % python_version,
+ 'which $PYTHON',
+ 'cd trex_client/stl/examples',
+ '$PYTHON stl_imix.py -s %s' % self.configuration.trex['trex_name'],
+ ]
+ return_code, stdout, stderr = run_command("bash -ce '%s'" % '; '.join(commands))
+ if return_code:
+ self.fail('Error in running stl example using %s: %s' % (python_version, [return_code, stdout, stderr]))
+
+ def test_client_python2(self):
+ self.run_client_package_stl_example(python_version = 'python2')
+
+ def test_client_python3(self):
+ self.run_client_package_stl_example(python_version = 'python3')
diff --git a/scripts/automation/regression/test_pcaps/pcap_dual_test.erf b/scripts/automation/regression/test_pcaps/pcap_dual_test.erf
new file mode 100644
index 00000000..26b0b6b4
--- /dev/null
+++ b/scripts/automation/regression/test_pcaps/pcap_dual_test.erf
Binary files differ
diff --git a/scripts/automation/regression/trex.py b/scripts/automation/regression/trex.py
new file mode 100644
index 00000000..7b96f2f8
--- /dev/null
+++ b/scripts/automation/regression/trex.py
@@ -0,0 +1,457 @@
+#!/router/bin/python
+
+import os
+import sys
+import subprocess
+import misc_methods
+import re
+import signal
+import time
+from CProgressDisp import TimedProgressBar
+from stateful_tests.tests_exceptions import TRexInUseError
+import datetime
+
+class CTRexScenario:
+ modes = set() # list of modes of this setup: loopback, virtual etc.
+ server_logs = False
+ is_test_list = False
+ is_init = False
+ is_stl_init = False
+ trex_crashed = False
+ configuration = None
+ trex = None
+ stl_trex = None
+ stl_ports_map = None
+ stl_init_error = None
+ router = None
+ router_cfg = None
+ daemon_log_lines = 0
+ setup_name = None
+ setup_dir = None
+ router_image = None
+ trex_version = None
+ scripts_path = None
+ benchmark = None
+ report_dir = 'reports'
+ # logger = None
+ test_types = {'functional_tests': [], 'stateful_tests': [], 'stateless_tests': []}
+ is_copied = False
+ GAManager = None
+ no_daemon = False
+ debug_image = False
+ test = None
+ json_verbose = False
+
+class CTRexRunner:
+ """This is an instance for generating a CTRexRunner"""
+
+ def __init__ (self, config_dict, yaml):
+ self.trex_config = config_dict # alternatively: misc_methods.load_config_file(config_file)
+ self.yaml = yaml
+
+
+ def get_config (self):
+ """ get_config() -> dict
+
+ Returns the stored configuration of the TRex server of the CTRexRunner instance as a dictionary
+ """
+ return self.trex_config
+
+ def set_yaml_file (self, yaml_path):
+ """ update_yaml_file (self, yaml_path) -> None
+
+ Defines the yaml file to be used by the TRex.
+ """
+ self.yaml = yaml_path
+
+
+ def generate_run_cmd (self, multiplier, cores, duration, nc = True, export_path="/tmp/trex.txt", **kwargs):
+ """ generate_run_cmd(self, multiplier, cores, duration, nc, export_path) -> str
+
+ Generates a custom running command for the kick-off of the TRex traffic generator.
+ Returns a command (string) to be issued on the trex server
+
+ Parameters
+ ----------
+ multiplier : float
+ Defines the TRex multiplier factor (platform dependent)
+ cores : int
+ Defines the number of cores TRex will use
+ duration : int
+ Defines the duration of the test
+ nc : bool
+ If True, appends the '--nc' flag to the command
+ export_path : str
+ a full system path to which the results of the trex-run will be logged.
+
+ """
+ if self.yaml is None:
+ raise ValueError('TRex yaml file is not defined')
+ fileName, fileExtension = os.path.splitext(self.yaml)
+ if fileExtension != '.yaml':
+ raise TypeError('yaml path is not referencing a .yaml file')
+
+ if 'results_file_path' in kwargs:
+ export_path = kwargs['results_file_path']
+
+ trex_cmd_str = './t-rex-64 -c %d -m %f -d %d -f %s '
+
+ if nc:
+ trex_cmd_str = trex_cmd_str + ' --nc '
+
+ trex_cmd = trex_cmd_str % (cores,
+ multiplier,
+ duration,
+ self.yaml)
+ # self.trex_config['trex_latency'])
+
+ for key, value in kwargs.items():
+ tmp_key = key.replace('_','-')
+ dash = ' -' if (len(key)==1) else ' --'
+ if value == True:
+ trex_cmd += (dash + tmp_key)
+ else:
+ trex_cmd += (dash + '{k} {val}'.format( k = tmp_key, val = value ))
+
+ print("\nTRex COMMAND: ", trex_cmd)
+
+ cmd = 'sshpass.exp %s %s root "cd %s; %s > %s"' % (self.trex_config['trex_password'],
+ self.trex_config['trex_name'],
+ self.trex_config['trex_version_path'],
+ trex_cmd,
+ export_path)
+
+ return cmd
+
+ def generate_fetch_cmd (self, result_file_full_path="/tmp/trex.txt"):
+ """ generate_fetch_cmd(self, result_file_full_path) -> str
+
+ Generates a custom command which will fetch the results of the TRex run.
+ Returns a command (string) to be issued on the trex server.
+
+ Example use: generate_fetch_cmd() - command that will fetch the content from the default log file - /tmp/trex.txt
+ generate_fetch_cmd("/tmp/trex_secondary_file.txt") - command that will fetch the content from a custom log file - /tmp/trex_secondary_file.txt
+ """
+ #dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
+ script_running_dir = os.path.dirname(os.path.realpath(__file__)) # get the current script's directory so that sshpass can be accessed
+ cmd = script_running_dir + '/sshpass.exp %s %s root "cat %s"' % (self.trex_config['trex_password'],
+ self.trex_config['trex_name'],
+ result_file_full_path)
+ return cmd
+
+
+
+ def run (self, multiplier, cores, duration, **kwargs):
+ """ run(self, multiplier, cores, duration, **kwargs) -> CTRexResult
+
+ Runs the TRex server based on the config file.
+ Returns a CTRexResult object containing the results of the run, or None on failure.
+
+ Parameters
+ ----------
+ multiplier : float
+ Defines the TRex multiplier factor (platform dependent)
+ cores : int
+ Defines the number of cores TRex will use
+ duration : int
+ Defines the duration of the test
+ results_file_path : str
+ a full system path to which the results of the trex-run will be logged and fetched from.
+
+ """
+ tmp_path = None
+ # print kwargs
+ if 'export_path' in kwargs:
+ tmp_path = kwargs['export_path']
+ del kwargs['export_path']
+ cmd = self.generate_run_cmd(multiplier, cores, duration, export_path = tmp_path, **kwargs)
+ else:
+ cmd = self.generate_run_cmd(multiplier, cores, duration, **kwargs)
+
+# print 'TRex complete command to be used:'
+# print cmd
+ # print kwargs
+
+ progress_thread = TimedProgressBar(duration)
+ progress_thread.start()
+ interrupted = False
+ try:
+ start_time = time.time()
+ start = datetime.datetime.now()
+ results = subprocess.call(cmd, shell = True, stdout = open(os.devnull, 'wb'))
+ end_time = time.time()
+ fin = datetime.datetime.now()
+ # print "Time difference : ", fin-start
+ runtime_deviation = abs(( (end_time - start_time)/ (duration+15) ) - 1)
+ print("runtime_deviation: %2.0f %%" % ( runtime_deviation*100.0))
+ if ( runtime_deviation > 0.6 ) :
+ # If the run stopped immediately - classify as Trex in use or reachability issue
+ interrupted = True
+ if ((end_time - start_time) < 2):
+ raise TRexInUseError ('TRex run failed since TRex is used by another process, or due to reachability issues')
+ else:
+ CTRexScenario.trex_crashed = True
+ # results = subprocess.Popen(cmd, stdout = open(os.devnull, 'wb'),
+ # shell=True, preexec_fn=os.setsid)
+ except KeyboardInterrupt:
+ print("\nTRex test interrupted by user during traffic generation!!")
+ # subprocess.call() blocks until the remote command returns, so there is no child process handle to kill here
+ interrupted = True
+ raise RuntimeError
+ finally:
+ progress_thread.join(isPlannedStop = (not interrupted) )
+
+ if results!=0:
+ sys.stderr.write("TRex run failed. Please Contact trex-dev mailer for further details")
+ sys.stderr.flush()
+ return None
+ elif interrupted:
+ sys.stderr.write("TRex run failed due user-interruption.")
+ sys.stderr.flush()
+ return None
+ else:
+
+ if tmp_path:
+ cmd = self.generate_fetch_cmd( tmp_path )#**kwargs)#results_file_path)
+ else:
+ cmd = self.generate_fetch_cmd()
+
+ try:
+ run_log = subprocess.check_output(cmd, shell = True)
+ trex_result = CTRexResult(None, run_log)
+ trex_result.load_file_lines()
+ trex_result.parse()
+
+ return trex_result
+
+ except subprocess.CalledProcessError:
+ sys.stderr.write("TRex result fetching failed. Please Contact trex-dev mailer for further details")
+ sys.stderr.flush()
+ return None
+
+class CTRexResult():
+ """This is an instance for generating a CTRexResult"""
+ def __init__ (self, file, buffer = None):
+ self.file = file
+ self.buffer = buffer
+ self.result = {}
+
+
+ def load_file_lines (self):
+ """ load_file_lines(self) -> None
+
+ Loads into the self.lines the content of self.file
+ """
+ if self.buffer:
+ self.lines = self.buffer.split("\n")
+ else:
+ f = open(self.file,'r')
+ self.lines = f.readlines()
+ f.close()
+
+
+ def dump (self):
+ """ dump(self) -> None
+
+ Prints nicely the content of self.result dictionary into the screen
+ """
+ for key, value in self.result.items():
+ print("{0:20} : \t{1}".format(key, float(value)))
+
+ def update (self, key, val, _str):
+ """ update (self, key, val, _str) -> None
+
+ Updates the self.result[key] with a possibly new value representation of val
+ Example: 15K might be updated into 15000.0
+
+ Parameters
+ ----------
+ key :
+ Key of the self.result dictionary of the TRexResult instance
+ val : float
+ New value candidate for self.result[key]
+ _str : str
+ a textual representation of the value's units; a leading 'G', 'M' or 'K' scales val by 1E9, 1E6 or 1E3 respectively
+
+ """
+
+ s = _str.strip()
+
+ # startswith() is safe even when the units string is empty
+ if s.startswith("G"):
+ val = val*1E9
+ elif s.startswith("M"):
+ val = val*1E6
+ elif s.startswith("K"):
+ val = val*1E3
+
+ if key in self.result:
+ if self.result[key] > 0:
+ if (val/self.result[key] > 0.97 ):
+ self.result[key]= val
+ else:
+ self.result[key] = val
+ else:
+ self.result[key] = val
+
+
+
+ def parse (self):
+ """ parse(self) -> None
+
+ Parse the content of the result file from the TRex test and load the data into the self.result dictionary
+ """
+ stop_read = False
+ d = {
+ 'total-tx' : 0,
+ 'total-rx' : 0,
+ 'total-pps' : 0,
+ 'total-cps' : 0,
+
+ 'expected-pps' : 0,
+ 'expected-cps' : 0,
+ 'expected-bps' : 0,
+ 'active-flows' : 0,
+ 'open-flows' : 0
+ }
+
+ self.error = ""
+
+ # Parse the output of the test line by line; each line matches a different RegEx,
+ # so different rules apply
+ for line in self.lines:
+ match = re.match(".*/var/run/.rte_config.*", line)
+ if match:
+ stop_read = True
+ continue
+
+ #Total-Tx : 462.42 Mbps Nat_time_out : 0 ==> we try to parse the next decimal in this case Nat_time_out
+# match = re.match("\W*(\w(\w|[-])+)\W*([:]|[=])\W*(\d+[.]\d+)\W*\w+\W+(\w+)\W*([:]|[=])\W*(\d+)(.*)", line);
+# if match:
+# key = misc_methods.mix_string(match.group(5))
+# val = float(match.group(7))
+# # continue to parse !! we try the second
+# self.result[key] = val #update latest
+
+ # check if we need to stop reading
+ match = re.match(".*latency daemon has stopped.*", line)
+ if match:
+ stop_read = True
+ continue
+
+ match = re.match("\W*(\w(\w|[-])+)\W*([:]|[=])\W*(\d+[.]\d+)(.*ps)\s+(\w+)\W*([:]|[=])\W*(\d+)", line)
+ if match:
+ key = misc_methods.mix_string(match.group(1))
+ val = float(match.group(4))
+ if key in d:
+ if stop_read == False:
+ self.update (key, val, match.group(5))
+ else:
+ self.result[key] = val # update latest
+ key2 = misc_methods.mix_string(match.group(6))
+ val2 = int(match.group(8))
+ self.result[key2] = val2 # always take latest
+
+
+ match = re.match("\W*(\w(\w|[-])+)\W*([:]|[=])\W*(\d+[.]\d+)(.*)", line)
+ if match:
+ key = misc_methods.mix_string(match.group(1))
+ val = float(match.group(4))
+ if key in d:
+ if stop_read == False:
+ self.update (key, val, match.group(5))
+ else:
+ self.result[key] = val # update latest
+ continue
+
+ match = re.match("\W*(\w(\w|[-])+)\W*([:]|[=])\W*(\d+)(.*)", line)
+ if match:
+ key = misc_methods.mix_string(match.group(1))
+ val = float(match.group(4))
+ self.result[key] = val #update latest
+ continue
+
+ match = re.match("\W*(\w(\w|[-])+)\W*([:]|[=])\W*(OK)(.*)", line)
+ if match:
+ key = misc_methods.mix_string(match.group(1))
+ val = 0 # valid
+ self.result[key] = val #update latest
+ continue
+
+ match = re.match("\W*(Cpu Utilization)\W*([:]|[=])\W*(\d+[.]\d+) %(.*)", line)
+ if match:
+ key = misc_methods.mix_string(match.group(1))
+ val = float(match.group(3))
+ if key in self.result:
+ if (self.result[key] < val): # update only if larger than previous value
+ self.result[key] = val
+ else:
+ self.result[key] = val
+ continue
+
+ match = re.match(".*(rx_check\s.*)\s+:\s+(\w+)", line)
+ if match:
+ key = misc_methods.mix_string(match.group(1))
+ try:
+ val = int(match.group(2))
+ except ValueError: # corresponds with rx_check validation case
+ val = match.group(2)
+ finally:
+ self.result[key] = val
+ continue
+
+
+ def get_status (self, drop_expected = False):
+ if (self.error != ""):
+ print(self.error)
+ return (self.STATUS_ERR_FATAL)
+
+ d = self.result
+
+ # test for latency
+ latency_limit = 5000
+ if ( d['maximum-latency'] > latency_limit ):
+ self.reason="Abnormal latency measured (higher than %s" % latency_limit
+ return self.STATUS_ERR_LATENCY
+
+ # test for drops
+ if drop_expected == False:
+ if ( d['total-pkt-drop'] > 0 ):
+ self.reason=" At least one packet dropped "
+ return self.STATUS_ERR_DROP
+
+ # test for rx/tx distance
+ rcv_vs_tx = d['total-tx']/d['total-rx']
+ if ( (rcv_vs_tx >1.2) or (rcv_vs_tx <0.9) ):
+ self.reason="rx and tx should be close"
+ return self.STATUS_ERR_RX_TX_DISTANCE
+
+ # expected measurement
+ expect_vs_measured = d['total-tx']/d['expected-bps']
+ if ( (expect_vs_measured > 1.1) or (expect_vs_measured < 0.9) ):
+ print(expect_vs_measured)
+ print(d['total-tx'])
+ print(d['expected-bps'])
+ self.reason="measure is not as expected"
+ return self.STATUS_ERR_BAD_EXPECTED_MEASUREMENT
+
+ if ( d['latency-any-error'] !=0 ):
+ self.reason=" latency-any-error has error"
+ return self.STATUS_ERR_LATENCY_ANY_ERROR
+
+ return self.STATUS_OK
+
+ # return types
+ STATUS_OK = 0
+ STATUS_ERR_FATAL = 1
+ STATUS_ERR_LATENCY = 2
+ STATUS_ERR_DROP = 3
+ STATUS_ERR_RX_TX_DISTANCE = 4
+ STATUS_ERR_BAD_EXPECTED_MEASUREMENT = 5
+ STATUS_ERR_LATENCY_ANY_ERROR = 6
+
+def test_TRex_result_parser():
+ t = CTRexResult('trex.txt')
+ t.load_file_lines()
+ t.parse()
+ print(t.result)
+
+
+
+
+if __name__ == "__main__":
+ #test_TRex_result_parser();
+ pass
diff --git a/scripts/automation/regression/trex_unit_test.py b/scripts/automation/regression/trex_unit_test.py
new file mode 100755
index 00000000..daa1abaf
--- /dev/null
+++ b/scripts/automation/regression/trex_unit_test.py
@@ -0,0 +1,437 @@
+#!/usr/bin/env python
+
+__copyright__ = "Copyright 2014"
+
+"""
+Name:
+ trex_unit_test.py
+
+
+Description:
+
+ This script provides the functionality to test the performance of the TRex traffic generator.
+ The tested scenario is a TRex TG directly connected to a Cisco router.
+
+::
+
+ Topology:
+
+ ------- --------
+ | | Tx---1gig/10gig----Rx | |
+ | TRex | | router |
+ | | Rx---1gig/10gig----Tx | |
+ ------- --------
+
+"""
+
+import os
+import sys
+import outer_packages
+
+import nose
+from nose.plugins import Plugin
+from nose.selector import Selector
+import CustomLogger
+import misc_methods
+from rednose import RedNose
+import termstyle
+from trex import CTRexScenario
+from trex_stf_lib.trex_client import *
+from trex_stf_lib.trex_exceptions import *
+from trex_stl_lib.api import *
+from trex_stl_lib.utils.GAObjClass import GAmanager_Regression
+import trex
+import socket
+from pprint import pprint
+import time
+from distutils.dir_util import mkpath
+
+# nose overrides
+
+# option to select wanted test by name without file, class etc.
+def new_Selector_wantMethod(self, method, orig_Selector_wantMethod = Selector.wantMethod):
+ result = orig_Selector_wantMethod(self, method)
+ return result and (not CTRexScenario.test or CTRexScenario.test in getattr(method, '__name__', ''))
+
+Selector.wantMethod = new_Selector_wantMethod
+
+def new_Selector_wantFunction(self, function, orig_Selector_wantFunction = Selector.wantFunction):
+ result = orig_Selector_wantFunction(self, function)
+ return result and (not CTRexScenario.test or CTRexScenario.test in getattr(function, '__name__', ''))
+
+Selector.wantFunction = new_Selector_wantFunction
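+
+# For example (hypothetical invocation), the overrides above let a single test
+# be selected by bare name, without file/class qualifiers:
+#   ./trex_unit_test.py --stl -t test_connectivity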
+
+# override nose's strange representation of setUpClass errors
+def __suite_repr__(self):
+ if hasattr(self.context, '__module__'): # inside class, setUpClass etc.
+ class_repr = nose.suite._strclass(self.context)
+ else: # outside of class, setUpModule etc.
+ class_repr = nose.suite._strclass(self.__class__)
+ return '%s.%s' % (class_repr, getattr(self.context, '__name__', self.context))
+
+nose.suite.ContextSuite.__repr__ = __suite_repr__
+nose.suite.ContextSuite.__str__ = __suite_repr__
+
+# /nose overrides
+
+def check_trex_path(trex_path):
+ if os.path.isfile('%s/trex_daemon_server' % trex_path):
+ return os.path.abspath(trex_path)
+
+def check_setup_path(setup_path):
+ if os.path.isfile('%s/config.yaml' % setup_path):
+ return os.path.abspath(setup_path)
+
+
+def get_trex_path():
+ latest_build_path = check_trex_path(os.getenv('TREX_UNDER_TEST')) # TREX_UNDER_TEST is env var pointing to <trex-core>/scripts
+ if not latest_build_path:
+ latest_build_path = check_trex_path(os.path.join(os.pardir, os.pardir))
+ if not latest_build_path:
+ raise Exception('Could not determine trex_under_test folder, try setting env.var. TREX_UNDER_TEST')
+ return latest_build_path
+
+
+def address_to_ip(address):
+    # DNS resolution can be flaky on loaded setups; retry a few times and let
+    # the final call raise if it still fails.
+    for i in range(5):
+        try:
+            return socket.gethostbyname(address)
+        except:
+            continue
+    return socket.gethostbyname(address)
+
+
+class CTRexTestConfiguringPlugin(Plugin):
+ def options(self, parser, env = os.environ):
+ super(CTRexTestConfiguringPlugin, self).options(parser, env)
+ parser.add_option('--cfg', '--trex-scenario-config', action='store',
+ dest='config_path',
+ help='Specify path to folder with config.yaml and benchmark.yaml')
+ parser.add_option('--skip-clean', '--skip_clean', action='store_true',
+ dest='skip_clean_config',
+ help='Skip the clean configuration replace on the platform.')
+ parser.add_option('--load-image', '--load_image', action='store_true', default = False,
+ dest='load_image',
+ help='Install image specified in config file on router.')
+ parser.add_option('--log-path', '--log_path', action='store',
+ dest='log_path',
+                          help="Specify path for the tests' log to be saved at. Once applied, log capturing by nose will be disabled.") # Default is CURRENT/WORKING/PATH/trex_log/trex_log.log
+ parser.add_option('--json-verbose', '--json_verbose', action="store_true", default = False,
+ dest="json_verbose",
+ help="Print JSON-RPC commands.")
+ parser.add_option('--telnet-verbose', '--telnet_verbose', action="store_true", default = False,
+ dest="telnet_verbose",
+                          help="Print telnet commands and responses.")
+ parser.add_option('--server-logs', '--server_logs', action="store_true", default = False,
+ dest="server_logs",
+ help="Print server side (TRex and trex_daemon) logs per test.")
+ parser.add_option('--kill-running', '--kill_running', action="store_true", default = False,
+ dest="kill_running",
+ help="Kills running TRex process on remote server (useful for regression).")
+ parser.add_option('--func', '--functional', action="store_true", default = False,
+ dest="functional",
+ help="Run functional tests.")
+ parser.add_option('--stl', '--stateless', action="store_true", default = False,
+ dest="stateless",
+ help="Run stateless tests.")
+ parser.add_option('--stf', '--stateful', action="store_true", default = False,
+ dest="stateful",
+ help="Run stateful tests.")
+ parser.add_option('--pkg', action="store",
+ dest="pkg",
+                      help="Run with given TRex package. Make sure the path is available on the server machine.")
+ parser.add_option('--collect', action="store_true", default = False,
+ dest="collect",
+ help="Alias to --collect-only.")
+ parser.add_option('--warmup', action="store_true", default = False,
+ dest="warmup",
+                      help="Warm up the system for stateful tests: run a 30-second 9k imix test without checking results.")
+ parser.add_option('--test-client-package', '--test_client_package', action="store_true", default = False,
+ dest="test_client_package",
+ help="Includes tests of client package.")
+ parser.add_option('--long', action="store_true", default = False,
+ dest="long",
+                      help="Flag for long (stability) tests.")
+ parser.add_option('--ga', action="store_true", default = False,
+ dest="ga",
+ help="Flag to send benchmarks to GA.")
+ parser.add_option('--no-daemon', action="store_true", default = False,
+ dest="no_daemon",
+                      help="Flag to use an already running stateless server; no daemons are needed.")
+ parser.add_option('--debug-image', action="store_true", default = False,
+ dest="debug_image",
+ help="Flag that specifies to use t-rex-64-debug as TRex executable.")
+ parser.add_option('--trex-args', action='store', default = '',
+ dest="trex_args",
+ help="Additional TRex arguments (--no-watchdog etc.).")
+ parser.add_option('-t', '--test', action='store', default = '', dest='test',
+ help='Test name to run (without file, class etc.)')
+
+
+ def configure(self, options, conf):
+ self.collect_only = options.collect_only
+ self.functional = options.functional
+ self.stateless = options.stateless
+ self.stateful = options.stateful
+ self.pkg = options.pkg
+ self.json_verbose = options.json_verbose
+ self.telnet_verbose = options.telnet_verbose
+ self.no_daemon = options.no_daemon
+ CTRexScenario.test = options.test
+ if self.collect_only or self.functional:
+ return
+ if CTRexScenario.setup_dir and options.config_path:
+ raise Exception('Please either define --cfg or use env. variable SETUP_DIR, not both.')
+ if not options.config_path and CTRexScenario.setup_dir:
+ options.config_path = CTRexScenario.setup_dir
+ if not options.config_path:
+ raise Exception('Please specify path to config.yaml using --cfg parameter or env. variable SETUP_DIR')
+ options.config_path = options.config_path.rstrip('/')
+ CTRexScenario.setup_name = os.path.basename(options.config_path)
+ self.configuration = misc_methods.load_complete_config_file(os.path.join(options.config_path, 'config.yaml'))
+ self.configuration.trex['trex_name'] = address_to_ip(self.configuration.trex['trex_name']) # translate hostname to ip
+ self.benchmark = misc_methods.load_benchmark_config_file(os.path.join(options.config_path, 'benchmark.yaml'))
+ self.enabled = True
+ self.modes = self.configuration.trex.get('modes', [])
+ self.kill_running = options.kill_running
+ self.load_image = options.load_image
+        self.clean_config = not options.skip_clean_config
+ self.server_logs = options.server_logs
+ if options.log_path:
+ self.loggerPath = options.log_path
+ # initialize CTRexScenario global testing class, to be used by all tests
+ CTRexScenario.configuration = self.configuration
+ CTRexScenario.no_daemon = options.no_daemon
+ CTRexScenario.benchmark = self.benchmark
+ CTRexScenario.modes = set(self.modes)
+ CTRexScenario.server_logs = self.server_logs
+ CTRexScenario.debug_image = options.debug_image
+ CTRexScenario.json_verbose = self.json_verbose
+ if not self.no_daemon:
+ CTRexScenario.trex = CTRexClient(trex_host = self.configuration.trex['trex_name'],
+ verbose = self.json_verbose,
+ debug_image = options.debug_image,
+ trex_args = options.trex_args)
+ if not CTRexScenario.trex.check_master_connectivity():
+ print('Could not connect to master daemon')
+ sys.exit(-1)
+ if options.ga and CTRexScenario.setup_name:
+ CTRexScenario.GAManager = GAmanager_Regression(GoogleID = 'UA-75220362-3',
+ AnalyticsUserID = CTRexScenario.setup_name,
+ QueueSize = 100,
+ Timeout = 3, # seconds
+ UserPermission = 1,
+ BlockingMode = 0,
+ appName = 'TRex',
+ appVer = CTRexScenario.trex_version)
+
+
+ def begin (self):
+ client = CTRexScenario.trex
+ if self.pkg and not CTRexScenario.is_copied:
+ if client.master_daemon.is_trex_daemon_running() and client.get_trex_cmds() and not self.kill_running:
+ print("Can't update TRex, it's running")
+ sys.exit(-1)
+ print('Updating TRex to %s' % self.pkg)
+ if not client.master_daemon.update_trex(self.pkg):
+ print('Failed updating TRex')
+ sys.exit(-1)
+ else:
+ print('Updated')
+ CTRexScenario.is_copied = True
+ if self.functional or self.collect_only:
+ return
+ if not self.no_daemon:
+ print('Restarting TRex daemon server')
+ res = client.restart_trex_daemon()
+ if not res:
+ print('Could not restart TRex daemon server')
+ sys.exit(-1)
+ print('Restarted.')
+
+ if self.kill_running:
+ client.kill_all_trexes()
+ else:
+ if client.get_trex_cmds():
+ print('TRex is already running')
+ sys.exit(-1)
+
+ if 'loopback' not in self.modes:
+ CTRexScenario.router_cfg = dict(config_dict = self.configuration.router,
+ forceImageReload = self.load_image,
+ silent_mode = not self.telnet_verbose,
+ forceCleanConfig = self.clean_config,
+ tftp_config_dict = self.configuration.tftp)
+ try:
+ CustomLogger.setup_custom_logger('TRexLogger', self.loggerPath)
+ except AttributeError:
+ CustomLogger.setup_custom_logger('TRexLogger')
+
+ def finalize(self, result):
+ if self.functional or self.collect_only:
+ return
+ #CTRexScenario.is_init = False
+ if self.stateful:
+ CTRexScenario.trex = None
+ if self.stateless:
+ if self.no_daemon:
+ if CTRexScenario.stl_trex and CTRexScenario.stl_trex.is_connected():
+ CTRexScenario.stl_trex.disconnect()
+ else:
+ CTRexScenario.trex.force_kill(False)
+ CTRexScenario.stl_trex = None
+
+
+def save_setup_info():
+ try:
+ if CTRexScenario.setup_name and CTRexScenario.trex_version:
+ setup_info = ''
+ for key, value in CTRexScenario.trex_version.items():
+ setup_info += '{0:8}: {1}\n'.format(key, value)
+ cfg = CTRexScenario.configuration
+ setup_info += 'Server: %s, Modes: %s' % (cfg.trex.get('trex_name'), cfg.trex.get('modes'))
+ if cfg.router:
+ setup_info += '\nRouter: Model: %s, Image: %s' % (cfg.router.get('model'), CTRexScenario.router_image)
+ if CTRexScenario.debug_image:
+ setup_info += '\nDebug image: %s' % CTRexScenario.debug_image
+
+ with open('%s/report_%s.info' % (CTRexScenario.report_dir, CTRexScenario.setup_name), 'w') as f:
+ f.write(setup_info)
+ except Exception as err:
+ print('Error saving setup info: %s ' % err)
+
+
+if __name__ == "__main__":
+
+    # set defaults; by default we run the whole test suite
+ specific_tests = False
+ CTRexScenario.report_dir = 'reports'
+ need_to_copy = False
+ setup_dir = os.getenv('SETUP_DIR', '').rstrip('/')
+ CTRexScenario.setup_dir = check_setup_path(setup_dir)
+ CTRexScenario.scripts_path = get_trex_path()
+ if not CTRexScenario.setup_dir:
+ CTRexScenario.setup_dir = check_setup_path(os.path.join('setups', setup_dir))
+
+
+ nose_argv = ['', '-s', '-v', '--exe', '--rednose', '--detailed-errors']
+ test_client_package = False
+ if '--test-client-package' in sys.argv:
+ test_client_package = True
+
+ if '--collect' in sys.argv:
+ sys.argv.append('--collect-only')
+    if '--collect-only' in sys.argv: # user simply wants to view the available tests; no need for xunit
+ CTRexScenario.is_test_list = True
+ xml_arg = ''
+ else:
+ xml_name = 'unit_test.xml'
+ if CTRexScenario.setup_dir:
+ CTRexScenario.setup_name = os.path.basename(CTRexScenario.setup_dir)
+ xml_name = 'report_%s.xml' % CTRexScenario.setup_name
+ xml_arg= '--xunit-file=%s/%s' % (CTRexScenario.report_dir, xml_name)
+ mkpath(CTRexScenario.report_dir)
+
+ sys_args = sys.argv[:]
+ for i, arg in enumerate(sys.argv):
+ if 'log-path' in arg:
+ nose_argv += ['--nologcapture']
+ else:
+ for tests_type in CTRexScenario.test_types.keys():
+ if tests_type in arg:
+ specific_tests = True
+ CTRexScenario.test_types[tests_type].append(arg[arg.find(tests_type):])
+ sys_args.remove(arg)
+
+ if not specific_tests:
+ for key in ('--func', '--functional'):
+ if key in sys_args:
+ CTRexScenario.test_types['functional_tests'].append('functional_tests')
+ sys_args.remove(key)
+ for key in ('--stf', '--stateful'):
+ if key in sys_args:
+ CTRexScenario.test_types['stateful_tests'].append('stateful_tests')
+ sys_args.remove(key)
+ for key in ('--stl', '--stateless'):
+ if key in sys_args:
+ CTRexScenario.test_types['stateless_tests'].append('stateless_tests')
+ sys_args.remove(key)
+ # Run all of the tests or just the selected ones
+ if not sum([len(x) for x in CTRexScenario.test_types.values()]):
+ for key in CTRexScenario.test_types.keys():
+ CTRexScenario.test_types[key].append(key)
+
+ nose_argv += sys_args
+
+ addplugins = [RedNose(), CTRexTestConfiguringPlugin()]
+ result = True
+ try:
+ if len(CTRexScenario.test_types['functional_tests']):
+ additional_args = ['--func'] + CTRexScenario.test_types['functional_tests']
+ if xml_arg:
+ additional_args += ['--with-xunit', xml_arg.replace('.xml', '_functional.xml')]
+ result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins)
+ if len(CTRexScenario.test_types['stateful_tests']):
+ additional_args = ['--stf']
+ if '--warmup' in sys.argv:
+ additional_args.append('stateful_tests/trex_imix_test.py:CTRexIMIX_Test.test_warm_up')
+ additional_args += CTRexScenario.test_types['stateful_tests']
+ if not test_client_package:
+ additional_args.extend(['-a', '!client_package'])
+ if xml_arg:
+ additional_args += ['--with-xunit', xml_arg.replace('.xml', '_stateful.xml')]
+ result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins) and result
+ if len(CTRexScenario.test_types['stateless_tests']):
+ additional_args = ['--stl', 'stateless_tests/stl_general_test.py:STLBasic_Test.test_connectivity'] + CTRexScenario.test_types['stateless_tests']
+ if not test_client_package:
+ additional_args.extend(['-a', '!client_package'])
+ if xml_arg:
+ additional_args += ['--with-xunit', xml_arg.replace('.xml', '_stateless.xml')]
+ result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins) and result
+ #except Exception as e:
+ # result = False
+ # print(e)
+ finally:
+ save_setup_info()
+
+ if not CTRexScenario.is_test_list:
+ if result == True:
+ print(termstyle.green("""
+ ..::''''::..
+ .;'' ``;.
+ :: :: :: ::
+ :: :: :: ::
+ :: :: :: ::
+ :: .:' :: :: `:. ::
+ :: : : ::
+ :: `:. .:' ::
+ `;..``::::''..;'
+ ``::,,,,::''
+
+ ___ ___ __________
+ / _ \/ _ | / __/ __/ /
+ / ___/ __ |_\ \_\ \/_/
+ /_/ /_/ |_/___/___(_)
+
+ """))
+ sys.exit(0)
+ else:
+ print(termstyle.red("""
+ /\_/\
+ ( o.o )
+ > ^ <
+
+This cat is sad, test failed.
+ """))
+ sys.exit(-1)
+
+
+
+
+
+
+
+
+
+
+
diff --git a/scripts/automation/report_template.html b/scripts/automation/report_template.html
new file mode 100755
index 00000000..ccd5388d
--- /dev/null
+++ b/scripts/automation/report_template.html
@@ -0,0 +1,96 @@
+<!DOCTYPE html>
+<html>
+
+<head>
+
+<style>
+ html{overflow-y:scroll;}
+body
+{
+font-size:12px;
+color:#000000;
+background-color:#ffffff;
+margin:0px;
+background-image:url('/images/gradientfromtop.gif');
+background-repeat:repeat-x;
+}
+body,p,h1,h2,h3,h4,table,td,th,ul,ol,textarea,input
+{
+font-family:verdana,helvetica,arial,sans-serif;
+}
+h1 {font-size:190%;margin-top:0px;font-weight:normal}
+h2 {font-size:160%;margin-top:10px;margin-bottom:10px;font-weight:normal}
+h3 {font-size:120%;font-weight:normal}
+h4 {font-size:100%;}
+h5 {font-size:90%;}
+h6 {font-size:80%;}
+h1,h2,h3,h4,h5,h6
+{
+background-color:transparent;
+color:#000000;
+}
+table.myWideTable
+{
+background-color:#ffffff;
+border:1px solid #c3c3c3;
+border-collapse:collapse;
+width:100%;
+}
+table.myWideTable th
+{
+background-color:#e5eecc;
+border:1px solid #c3c3c3;
+padding:3px;
+vertical-align:top;
+text-align:left;
+}
+table.myWideTable td
+{
+border:1px solid #c3c3c3;
+padding:3px;
+vertical-align:top;
+}
+table.myTable
+{
+background-color:#ffffff;
+border:1px solid #c3c3c3;
+border-collapse:collapse;
+width:50%;
+}
+table.myTable th
+{
+background-color:#e5eecc;
+border:1px solid #c3c3c3;
+padding:3px;
+vertical-align:top;
+text-align:left;
+}
+table.myTable td
+{
+border:1px solid #c3c3c3;
+padding:3px;
+vertical-align:top;
+}
+ </style>
+
+
+</head>
+
+<body>
+
+<H1>
+TRex Performance Report
+</H1>
+
+<H2>
+Job Setup
+</H2>
+
+!@#$template_fill_job_setup_table!@#$
+
+<H2>
+Job Summary
+</H2>
+
+!@#$template_fill_job_summary_table!@#$
+
+</body>
+</html>
+
diff --git a/scripts/automation/sshpass.exp b/scripts/automation/sshpass.exp
new file mode 100755
index 00000000..3b5ce560
--- /dev/null
+++ b/scripts/automation/sshpass.exp
@@ -0,0 +1,17 @@
+#!/usr/cisco/bin/expect -f
+# usage: ./sshpass.exp <password> <server> <user> [cmd ...]
+# e.g.:  ./sshpass.exp password 192.168.1.11 id *
+set pass [lrange $argv 0 0]
+set server [lrange $argv 1 1]
+set name [lrange $argv 2 2]
+set cmd [lrange $argv 3 10]
+
+set cmd_str [join $cmd]
+
+spawn ssh $name@$server $cmd_str
+match_max 100000
+expect "*?assword:*"
+send -- "$pass\r"
+send -- "\r"
+expect eof
+wait
+#interact
diff --git a/scripts/automation/trex_control_plane/__init__.py b/scripts/automation/trex_control_plane/__init__.py
new file mode 100755
index 00000000..d3f5a12f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/__init__.py
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/client_utils/__init__.py b/scripts/automation/trex_control_plane/client_utils/__init__.py
new file mode 100644
index 00000000..c38c2cca
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client_utils/__init__.py
@@ -0,0 +1 @@
+__all__ = ["general_utils", "trex_yaml_gen"]
diff --git a/scripts/automation/trex_control_plane/client_utils/external_packages.py b/scripts/automation/trex_control_plane/client_utils/external_packages.py
new file mode 100644
index 00000000..c682dc18
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client_utils/external_packages.py
@@ -0,0 +1,72 @@
+#!/router/bin/python
+
+import sys
+import os
+import warnings
+
+CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory
+PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, os.pardir, os.pardir, 'external_libs'))
+
+CLIENT_UTILS_MODULES = ['dpkt-1.8.6',
+ 'yaml-3.11',
+ 'texttable-0.8.4',
+ 'scapy-2.3.1'
+ ]
+
+def import_client_utils_modules():
+
+ # must be in a higher priority
+ sys.path.insert(0, PATH_TO_PYTHON_LIB)
+
+ sys.path.append(ROOT_PATH)
+ import_module_list(CLIENT_UTILS_MODULES)
+
+
+def import_module_list(modules_list):
+    assert isinstance(modules_list, list)
+    for p in modules_list:
+        full_path = os.path.join(PATH_TO_PYTHON_LIB, p)
+        sys.path.insert(1, full_path)
+
+    import_platform_dirs()
+
+
+
+def import_platform_dirs ():
+    # handle platform dirs
+
+    # try fedora 18 first and then cel5.9
+    # we are using the ZMQ module to determine the right platform
+
+    for platform_dir in ('platform/fedora18', 'platform/cel59'):
+        full_path = os.path.join(PATH_TO_PYTHON_LIB, platform_dir)
+        sys.path.insert(0, full_path)
+        try:
+            # try to import and delete it from the namespace
+            import zmq
+            del zmq
+            return
+        except ImportError:
+            sys.path.pop(0)
+
+    # neither platform dir provided a loadable ZMQ
+    sys.modules['zmq'] = None
+    warnings.warn("unable to determine platform type for ZMQ import")
+
+
+
+import_client_utils_modules()
diff --git a/scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py b/scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py
new file mode 100644
index 00000000..c26fef29
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py
@@ -0,0 +1,212 @@
+#!/router/bin/python
+
+import pprint
+import yaml
+import os
+# import bisect
+
+class CTRexYaml(object):
+ """
+ This class functions as a YAML generator according to TRex YAML format.
+
+ CTRexYaml is compatible with both Python 2 and Python 3.
+ """
+ YAML_TEMPLATE = [{'cap_info': [],
+ 'duration': 10.0,
+ 'generator': {'clients_end': '16.0.1.255',
+ 'clients_per_gb': 201,
+ 'clients_start': '16.0.0.1',
+ 'distribution': 'seq',
+ 'dual_port_mask': '1.0.0.0',
+ 'min_clients': 101,
+ 'servers_end': '48.0.0.255',
+ 'servers_start': '48.0.0.1',
+ 'tcp_aging': 1,
+ 'udp_aging': 1},
+ 'mac' : [0x00,0x00,0x00,0x01,0x00,0x00]}]
+ PCAP_TEMPLATE = {'cps': 1.0,
+ 'ipg': 10000,
+ 'name': '',
+ 'rtt': 10000,
+ 'w': 1}
+
+ def __init__ (self, trex_files_path):
+ """
+ The initialization of this class creates a CTRexYaml object with **empty** 'cap-info', and with default client-server configuration.
+
+ Use class methods to add and assign pcap files and export the data to a YAML file.
+
+ :parameters:
+ trex_files_path : str
+            a path (on the TRex server side) under which TRex can access the pcap files.
+
+ """
+ self.yaml_obj = list(CTRexYaml.YAML_TEMPLATE)
+ self.empty_cap = True
+ self.file_list = []
+ self.yaml_dumped = False
+ self.trex_files_path = trex_files_path
+
+ def add_pcap_file (self, local_pcap_path):
+ """
+ Adds a .pcap file with recorded traffic to the yaml object by linking the file with 'cap-info' template key fields.
+
+ :parameters:
+ local_pcap_path : str
+ a path (on client side) for the pcap file to be added.
+
+ :return:
+ + The index of the inserted item (as int) if item added successfully
+ + -1 if pcap file already exists in 'cap_info'.
+
+ """
+ new_pcap = dict(CTRexYaml.PCAP_TEMPLATE)
+ new_pcap['name'] = self.trex_files_path + os.path.basename(local_pcap_path)
+ if self.get_pcap_idx(new_pcap['name']) != -1:
+ # pcap already exists in 'cap_info'
+ return -1
+ else:
+ self.yaml_obj[0]['cap_info'].append(new_pcap)
+ if self.empty_cap:
+ self.empty_cap = False
+ self.file_list.append(local_pcap_path)
+ return ( len(self.yaml_obj[0]['cap_info']) - 1)
+
+
+ def get_pcap_idx (self, pcap_name):
+ """
+ Checks if a certain .pcap file has been added into the yaml object.
+
+ :parameters:
+ pcap_name : str
+ the name of the pcap file to be searched
+
+ :return:
+          + The index of the pcap file (as int) if it exists
+          + -1 if it does not exist.
+
+ """
+ comp_pcap = pcap_name if pcap_name.startswith(self.trex_files_path) else (self.trex_files_path + pcap_name)
+ for idx, pcap in enumerate(self.yaml_obj[0]['cap_info']):
+ if pcap['name'] == comp_pcap:
+ return idx
+ # pcap file wasn't found
+ return -1
+
+ def dump_as_python_obj (self):
+ """
+        Pretty-prints the Python representation (dictionaries and lists) of the currently built yaml object.
+
+ :parameters:
+ None
+
+ :return:
+ None
+
+ """
+ pprint.pprint(self.yaml_obj)
+
+ def dump(self):
+ """
+        Pretty-prints the YAML format of the currently built yaml object.
+
+ :parameters:
+ None
+
+ :return:
+ None
+
+ """
+ print (yaml.safe_dump(self.yaml_obj, default_flow_style = False))
+
+ def to_yaml(self, filename):
+ """
+        Exports the built configuration into an actual YAML file.
+
+ :parameters:
+ filename : str
+ a path (on client side, including filename) to store the generated yaml file.
+
+ :return:
+ None
+
+ :raises:
+            + :exc:`ValueError`, in case no pcap files have been added to the object.
+ + :exc:`EnvironmentError`, in case of any IO error of writing to the files or OSError when trying to open it for writing.
+
+ """
+ if self.empty_cap:
+            raise ValueError("No .pcap file has been assigned to the yaml object. At least one must be added")
+ else:
+ try:
+ with open(filename, 'w') as yaml_file:
+ yaml_file.write( yaml.safe_dump(self.yaml_obj, default_flow_style = False) )
+ self.yaml_dumped = True
+ self.file_list.append(filename)
+            except EnvironmentError:
+ raise
+
+ def set_cap_info_param (self, param, value, seq):
+ """
+        Set a cap-info parameter's value for a specific pcap file.
+
+ :parameters:
+ param : str
+ the name of the parameters to be set.
+ value : int/float
+ the desired value to be set to `param` key.
+ seq : int
+ an index to the relevant caps array to be changed (index supplied when adding new pcap file, see :func:`add_pcap_file`).
+
+        :return:
+            + **True** on success
+            + **False** in case an out-of-range index was given
+
+ """
+ try:
+ self.yaml_obj[0]['cap_info'][seq][param] = value
+
+ return True
+ except IndexError:
+ return False
+
+ def set_generator_param (self, param, value):
+ """
+ Set generator parameters' value of the yaml object.
+
+ :parameters:
+ param : str
+ the name of the parameters to be set.
+ value : int/float/str
+ the desired value to be set to `param` key.
+
+ :return:
+ None
+
+ """
+ self.yaml_obj[0]['generator'][param] = value
+
+ def get_file_list(self):
+ """
+ Returns a list of all files related to the YAML object, including the YAML filename itself.
+
+        .. tip:: This method is especially useful for listing all the files that should be pushed to the TRex server as part of the same YAML-based run.
+
+ :parameters:
+ None
+
+ :return:
+ a list of filepaths, each is a local client-machine file path.
+
+ """
+ if not self.yaml_dumped:
+ print ("WARNING: .yaml file wasn't dumped yet. Files list contains only .pcap files")
+ return self.file_list
+
+
+
+if __name__ == "__main__":
+ pass
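+
+# A minimal usage sketch (all paths below are illustrative assumptions):
+#
+#     y = CTRexYaml('/tmp/trex_files/')
+#     idx = y.add_pcap_file('captures/dns_traffic.pcap')
+#     y.set_cap_info_param('cps', 10.0, idx)
+#     y.set_generator_param('clients_per_gb', 400)
+#     y.to_yaml('generated_traffic.yaml')
+#     print (y.get_file_list())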
diff --git a/scripts/automation/trex_control_plane/client_utils/yaml_utils.py b/scripts/automation/trex_control_plane/client_utils/yaml_utils.py
new file mode 100644
index 00000000..776a51a7
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client_utils/yaml_utils.py
@@ -0,0 +1,163 @@
+
+"""
+Dan Klein
+Cisco Systems, Inc.
+
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+import traceback
+import sys
+import yaml
+
+
+class CTRexYAMLLoader(object):
+ TYPE_DICT = {"double":float,
+ "int":int,
+ "array":list,
+ "string":str,
+ "boolean":bool}
+
+ def __init__(self, yaml_ref_file_path):
+ self.yaml_path = yaml_ref_file_path
+ self.ref_obj = None
+
+ def check_term_param_type(self, val, val_field, ref_val, multiplier):
+ # print val, val_field, ref_val
+ tmp_type = ref_val.get('type')
+ if isinstance(tmp_type, list):
+ # item can be one of multiple types
+ # print "multiple choice!"
+ python_types = set()
+ for t in tmp_type:
+ if t in self.TYPE_DICT:
+ python_types.add(self.TYPE_DICT.get(t))
+ else:
+ return False, TypeError("Unknown resolving for type {0}".format(t))
+ # print "python legit types: ", python_types
+ if type(val) not in python_types:
+ return False, TypeError("Type of object field '{0}' is not allowed".format(val_field))
+ else:
+ # WE'RE OK!
+ return True, CTRexYAMLLoader._calc_final_value(val, multiplier, ref_val.get('multiply', False))
+ else:
+ # this is a single type field
+ python_type = self.TYPE_DICT.get(tmp_type)
+ if not isinstance(val, python_type):
+ return False, TypeError("Type of object field '{0}' is not allowed".format(val_field))
+ else:
+ # WE'RE OK!
+ return True, CTRexYAMLLoader._calc_final_value(val, multiplier, ref_val.get('multiply', False))
+
+ def get_reference_default(self, root_obj, sub_obj, key):
+ # print root_obj, sub_obj, key
+ if sub_obj:
+ ref_field = self.ref_obj.get(root_obj).get(sub_obj).get(key)
+ else:
+ ref_field = self.ref_obj.get(root_obj).get(key)
+ if 'has_default' in ref_field:
+ if ref_field.get('has_default'):
+ # WE'RE OK!
+ return True, ref_field.get('default')
+ else:
+ # This is a mandatory field!
+ return False, ValueError("The {0} field is mandatory and must be specified explicitly".format(key))
+ else:
+ return False, ValueError("The {0} field has no indication about default value".format(key))
+
+ def validate_yaml(self, evaluated_obj, root_obj, fill_defaults=True, multiplier=1):
+ if isinstance(evaluated_obj, dict) and evaluated_obj.keys() == [root_obj]:
+ evaluated_obj = evaluated_obj.get(root_obj)
+ if not self.ref_obj:
+ self.ref_obj = load_yaml_to_obj(self.yaml_path)
+ # self.load_reference()
+ ref_item = self.ref_obj.get(root_obj)
+ if ref_item is not None:
+ try:
+                typed_obj = [False, None] # first item stores validity (multiple object "shapes"), second stores the type
+ if "type" in evaluated_obj:
+ ref_item = ref_item[evaluated_obj.get("type")]
+ # print "lower resolution with typed object"
+ typed_obj = [True, evaluated_obj.get("type")]
+ if isinstance(ref_item, dict) and "type" not in ref_item: # this is not a terminal
+ result_obj = {}
+ if typed_obj[0]:
+ result_obj["type"] = typed_obj[1]
+ # print "processing dictionary non-terminal value"
+ for k, v in ref_item.items():
+ # print "processing element '{0}' with value '{1}'".format(k,v)
+ if k in evaluated_obj:
+ # validate with ref obj
+ # print "found in evaluated object!"
+ tmp_type = v.get('type')
+ # print tmp_type
+ # print evaluated_obj
+ if tmp_type == "object":
+ # go deeper into nesting hierarchy
+ # print "This is an object type, recursion!"
+ result_obj[k] = self.validate_yaml(evaluated_obj.get(k), k, fill_defaults, multiplier)
+ else:
+ # validation on terminal type
+ # print "Validating terminal type %s" % k
+ res_ok, data = self.check_term_param_type(evaluated_obj.get(k), k, v, multiplier)
+ if res_ok:
+ # data field contains the value to save
+ result_obj[k] = data
+ else:
+ # data var contains the exception to throw
+ raise data
+ elif fill_defaults:
+ # complete missing values with default value, if exists
+ sub_obj = typed_obj[1] if typed_obj[0] else None
+ res_ok, data = self.get_reference_default(root_obj, sub_obj, k)
+ if res_ok:
+ # data field contains the value to save
+ result_obj[k] = data
+ else:
+ # data var contains the exception to throw
+ raise data
+ return result_obj
+ elif isinstance(ref_item, list):
+ # currently not handling list objects
+                raise NotImplementedError("List objects are currently unsupported")
+ else:
+ raise TypeError("Unknown parse tree object type.")
+ except KeyError as e:
+ raise
+ else:
+            raise KeyError("The given root_key '{key}' does not exist in the reference object".format(key=root_obj))
+
+ @staticmethod
+ def _calc_final_value(val, multiplier, multiply):
+ def to_num(s):
+ try:
+ return int(s)
+ except ValueError:
+ return float(s)
+ if multiply:
+ return val * to_num(multiplier)
+ else:
+ return val
+
+
+def load_yaml_to_obj(file_path):
+    # open() is used instead of the Python 2-only file() builtin
+    with open(file_path, 'r') as f:
+        return yaml.load(f)
+
+def yaml_exporter(file_path):
+ pass
+
+if __name__ == "__main__":
+ pass
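+
+# A minimal validation sketch; the reference file name and its keys below are
+# hypothetical, shown only to illustrate the expected reference YAML shape:
+#
+#     loader = CTRexYAMLLoader('param_reference.yaml')
+#     # with a reference entry such as:
+#     #   stream:
+#     #     rate:  {type: double, has_default: True, default: 1.0, multiply: True}
+#     #     count: {type: int,    has_default: True, default: 1}
+#     loader.validate_yaml({'rate': 2.5}, 'stream', multiplier = 2)
+#     # -> {'rate': 5.0, 'count': 1}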
diff --git a/scripts/automation/trex_control_plane/common/__init__.py b/scripts/automation/trex_control_plane/common/__init__.py
new file mode 100755
index 00000000..5a1da046
--- /dev/null
+++ b/scripts/automation/trex_control_plane/common/__init__.py
@@ -0,0 +1 @@
+__all__ = ["trex_status_e", "trex_exceptions"]
diff --git a/scripts/automation/trex_control_plane/common/external_packages.py b/scripts/automation/trex_control_plane/common/external_packages.py
new file mode 100755
index 00000000..7353c397
--- /dev/null
+++ b/scripts/automation/trex_control_plane/common/external_packages.py
@@ -0,0 +1,28 @@
+#!/router/bin/python
+
+import sys
+import os
+
+CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory
+PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, os.pardir, os.pardir, 'external_libs'))
+
+CLIENT_UTILS_MODULES = ['yaml-3.11'
+ ]
+
+def import_common_modules():
+ # must be in a higher priority
+ sys.path.insert(0, PATH_TO_PYTHON_LIB)
+ sys.path.append(ROOT_PATH)
+ import_module_list(CLIENT_UTILS_MODULES)
+
+
+def import_module_list(modules_list):
+ assert(isinstance(modules_list, list))
+ for p in modules_list:
+ full_path = os.path.join(PATH_TO_PYTHON_LIB, p)
+ fix_path = os.path.normcase(full_path)
+ sys.path.insert(1, full_path)
+
+import_common_modules()
+
diff --git a/scripts/automation/trex_control_plane/common/text_opts.py b/scripts/automation/trex_control_plane/common/text_opts.py
new file mode 100755
index 00000000..c9ab7ca8
--- /dev/null
+++ b/scripts/automation/trex_control_plane/common/text_opts.py
@@ -0,0 +1,198 @@
+import json
+import re
+
+TEXT_CODES = {'bold': {'start': '\x1b[1m',
+ 'end': '\x1b[22m'},
+ 'cyan': {'start': '\x1b[36m',
+ 'end': '\x1b[39m'},
+ 'blue': {'start': '\x1b[34m',
+ 'end': '\x1b[39m'},
+ 'red': {'start': '\x1b[31m',
+ 'end': '\x1b[39m'},
+ 'magenta': {'start': '\x1b[35m',
+ 'end': '\x1b[39m'},
+ 'green': {'start': '\x1b[32m',
+ 'end': '\x1b[39m'},
+ 'yellow': {'start': '\x1b[33m',
+ 'end': '\x1b[39m'},
+ 'underline': {'start': '\x1b[4m',
+ 'end': '\x1b[24m'}}
+
+class TextCodesStripper:
+ keys = [re.escape(v['start']) for k,v in TEXT_CODES.iteritems()]
+ keys += [re.escape(v['end']) for k,v in TEXT_CODES.iteritems()]
+ pattern = re.compile("|".join(keys))
+
+ @staticmethod
+ def strip (s):
+ return re.sub(TextCodesStripper.pattern, '', s)
+
+def format_num (size, suffix = "", compact = True, opts = None):
+ if opts is None:
+ opts = ()
+
+ txt = "NaN"
+
+ if type(size) == str:
+ return "N/A"
+
+ u = ''
+
+ if compact:
+ for unit in ['','K','M','G','T','P']:
+ if abs(size) < 1000.0:
+ u = unit
+ break
+ size /= 1000.0
+
+ if isinstance(size, float):
+ txt = "%3.2f" % (size)
+ else:
+ txt = "{:,}".format(size)
+
+ if u or suffix:
+ txt += " {:}{:}".format(u, suffix)
+
+ if isinstance(opts, tuple):
+ return format_text(txt, *opts)
+ else:
+ return format_text(txt, (opts))
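+
+# For example (values traced through the logic above):
+#   format_num(1234567, suffix = 'bps')  -> '1.23 Mbps'
+#   format_num(950)                      -> '950'
+#   format_num(1234567, compact = False) -> '1,234,567'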
+
+
+
+def format_time (t_sec):
+ if t_sec < 0:
+ return "infinite"
+
+ if t_sec == 0:
+ return "zero"
+
+ if t_sec < 1:
+ # low numbers
+ for unit in ['ms', 'usec', 'ns']:
+ t_sec *= 1000.0
+ if t_sec >= 1.0:
+ return '{:,.2f} [{:}]'.format(t_sec, unit)
+
+ return "NaN"
+
+ else:
+ # seconds
+ if t_sec < 60.0:
+ return '{:,.2f} [{:}]'.format(t_sec, 'sec')
+
+ # minutes
+ t_sec /= 60.0
+ if t_sec < 60.0:
+ return '{:,.2f} [{:}]'.format(t_sec, 'minutes')
+
+ # hours
+ t_sec /= 60.0
+ if t_sec < 24.0:
+ return '{:,.2f} [{:}]'.format(t_sec, 'hours')
+
+ # days
+ t_sec /= 24.0
+ return '{:,.2f} [{:}]'.format(t_sec, 'days')
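+
+# For example (values traced through the branches above):
+#   format_time(-1)      -> 'infinite'
+#   format_time(0.00002) -> '20.00 [usec]'
+#   format_time(90)      -> '1.50 [minutes]'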
+
+
+def format_percentage (size):
+ return "%0.2f %%" % (size)
+
+def bold(text):
+ return text_attribute(text, 'bold')
+
+
+def cyan(text):
+ return text_attribute(text, 'cyan')
+
+
+def blue(text):
+ return text_attribute(text, 'blue')
+
+
+def red(text):
+ return text_attribute(text, 'red')
+
+
+def magenta(text):
+ return text_attribute(text, 'magenta')
+
+
+def green(text):
+ return text_attribute(text, 'green')
+
+def yellow(text):
+ return text_attribute(text, 'yellow')
+
+def underline(text):
+ return text_attribute(text, 'underline')
+
+
+def text_attribute(text, attribute):
+ if isinstance(text, str):
+ return "{start}{txt}{stop}".format(start=TEXT_CODES[attribute]['start'],
+ txt=text,
+ stop=TEXT_CODES[attribute]['end'])
+ elif isinstance(text, unicode):
+ return u"{start}{txt}{stop}".format(start=TEXT_CODES[attribute]['start'],
+ txt=text,
+ stop=TEXT_CODES[attribute]['end'])
+ else:
+ raise Exception("not a string")
+
+
+FUNC_DICT = {'blue': blue,
+ 'bold': bold,
+ 'green': green,
+ 'yellow': yellow,
+ 'cyan': cyan,
+ 'magenta': magenta,
+ 'underline': underline,
+ 'red': red}
+
+
+def format_text(text, *args):
+ return_string = text
+ for i in args:
+ func = FUNC_DICT.get(i)
+ if func:
+ return_string = func(return_string)
+
+ return return_string
+
+
+def format_threshold (value, red_zone, green_zone):
+ if value >= red_zone[0] and value <= red_zone[1]:
+ return format_text("{0}".format(value), 'red')
+
+ if value >= green_zone[0] and value <= green_zone[1]:
+ return format_text("{0}".format(value), 'green')
+
+ return "{0}".format(value)
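+
+# For example, with CPU-utilization style zones:
+#   format_threshold(92, red_zone = [90, 100], green_zone = [0, 50])
+# returns '92' wrapped in red ANSI codes; 30 would be green and 75 plain.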
+
+# pretty print for JSON
+def pretty_json (json_str, use_colors = True):
+ pretty_str = json.dumps(json.loads(json_str), indent = 4, separators=(',', ': '), sort_keys = True)
+
+ if not use_colors:
+ return pretty_str
+
+ try:
+ # int numbers
+ pretty_str = re.sub(r'([ ]*:[ ]+)(\-?[1-9][0-9]*[^.])',r'\1{0}'.format(blue(r'\2')), pretty_str)
+ # float
+ pretty_str = re.sub(r'([ ]*:[ ]+)(\-?[1-9][0-9]*\.[0-9]+)',r'\1{0}'.format(magenta(r'\2')), pretty_str)
+ # # strings
+ #
+ pretty_str = re.sub(r'([ ]*:[ ]+)("[^"]*")',r'\1{0}'.format(red(r'\2')), pretty_str)
+ pretty_str = re.sub(r"('[^']*')", r'{0}\1{1}'.format(TEXT_CODES['magenta']['start'],
+ TEXT_CODES['red']['start']), pretty_str)
+ except :
+ pass
+
+ return pretty_str
+
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/trex_control_plane/common/trex_exceptions.py b/scripts/automation/trex_control_plane/common/trex_exceptions.py
new file mode 100755
index 00000000..0de38411
--- /dev/null
+++ b/scripts/automation/trex_control_plane/common/trex_exceptions.py
@@ -0,0 +1,140 @@
+#!/router/bin/python
+
+#from rpc_exceptions import RPCExceptionHandler, WrappedRPCError
+
+from jsonrpclib import Fault, ProtocolError, AppError
+
+class RPCError(Exception):
+ """
+ This is the general RPC error exception class from which :exc:`trex_exceptions.TRexException` inherits.
+
+    Every exception in this class follows the JSON-RPC error convention: code, message and data.
+
+ """
+ def __init__(self, code, message, remote_data = None):
+ self.code = code
+ self.msg = message or self._default_message
+ self.data = remote_data
+ self.args = (code, self.msg, remote_data)
+
+ def __str__(self):
+ return self.__repr__()
+ def __repr__(self):
+ if self.args[2] is not None:
+ return u"[errcode:%r] %r. Extended data: %r" % (self.args[0], self.args[1], self.args[2])
+ else:
+ return u"[errcode:%r] %r" % (self.args[0], self.args[1])
+
+class TRexException(RPCError):
+ """
+ This is the most general TRex exception.
+
+    All exceptions inheriting from this class have an error code and a default message which describes the most common use case of the error.
+
+    This exception isn't used by default; it is raised only when an error occurs that can't be resolved to any of the derived exceptions.
+
+ """
+ code = -10
+    _default_message = 'TRex encountered an unexpected error. Please contact the TRex dev team.'
+ # api_name = 'TRex'
+
+class TRexError(TRexException):
+ """
+    Indicates a general TRex run error.
+
+    Raised when a TRex run fails due to wrong input parameters, or due to reachability issues.
+ """
+ code = -11
+ _default_message = 'TRex run failed due to wrong input parameters, or due to reachability issues.'
+
+class TRexWarning(TRexException):
+    """ Indicates a warning from the TRex server. When this exception is raised, it normally indicates that required data isn't ready yet """
+ code = -12
+ _default_message = 'TRex is starting (data is not available yet).'
+
+class TRexRequestDenied(TRexException):
+    """ Indicates that the desired request was denied by the server """
+    code = -33
+    _default_message = 'The desired TRex request was denied because the requested resource is already taken. Try again once TRex is back in IDLE state.'
+
+class TRexInUseError(TRexException):
+ """
+ Indicates that TRex is currently in use
+
+ """
+ code = -13
+ _default_message = 'TRex is already being used by another user or process. Try again once TRex is back in IDLE state.'
+
+class TRexRunFailedError(TRexException):
+    """ Indicates that TRex has failed for some reason. This exception is used when the TRex process itself terminates due to an unknown reason """
+ code = -14
+ _default_message = ''
+
+class TRexIncompleteRunError(TRexException):
+ """
+    Indicates that TRex has failed for some reason.
+    This exception is used when the TRex process itself terminated with an error fault, or was terminated by an external intervention in the OS.
+
+ """
+ code = -15
+ _default_message = 'TRex run was terminated unexpectedly by outer process or by the hosting OS'
+
+EXCEPTIONS = [TRexException, TRexError, TRexWarning, TRexInUseError, TRexRequestDenied, TRexRunFailedError, TRexIncompleteRunError]
+
+class CExceptionHandler(object):
+ """
+    CExceptionHandler is responsible for generating TRex API related exceptions on the client side.
+ """
+ def __init__(self, exceptions):
+ """
+        Instantiate a CExceptionHandler object
+
+ :parameters:
+
+ exceptions : list
+ a list of all TRex acceptable exception objects.
+
+ default list:
+ - :exc:`trex_exceptions.TRexException`
+ - :exc:`trex_exceptions.TRexError`
+ - :exc:`trex_exceptions.TRexWarning`
+ - :exc:`trex_exceptions.TRexInUseError`
+ - :exc:`trex_exceptions.TRexRequestDenied`
+ - :exc:`trex_exceptions.TRexRunFailedError`
+ - :exc:`trex_exceptions.TRexIncompleteRunError`
+
+ """
+ if isinstance(exceptions, type):
+ exceptions = [ exceptions, ]
+ self.exceptions = exceptions
+ self.exceptions_dict = dict((e.code, e) for e in self.exceptions)
+
+ def gen_exception (self, err):
+ """
+ Generates an exception based on a general ProtocolError exception object `err`.
+
+
+ :parameters:
+
+ err : exception
+ a ProtocolError exception raised by :class:`trex_client.CTRexClient` class
+
+ :return:
+ A TRex exception from the exception list defined in class creation.
+
+            If no matching exception is found, a generic TRexException is returned.
+
+ """
+ code, message, data = err
+ try:
+ exp = self.exceptions_dict[code]
+ return exp(exp.code, message, data)
+ except KeyError:
+            # fall back to TRexException when an unknown application error code is raised
+ return TRexException(err)
+
+
+exception_handler = CExceptionHandler( EXCEPTIONS )
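+
+# A minimal usage sketch; the RPC proxy call below is illustrative of how
+# trex_client.CTRexClient uses this handler, not a verbatim excerpt:
+#
+#     try:
+#         server.some_rpc_method()   # hypothetical jsonrpclib proxy call
+#     except ProtocolError as err:
+#         raise exception_handler.gen_exception(err)
+#
+# An error carrying code -12 resolves to TRexWarning; an unknown code falls
+# back to the generic TRexException.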
+
diff --git a/scripts/automation/trex_control_plane/common/trex_status.py b/scripts/automation/trex_control_plane/common/trex_status.py
new file mode 100644
index 00000000..8f2859d1
--- /dev/null
+++ b/scripts/automation/trex_control_plane/common/trex_status.py
@@ -0,0 +1,8 @@
+#!/router/bin/python
+
+# define the states in which a TRex can hold during its lifetime
+# TRexStatus = Enum('TRexStatus', 'Idle Starting Running')
+
+IDLE = 1
+STARTING = 2
+RUNNING = 3
diff --git a/scripts/automation/trex_control_plane/common/trex_status_e.py b/scripts/automation/trex_control_plane/common/trex_status_e.py
new file mode 100755
index 00000000..fbfe92af
--- /dev/null
+++ b/scripts/automation/trex_control_plane/common/trex_status_e.py
@@ -0,0 +1,8 @@
+#!/router/bin/python
+
+import outer_packages # import this to overcome doc building import error by sphinx
+from enum import Enum
+
+
+# define the states in which a TRex can hold during its lifetime
+TRexStatus = Enum('TRexStatus', 'Idle Starting Running')
diff --git a/scripts/automation/trex_control_plane/doc/Makefile b/scripts/automation/trex_control_plane/doc/Makefile
new file mode 100755
index 00000000..37a28d0f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/Makefile
@@ -0,0 +1,192 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " applehelp to make an Apple Help Book"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " xml to make Docutils-native XML files"
+ @echo " pseudoxml to make pseudoxml-XML files for display purposes"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+ @echo " coverage to run coverage check of the documentation (if enabled)"
+
+clean:
+ rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/T-RexProjectControlPlain.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/T-RexProjectControlPlain.qhc"
+
+applehelp:
+ $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
+ @echo
+ @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
+ @echo "N.B. You won't be able to view it unless you put it in" \
+ "~/Library/Documentation/Help or install it in your application" \
+ "bundle."
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/T-RexProjectControlPlain"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/T-RexProjectControlPlain"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through platex and dvipdfmx..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
+
+coverage:
+ $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
+ @echo "Testing of coverage in the sources finished, look at the " \
+ "results in $(BUILDDIR)/coverage/python.txt."
+
+xml:
+ $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+ @echo
+ @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+ $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+ @echo
+ @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/scripts/automation/trex_control_plane/doc/_static/no_scrollbars.css b/scripts/automation/trex_control_plane/doc/_static/no_scrollbars.css
new file mode 100755
index 00000000..f86e823a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/_static/no_scrollbars.css
@@ -0,0 +1,10 @@
+/* override table width restrictions */
+.wy-table-responsive table td, .wy-table-responsive table th {
+ /* !important prevents the common CSS stylesheets from
+ overriding this as on RTD they are loaded after this stylesheet */
+ white-space: normal !important;
+}
+
+.wy-table-responsive {
+ overflow: visible !important;
+} \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/_templates/layout.html b/scripts/automation/trex_control_plane/doc/_templates/layout.html
new file mode 100644
index 00000000..8c1c709c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/_templates/layout.html
@@ -0,0 +1,17 @@
+{% extends "!layout.html" %}
+
+{% block footer %}
+{{ super() }}
+<script>
+ (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+ m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+ })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+
+ ga('create', 'UA-75220362-1', 'auto');
+ ga('send', 'pageview');
+
+</script>
+{% endblock %}
+
+
diff --git a/scripts/automation/trex_control_plane/doc/api/client_code.rst b/scripts/automation/trex_control_plane/doc/api/client_code.rst
new file mode 100755
index 00000000..0cda3451
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/api/client_code.rst
@@ -0,0 +1,17 @@
+
+trex_client Module documentation
+================================
+
+
+CTRexClient class
+-----------------
+
+.. autoclass:: trex_client.CTRexClient
+ :members:
+ :member-order: alphabetical
+
+CTRexResult class
+-----------------
+
+.. autoclass:: trex_client.CTRexResult
+ :members:
diff --git a/scripts/automation/trex_control_plane/doc/api/exceptions.rst b/scripts/automation/trex_control_plane/doc/api/exceptions.rst
new file mode 100755
index 00000000..d9df6484
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/api/exceptions.rst
@@ -0,0 +1,7 @@
+
+
+trex_exceptions Exceptions module
+=================================
+
+.. automodule:: trex_exceptions
+ :members:
diff --git a/scripts/automation/trex_control_plane/doc/api/index.rst b/scripts/automation/trex_control_plane/doc/api/index.rst
new file mode 100755
index 00000000..cfdc6917
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/api/index.rst
@@ -0,0 +1,18 @@
+
+API Reference
+=============
+
+**TRex Modules**
+
+.. toctree::
+ :maxdepth: 4
+
+ client_code
+ exceptions
+
+**TRex JSON Template**
+
+.. toctree::
+ :maxdepth: 4
+
+ json_fields
diff --git a/scripts/automation/trex_control_plane/doc/api/json_fields.rst b/scripts/automation/trex_control_plane/doc/api/json_fields.rst
new file mode 100755
index 00000000..9e32d23e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/api/json_fields.rst
@@ -0,0 +1,233 @@
+
+TRex JSON Template
+==================
+
+Whenever TRex publishes live data, it uses JSON notation to describe the data object.
+
+Each client may parse it differently; this page describes the meaning of the values as published by the TRex server.
+
+
+Main Fields
+-----------
+
+Each JSON object published by the TRex server is divided into main fields, under which the actual data lies.
+
+These main fields are:
+
++-----------------------------+----------------------------------------------------+---------------------------+
+| Main field | Contains | Comments |
++=============================+====================================================+===========================+
+| :ref:`trex-global-field` | Must-have data on TRex run, | |
+| | mainly regarding Tx/Rx and packet drops | |
++-----------------------------+----------------------------------------------------+---------------------------+
+| :ref:`tx-gen-field` | Data indicate the quality of the transmit process. | |
+| | In case histogram is zero it means that all packets| |
+| | were injected in the right time. | |
++-----------------------------+----------------------------------------------------+---------------------------+
+| :ref:`trex-latecny-field` | Latency reports, containing latency data on | - Generated when latency |
+| | generated data and on response traffic | test is enabled (``l`` |
+| | | param) |
+| | | - *typo* on field key: |
++-----------------------------+----------------------------------------------------+ will be fixed on next |
+| :ref:`trex-latecny-v2-field`| Extended latency information | release |
++-----------------------------+----------------------------------------------------+---------------------------+
+
+
+Each of these fields contains keys for general field data (such as its name), while the actual data is always stored under the **"data"** key.
+
+For example, in order to access some trex-global data, the access path would look like::
+
+ AllData -> trex-global -> data -> desired_info
+
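+As a short illustrative sketch, assuming the published object was already
+parsed into a Python dict named ``all_data`` (the variable name is an
+assumption)::
+
+    tx_bps   = all_data['trex-global']['data']['m_tx_bps']
+    cpu_util = all_data['trex-global']['data']['m_cpu_util']
+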
+
+
+
+Detailed explanation
+--------------------
+
+.. _trex-global-field:
+
+trex-global field
+~~~~~~~~~~~~~~~~~
+
+
++--------------------------------+-------+-----------------------------------------------------------+
+| Sub-key | Type | Meaning |
++================================+=======+===========================================================+
+| m_cpu_util | float | CPU utilization (0-100) |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_platform_factor | float | multiplier factor |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_bps | float | total tx bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_rx_bps | float | total rx bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_pps | float | total tx packet per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_cps | float | total tx connection per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_expected_cps | float | expected tx connection per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_expected_pps | float | expected tx packet per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_expected_bps | float | expected tx bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_rx_drop_bps | float | drop rate in bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_active_flows | float | active trex flows |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_open_flows | float | open trex flows from startup (monotonically incrementing) |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_tx_pkts | int | total tx in packets |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_rx_pkts | int | total rx in packets |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_tx_bytes | int | total tx in bytes |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_rx_bytes | int | total rx in bytes |
++--------------------------------+-------+-----------------------------------------------------------+
+| opackets-# | int | output packets (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| obytes-# | int | output bytes (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| ipackets-#                     | int   | input packets (per interface)                             |
++--------------------------------+-------+-----------------------------------------------------------+
+| ibytes-# | int | input bytes (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| ierrors-# | int | input errors (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| oerrors-#                      | int   | output errors (per interface)                             |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_tx_bps-#               | float | total transmitted data in bit per second (per interface)  |
++--------------------------------+-------+-----------------------------------------------------------+
+| unknown | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_learn_error [#f1]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_active [#f2]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_no_fid [#f2]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_time_out [#f2]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_open [#f2]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+
+
+.. _tx-gen-field:
+
+tx-gen field
+~~~~~~~~~~~~
+
++-------------------+-------+-----------------------------------------------------------+
+| Sub-key | Type | Meaning |
++===================+=======+===========================================================+
+| realtime-hist | dict | histogram of transmission. See extended information about |
+| | | histogram object under :ref:`histogram-object-fields`. |
+|                   |       | The analyzed attribute is the time a packet was sent      |
+|                   |       | before/after it was intended to be sent.                  |
++-------------------+-------+-----------------------------------------------------------+
+| unknown | int | |
++-------------------+-------+-----------------------------------------------------------+
+
+.. _trex-latecny-field:
+
+trex-latecny field
+~~~~~~~~~~~~~~~~~~
+
++---------+-------+---------------------------------------------------------+
+| Sub-key | Type | Meaning |
++=========+=======+=========================================================+
+| avg-# | float | average latency in usec (per interface) |
++---------+-------+---------------------------------------------------------+
+| max-# | float | max latency in usec from the test start (per interface) |
++---------+-------+---------------------------------------------------------+
+| c-max-# | float | max in the last 1 sec window (per interface) |
++---------+-------+---------------------------------------------------------+
+| error-# | float | errors in latency packets (per interface) |
++---------+-------+---------------------------------------------------------+
+| unknown | int | |
++---------+-------+---------------------------------------------------------+
+
+.. _trex-latecny-v2-field:
+
+trex-latecny-v2 field
+~~~~~~~~~~~~~~~~~~~~~
+
++--------------------------------------+-------+--------------------------------------+
+| Sub-key | Type | Meaning |
++======================================+=======+======================================+
+| cpu_util                             | float | rx thread CPU % (this is not TRex DP |
+|                                      |       | threads CPU %)                       |
++--------------------------------------+-------+--------------------------------------+
+| port-# | | Containing per interface |
+| | dict | information. See extended |
+| | | information under ``port-# -> |
+| | | key_name -> sub_key`` |
++--------------------------------------+-------+--------------------------------------+
+| port-#->hist | dict | histogram of latency. See extended |
+| | | information about histogram object |
+| | | under :ref:`histogram-object-fields`.|
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats | | Containing per interface |
+| | dict | information. See extended |
+| | | information under ``port-# -> |
+| | | key_name -> sub_key`` |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_tx_pkt_ok           | int   | total packets attempted to send      |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_pkt_ok              | int   | total packets sent by the hardware   |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_no_magic | int | rx error with no magic |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_no_id | int | rx errors with no id |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_seq_error | int | error in seq number |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_length_error | int | |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_rx_check | int | packets tested in rx |
++--------------------------------------+-------+--------------------------------------+
+| unknown | int | |
++--------------------------------------+-------+--------------------------------------+
+
+
+
+.. _histogram-object-fields:
+
+Histogram object fields
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The histogram object is used in a number of places throughout the JSON object.
+The following section describes its fields in detail.
+
+
++-----------+-------+-----------------------------------------------------------------------------------+
+| Sub-key | Type | Meaning |
++===========+=======+===================================================================================+
+| min_usec  | int   | min attribute value in usec; packets with latency below this are not counted      |
++-----------+-------+-----------------------------------------------------------------------------------+
+| max_usec | int | max attribute value in usec |
++-----------+-------+-----------------------------------------------------------------------------------+
+| high_cnt  | int   | number of packets whose attribute value is greater than min_usec                  |
++-----------+-------+-----------------------------------------------------------------------------------+
+| cnt | int | total packets from test startup |
++-----------+-------+-----------------------------------------------------------------------------------+
+| s_avg | float | average value from test startup |
++-----------+-------+-----------------------------------------------------------------------------------+
+| histogram |       | histogram of the relevant attribute, using the following keys:                    |
+| | array | - key: value in usec |
+| | | - val: number of packets |
++-----------+-------+-----------------------------------------------------------------------------------+
+
+
+Access Examples
+---------------
+
+
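+The examples below are illustrative sketches; they assume the published JSON object has already been parsed into a Python dictionary named ``stats``.
+
+.. code-block:: python
+
+    # total tx rate, in bit per second
+    tx_bps = stats["trex-global"]["data"]["m_tx_bps"]
+
+    # per-interface keys end with "-<interface index>", e.g. interface 0
+    opackets_0 = stats["trex-global"]["data"]["opackets-0"]
+
+    # latency data is present only when the latency test is enabled ('l' param)
+    avg_latency_1 = stats["trex-latecny"]["data"].get("avg-1")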
+
+.. rubric:: Footnotes
+
+.. [#f1] Available only in NAT and NAT learning operation (``learn`` and ``learn-verify`` flags)
+
+.. [#f2] Available only in NAT operation (``learn`` flag) \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/client_utils.rst b/scripts/automation/trex_control_plane/doc/client_utils.rst
new file mode 100755
index 00000000..32728a57
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/client_utils.rst
@@ -0,0 +1,19 @@
+
+Client Utilities
+================
+
+.. toctree::
+ :maxdepth: 2
+
+ packet_generator/index
+
+TRex YAML generator
+-------------------
+
+.. automodule:: trex_yaml_gen
+ :members:
+
+General Utilities
+-----------------
+.. automodule:: general_utils
+ :members: \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/conf.py b/scripts/automation/trex_control_plane/doc/conf.py
new file mode 100755
index 00000000..ec133a1c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/conf.py
@@ -0,0 +1,312 @@
+# -*- coding: utf-8 -*-
+#
+# TRex Control Plane documentation build configuration file, created by
+# sphinx-quickstart on Tue Jun 2 07:48:10 2015.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('../stf/trex_stf_lib'))
+sys.path.insert(0, os.path.abspath('../client_utils'))
+sys.path.insert(0, os.path.abspath('../examples'))
+sys.path.insert(0, os.path.abspath('../common'))
+## add all external libs path manually
+external_libs_path = os.path.join(os.pardir, os.pardir, os.pardir, "external_libs")
+external_libs_pkgs = [os.path.join(external_libs_path, p)
+ for p in os.listdir(external_libs_path)
+ if os.path.isdir(os.path.join(external_libs_path, p))]
+for p in external_libs_pkgs:
+ sys.path.insert(1, p)
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.todo',
+ 'sphinx.ext.viewcode',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'TRex Control Plane'
+copyright = u'2015, Cisco Systems Inc.'
+author = u'Dan Klein for Cisco Systems Inc.'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '1.7'
+# The full version, including alpha/beta/rc tags.
+release = '1.7.1'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#html_theme = 'sphinx_rtd_theme'
+html_theme = 'classic'
+
+#html_theme_options = {
+#    "rightsidebar": "true"
+#}
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+#html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+#html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'TRexControlPlaindoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+
+# Latex figure (float) alignment
+#'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+  (master_doc, 'TRexControlPlain.tex', u'TRex Control Plane Documentation',
+ u'Dan Klein for Cisco Systems Inc', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    (master_doc, 'TRexcontrolplain', u'TRex Control Plane Documentation',
+ [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+  (master_doc, 'TRexControlPlain', u'TRex Control Plane Documentation',
+ author, 'TRexControlPlain', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
+
+
+# show documentation for both __init__ methods and class methods
+autoclass_content = "both"
+
+# A workaround for the responsive tables always having annoying scrollbars.
+def setup(app):
+ app.add_stylesheet("no_scrollbars.css")
diff --git a/scripts/automation/trex_control_plane/doc/docs_utilities.py b/scripts/automation/trex_control_plane/doc/docs_utilities.py
new file mode 100755
index 00000000..e80d765f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/docs_utilities.py
@@ -0,0 +1,37 @@
+#!/router/bin/python
+
+from texttable import Texttable
+import yaml
+
+
+def handle_data_items(field_yaml_dict):
+ data_items = field_yaml_dict['data']
+ return [ [json_key, meaning['type'], meaning['exp'] ]
+ for json_key,meaning in data_items.items() ]
+
+
+def json_dict_to_txt_table(dict_yaml_file):
+
+    with open(dict_yaml_file, 'r') as f:
+        yaml_stream = yaml.safe_load(f)  # the file holds plain data, so safe_load suffices
+
+ for main_field, sub_key in yaml_stream.items():
+        print main_field + ' field' + '\n' + '~' * len(main_field + ' field') + '\n'  # section title with RST-style underline
+
+ field_data_rows = handle_data_items(sub_key)
+ table = Texttable(max_width=120)
+ table.set_cols_align(["l", "c", "l"])
+ table.set_cols_valign(["t", "m", "m"])
+ # create table's header
+ table.add_rows([["Sub-key", "Type", "Meaning"]])
+ table.add_rows(field_data_rows, header=False)
+
+
+ print table.draw() + "\n"
+
+
+if __name__ == "__main__":
+    json_dict_to_txt_table("json_dictionary.yaml") \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/index.rst b/scripts/automation/trex_control_plane/doc/index.rst
new file mode 100755
index 00000000..041a17c2
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/index.rst
@@ -0,0 +1,96 @@
+.. TRex Control Plane documentation master file, created by
+ sphinx-quickstart on Tue Jun 2 07:48:10 2015.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to TRex Control Plane's documentation!
+==============================================
+
+TRex is a **realistic traffic generator** that enables you to learn more about your devices under development.
+
+This site covers the Python API of the TRex control plane and explains how to utilize it for your needs.
+However, since the entire API is JSON-RPC [#f1]_ based, you may want to check out other implementations that could suit you better.
+
+
+To fully understand how the API works and how to set up the server side, check out the `trex-core Wiki <https://github.com/cisco-system-traffic-generator/trex-core/wiki>`_ under the documentation section of the TRex website.
+
+
+**Use the table of contents below or the menu to your left to navigate through the site**
+
+
+Client Package
+==============
+
+| Starting from version v1.99, TRex ships a separate client package, included in the main directory.
+| Put it anywhere you like, preferably in the same place as your scripts.
+| (If it is not in the same place as your scripts, you will need to ensure the trex_client directory is in sys.path; a sketch follows the unpack command below.)
+
+Un-pack it using the command::
+
+ tar -xzf trex_client_<TRex version>.tar.gz
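+
+A minimal sketch of that sys.path adjustment (the path below is illustrative; adjust it to wherever you un-packed the package):
+
+.. code-block:: python
+
+    import sys
+
+    # make the un-packed client package importable
+    sys.path.append('/path/to/trex_client/stf')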
+
+| The client assumes the stateful daemon is running.
+| After un-tarring the client package, you can run the basic tests in the examples directory out of the box:
+
+.. code-block:: bash
+
+ cd trex_client/stf/examples
+ python stf_example.py -s <server address>
+
+You can verify that the traffic was sent and arrived properly if you see output like this::
+
+ Connecting to 127.0.0.1
+ Connected, start TRex
+ Sample until end
+ Test results:
+ Is valid history? True
+ Done warmup? True
+ Expected tx rate: {u'm_tx_expected_pps': 71898.4, u'm_tx_expected_bps': 535157280.0, u'm_tx_expected_cps': 1943.2}
+ Current tx rate: {u'm_tx_bps': 542338368.0, u'm_tx_cps': 1945.4, u'm_tx_pps': 79993.4}
+ Maximum latency: {u'max-4': 55, u'max-5': 30, u'max-6': 50, u'max-7': 30, u'max-0': 55, u'max-1': 40, u'max-2': 55, u'max-3': 30}
+ Average latency: {'all': 32.913, u'port6': 35.9, u'port7': 30.0, u'port4': 35.8, u'port5': 30.0, u'port2': 35.8, u'port3': 30.0, u'port0': 35.8, u'port1': 30.0}
+ Average window latency: {'all': 31.543, u'port6': 32.871, u'port7': 28.929, u'port4': 33.886, u'port5': 28.929, u'port2': 33.843, u'port3': 28.929, u'port0': 33.871, u'port1': 31.086}
+ Total drops: -3
+ Drop rate: 0.0
+ History size so far: 7
+
+ TX by ports:
+ 0: 230579 | 1: 359435 | 2: 230578 | 3: 359430 | 4: 230570 | 5: 359415 | 6: 230564 | 7: 359410
+ RX by ports:
+ 0: 359434 | 1: 230580 | 2: 359415 | 3: 230571 | 4: 359429 | 5: 230579 | 6: 359411 | 7: 230565
+ CPU utilization:
+ [0.0, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8]
+
+API Reference
+=============
+.. toctree::
+ :maxdepth: 2
+
+ api/index
+
+Client Utilities
+================
+.. toctree::
+ :maxdepth: 2
+
+ client_utils
+
+Usage Examples
+==============
+.. toctree::
+ :maxdepth: 2
+
+ usage_examples
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
+
+.. rubric:: Footnotes
+
+.. [#f1] For more information on JSON-RPC, check out the `official site <http://www.jsonrpc.org/>`_
diff --git a/scripts/automation/trex_control_plane/doc/json_dictionary.yaml b/scripts/automation/trex_control_plane/doc/json_dictionary.yaml
new file mode 100755
index 00000000..89535b56
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/json_dictionary.yaml
@@ -0,0 +1,252 @@
+###############################################################
+#### TRex JSON Dictionary definitions ####
+###############################################################
+
+
+trex-global :
+ name :
+ type : string
+ exp : "this is a name representation of the main field"
+ val : "trex-global"
+ type :
+ type : int
+ val : 0
+ data :
+ m_cpu_util :
+ type : float
+ exp : "CPU utilization (0-100)"
+ val : 0.0
+ m_platform_factor :
+ type : float
+ exp : "multiplier factor"
+ val : 1.0
+ m_tx_bps :
+ type : float
+ exp : "total tx bit per second"
+ val : 0.0
+ m_rx_bps :
+ type : float
+ exp : "total rx bit per second"
+ val : 0.0
+ m_tx_pps :
+ type : float
+ exp : "total tx packet per second"
+ val : 0.0
+ m_tx_cps :
+ type : float
+ exp : "total tx connection per second"
+ val : 0.0
+ m_tx_expected_cps :
+ type : float
+ exp : "expected tx connection per second"
+ val : 0.0
+ m_tx_expected_pps :
+ type : float
+ exp : "expected tx packet per second"
+ val : 0.0
+ m_tx_expected_bps :
+ type : float
+ exp : "expected tx bit per second"
+ val : 0.0
+ m_rx_drop_bps :
+ type : float
+ exp : "drop rate in bit per second"
+ val : 0.0
+ m_active_flows :
+ type : float
+ exp : "active trex flows"
+ val : 0.0
+ m_open_flows :
+ type : float
+ exp : "open trex flows from startup (monotonically incrementing)"
+ val : 0.0
+ m_total_tx_pkts :
+ type : int
+ exp : "total tx in packets"
+ val : 0
+ m_total_rx_pkts :
+ type : int
+ exp : "total rx in packets"
+ val : 0
+ m_total_tx_bytes :
+ type : int
+ exp : "total tx in bytes"
+ val : 0
+ m_total_rx_bytes :
+ type : int
+ exp : "total rx in bytes"
+ val : 0
+ opackets-# :
+ type : int
+ exp : "output packets (per interface)"
+ val : 0
+ obytes-# :
+ type : int
+ exp : "output bytes (per interface)"
+ val : 0
+ ipackets-# :
+ type : int
+ exp : "input packet (per interface)"
+ val : 0
+ ibytes-# :
+ type : int
+ exp : "input bytes (per interface)"
+ val : 0
+ ierrors-# :
+ type : int
+ exp : "input errors (per interface)"
+ val : 0
+ oerrors-# :
+ type : int
+ exp : "input errors (per interface)"
+ val : 0
+ m_total_tx_bps-# :
+ type : float
+ exp : "total transmitted data in bit per second"
+ val : 0.0
+ unknown :
+ type : int
+ exp : ""
+ val : 0
+
+tx-gen :
+ name :
+ type : string
+ exp : "this is a name representation of the main field"
+ val : "tx-gen"
+ type :
+ type : int
+ val : 0
+ data :
+ realtime-hist :
+ type : dict
+ #exp : "Containing TX history data, by the following keys:\n - min_usec (max_usec): min (max) time packet sent before (after) it was intended to be sent\n - cnt (high_cnt): how many packet were lower than min_usec (higher than max_usec) relative to the time these packets were intended to be injected"
+ exp : "histogram of transmission. See extended information about histogram object under :ref:`histogram-object-fields`. The attribute analyzed is time packet has been sent before/after it was intended to be"
+ val : '{ "min_usec":10, "max_usec":0, "high_cnt":0, "cnt":667866, "s_avg":0.0, "histogram":[] }'
+ unknown :
+ type : int
+ exp : ""
+ val : 0
+
+trex-latecny :
+ name :
+ type : string
+ exp : "this is a name representation of the main field"
+ val : "trex-latecny"
+ type :
+ type : int
+ val : 0
+ data :
+ avg-# :
+ type : float
+ exp : "average latency in usec (per interface)"
+ val : 75.0
+ max-# :
+ type : float
+ exp : "max latency in usec from the test start (per interface)"
+ val : 75.0
+ c-max-# :
+ type : float
+ exp : "max in the last 1 sec window (per interface)"
+ val : 75.0
+ error-# :
+ type : float
+ exp : "errors in latency packets (per interface)"
+ val : 75.0
+ unknown :
+ type : int
+ exp : ""
+ val : 0
+
+
+trex-latecny-v2 :
+ name :
+ type : string
+ exp : "this is a name representation of the main field"
+ val : "trex-latecny-v2"
+ type :
+ type : int
+ val : 0
+ data :
+ cpu_util :
+ type : float
+ exp : "rx thread cpu % (this is not trex DP threads cpu%%)"
+ val : 75.0
+ port-# :
+ type : dict
+ exp : "Containing per interface information. See extended information under ``port-# -> key_name -> sub_key``"
+ val : ''
+ port-#->hist :
+ type : dict
+ exp : "histogram of latency. See extended information about histogram object under :ref:`histogram-object-fields`"
+ val : ''
+ port-#->stats :
+ type : dict
+ exp : "Containing per interface information. See extended information under ``port-# -> key_name -> sub_key``"
+ val : ''
+ port-#->stats->m_tx_pkt_ok :
+ type : int
+ exp : "total of try sent packets"
+ val : 60110
+ port-#->stats->m_pkt_ok :
+ type : int
+ exp : "total of packets sent from hardware"
+ val : 60065
+ port-#->stats->m_no_magic :
+ type : int
+ exp : "rx error with no magic"
+ val : 0
+ port-#->stats->m_no_id :
+ type : int
+ exp : "rx errors with no id"
+ val : 0
+ port-#->stats->m_seq_error :
+ type : int
+ exp : "error in seq number"
+ val : 18
+ port-#->stats->m_length_error :
+ type : int
+ exp : ""
+ val : 0
+ port-#->stats->m_rx_check :
+ type : int
+ exp : "packets tested in rx"
+ val : 407495
+ unknown :
+ type : int
+ exp : ""
+ val : 0
+histogram-obj :
+ name :
+ type : string
+ exp : "this is description of a histogram object being used in number of place throughout the JSON object"
+ val : "histogram-obj"
+ data :
+ min_usec :
+ type : int
+ exp : "min attribute value in usec. pkt with latency less than this value is not counted"
+ val : 10
+ max_usec :
+ type : int
+ exp : "max attribute value in usec"
+ val : 83819
+ high_cnt :
+ type : int
+ exp : "how many packets on which its attribute > min_usec"
+ val : 83819
+ cnt :
+ type : int
+ exp : "total packets from test startup"
+ val : 83819
+ s_avg :
+ type : float
+ exp : "average value from test startup"
+ val : 39.3
+ histogram :
+ type : array
+ exp : "histogram of relevant object by the following keys:\n - key: value in usec \n - val: number of packets"
+ val : '[{"key": 20, "val": 5048}, {"key": 30, "val": 6092}, {"key": 40, "val": 2092}]'
+
+
+
+
diff --git a/scripts/automation/trex_control_plane/doc/packet_generator/examples.rst b/scripts/automation/trex_control_plane/doc/packet_generator/examples.rst
new file mode 100755
index 00000000..bff1ef7f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/packet_generator/examples.rst
@@ -0,0 +1,5 @@
+
+Packet Builder Usage Examples
+=============================
+
+Usage examples, very similar to those added to the RPC document, will be added here. \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/packet_generator/export_format.yaml b/scripts/automation/trex_control_plane/doc/packet_generator/export_format.yaml
new file mode 100755
index 00000000..9f8c8f7b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/packet_generator/export_format.yaml
@@ -0,0 +1,47 @@
+####################################################
+#### TRex packet export format ####
+####################################################
+
+# PACKET REP - OPTION #1
+packet:
+ is_pcap : YES/NO # <1>
+ binary : [] # <2>
+ pcap : path/to/pcap/file.pcap # <3>
+ meta : any metadata wished to be passed on # <4>
+
+# PACKET REP - OPTION #2
+packet:
+ data : [] / path/to/pcap/file.pcap # <5>
+ meta : any metadata wished to be passed on # <4>
+
+vm: # <6>
+ - vm instruction #1
+ - vm instruction #2
+ ...
+ - vm instruction #N
+
+
+###################################
+#### Comments ####
+###################################
+#
+# <1>: is_pcap is a boolean field that indicates whether the packet is transferred by pcap reference
+# ('YES') or by binary representation ('NO').
+#
+# <2>: binary field encodes the packet in binary representation, as a sequence (array).
+# Each array item is an integer ranging 0-255.
+# **LEAVE BLANK IF USING PCAP REFERENCE**
+#
+# <3>: path to the linked pcap file. Make sure to provide a path with read permission.
+# **LEAVE BLANK IF USING BINARY REP FOR THE PACKET**
+#
+# <4>: meta data is any JSON-formatted data meant to be passed on.
+#
+# <5>: data field can hold either a binary representation or a pcap file reference,
+# without the need for the user's explicit typing.
+# The application logic differs between the cases by the object type
+# (array / string ending in '.pcap').
+# Less configuration, a little more confusing, less similar to the RPC spec.
+#
+# <6>: vm instructions are passed in array representation (sequence).
+# Each instruction is defined according to the structures of the supported VM instructions. \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/packet_generator/index.rst b/scripts/automation/trex_control_plane/doc/packet_generator/index.rst
new file mode 100755
index 00000000..ea9820f7
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/packet_generator/index.rst
@@ -0,0 +1,17 @@
+
+TRex Packet Builder
+-------------------
+The TRex Packet Generator is a module designed to build a single packet and set its ranging options; the packet is later transmitted using TRex.
+
+The packet generator module makes extensive use of the `dpkt <https://github.com/kbandla/dpkt>`_ Python module to create packet headers.
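+
+For orientation, building a header stack directly with dpkt looks roughly like the sketch below (the packet builder wraps this kind of construction; all field values are illustrative):
+
+.. code-block:: python
+
+    import dpkt
+
+    # nest a UDP datagram inside IP, inside Ethernet
+    udp = dpkt.udp.UDP(sport=1025, dport=12)
+    ip = dpkt.ip.IP(p=dpkt.ip.IP_PROTO_UDP, data=udp)
+    eth = dpkt.ethernet.Ethernet(data=ip)
+
+    raw = str(eth)  # serialized packet bytes (Python 2 semantics)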
+
+.. toctree::
+ :maxdepth: 4
+
+
+.. toctree::
+ :maxdepth: 0
+ :titlesonly:
+
+ examples
+ stream_export
diff --git a/scripts/automation/trex_control_plane/doc/packet_generator/packet_builder_code.bak b/scripts/automation/trex_control_plane/doc/packet_generator/packet_builder_code.bak
new file mode 100755
index 00000000..3a6e8d5f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/packet_generator/packet_builder_code.bak
@@ -0,0 +1,12 @@
+
+CTRexPktBuilder class
+---------------------
+
+.. autoclass:: packet_builder.CTRexPktBuilder
+ :members:
+ :member-order: bysource
+
+Packet Builder Exceptions
+-------------------------
+
+For exceptions documentation see here: :exc:`Packet Builder Exceptions <packet_builder.CTRexPktBuilder.CPacketBuildException>` \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/packet_generator/stream_export.rst b/scripts/automation/trex_control_plane/doc/packet_generator/stream_export.rst
new file mode 100755
index 00000000..eb639f7c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/packet_generator/stream_export.rst
@@ -0,0 +1,29 @@
+
+Stream Export YAML syntax
+=========================
+
+In order to provide a fluent work-flow that makes the best use of the TRex user's time, an export-import mini language has been created.
+
+This enables a work-flow that supports saving and sharing built packets and their scenarios, so that other tools
+(such as the TRex Console) can use them.
+
+The TRex Packet Builder module supports (using the ___ method) the export of a built stream according to the format described below.
+
+Guidelines
+----------
+
+1. The YAML file can either contain a byte representation of the packet or refer to a .pcap file that contains it.
+2. The YAML file is kept as similar as possible to the `add_stream method <http://trex-tgn.cisco.com/trex/doc/trex_rpc_server_spec.html#_add_stream>`_ of the TRex RPC server spec, which defines the raw interaction with the TRex server.
+3. Only packet binary data and VM instructions are saved. Any metadata the packet builder module used while creating the packet will be stripped out.
+
+Export Format
+-------------
+
+.. literalinclude:: export_format.yaml
+ :lines: 4-
+ :linenos:
+
+Example
+-------
+
+The following file snapshots represent each of the options (binary / pcap) for the very same HTTP GET request packet. A minimal sketch of the two variants follows (illustrative values, not a real capture):
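+
+.. code-block:: yaml
+
+    # Option 1 - inline binary representation (bytes truncated for brevity)
+    packet:
+        is_pcap : NO
+        binary  : [0, 80, 86, 140, 31, 78]
+        meta    : "HTTP GET request"
+
+    # Option 2 - reference to a pcap file holding the same packet
+    packet:
+        is_pcap : YES
+        pcap    : path/to/http_get.pcap
+        meta    : "HTTP GET request"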
diff --git a/scripts/automation/trex_control_plane/doc/usage_examples.rst b/scripts/automation/trex_control_plane/doc/usage_examples.rst
new file mode 100755
index 00000000..ff5c026d
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/usage_examples.rst
@@ -0,0 +1,68 @@
+
+Usage Examples
+==============
+
+
+Full-featured interactive shell
+-------------------------------
+
+The `client_interactive_example.py` module extends the built-in Python `Cmd <https://docs.python.org/2/library/cmd.html>`_ class to create a full-featured shell through which one can interact with the TRex server and get instant results.
+
+The help menu of this application is:
+
+.. code-block:: text
+
+ usage: client_interactive_example [options]
+
+ Run TRex client API demos and scenarios.
+
+ optional arguments:
+ -h, --help show this help message and exit
+ -v, --version show program's version number and exit
+ -t HOST, --trex-host HOST
+ Specify the hostname or ip to connect with TRex
+ server.
+ -p PORT, --trex-port PORT
+ Select port on which the TRex server listens. Default
+ port is 8090.
+ -m SIZE, --maxhist SIZE
+ Specify maximum history size saved at client side.
+ Default size is 100.
+ --verbose Switch ON verbose option at TRex client. Default is:
+ OFF.
+
+**Code Excerpt**
+
+.. literalinclude:: ../examples/client_interactive_example.py
+ :language: python
+ :emphasize-lines: 0
+ :linenos:
+
+
+End-to-End cycle
+----------------
+
+This example (``pkt_generation_for_trex.py``) demonstrates a full cycle of using the API.
+
+.. note:: this module uses `Scapy <http://www.secdev.org/projects/scapy/doc/usage.html>`_ to generate the packets that serve as the basis for the traffic injection. It is recommended to *install* this module to get the best experience from the example.
+
+The demo takes the user full circle (a minimal sketch of steps 1-2 follows this list):
+ 1. Generate packets (using Scapy).
+ 2. Export the generated packets into a .pcap file named `dns_traffic.pcap`.
+ 3. Use the :class:`trex_yaml_gen.CTRexYaml` generator to include that pcap file in the YAML object.
+ 4. Export the YAML object into a YAML file named `dns_traffic.yaml`.
+ 5. Push the generated files to the TRex server.
+ 6. Run TRex based on the generated (and pushed) files.
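+
+A sketch of steps 1-2 (the query values are hypothetical; the real code is in the excerpt below):
+
+.. code-block:: python
+
+    from scapy.all import Ether, IP, UDP, DNS, DNSQR, wrpcap
+
+    # step 1: generate a DNS query packet with Scapy
+    pkt = Ether()/IP(dst="10.0.0.1")/UDP(dport=53)/DNS(rd=1, qd=DNSQR(qname="example.com"))
+
+    # step 2: export it into the .pcap file the demo uses
+    wrpcap("dns_traffic.pcap", [pkt])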
+
+**Code Excerpt** [#f1]_
+
+.. literalinclude:: ../examples/pkt_generation_for_trex.py
+ :language: python
+ :lines: 10-
+ :emphasize-lines: 32,36,42,46,51,60,63-69,76-80
+ :linenos:
+
+
+.. rubric:: Footnotes
+
+.. [#f1] The marked code lines correspond to the steps mentioned above. \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc_stl/Makefile b/scripts/automation/trex_control_plane/doc_stl/Makefile
new file mode 100644
index 00000000..37a28d0f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc_stl/Makefile
@@ -0,0 +1,192 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " applehelp to make an Apple Help Book"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " xml to make Docutils-native XML files"
+ @echo " pseudoxml to make pseudoxml-XML files for display purposes"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+ @echo " coverage to run coverage check of the documentation (if enabled)"
+
+clean:
+ rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/T-RexProjectControlPlain.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/T-RexProjectControlPlain.qhc"
+
+applehelp:
+ $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
+ @echo
+ @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
+ @echo "N.B. You won't be able to view it unless you put it in" \
+ "~/Library/Documentation/Help or install it in your application" \
+ "bundle."
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/T-RexProjectControlPlain"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/T-RexProjectControlPlain"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through platex and dvipdfmx..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
+
+coverage:
+ $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
+ @echo "Testing of coverage in the sources finished, look at the " \
+ "results in $(BUILDDIR)/coverage/python.txt."
+
+xml:
+ $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+ @echo
+ @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+ $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+ @echo
+ @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/scripts/automation/trex_control_plane/doc_stl/_static/no_scrollbars.css b/scripts/automation/trex_control_plane/doc_stl/_static/no_scrollbars.css
new file mode 100644
index 00000000..f86e823a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc_stl/_static/no_scrollbars.css
@@ -0,0 +1,10 @@
+/* override table width restrictions */
+.wy-table-responsive table td, .wy-table-responsive table th {
+ /* !important prevents the common CSS stylesheets from
+ overriding this as on RTD they are loaded after this stylesheet */
+ white-space: normal !important;
+}
+
+.wy-table-responsive {
+ overflow: visible !important;
+} \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc_stl/_templates/layout.html b/scripts/automation/trex_control_plane/doc_stl/_templates/layout.html
new file mode 100644
index 00000000..8c1c709c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc_stl/_templates/layout.html
@@ -0,0 +1,17 @@
+{% extends "!layout.html" %}
+
+{% block footer %}
+{{ super() }}
+<script>
+ (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+ m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+ })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+
+ ga('create', 'UA-75220362-1', 'auto');
+ ga('send', 'pageview');
+
+</script>
+{% endblock %}
+
+
diff --git a/scripts/automation/trex_control_plane/doc_stl/api/client_code.rst b/scripts/automation/trex_control_plane/doc_stl/api/client_code.rst
new file mode 100755
index 00000000..953c5c84
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc_stl/api/client_code.rst
@@ -0,0 +1,260 @@
+
+Client Module
+==================
+
+The TRex Client provides access to the TRex server.
+
+**Client and interfaces**
+
+Multiple users can interact with one TRex server. Each user "owns" a different set of interfaces.
+The protocol is JSON-RPC2 over ZMQ transport.
+
+In addition to the Python API, a console-based API interface is also available.
+
+Python-like example::
+
+ c.start(ports = [0, 1], mult = "5mpps", duration = 10)
+
+ c.start(ports = [0, 1], mult = "5mpps", duration = 10, core_mask = [0x1,0xe] )
+
+ c.start(ports = [0, 1], mult = "5mpps", duration = 10, core_mask = core_mask=STLClient.CORE_MASK_PIN )
+
+Console-like example::
+
+ c.start_line (" -f stl/udp_1pkt_simple.py -m 10mpps --port 0 1 ")
+
+
+
+Example 1 - Typical Python API::
+
+ c = STLClient(username = "itay",server = "10.0.0.10", verbose_level = LoggerApi.VERBOSE_HIGH)
+
+ try:
+ # connect to server
+ c.connect()
+
+ # prepare our ports (my machine has 0 <--> 1 with static route)
+ c.reset(ports = [0, 1])
+
+ # add both streams to ports
+ c.add_streams(s1, ports = [0])
+
+ # clear the stats before injecting
+ c.clear_stats()
+
+ c.start(ports = [0, 1], mult = "5mpps", duration = 10)
+
+ # block until done
+ c.wait_on_traffic(ports = [0, 1])
+
+ # check for any warnings
+ if c.get_warnings():
+ # handle warnings here
+ pass
+
+ finally:
+ c.disconnect()
+
+
+STLClient class
+---------------
+
+.. autoclass:: trex_stl_lib.trex_stl_client.STLClient
+ :members:
+ :member-order: bysource
+
+
+STLClient snippet
+-----------------
+
+
+.. code-block:: python
+
+ # Example 1: Minimal example of client interacting with the TRex server
+
+ c = STLClient()
+
+ try:
+ # connect to server
+ c.connect()
+
+ # prepare our ports (my machine has 0 <--> 1 with static route)
+ c.reset(ports = [0, 1])
+
+ # add both streams to ports
+ c.add_streams(s1, ports = [0])
+
+ # clear the stats before injecting
+ c.clear_stats()
+
+ c.start(ports = [0, 1], mult = "5mpps", duration = 10)
+
+ # block until done
+ c.wait_on_traffic(ports = [0, 1])
+
+ # check for any warnings
+ if c.get_warnings():
+ # handle warnings here
+ pass
+
+ finally:
+ c.disconnect()
+
+
+
+.. code-block:: python
+
+ # Example 2: Client can execute other functions while the TRex server is generating traffic
+
+
+ c = STLClient()
+ try:
+ #connect to server
+ c.connect()
+
+ #..
+
+ c.start(ports = [0, 1], mult = "5mpps", duration = 10)
+
+        # poll until the traffic is done, doing other work in between
+        while True:
+            # do something else
+            time.sleep(1) # sleep for 1 sec (requires 'import time')
+            # check if the ports are still active
+            if not c.is_traffic_active(ports = [0, 1]):
+                break
+
+ finally:
+ c.disconnect()
+
+
+
+.. code-block:: python
+
+ # Example 3: Console-like API interface
+
+
+ def simple ():
+
+ # create client
+ #verbose_level = LoggerApi.VERBOSE_HIGH # set to see JSON-RPC commands
+ c = STLClient(verbose_level = LoggerApi.VERBOSE_REGULAR)
+ passed = True
+
+ try:
+ # connect to server
+ c.connect()
+
+ my_ports=[0,1]
+
+ # prepare our ports
+ c.reset(ports = my_ports)
+
+ print (" is connected {0}".format(c.is_connected()))
+
+ print (" number of ports {0}".format(c.get_port_count()))
+ print (" acquired_ports {0}".format(c.get_acquired_ports()))
+ # port stats
+ print c.get_stats(my_ports)
+
+ # port info, mac-addr info, speed
+ print c.get_port_info(my_ports)
+
+ c.ping()
+
+ print("start")
+ # start traffic on port 0,1 each 10mpps
+ c.start_line (" -f ../../../../stl/udp_1pkt_simple.py -m 10mpps --port 0 1 ")
+ time.sleep(2);
+ c.pause_line("--port 0 1");
+ time.sleep(2);
+ c.resume_line("--port 0 1");
+ time.sleep(2);
+ c.update_line("--port 0 1 -m 5mpps"); # reduce to 5 mpps
+ time.sleep(2);
+ c.stop_line("--port 0 1"); # stop both ports
+
+ except STLError as e:
+ passed = False
+ print e
+
+ finally:
+ c.disconnect()
+
+
+Example 4: Load profile from a file::
+
+ def simple ():
+
+ # create client
+ #verbose_level = LoggerApi.VERBOSE_HIGH
+ c = STLClient(verbose_level = LoggerApi.VERBOSE_REGULAR)
+ passed = True
+
+ try:
+ # connect to server
+ c.connect()
+
+ my_ports=[0,1]
+
+ # prepare our ports
+ c.reset(ports = my_ports)
+
+ profile_file = "../../../../stl/udp_1pkt_simple.py" # a traffic profile file
+
+ try: # load a profile
+ profile = STLProfile.load(profile_file)
+ except STLError as e:
+ print format_text("\nError while loading profile '{0}'\n".format(profile_file), 'bold')
+ print e.brief() + "\n"
+ return
+
+ print profile.dump_to_yaml() # print it as YAML
+
+ c.remove_all_streams(my_ports) # remove all streams
+
+
+ c.add_streams(profile.get_streams(), ports = my_ports) # add them
+
+ c.start(ports = [0, 1], mult = "5mpps", duration = 10) # start for 10 sec
+
+ # block until done
+ c.wait_on_traffic(ports = [0, 1]) # wait
+
+
+ finally:
+ c.disconnect()
+
+
+.. code-block:: python
+
+ # Example 5: pin cores to ports
+
+ c = STLClient()
+
+ try:
+ # connect to server
+ c.connect()
+
+ # prepare our ports (my machine has 0 <--> 1 with static route)
+ c.reset(ports = [0, 1])
+
+ # add both streams to ports
+ c.add_streams(s1, ports = [0])
+
+ # clear the stats before injecting
+ c.clear_stats()
+
+ c.start(ports = [0, 1], mult = "5mpps", duration = 10, core_mask = [0x1,0x2]) # pin core to ports for better performance
+
+ # block until done
+ c.wait_on_traffic(ports = [0, 1])
+
+ # check for any warnings
+ if c.get_warnings():
+ # handle warnings here
+ pass
+
+ finally:
+ c.disconnect()
+
diff --git a/scripts/automation/trex_control_plane/doc_stl/api/field_engine.rst b/scripts/automation/trex_control_plane/doc_stl/api/field_engine.rst
new file mode 100755
index 00000000..cac2f5ab
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc_stl/api/field_engine.rst
@@ -0,0 +1,254 @@
+
+Field Engine modules
+=======================
+
+The Field Engine (FE) has a limited number of instructions/operations that cover most use cases.
+There is a plan to add LuaJIT, to be more flexible at the cost of performance.
+The FE can allocate stream variables in a stream context, write a stream variable to a packet offset, change the packet size, and so on.
+
+*Examples of Field Engine uses:*
+
+* Change ipv4.tos 1-10
+* Change packet size to a random value in the range 64 to 9K
+* Create a range of flows (change src_ip, dest_ip, src_port, dest_port)
+* Update IPv4 checksum
+
+
+The following snippet creates a range of 64-byte packets::
+
+ # split the range of IP to cores
+ #
+ class STLS1(object):
+
+ def __init__ (self):
+ self.fsize =64;
+
+ def create_stream (self):
+ # create a base packet and pad it to size
+ size = self.fsize - 4; # no FCS
+
+ base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+
+ pad = max(0, size - len(base_pkt)) * 'x'
+
+ vm = STLScVmRaw( [ STLVmFlowVar ( "ip_src",
+ min_value="10.0.0.1",
+ max_value="10.0.0.255",
+ size=4,
+ step=1,
+ op="inc"),
+ STLVmWrFlowVar (fv_name="ip_src",
+ pkt_offset= "IP.src" ), # write ip to packet IP.src
+ STLVmFixIpv4(offset = "IP") # fix checksum
+ ],
+ split_by_field = "ip_src",
+ cache_size = 255 # cache the generated packets; much better performance
+ );
+
+ pkt = STLPktBuilder(pkt = base_pkt/pad,
+ vm = vm)
+ stream = STLStream(packet = pkt,
+ mode = STLTXCont())
+ #print(stream.to_code())
+ return stream
+
+
+ def get_streams (self, direction = 0, **kwargs):
+ # create 1 stream
+ return [ self.create_stream() ]
+
+
+The following snippet creates a SYN attack::
+
+ # create an attack with random src_ip in 16.0.0.0-18.0.0.254 and random src_port in 1025-65000
+ # targeting the server at 48.0.0.1
+
+ def create_stream (self):
+
+
+ # TCP SYN
+ base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S")
+
+
+ # vm
+ vm = STLScVmRaw( [ STLVmFlowVar(name="ip_src",
+ min_value="16.0.0.0",
+ max_value="18.0.0.254",
+ size=4, op="random"),
+
+ STLVmFlowVar(name="src_port",
+ min_value=1025,
+ max_value=65000,
+ size=2, op="random"),
+
+ STLVmWrFlowVar(fv_name="ip_src", pkt_offset= "IP.src" ),
+
+ STLVmFixIpv4(offset = "IP"), # fix checksum
+
+ STLVmWrFlowVar(fv_name="src_port",
+ pkt_offset= "TCP.sport") # fix udp len
+
+ ]
+ )
+
+ pkt = STLPktBuilder(pkt = base_pkt,
+ vm = vm)
+
+ return STLStream(packet = pkt,
+ random_seed = 0x1234, # can be removed; a fixed seed gives the same random values on every run
+ mode = STLTXCont())
+
+
+
+
+STLScVmRaw class
+----------------
+
+Aggregates raw instruction objects.
+
+.. autoclass:: trex_stl_lib.trex_stl_packet_builder_scapy.STLScVmRaw
+ :members:
+ :member-order: bysource
+
+
+STLVmFlowVar
+------------
+
+.. autoclass:: trex_stl_lib.trex_stl_packet_builder_scapy.STLVmFlowVar
+ :members:
+ :member-order: bysource
+
+STLVmWrFlowVar
+---------------
+
+.. autoclass:: trex_stl_lib.trex_stl_packet_builder_scapy.STLVmWrFlowVar
+ :members:
+ :member-order: bysource
+
+
+STLVmWrMaskFlowVar
+------------------
+
+.. autoclass:: trex_stl_lib.trex_stl_packet_builder_scapy.STLVmWrMaskFlowVar
+ :members:
+ :member-order: bysource
+
+STLVmFixChecksumHw
+--------------------
+
+.. autoclass:: trex_stl_lib.trex_stl_packet_builder_scapy.STLVmFixChecksumHw
+ :members:
+ :member-order: bysource
+
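+A minimal usage sketch (assumes a NIC/driver with checksum offload support; the
+variable layout is illustrative)::
+
+    vm = STLScVmRaw([ STLVmFlowVar(name="ip_src", min_value="10.0.0.1",
+                                   max_value="10.0.0.255", size=4, op="inc"),
+                      STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP.src"),
+                      # fix IPv4 and UDP checksums in hardware instead of software
+                      STLVmFixChecksumHw(l3_offset="IP",
+                                         l4_offset="UDP",
+                                         l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP)
+                    ])
+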
+
+STLVmFixIpv4
+------------------
+
+.. autoclass:: trex_stl_lib.trex_stl_packet_builder_scapy.STLVmFixIpv4
+ :members:
+ :member-order: bysource
+
+
+STLVmTrimPktSize
+------------------
+
+.. autoclass:: trex_stl_lib.trex_stl_packet_builder_scapy.STLVmTrimPktSize
+ :members:
+ :member-order: bysource
+
+STLVmTupleGen
+------------------
+
+.. autoclass:: trex_stl_lib.trex_stl_packet_builder_scapy.STLVmTupleGen
+ :members:
+ :member-order: bysource
+
+STLVmFlowVarRepetableRandom
+----------------------------
+
+.. autoclass:: trex_stl_lib.trex_stl_packet_builder_scapy.STLVmFlowVarRepetableRandom
+ :members:
+ :member-order: bysource
+
+
+
+
+Field Engine snippet
+--------------------
+
+.. code-block:: python
+
+ # FE Example1
+
+
+ base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+
+ pad = max(0, size - len(base_pkt)) * 'x'
+
+ vm = STLScVmRaw( [ STLVmTupleGen ( ip_min="16.0.0.1", ip_max="16.0.0.2",
+ port_min=1025, port_max=65535,
+ name="tuple"), # define tuple gen
+
+ # write ip to packet IP.src
+ STLVmWrFlowVar (fv_name="tuple.ip", pkt_offset= "IP.src" ),
+
+ STLVmFixIpv4(offset = "IP"), # fix checksum
+ STLVmWrFlowVar (fv_name="tuple.port", pkt_offset= "UDP.sport" ) #write udp.port
+ ]
+ );
+
+ pkt = STLPktBuilder(pkt = base_pkt/pad,
+ vm = vm)
+
+
+.. code-block:: python
+
+ # FE Example2
+
+
+ #range of source mac-addr
+
+ base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+ pad = max(0, size - len(base_pkt)) * 'x'
+
+ vm = STLScVmRaw( [ STLVmFlowVar(name="mac_src",
+ min_value=1,
+ max_value=30,
+ size=2, op="dec",step=1),
+ STLVmWrMaskFlowVar(fv_name="mac_src",
+ pkt_offset= 11,
+ pkt_cast_size=1,
+ mask=0xff)
+ ]
+ )
+
+
+.. code-block:: python
+
+ # FE Example3
+
+
+ # IP dest will have 10 repeatable random values between 48.0.0.1 and 48.0.0.255
+
+ base_pkt = Ether()/IP(src=src_ip,dst=dst_ip)/UDP(dport=12,sport=1025)
+
+ pad = max(0, size - len(base_pkt)) * 'x'
+
+ vm = STLScVmRaw( [ STLVmFlowVar ( "ip_src", min_value="10.0.0.1",
+ max_value="10.0.0.255", size=4, step=1,op="inc"),
+ STLVmFlowVarRepetableRandom ( "ip_dst",
+ min_value="48.0.0.1",
+ max_value="48.0.0.255",
+ size=4,
+ limit=10, seed=0x1235),
+
+ STLVmWrFlowVar (fv_name="ip_src", pkt_offset= "IP.src" ), # write ip to packet IP.src
+ STLVmWrFlowVar (fv_name="ip_dst", pkt_offset= "IP.dst" ), # write ip to packet IP.dst
+
+ STLVmFixIpv4(offset = "IP") # fix checksum
+ ]
+ ,split_by_field = "ip_src" # split to cores base on the tuple generator
+ ,cache_size = cache_size # the cache size
+ );
+
+
diff --git a/scripts/automation/trex_control_plane/doc_stl/api/index.rst b/scripts/automation/trex_control_plane/doc_stl/api/index.rst
new file mode 100755
index 00000000..a3c8ad5a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc_stl/api/index.rst
@@ -0,0 +1,37 @@
+
+TRex Stateless API Reference
+============================
+
+**Client: STLClient**
+
+.. toctree::
+ :maxdepth: 4
+
+ client_code
+
+**Traffic profile: STLProfile, STLStream**
+
+.. toctree::
+ :maxdepth: 4
+
+ profile_code
+
+
+**Packet builder**
+
+.. toctree::
+ :maxdepth: 4
+
+ scapy_builder
+
+
+**Field Engine**
+
+.. toctree::
+ :maxdepth: 4
+
+ field_engine
+
+
+
+
diff --git a/scripts/automation/trex_control_plane/doc_stl/api/profile_code.rst b/scripts/automation/trex_control_plane/doc_stl/api/profile_code.rst
new file mode 100755
index 00000000..8a0d7a2a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc_stl/api/profile_code.rst
@@ -0,0 +1,140 @@
+
+Traffic profile modules
+=======================
+
+The TRex STLProfile traffic profile includes a number of streams. The profile is a ``program`` of related streams:
+each stream can trigger another stream, and each stream can be named. For a full set of examples, see Manual_.
+
+.. _Manual: ../trex_stateless.html
+
+
+Example::
+
+ def create_stream (self):
+
+ # create a base packet and pad it to size
+ size = self.fsize - 4; # no FCS
+ base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+ base_pkt1 = Ether()/IP(src="16.0.0.2",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+ base_pkt2 = Ether()/IP(src="16.0.0.3",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+ pad = max(0, size - len(base_pkt)) * 'x'
+
+
+ return STLProfile( [ STLStream( isg = 1.0, # stream start delay (inter-stream gap), in usec
+ packet = STLPktBuilder(pkt = base_pkt/pad),
+ mode = STLTXCont( pps = 10),
+ ),
+
+ STLStream( isg = 2.0,
+ packet = STLPktBuilder(pkt = base_pkt1/pad),
+ mode = STLTXCont( pps = 20),
+ ),
+
+ STLStream( isg = 3.0,
+ packet = STLPktBuilder(pkt = base_pkt2/pad),
+ mode = STLTXCont( pps = 30)
+
+ )
+ ]).get_streams()
+
+
+STLProfile class
+----------------
+
+.. autoclass:: trex_stl_lib.trex_stl_streams.STLProfile
+ :members:
+ :member-order: bysource
+
+STLStream
+---------
+
+.. autoclass:: trex_stl_lib.trex_stl_streams.STLStream
+ :members:
+ :member-order: bysource
+
+
+STLStream modes
+----------------
+
+.. autoclass:: trex_stl_lib.trex_stl_streams.STLTXMode
+ :members:
+ :member-order: bysource
+
+.. autoclass:: trex_stl_lib.trex_stl_streams.STLTXCont
+ :members:
+ :member-order: bysource
+
+.. autoclass:: trex_stl_lib.trex_stl_streams.STLTXSingleBurst
+ :members:
+ :member-order: bysource
+
+.. autoclass:: trex_stl_lib.trex_stl_streams.STLTXMultiBurst
+ :members:
+ :member-order: bysource
+
+.. autoclass:: trex_stl_lib.trex_stl_streams.STLFlowStats
+ :members:
+ :member-order: bysource
+
+.. autoclass:: trex_stl_lib.trex_stl_streams.STLFlowLatencyStats
+ :members:
+ :member-order: bysource
+
+
+
+
+STLProfile snippet
+------------------
+
+
+.. code-block:: python
+
+ # STLProfile Example1
+
+
+ size = self.fsize - 4; # no FCS
+ base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+ base_pkt1 = Ether()/IP(src="16.0.0.2",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+ base_pkt2 = Ether()/IP(src="16.0.0.3",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+ pad = max(0, size - len(base_pkt)) * 'x'
+
+
+ return STLProfile( [ STLStream( isg = 10.0, # stream start delay (inter-stream gap), in usec
+ name ='S0',
+ packet = STLPktBuilder(pkt = base_pkt/pad),
+ mode = STLTXSingleBurst( pps = 10, total_pkts = 10),
+ next = 'S1'), # point to next stream
+
+ STLStream( self_start = False, # stream starts disabled; triggered by S0 via 'next'
+ name ='S1',
+ packet = STLPktBuilder(pkt = base_pkt1/pad),
+ mode = STLTXSingleBurst( pps = 10, total_pkts = 20),
+ next = 'S2' ),
+
+ STLStream( self_start = False, # stream starts disabled; triggered by S1 via 'next'
+ name ='S2',
+ packet = STLPktBuilder(pkt = base_pkt2/pad),
+ mode = STLTXSingleBurst( pps = 10, total_pkts = 30 )
+ )
+ ]).get_streams()
+
+
+.. code-block:: python
+
+ # STLProfile Example2
+
+
+ class STLS1(object):
+
+ def get_streams (self, direction = 0):
+ return [STLStream(packet = STLPktBuilder(pkt ="stl/yaml/udp_64B_no_crc.pcap"),
+ mode = STLTXCont(pps=1000),
+ flow_stats = STLFlowStats(pg_id = 7)),
+
+ STLStream(packet = STLPktBuilder(pkt ="stl/yaml/udp_594B_no_crc.pcap"),
+ mode = STLTXCont(pps=5000),
+ flow_stats = STLFlowStats(pg_id = 12))
+ ]
+
+
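+For latency measurement, ``STLFlowStats`` can be replaced with
+``STLFlowLatencyStats``. A minimal sketch based on Example2:
+
+.. code-block:: python
+
+    # STLProfile Example3 (sketch): the first stream of Example2, with
+    # per-stream latency measurement instead of plain flow stats
+
+    STLStream(packet = STLPktBuilder(pkt = "stl/yaml/udp_64B_no_crc.pcap"),
+              mode = STLTXCont(pps = 1000),
+              flow_stats = STLFlowLatencyStats(pg_id = 7))
+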
+
diff --git a/scripts/automation/trex_control_plane/doc_stl/api/scapy_builder.rst b/scripts/automation/trex_control_plane/doc_stl/api/scapy_builder.rst
new file mode 100755
index 00000000..2c5790bf
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc_stl/api/scapy_builder.rst
@@ -0,0 +1,44 @@
+
+Packet builder modules
+=======================
+
+The packet builder module is used for building a template packet for a stream, and creating a Field Engine program to change fields in the packet.
+
+**Examples:**
+
+* Build an IP/UDP/DNS packet with a src_ip range of 10.0.0.1 to 10.0.0.255
+* Build IP/UDP packets in IMIX sizes
+
+
+For example, this snippet creates a SYN attack::
+
+ # create an attack with random src_ip in 16.0.0.0-18.0.0.254 and random src_port in 1025-65000
+ # targeting the server at 48.0.0.1
+
+ def create_stream (self):
+
+
+ # TCP SYN
+ base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S")
+
+ pkt = STLPktBuilder(pkt = base_pkt)
+
+ return STLStream(packet = pkt, #<<<<< set packet builder inside the stream
+ mode = STLTXCont())
+
+
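+A packet template can also be taken from a pcap file instead of being built
+with Scapy. A short sketch (the pcap path follows the profile examples)::
+
+    pkt = STLPktBuilder(pkt = "stl/yaml/udp_64B_no_crc.pcap")
+
+    return STLStream(packet = pkt,
+                     mode = STLTXCont())
+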
+
+
+STLPktBuilder class
+--------------------
+
+Builds the packet template for a stream and binds its optional Field Engine program.
+
+.. autoclass:: trex_stl_lib.trex_stl_packet_builder_scapy.STLPktBuilder
+ :members:
+ :member-order: bysource
+
+
+
+
+
diff --git a/scripts/automation/trex_control_plane/doc_stl/conf.py b/scripts/automation/trex_control_plane/doc_stl/conf.py
new file mode 100644
index 00000000..c8788ca7
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc_stl/conf.py
@@ -0,0 +1,328 @@
+# -*- coding: utf-8 -*-
+#
+# TRex Control Plane documentation build configuration file, created by
+# sphinx-quickstart on Tue Jun 2 07:48:10 2015.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+import functools
+
+def no_op_wraps(func):
+ """Replaces functools.wraps in order to undo wrapping.
+
+ Can be used to preserve the decorated function's signature
+ in the documentation generated by Sphinx.
+
+ """
+ def wrapper(decorator):
+ return func
+ return wrapper
+
+functools.wraps = no_op_wraps
+
+
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('../stl'))
+## add all external libs path manually
+external_libs_path = os.path.join(os.pardir, os.pardir, os.pardir, "external_libs")
+external_libs_pkgs = [os.path.join(external_libs_path, p)
+ for p in os.listdir(external_libs_path)
+ if os.path.isdir(os.path.join(external_libs_path, p))]
+for p in external_libs_pkgs:
+ sys.path.insert(1, p)
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.todo',
+ 'sphinx.ext.viewcode',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'TRex Stateless Python API'
+copyright = u'2015, Cisco Systems Inc.'
+author = u'TRex team, Cisco Systems Inc.'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '1.94'
+# The full version, including alpha/beta/rc tags.
+release = '2.0'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#html_theme = 'sphinx_rtd_theme'
+html_theme = 'classic'
+#html_theme = 'sphinxdoc'
+
+#html_theme_options = {
+# "rightsidebar": "true"
+# }
+
+autodoc_docstring_signature = True
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+#html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+#html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'TRexControlPlaindoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+
+# Latex figure (float) alignment
+#'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (master_doc, 'TRexControlPlain.tex', u'TRex Control Plane Documentation',
+ u'hhaim', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (master_doc, 'TRexcontrolplain', u'TRex Control Plane Documentation',
+ [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (master_doc, 'TRexControlPlain', u'TRex Control Plane Documentation',
+ author, 'TRexControlPlain', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
+
+
+# show documentation for both __init__ methods and class methods
+autoclass_content = "both"
+
+# A workaround for the responsive tables always having annoying scrollbars.
+def setup(app):
+ app.add_stylesheet("no_scrollbars.css")
+
diff --git a/scripts/automation/trex_control_plane/doc_stl/index.rst b/scripts/automation/trex_control_plane/doc_stl/index.rst
new file mode 100644
index 00000000..8a2fc4b0
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc_stl/index.rst
@@ -0,0 +1,101 @@
+.. TRex Stateless Python API documentation master file;
+   contains the root `toctree` directive.
+
+TRex Stateless Python API
+==============================================
+
+TRex is a **traffic generator**.
+
+This site covers the Python API of TRex and explains how to use it for your needs.
+To fully understand how the API works and how to set up the server side, check out the `trex-core Wiki <https://github.com/cisco-system-traffic-generator/trex-core/wiki>`_ in the documentation section of the TRex website.
+
+**Use the table of contents below or the menu to your left to navigate through the site**
+
+How to Install
+===============
+.. toctree::
+ :maxdepth: 2
+
+| The TRex package contains trex_client.tar.gz.
+| Put it anywhere you like, preferably next to your scripts.
+| (If it is not next to your scripts, you will need to make sure the trex_client directory is in sys.path; see the sketch below.)
+
+Unpack it with::
+
+ tar -xzf trex_client.tar.gz
+
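+If the package is not next to your scripts, a minimal sketch of adding it to
+sys.path (the install location below is hypothetical)::
+
+    import sys
+    sys.path.insert(0, "/path/to/trex_client/stl")  # hypothetical location
+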
+How to use
+==================
+
+| The client assumes the server is running.
+| After unpacking the client package, you can run the basic examples in the examples directory out of the box:
+
+.. code-block:: bash
+
+ cd trex_client/stl/examples
+ python stl_imix.py -s <server address>
+
+You can verify that the traffic was sent and arrived properly if you see output like this::
+
+ Mapped ports to sides [0, 2] <--> [1, 3]
+ Injecting [0, 2] <--> [1, 3] on total rate of '30%' for 10 seconds
+
+ Packets injected from [0, 2]: 473,856
+ Packets injected from [1, 3]: 473,856
+
+ packets lost from [0, 2] --> [0, 2]: 0 pkts
+ packets lost from [1, 3] --> [1, 3]: 0 pkts
+
+ Test has passed :-)
+
+
+The stl folder also contains directories with traffic profiles that define streams, and the console, with which you can easily load and send those profiles (see the sketch below).
+
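+For example, to point the console at a running server (a sketch; the exact
+location of the console script varies between packages)::
+
+    cd trex_client/stl
+    ./trex-console -s <server address>
+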
+How to use with pyATS
+======================
+
+.. sectionauthor:: David Shen
+
+pyATS Compatibility
+
+TRex supports both Python2 and Python3 pyATS.
+
+* Install python2/python3 pyATS::
+
+    /auto/pyats/bin/pyats-install --python2
+    /auto/pyats/bin/pyats-install --python3
+
+* Set TREX_PATH to the TRex stateless lib path::
+
+    setenv TREX_PATH <your path>/automation/trex_control_plane/stl
+
+* In the script or job file, add the TREX_PATH to sys.path::
+
+ import sys, os; sys.path.append(os.environ['TREX_PATH'])
+
+* Source trex stateless libs in scripts::
+
+ from trex_stl_lib.api import *
+ from scapy.contrib.mpls import *
+ from trex_stl_lib.trex_stl_hltapi import *
+
+If using trex_client package, import syntax is::
+
+ from trex_client.stl.api import *
+
+
+API Reference
+==============
+.. toctree::
+ :maxdepth: 2
+
+ api/index
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
+
+.. rubric:: Footnotes
+
diff --git a/scripts/automation/trex_control_plane/examples/__init__.py b/scripts/automation/trex_control_plane/examples/__init__.py
new file mode 100755
index 00000000..d3f5a12f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/__init__.py
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/examples/client_interactive_example.py b/scripts/automation/trex_control_plane/examples/client_interactive_example.py
new file mode 100755
index 00000000..d21b2b15
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/client_interactive_example.py
@@ -0,0 +1,254 @@
+#!/router/bin/python-2.7.4
+
+import trex_root_path
+from client.trex_client import *
+from common.trex_exceptions import *
+import cmd
+import termstyle
+import os
+from argparse import ArgumentParser
+from pprint import pprint
+import json
+import time
+import socket
+import errno
+
+class InteractiveTRexClient(cmd.Cmd):
+
+ intro = termstyle.green("\nInteractive shell to play with Cisco's TRex API.\nType help to view available pre-defined scenarios\n(c) All rights reserved.\n")
+ prompt = '> '
+
+ def __init__(self, trex_host, max_history_size = 100, trex_port = 8090, verbose_mode = False ):
+ cmd.Cmd.__init__(self)
+ self.verbose = verbose_mode
+ self.trex = CTRexClient(trex_host, max_history_size, trex_daemon_port = trex_port, verbose = verbose_mode)
+ self.DEFAULT_RUN_PARAMS = dict( m = 1.5,
+ nc = True,
+ p = True,
+ d = 100,
+ f = 'avl/sfr_delay_10_1g.yaml',
+ l = 1000)
+ self.run_params = dict(self.DEFAULT_RUN_PARAMS)
+ self.decoder = json.JSONDecoder()
+
+
+ def do_push_files (self, filepaths):
+ """Pushes a custom file to be stored locally on TRex server.\nPush multiple files by spefiying their path separated by ' ' (space)."""
+ try:
+ filepaths = filepaths.split(' ')
+ print termstyle.green("*** Starting pushing files ({trex_files}) to TRex. ***".format (trex_files = ', '.join(filepaths)) )
+ ret_val = self.trex.push_files(filepaths)
+ if ret_val:
+ print termstyle.green("*** End of TRex push_files method (success) ***")
+ else:
+ print termstyle.magenta("*** End of TRex push_files method (failed) ***")
+
+ except IOError as inst:
+ print termstyle.magenta(inst)
+
+ def do_show_default_run_params(self,line):
+ """Outputs the default TRex running parameters"""
+ pprint(self.DEFAULT_RUN_PARAMS)
+ print termstyle.green("*** End of default TRex running parameters ***")
+
+ def do_show_run_params(self,line):
+ """Outputs the currently configured TRex running parameters"""
+ pprint(self.run_params)
+ print termstyle.green("*** End of TRex running parameters ***")
+
+ def do_update_run_params(self, json_str):
+ """Updates provided parameters on TRex running configuration. Provide using JSON string"""
+ if json_str:
+ try:
+ upd_params = self.decoder.decode(json_str)
+ self.run_params.update(upd_params)
+ print termstyle.green("*** End of TRex parameters update ***")
+ except ValueError as inst:
+ print termstyle.magenta("Provided illegal JSON string. Please try again.\n[", inst,"]")
+ else:
+ print termstyle.magenta("JSON configuration string is missing. Please try again.")
+
+ def do_show_status (self, line):
+ """Prompts TRex current status"""
+ print self.trex.get_running_status()
+ print termstyle.green("*** End of TRex status prompt ***")
+
+ def do_show_trex_files_path (self, line):
+ """Prompts the local path in which files are stored when pushed to trex server from client"""
+ print self.trex.get_trex_files_path()
+ print termstyle.green("*** End of trex_files_path prompt ***")
+
+ def do_show_reservation_status (self, line):
+ """Prompts if TRex is currently reserved or not"""
+ if self.trex.is_reserved():
+ print "TRex is reserved"
+ else:
+ print "TRex is NOT reserved"
+ print termstyle.green("*** End of reservation status prompt ***")
+
+ def do_reserve_trex (self, user):
+ """Reserves the usage of TRex to a certain user"""
+ try:
+ if not user:
+ ret = self.trex.reserve_trex()
+ else:
+ ret = self.trex.reserve_trex(user.split(' ')[0])
+ print termstyle.green("*** TRex reserved successfully ***")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_cancel_reservation (self, user):
+ """Cancels a current reservation of TRex to a certain user"""
+ try:
+ if not user:
+ ret = self.trex.cancel_reservation()
+ else:
+ ret = self.trex.cancel_reservation(user.split(' ')[0])
+ print termstyle.green("*** TRex reservation canceled successfully ***")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_restore_run_default (self, line):
+ """Restores original TRex running configuration"""
+ self.run_params = dict(self.DEFAULT_RUN_PARAMS)
+ print termstyle.green("*** End of restoring default run parameters ***")
+
+ def do_run_until_finish (self, sample_rate):
+ """Starts TRex and sample server until run is done."""
+ print termstyle.green("*** Starting TRex run_until_finish scenario ***")
+
+ if not sample_rate: # use default sample rate if not passed
+ sample_rate = 5
+ try:
+ sample_rate = int(sample_rate)
+ ret = self.trex.start_trex(**self.run_params)
+ self.trex.sample_to_run_finish(sample_rate)
+ print termstyle.green("*** End of TRex run ***")
+ except ValueError as inst:
+ print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_run_and_poll (self, sample_rate):
+ """Starts TRex and sample server manually until run is done."""
+ print termstyle.green("*** Starting TRex run and manually poll scenario ***")
+ if not sample_rate: # use default sample rate if not passed
+ sample_rate = 5
+ try:
+ sample_rate = int(sample_rate)
+ ret = self.trex.start_trex(**self.run_params)
+ last_res = dict()
+ while self.trex.is_running(dump_out = last_res):
+ obj = self.trex.get_result_obj()
+ if (self.verbose):
+ print obj
+ # do WHATEVER here
+ time.sleep(sample_rate)
+
+ print termstyle.green("*** End of TRex run ***")
+ except ValueError as inst:
+ print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+
+ def do_run_until_condition (self, sample_rate):
+ """Starts TRex and sample server until condition is satisfied."""
+ print termstyle.green("*** Starting TRex run until condition is satisfied scenario ***")
+
+ def condition (result_obj):
+ return result_obj.get_current_tx_rate()['m_tx_pps'] > 200000
+
+ if not sample_rate: # use default sample rate if not passed
+ sample_rate = 5
+ try:
+ sample_rate = int(sample_rate)
+ ret = self.trex.start_trex(**self.run_params)
+ ret_val = self.trex.sample_until_condition(condition, sample_rate)
+ print ret_val
+ print termstyle.green("*** End of TRex run ***")
+ except ValueError as inst:
+ print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_start_and_return (self, line):
+ """Start TRex run and once in 'Running' mode, return to cmd prompt"""
+ print termstyle.green("*** Starting TRex run, wait until in 'Running' state ***")
+ try:
+ ret = self.trex.start_trex(**self.run_params)
+ print termstyle.green("*** End of scenario (TRex is probably still running!) ***")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_poll_once (self, line):
+ """Performs a single poll of TRex current data dump (if TRex is running) and prompts and short version of latest result_obj"""
+ print termstyle.green("*** Trying TRex single poll ***")
+ try:
+ last_res = dict()
+ if self.trex.is_running(dump_out = last_res):
+ obj = self.trex.get_result_obj()
+ print obj
+ else:
+ print termstyle.magenta("TRex isn't currently running.")
+ print termstyle.green("*** End of scenario (TRex is posssibly still running!) ***")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+
+ def do_stop_trex (self, line):
+ """Try to stop TRex run (if TRex is currently running)"""
+ print termstyle.green("*** Starting TRex termination ***")
+ try:
+ ret = self.trex.stop_trex()
+ print termstyle.green("*** End of scenario (TRex is not running now) ***")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_kill_indiscriminately (self, line):
+ """Force killing of running TRex process (if exists) on the server."""
+ print termstyle.green("*** Starting TRex termination ***")
+ ret = self.trex.force_kill()
+ if ret:
+ print termstyle.green("*** End of scenario (TRex is not running now) ***")
+ elif ret is None:
+ print termstyle.magenta("*** End of scenario (TRex termination aborted) ***")
+ else:
+ print termstyle.red("*** End of scenario (TRex termination failed) ***")
+
+ def do_exit(self, arg):
+ """Quits the application"""
+ print termstyle.cyan('Bye Bye!')
+ return True
+
+
+if __name__ == "__main__":
+ parser = ArgumentParser(description = termstyle.cyan('Run TRex client API demos and scenarios.'),
+ usage = """client_interactive_example [options]""" )
+
+ parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0 \t (C) Cisco Systems Inc.\n')
+
+ parser.add_argument("-t", "--trex-host", required = True, dest="trex_host",
+ action="store", help="Specify the hostname or ip to connect with TRex server.",
+ metavar="HOST" )
+ parser.add_argument("-p", "--trex-port", type=int, default = 8090, metavar="PORT", dest="trex_port",
+ help="Select port on which the TRex server listens. Default port is 8090.", action="store")
+ parser.add_argument("-m", "--maxhist", type=int, default = 100, metavar="SIZE", dest="hist_size",
+ help="Specify maximum history size saved at client side. Default size is 100.", action="store")
+ parser.add_argument("--verbose", dest="verbose",
+ action="store_true", help="Switch ON verbose option at TRex client. Default is: OFF.",
+ default = False )
+ args = parser.parse_args()
+
+ try:
+ InteractiveTRexClient(args.trex_host, args.hist_size, args.trex_port, args.verbose).cmdloop()
+
+ except KeyboardInterrupt:
+ print termstyle.cyan('Bye Bye!')
+ exit(-1)
+ except socket.error, e:
+ if e.errno == errno.ECONNREFUSED:
+ raise socket.error(errno.ECONNREFUSED, "Connection from TRex server was terminated. Please make sure the server is up.")
+
+
+
diff --git a/scripts/automation/trex_control_plane/examples/client_tcl_example.tcl b/scripts/automation/trex_control_plane/examples/client_tcl_example.tcl
new file mode 100755
index 00000000..3467c898
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/client_tcl_example.tcl
@@ -0,0 +1,28 @@
+#!/bin/sh
+#\
+exec /usr/bin/expect "$0" "$@"
+#
+
+# Sourcing the tcl trex client api
+source trex_tcl_client.tcl
+
+#Initializing trex server attributes
+set check [TRexTclClient::create_host "localhost"]
+
+#Formulate the command options as a dict
+set trex_cmds [dict create c 2 m 10 l 1000 ]
+
+#call the start_trex rpc function by feeding the appropriate arguments
+set status [::TRexTclClient::start_trex "cap2/dns.yaml" 40 $trex_cmds]
+puts "Status : $status"
+
+#get the result json
+set result [::TRexTclClient::get_result_obj]
+puts "$result"
+
+#stop the trex server
+set ret_value [ ::TRexTclClient::stop_trex $status]
+puts "Stop value : $ret_value"
+puts "\n \n"
+
+
diff --git a/scripts/automation/trex_control_plane/examples/interactive_stateless.py b/scripts/automation/trex_control_plane/examples/interactive_stateless.py
new file mode 100644
index 00000000..f6ada17d
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/interactive_stateless.py
@@ -0,0 +1,128 @@
+#!/router/bin/python
+
+import trex_root_path
+from client.trex_stateless_client import *
+from common.trex_exceptions import *
+import cmd
+from termstyle import termstyle
+# import termstyle
+import os
+from argparse import ArgumentParser
+import socket
+import errno
+import ast
+import json
+
+
+class InteractiveStatelessTRex(cmd.Cmd):
+
+ intro = termstyle.green("\nInteractive shell to play with Cisco's TRex stateless API.\
+ \nType help to view available pre-defined scenarios\n(c) All rights reserved.\n")
+ prompt = '> '
+
+ def __init__(self, trex_host, trex_port, virtual, verbose):
+ cmd.Cmd.__init__(self)
+
+ self.verbose = verbose
+ self.virtual = virtual
+ self.trex = STLClient(trex_host, trex_port, self.virtual)
+ self.DEFAULT_RUN_PARAMS = dict(m=1.5,
+ nc=True,
+ p=True,
+ d=100,
+ f='avl/sfr_delay_10_1g.yaml',
+ l=1000)
+ self.run_params = dict(self.DEFAULT_RUN_PARAMS)
+
+ def do_transmit(self, line):
+ """Transmits a request over using a given link to server.\
+ \nuse: transmit [method_name] [method_params]"""
+ if line == "":
+ print "\nUsage: [method name] [param dict as string]\n"
+ print "Example: rpc test_add {'x': 12, 'y': 17}\n"
+ return
+
+ args = line.split(' ', 1) # args will have max length of 2
+ method_name = args[0]
+ params = None
+ bad_parse = False
+
+ try:
+ params = ast.literal_eval(args[1])
+ if not isinstance(params, dict):
+ bad_parse = True
+ except ValueError as e1:
+ bad_parse = True
+ except SyntaxError as e2:
+ bad_parse = True
+
+ if bad_parse:
+ print "\nValue should be a valid dict: '{0}'".format(args[1])
+ print "\nUsage: [method name] [param dict as string]\n"
+ print "Example: rpc test_add {'x': 12, 'y': 17}\n"
+ return
+
+ response = self.trex.transmit(method_name, params)
+ if not self.virtual:
+ # expect response
+ rc, msg = response
+ if rc:
+ print "\nServer Response:\n\n" + json.dumps(msg) + "\n"
+ else:
+ print "\n*** " + msg + "\n"
+
+
+
+
+
+ def do_push_files(self, filepaths):
+ """Pushes a custom file to be stored locally on TRex server.\
+ \nPush multiple files by specifying their path separated by ' ' (space)."""
+ try:
+ filepaths = filepaths.split(' ')
+ print termstyle.green("*** Starting pushing files ({trex_files}) to TRex. ***".format(
+ trex_files=', '.join(filepaths))
+ )
+ ret_val = self.trex.push_files(filepaths)
+ if ret_val:
+ print termstyle.green("*** End of TRex push_files method (success) ***")
+ else:
+ print termstyle.magenta("*** End of TRex push_files method (failed) ***")
+
+ except IOError as inst:
+ print termstyle.magenta(inst)
+
+if __name__ == "__main__":
+ parser = ArgumentParser(description=termstyle.cyan('Run TRex client stateless API demos and scenarios.'),
+ usage="client_interactive_example [options]")
+
+ parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0 \t (C) Cisco Systems Inc.\n')
+
+ parser.add_argument("-t", "--trex-host", required = True, dest="trex_host",
+ action="store", help="Specify the hostname or ip to connect with TRex server.",
+ metavar="HOST" )
+ parser.add_argument("-p", "--trex-port", type=int, default = 5050, metavar="PORT", dest="trex_port",
+ help="Select port on which the TRex server listens. Default port is 5050.", action="store")
+ # parser.add_argument("-m", "--maxhist", type=int, default = 100, metavar="SIZE", dest="hist_size",
+ # help="Specify maximum history size saved at client side. Default size is 100.", action="store")
+ parser.add_argument("--virtual", dest="virtual",
+ action="store_true",
+ help="Switch ON virtual option at TRex client. Default is: OFF.",
+ default=False)
+ parser.add_argument("--verbose", dest="verbose",
+ action="store_true",
+ help="Switch ON verbose option at TRex client. Default is: OFF.",
+ default=False)
+ args = parser.parse_args()
+
+ try:
+ InteractiveStatelessTRex(**vars(args)).cmdloop()
+
+ except KeyboardInterrupt:
+ print termstyle.cyan('Bye Bye!')
+ exit(-1)
+ except socket.error, e:
+ if e.errno == errno.ECONNREFUSED:
+ raise socket.error(errno.ECONNREFUSED,
+ "Connection from TRex server was terminated. \
+ Please make sure the server is up.")
diff --git a/scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py b/scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py
new file mode 100755
index 00000000..acaa95d3
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py
@@ -0,0 +1,105 @@
+#!/router/bin/python
+
+######################################################################################
+### ###
+### TRex end-to-end demo script, written by TRex dev-team ###
+### THIS SCRIPT ASSUMES PyYaml and Scapy INSTALLED ON PYTHON'S RUNNING MACHINE ###
+### (for any question please contact trex-dev team @ trex-dev@cisco.com) ###
+### ###
+######################################################################################
+
+
+import logging
+import time
+import trex_root_path
+from client.trex_client import *
+from client_utils.general_utils import *
+from client_utils.trex_yaml_gen import *
+from pprint import pprint
+from argparse import ArgumentParser
+
+# import scapy package
+logging.getLogger("scapy.runtime").setLevel(logging.ERROR) # supress scapy import warnings from being displayed
+from scapy.all import *
+
+
+def generate_dns_packets (src_ip, dst_ip):
+ dns_rqst = Ether(src='00:15:17:a7:75:a3', dst='e0:5f:b9:69:e9:22')/IP(src=src_ip,dst=dst_ip,version=4L)/UDP(dport=53, sport=1030)/DNS(rd=1,qd=DNSQR(qname="www.cisco.com"))
+ dns_resp = Ether(src='e0:5f:b9:69:e9:22', dst='00:15:17:a7:75:a3')/IP(src=dst_ip,dst=src_ip,version=4L)/UDP(dport=1030, sport=53)/DNS(aa=1L, qr=1L, an=DNSRR(rclass=1, rrname='www.cisco.com.', rdata='100.100.100.100', type=1), ad=0L, qdcount=1, ns=None, tc=0L, rd=0L, ar=None, opcode=0L, ra=1L, cd=0L, z=0L, rcode=0L, qd=DNSQR(qclass=1, qtype=1, qname='www.cisco.com.'))
+ return [dns_rqst, dns_resp]
+
+def pkts_to_pcap (pcap_filename, packets):
+ wrpcap(pcap_filename, packets)
+
+
+def main (args):
+ # instantiate TRex client
+ trex = CTRexClient('trex-dan', verbose = args.verbose)
+
+ if args.steps:
+ print "\nNext step: .pcap generation."
+ raw_input("Press Enter to continue...")
+ # generate TRex traffic.
+ pkts = generate_dns_packets('21.0.0.2', '22.0.0.12') # In this case - DNS traffic (request-response)
+ print "\ngenerated traffic:"
+ print "=================="
+ map(lambda x: pprint(x.summary()) , pkts)
+ pkts_to_pcap("dns_traffic.pcap", pkts) # Export the generated to a .pcap file
+
+ if args.steps:
+ print "\nNext step: .yaml generation."
+ raw_input("Press Enter to continue...")
+ # Generate .yaml file that uses the generated .pcap file
+ trex_files_path = trex.get_trex_files_path() # fetch the path in which packets are saved on TRex server
+ yaml_obj = CTRexYaml(trex_files_path) # instantiate CTRexYaml obj
+
+ # set .yaml file parameters according to need and use
+ ret_idx = yaml_obj.add_pcap_file("dns_traffic.pcap")
+ yaml_obj.set_cap_info_param('cps', 1.1, ret_idx)
+
+ # export yaml_obj to a .yaml file
+ yaml_file_path = trex_files_path + 'dns_traffic.yaml'
+ yaml_obj.to_yaml('dns_traffic.yaml')
+ print "\ngenerated .yaml file:"
+ print "===================="
+ yaml_obj.dump()
+
+ if args.steps:
+ print "\nNext step: run TRex with provided files."
+ raw_input("Press Enter to continue...")
+ # push all relevant files to server
+ trex.push_files( yaml_obj.get_file_list() )
+
+ print "\nStarting TRex..."
+ trex.start_trex(c = 2,
+ m = 1.5,
+ nc = True,
+ p = True,
+ d = 30,
+ f = yaml_file_path, # <-- we use our generated .yaml file here
+ l = 1000)
+
+ if args.verbose:
+ print "TRex state changed to 'Running'."
+ print "Sampling TRex in 0.2 samples/sec (single sample every 5 secs)"
+
+ last_res = dict()
+ while trex.is_running(dump_out = last_res):
+ print "CURRENT RESULT OBJECT:"
+ obj = trex.get_result_obj()
+ print obj
+ time.sleep(5)
+
+
+if __name__ == "__main__":
+ parser = ArgumentParser(description = 'Run TRex client API end-to-end example.',
+ usage = """pkt_generation_for_trex [options]""" )
+
+ parser.add_argument("-s", "--step-by-step", dest="steps",
+ action="store_false", help="Switch OFF step-by-step script overview. Default is: ON.",
+ default = True )
+ parser.add_argument("--verbose", dest="verbose",
+ action="store_true", help="Switch ON verbose option at TRex client. Default is: OFF.",
+ default = False )
+ args = parser.parse_args()
+ main(args) \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/examples/stateless_example.py b/scripts/automation/trex_control_plane/examples/stateless_example.py
new file mode 100755
index 00000000..bb0fe983
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/stateless_example.py
@@ -0,0 +1,30 @@
+#!/router/bin/python
+
+import trex_root_path
+from client.trex_hltapi import CTRexHltApi
+
+if __name__ == "__main__":
+ port_list = [1,2]
+ try:
+ hlt_client = CTRexHltApi()
+ con = hlt_client.connect("localhost", port_list, "danklei", break_locks=True, reset=True)#, port=6666)
+ print con
+
+ res = hlt_client.traffic_config("create", 1)#, ip_src_addr="2000.2.2")
+ print res
+ res = hlt_client.traffic_config("create", 2)#, ip_src_addr="2000.2.2")
+ print res
+
+ res = hlt_client.traffic_control("run", [1, 2])#, ip_src_addr="2000.2.2")
+ print res
+
+ res = hlt_client.traffic_control("stop", [1, 2])#, ip_src_addr="2000.2.2")
+ print res
+
+
+
+ except Exception as e:
+ raise
+ finally:
+ res = hlt_client.cleanup_session(port_list)
+ print res \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/examples/trex_root_path.py b/scripts/automation/trex_control_plane/examples/trex_root_path.py
new file mode 100755
index 00000000..3aefd1d2
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/trex_root_path.py
@@ -0,0 +1,15 @@
+#!/router/bin/python
+
+import os
+import sys
+
+def add_root_to_path ():
+ """adds trex_control_plane root dir to script path, up to `depth` parent dirs"""
+ root_dirname = 'trex_control_plane'
+ file_path = os.path.dirname(os.path.realpath(__file__))
+
+ components = file_path.split(os.sep)
+ sys.path.append( str.join(os.sep, components[:components.index(root_dirname)+1]) )
+ return
+
+add_root_to_path()
diff --git a/scripts/automation/trex_control_plane/examples/trex_tcl_client.tcl b/scripts/automation/trex_control_plane/examples/trex_tcl_client.tcl
new file mode 100755
index 00000000..f700a5d2
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/trex_tcl_client.tcl
@@ -0,0 +1,228 @@
+#!/bin/sh
+#\
+exec /usr/bin/expect "$0" "$@"
+#
+
+package require JSONRPC
+package require json
+
+
+
+################################################################
+# Author: Vamsi Kalapala
+# Version: 0.1
+# Description: This TRex Tcl client helps connect to the trex_host
+# by sending JSON-RPC calls to the Python TRex server.
+#
+#
+#
+################################################################
+
+namespace eval TRexTclClient {
+ variable version 0.1
+ ################################################################
+ # trex_host should be either the IPV4 address or
+ # dns name accompanied by http://
+ # Accepted trex_host names:
+ # http://172.18.73.122,
+ # http://up-trex-1-10g
+ # http://up-trex-1-10g.cisco.com,
+ ################################################################
+
+ variable TRex_Status [dict create 1 "Idle" 2 "Starting" 3 "Running"]
+
+ variable trex_host "localhost"
+ variable server_port "8090"
+ variable trex_zmq_port "4500"
+
+ variable current_seq_num 0
+
+ variable trex_debug 0
+
+}
+
+
+proc TRexTclClient::create_host {trex_host {server_port 8090} {trex_zmq_port 4500} {trex_debug 0}} {
+ puts "\n"
+
+ if (![string match "http://*" $trex_host]) {
+ append temp_host "http://" $trex_host
+ set trex_host $temp_host
+ #puts $trex_host
+ #puts stderr "The trex_host should contain http:// in it"
+ #exit 1
+ }
+
+ set ::TRexTclClient::trex_host $trex_host
+ if ($TRexTclClient::trex_debug) {
+ puts "The server port is : $server_port"
+ }
+ set TRexTclClient::server_port $server_port
+ set TRexTclClient::trex_zmq_port $trex_zmq_port
+ set TRexTclClient::trex_debug $trex_debug
+
+ set trex_host_url [TRexTclClient::appendHostName]
+ puts "Host attributes have been initialized."
+ puts "Establishing connection to server at $trex_host_url ..."
+ puts "\n"
+ return 1
+}
+
+proc TRexTclClient::appendHostName {} {
+ ################################################################
+ # Please check for the need of more args here.
+ #
+ # ** Do sanity checks **
+ ################################################################
+ if {[string is integer $TRexTclClient::server_port] && $TRexTclClient::server_port < 65535 } {
+ return [append newHost $TRexTclClient::trex_host ":" $TRexTclClient::server_port]
+ } else {
+ puts stderr "\[ERROR\]: Invalid server port. Valid server port range is 0 - 65535 !"
+ puts "\nExiting client initialization ... \n"
+ exit 1
+ }
+}
+
+# ** Change the args list to keyed lists or dictionaries **
+proc TRexTclClient::start_trex {f d trex_cmd_options {user "root"} {block_to_success True} {timeout 40}} {
+
+ ################################################################
+ # This proc sends out the RPC call to start TRex.
+ # 'f' should be a string
+ # 'd' should be an integer, at least 30
+ # 'trex_cmd_options' SHOULD be a dict
+ #
+ ################################################################
+
+ set trex_host_url [TRexTclClient::appendHostName]
+ if {$d<30} {
+ puts stderr "\[ERROR\]: The test duration should be at least 30 secs."
+ puts "\nExiting start_trex process ... \n"
+ exit 1
+ }
+
+ if ($TRexTclClient::trex_debug) {
+ puts "\[start_trex :: before call\] : The arguements are: "
+ puts "URL: $trex_host_url"
+ puts "f: $f"
+ puts "d: $d"
+ puts "trex_cmd_options: $trex_cmd_options"
+ puts "user: $user"
+ puts "block_to_success: $block_to_success"
+ puts "timeout: $timeout\n"
+ }
+
+ JSONRPC::create start_trex -proxy $trex_host_url -params {"trex_cmd_options" "object" "user" "string" "block_to_success" "string" "timeout" "int"}
+ puts "Connecting to Trex host at $trex_host_url ....."
+
+ dict append trex_cmd_options d $d
+ dict append trex_cmd_options f $f
+ if ($TRexTclClient::trex_debug) {
+ puts "\[start_trex :: before call\] : trex_cmd_options: $trex_cmd_options \n"
+ }
+ set ret_value [ start_trex $trex_cmd_options $user $block_to_success $timeout]
+
+ if ($TRexTclClient::trex_debug) {
+ puts "\[start_trex :: after call\] : The returned result: $ret_value"
+ }
+
+ if ($ret_value!=0) {
+ puts "Connection successful! \n"
+ puts "TRex started running successfully! \n"
+ puts "Trex Run Sequence number: $ret_value"
+ set TRexTclClient::current_seq_num $ret_value
+ return $ret_value
+ }
+
+ puts "\n \n"
+ return 0
+}
+
+proc TRexTclClient::stop_trex {seq} {
+ set trex_host_url [TRexTclClient::appendHostName]
+ JSONRPC::create stop_trex -proxy $trex_host_url -params {seq int}
+ set ret_value [ stop_trex $TRexTclClient::current_seq_num]
+ if ($ret_value) {
+ puts "TRex Run successfully stopped!"
+ } else {
+ puts "Unable to stop the server. Either provided sequence number is incorrect or you dont have sufficient permissions!"
+ }
+ puts "\n"
+ return $ret_value
+}
+
+proc TRexTclClient::get_trex_files_path {} {
+
+ set trex_host_url [TRexTclClient::appendHostName]
+ JSONRPC::create get_files_path -proxy $trex_host_url
+ set ret_value [get_files_path]
+ puts "The Trex file path is $ret_value"
+ puts "\n"
+ return $ret_value
+}
+
+proc TRexTclClient::get_running_status {} {
+
+ set trex_host_url [TRexTclClient::appendHostName]
+ JSONRPC::create get_running_status -proxy $trex_host_url
+ set ret_value [get_running_status]
+ if ($TRexTclClient::trex_debug) {
+ puts "\[get_running_status :: after call\] : The result is: $ret_value"
+ }
+ set current_status [dict get $TRexTclClient::TRex_Status [dict get $ret_value "state" ]]
+ set current_status_decr [dict get $ret_value "verbose"]
+ puts "Current TRex Status: $current_status"
+ puts "TRex Status Verbose: $current_status_decr"
+ puts "\n"
+ return $ret_value
+}
+
+proc TRexTclClient::get_result_obj { {copy_obj True} } {
+
+ set trex_host_url [TRexTclClient::appendHostName]
+ JSONRPC::create get_running_info -proxy $trex_host_url
+
+ set result_json_obj [get_running_info ]
+ set result_dict_obj [json::json2dict $result_json_obj]
+ if ($TRexTclClient::trex_debug) {
+ puts "\[get_result_obj :: after call\] : The result json is: "
+ puts "################################################################"
+ puts "$result_json_obj"
+ puts "################################################################"
+ puts "\[get_result_obj :: after call\] : The result dict is: "
+ puts "################################################################"
+ puts "$result_dict_obj"
+ puts "################################################################"
+ }
+ puts "\n"
+ return $result_dict_obj
+}
+
+proc TRexTclClient::is_reserved {} {
+ set trex_host_url [TRexTclClient::appendHostName]
+ JSONRPC::create is_reserved -proxy $trex_host_url
+ puts "\n"
+ set ret_value [is_reserved]
+ if ($TRexTclClient::trex_debug) {
+ puts "\[is_reserved :: after call\] : The result json is: $ret_value"
+ }
+ return $ret_value
+}
+
+proc TRexTclClient::get_trex_version {} {
+ set trex_host_url [TRexTclClient::appendHostName]
+ JSONRPC::create get_trex_version -proxy $trex_host_url
+ set ret_value [get_trex_version]
+ puts "\n"
+ return $ret_value
+}
+
+
+
+
+
+################################################################
+#
+#
+#
+################################################################
diff --git a/scripts/automation/trex_control_plane/examples/zmq_server_client.py b/scripts/automation/trex_control_plane/examples/zmq_server_client.py
new file mode 100755
index 00000000..15f37f1a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/zmq_server_client.py
@@ -0,0 +1,45 @@
+import sys
+import os
+python2_zmq_path = os.path.abspath(os.path.join(os.pardir,os.pardir,os.pardir,
+ 'external_libs','pyzmq-14.5.0','python2','fedora18','64bit'))
+sys.path.append(python2_zmq_path)
+import zmq
+import json
+from argparse import *
+
+parser = ArgumentParser(description=' Runs a Scapy Server Client example ')
+parser.add_argument('-p','--dest-scapy-port',type=int, default = 4507, dest='dest_scapy_port',
+ help='Select port to which this Scapy Server client will send to.\n default is 4507\n',action='store')
+parser.add_argument('-s','--server',type=str, default = 'localhost', dest='dest_scapy_ip',
+ help='Remote server IP address .\n default is localhost\n',action='store')
+
+args = parser.parse_args()
+
+dest_scapy_port = args.dest_scapy_port
+dest_scapy_ip = args.dest_scapy_ip
+
+context = zmq.Context()
+
+# Socket to talk to server
+print 'Connecting:'
+socket = context.socket(zmq.REQ)
+socket.connect("tcp://"+str(dest_scapy_ip)+":"+str(dest_scapy_port))
+try:
+ while True:
+ command = raw_input("enter RPC command [enter quit to exit]:\n")
+ if (command == 'quit'):
+ break
+ user_parameter = raw_input("input for command [should be left blank if not needed]:\n")
+ json_rpc_req = { "jsonrpc":"2.0","method": command ,"params":[user_parameter], "id":"1"}
+ request = json.dumps(json_rpc_req)
+ print("Sending request in json format %s" % request)
+ socket.send(request)
+
+ # Get the reply.
+ message = socket.recv()
+ print("Received reply %s [ %s ]" % (request, message))
+except KeyboardInterrupt:
+ print('Terminated By Ctrl+C')
+ socket.close()
+ context.destroy()
+
diff --git a/scripts/automation/trex_control_plane/server/CCustomLogger.py b/scripts/automation/trex_control_plane/server/CCustomLogger.py
new file mode 100755
index 00000000..551b0239
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/CCustomLogger.py
@@ -0,0 +1,106 @@
+
+import sys
+import os
+import logging
+
+def prepare_dir(log_path):
+ log_dir = os.path.dirname(log_path)
+ if not os.path.exists(log_dir):
+ os.makedirs(log_dir)
+
+def setup_custom_logger(name, log_path = None):
+ # first make sure the log path is available
+ if log_path:
+ prepare_dir(log_path)
+ logging.basicConfig(level = logging.INFO,
+ format = '%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s',
+ datefmt = '%m-%d %H:%M')
+# filename= log_path,
+# filemode= 'w')
+#
+# # define a Handler which writes INFO messages or higher to the sys.stderr
+# consoleLogger = logging.StreamHandler()
+# consoleLogger.setLevel(logging.ERROR)
+# # set a format which is simpler for console use
+# formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
+# # tell the handler to use this format
+# consoleLogger.setFormatter(formatter)
+#
+# # add the handler to the logger
+# logging.getLogger(name).addHandler(consoleLogger)
+
+def setup_daemon_logger (name, log_path = None):
+    # first make sure the path is available
+    if log_path:
+        prepare_dir(log_path)
+        try:
+            os.unlink(log_path)   # remove a stale log file if one exists
+        except OSError:
+            pass
+ logging.basicConfig(level = logging.INFO,
+ format = '%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s',
+ datefmt = '%m-%d %H:%M',
+ filename= log_path,
+ filemode= 'w')
+
+class CustomLogger(object):
+
+ def __init__(self, log_filename):
+ # Store the original stdout and stderr
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ self.stdout_fd = os.dup(sys.stdout.fileno())
+ self.devnull = os.open('/dev/null', os.O_WRONLY)
+ self.log_file = open(log_filename, 'w')
+ self.silenced = False
+ self.pending_log_file_prints = 0
+
+ # silence all prints from stdout
+ def silence(self):
+ os.dup2(self.devnull, sys.stdout.fileno())
+ self.silenced = True
+
+ # restore stdout status
+ def restore(self):
+ sys.stdout.flush()
+ sys.stderr.flush()
+ # Restore normal stdout
+ os.dup2(self.stdout_fd, sys.stdout.fileno())
+ self.silenced = False
+
+    # print a message to the log (both stdout and the log file)
+ def log(self, text, force = False, newline = True):
+ self.log_file.write((text + "\n") if newline else text)
+ self.pending_log_file_prints += 1
+
+ if (self.pending_log_file_prints >= 10):
+ self.log_file.flush()
+ self.pending_log_file_prints = 0
+
+ self.console(text, force, newline)
+
+ # print a message to the console alone
+ def console(self, text, force = False, newline = True):
+ _text = (text + "\n") if newline else text
+ # if we are silenced and not forced - go home
+ if self.silenced and not force:
+ return
+
+        if self.silenced:
+            os.write(self.stdout_fd, _text.encode())  # os.write expects bytes under Python 3
+ else:
+ sys.stdout.write(_text)
+
+ sys.stdout.flush()
+
+ # flush
+ def flush(self):
+ sys.stdout.flush()
+ self.log_file.flush()
+
+    def __exit__(self, type, value, traceback):
+        sys.stdout.flush()
+        self.log_file.flush()
+        os.close(self.devnull)
+        self.log_file.close()  # a file object is closed with close(), not os.close()
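+
+# Illustrative usage (a sketch; the log file name is hypothetical):
+#   logger = CustomLogger('/tmp/example.log')
+#   logger.log('goes to both console and file')
+#   logger.silence()
+#   logger.log('file only; force=True would still reach the console')
+#   logger.restore()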
diff --git a/scripts/automation/trex_control_plane/server/outer_packages.py b/scripts/automation/trex_control_plane/server/outer_packages.py
new file mode 100755
index 00000000..f49a9925
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/outer_packages.py
@@ -0,0 +1,38 @@
+#!/router/bin/python
+
+import sys
+import os
+python_ver = 'python%s' % sys.version_info.major
+
+CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory
+PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, os.pardir, os.pardir, 'external_libs'))
+PATH_TO_PLATFORM_LIB = os.path.abspath(os.path.join(PATH_TO_PYTHON_LIB, 'pyzmq-14.5.0', python_ver , 'fedora18', '64bit'))
+
+SERVER_MODULES = ['enum34-1.0.4',
+ 'zmq',
+ 'jsonrpclib-pelix-0.2.5',
+ 'python-daemon-2.0.5',
+ 'lockfile-0.10.2',
+ 'termstyle'
+ ]
+
+
+def import_server_modules():
+ # must be in a higher priority
+ sys.path.insert(0, PATH_TO_PYTHON_LIB)
+ sys.path.insert(0, PATH_TO_PLATFORM_LIB)
+ sys.path.append(ROOT_PATH)
+ import_module_list(SERVER_MODULES)
+
+
+def import_module_list(modules_list):
+    assert(isinstance(modules_list, list))
+    for p in modules_list:
+        full_path = os.path.join(PATH_TO_PYTHON_LIB, p)
+        sys.path.insert(1, full_path)
+
+
+import_server_modules()
+
diff --git a/scripts/automation/trex_control_plane/server/singleton_daemon.py b/scripts/automation/trex_control_plane/server/singleton_daemon.py
new file mode 100755
index 00000000..507967aa
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/singleton_daemon.py
@@ -0,0 +1,176 @@
+import errno
+import os
+import shlex
+import socket
+import signal
+import tempfile
+import types
+from subprocess import Popen
+from time import sleep
+import outer_packages
+import jsonrpclib
+
+# Uses Unix sockets to determine whether a process is running.
+# (assumes the daemons in use register a proper socket)
+# All daemons should accept -p as the listening TCP port and expose a check_connectivity RPC method.
+class SingletonDaemon(object):
+
+    # run_cmd can be a function that runs the daemon, or a string to run via subprocess
+ def __init__(self, name, tag, port, run_cmd, dir = None):
+ self.name = name
+ self.tag = tag
+ self.port = port
+ self.run_cmd = run_cmd
+ self.dir = dir
+ self.stop = self.kill # alias
+ if ' ' in tag:
+ raise Exception('Error: tag should not include spaces')
+ if dir and not os.path.exists(dir):
+ print('Warning: path given for %s: %s, does not exist' % (name, dir))
+
+
+ # returns True if daemon is running
+ def is_running(self):
+ try:
+            lock_socket = register_socket(self.tag) # this check is ~200000x faster and more reliable than checking via 'netstat', 'ps' etc.
+ lock_socket.shutdown(socket.SHUT_RDWR)
+ lock_socket.close()
+ except socket.error: # Unix socket in use
+ return True
+ sleep(0.5)
+        # Unix socket is not used, but this might be an old daemon version that does not use the socket
+ return bool(self.get_pid_by_listening_port())
+
+
+ # get pid of running daemon by registered Unix socket (most robust way)
+ def get_pid_by_unix_socket(self):
+ ret_code, stdout, stderr = run_command('netstat -px')
+ if ret_code:
+ raise Exception('Error running netstat: %s' % [ret_code, stdout, stderr])
+ for line in stdout.splitlines():
+ line_arr = line.strip().split()
+ if len(line_arr) == 8 and line_arr[0] == 'unix' and line_arr[4] == 'DGRAM' and line_arr[7] == '@%s' % self.tag:
+ return int(line_arr[6].split('/', 1)[0])
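+    # A matching 'netstat -px' line splits into the 8 fields checked above, roughly
+    # (the pid and program name here are hypothetical):
+    #   unix  2  [ ]  DGRAM  12345  1234/python  @trex_daemon_server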
+
+
+ # get pid of running daemon by listening tcp port (for backward compatibility)
+ def get_pid_by_listening_port(self):
+ ret_code, stdout, stderr = run_command('netstat -tlnp')
+ if ret_code:
+ raise Exception('Error running netstat: %s' % [ret_code, stdout, stderr])
+ for line in stdout.splitlines():
+ line_arr = line.strip().split()
+ if len(line_arr) == 7 and line_arr[3] == '0.0.0.0:%s' % self.port:
+ if '/' not in line_arr[6]:
+                    raise Exception('Expecting pid/program name in netstat line using port %s, got: %s' % (self.port, line))
+ return int(line_arr[6].split('/')[0])
+
+
+ # get PID of running process, None if not found
+ def get_pid(self):
+ pid = self.get_pid_by_unix_socket()
+ if pid:
+ return pid
+ pid = self.get_pid_by_listening_port()
+ if pid:
+ return pid
+
+ def kill_by_signal(self, pid, signal_name, timeout):
+ os.kill(pid, signal_name)
+ poll_rate = 0.1
+ for i in range(int(timeout / poll_rate)):
+ if not self.is_running():
+ return True
+ sleep(poll_rate)
+
+ # kill daemon, with verification
+ def kill(self, timeout = 15):
+ pid = self.get_pid()
+ if not pid:
+ raise Exception('%s is not running' % self.name)
+ # try Ctrl+C, usual kill, kill -9
+ for signal_name in [signal.SIGINT, signal.SIGTERM, signal.SIGKILL]:
+ if self.kill_by_signal(pid, signal_name, timeout):
+ return True
+ raise Exception('Could not kill %s, even with -9' % self.name)
+
+ # try connection as RPC client, return True upon success, False if fail
+ def check_connectivity(self, timeout = 15):
+ daemon = jsonrpclib.Server('http://127.0.0.1:%s/' % self.port)
+ poll_rate = 0.1
+ for i in range(int(timeout/poll_rate)):
+ try:
+ daemon.check_connectivity()
+ return True
+ except socket.error: # daemon is not up yet
+ sleep(poll_rate)
+ return False
+
+ # start daemon
+ # returns True if success, False if already running
+ def start(self, timeout = 20):
+ if self.is_running():
+ raise Exception('%s is already running' % self.name)
+ if not self.run_cmd:
+ raise Exception('No starting command registered for %s' % self.name)
+ if type(self.run_cmd) is types.FunctionType:
+ self.run_cmd()
+ return
+ with tempfile.TemporaryFile() as stdout_file, tempfile.TemporaryFile() as stderr_file:
+ proc = Popen(shlex.split('%s -p %s' % (self.run_cmd, self.port)), cwd = self.dir, close_fds = True,
+ stdout = stdout_file, stderr = stderr_file)
+ if timeout > 0:
+ poll_rate = 0.1
+ for i in range(int(timeout/poll_rate)):
+ if self.is_running():
+ break
+ sleep(poll_rate)
+ if bool(proc.poll()): # process ended with error
+ stdout_file.seek(0)
+ stderr_file.seek(0)
+                        raise Exception('Run of %s ended unexpectedly: %s' % (self.name, [proc.returncode, stdout_file.read().decode(errors = 'replace'), stderr_file.read().decode(errors = 'replace')]))
+ elif proc.poll() == 0: # process runs other process, and ended
+ break
+ if self.is_running():
+ if self.check_connectivity():
+ return True
+ raise Exception('Daemon process is running, but no connectivity')
+ raise Exception('%s failed to run.' % self.name)
+
+ # restart the daemon
+ def restart(self, timeout = 15):
+ if self.is_running():
+ self.kill(timeout)
+ sleep(0.5)
+ return self.start(timeout)
+
+
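+# Illustrative usage of SingletonDaemon (a sketch; the name, tag, port and
+# command are hypothetical):
+#   daemon = SingletonDaemon('Example daemon', 'example_daemon', 8090, './example-daemon')
+#   daemon.start()             # spawns the process and verifies RPC connectivity
+#   print(daemon.get_pid())
+#   daemon.kill()              # escalates SIGINT -> SIGTERM -> SIGKILL
+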
+# provides a unique way to mark the running process; should be called inside the daemon
+def register_socket(tag):
+ global lock_socket # Without this our lock gets garbage collected
+ lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+ try:
+ lock_socket.bind('\0%s' % tag)
+ return lock_socket
+ except socket.error:
+ raise socket.error('Error: process with tag %s is already running.' % tag)
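+
+# Note: the leading '\0' binds the socket in the Linux abstract namespace, so no
+# filesystem entry is created and the lock vanishes when the process exits.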
+
+# runs a command
+def run_command(command, timeout = 15, cwd = None):
+    # pipes might get stuck even with a timeout, so temporary files are used instead
+ with tempfile.TemporaryFile() as stdout_file, tempfile.TemporaryFile() as stderr_file:
+ proc = Popen(shlex.split(command), stdout = stdout_file, stderr = stderr_file, cwd = cwd, close_fds = True)
+ if timeout > 0:
+ poll_rate = 0.1
+ for i in range(int(timeout/poll_rate)):
+ sleep(poll_rate)
+ if proc.poll() is not None: # process stopped
+ break
+ if proc.poll() is None:
+ proc.kill() # timeout
+ return (errno.ETIME, '', 'Timeout on running: %s' % command)
+ else:
+ proc.wait()
+ stdout_file.seek(0)
+ stderr_file.seek(0)
+ return (proc.returncode, stdout_file.read().decode(errors = 'replace'), stderr_file.read().decode(errors = 'replace'))
diff --git a/scripts/automation/trex_control_plane/server/trex_launch_thread.py b/scripts/automation/trex_control_plane/server/trex_launch_thread.py
new file mode 100755
index 00000000..f4ee0d6b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/trex_launch_thread.py
@@ -0,0 +1,96 @@
+#!/router/bin/python
+
+
+import os
+import signal
+import socket
+from common.trex_status_e import TRexStatus
+import subprocess
+import shlex
+import time
+import threading
+import logging
+import CCustomLogger
+
+# setup the logger
+CCustomLogger.setup_custom_logger('TRexServer')
+logger = logging.getLogger('TRexServer')
+
+
+class AsynchronousTRexSession(threading.Thread):
+ def __init__(self, trexObj , trex_launch_path, trex_cmd_data):
+ super(AsynchronousTRexSession, self).__init__()
+ self.stoprequest = threading.Event()
+ self.terminateFlag = False
+ self.launch_path = trex_launch_path
+ self.cmd, self.export_path, self.duration = trex_cmd_data
+ self.session = None
+ self.trexObj = trexObj
+ self.time_stamps = {'start' : None, 'run_time' : None}
+ self.trexObj.zmq_dump = {}
+
+ def run (self):
+ try:
+ with open(self.export_path, 'w') as output_file:
+ self.time_stamps['start'] = self.time_stamps['run_time'] = time.time()
+ self.session = subprocess.Popen(shlex.split(self.cmd), cwd = self.launch_path, stdout = output_file,
+ stderr = subprocess.STDOUT, preexec_fn=os.setsid, close_fds = True)
+ logger.info("TRex session initialized successfully, Parent process pid is {pid}.".format( pid = self.session.pid ))
+ while self.session.poll() is None: # subprocess is NOT finished
+ time.sleep(0.5)
+ if self.stoprequest.is_set():
+ logger.debug("Abort request received by handling thread. Terminating TRex session." )
+ os.killpg(self.session.pid, signal.SIGUSR1)
+ self.trexObj.set_status(TRexStatus.Idle)
+ self.trexObj.set_verbose_status("TRex is Idle")
+ break
+ except Exception as e:
+ logger.error(e)
+
+ self.time_stamps['run_time'] = time.time() - self.time_stamps['start']
+
+ try:
+ if self.time_stamps['run_time'] < 5:
+ logger.error("TRex run failed due to wrong input parameters, or due to readability issues.")
+ self.trexObj.set_verbose_status("TRex run failed due to wrong input parameters, or due to readability issues.\n\nTRex command: {cmd}\n\nRun output:\n{output}".format(
+ cmd = self.cmd, output = self.load_trex_output(self.export_path)))
+ self.trexObj.errcode = -11
+ elif (self.session.returncode is not None and self.session.returncode != 0) or ( (self.time_stamps['run_time'] < self.duration) and (not self.stoprequest.is_set()) ):
+ if (self.session.returncode is not None and self.session.returncode != 0):
+ logger.debug("Failed TRex run due to session return code ({ret_code})".format( ret_code = self.session.returncode ) )
+ elif ( (self.time_stamps['run_time'] < self.duration) and not self.stoprequest.is_set()):
+ logger.debug("Failed TRex run due to running time ({runtime}) combined with no-stopping request.".format( runtime = self.time_stamps['run_time'] ) )
+
+ logger.warning("TRex run was terminated unexpectedly by outer process or by the hosting OS")
+ self.trexObj.set_verbose_status("TRex run was terminated unexpectedly by outer process or by the hosting OS.\n\nRun output:\n{output}".format(
+ output = self.load_trex_output(self.export_path)))
+ self.trexObj.errcode = -15
+ else:
+ logger.info("TRex run session finished.")
+ self.trexObj.set_verbose_status('TRex finished.')
+ self.trexObj.errcode = None
+
+ finally:
+ self.trexObj.set_status(TRexStatus.Idle)
+ logger.info("TRex running state changed to 'Idle'.")
+ self.trexObj.expect_trex.clear()
+ logger.debug("Finished handling a single run of TRex.")
+ self.trexObj.zmq_dump = None
+
+ def join (self, timeout = None):
+ self.stoprequest.set()
+ super(AsynchronousTRexSession, self).join(timeout)
+
+ def load_trex_output (self, export_path):
+ output = None
+ with open(export_path, 'r') as f:
+ output = f.read()
+ return output
+
+
+
+
+
+if __name__ == "__main__":
+ pass
+
diff --git a/scripts/automation/trex_control_plane/server/trex_server.py b/scripts/automation/trex_control_plane/server/trex_server.py
new file mode 100755
index 00000000..2b718a69
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/trex_server.py
@@ -0,0 +1,640 @@
+#!/usr/bin/python
+
+
+import os
+import stat
+import sys
+import time
+import outer_packages
+import zmq
+from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+import jsonrpclib
+from jsonrpclib import Fault
+import binascii
+import socket
+import errno
+import signal
+from common.trex_status_e import TRexStatus
+from common.trex_exceptions import *
+import subprocess
+from random import randrange
+import logging
+import threading
+import CCustomLogger
+from trex_launch_thread import AsynchronousTRexSession
+from zmq_monitor_thread import ZmqMonitorSession
+from argparse import ArgumentParser, RawTextHelpFormatter
+from json import JSONEncoder
+import re
+import shlex
+import tempfile
+
+try:
+ from .singleton_daemon import register_socket, run_command
+except:
+ from singleton_daemon import register_socket, run_command
+
+
+# setup the logger
+CCustomLogger.setup_custom_logger('TRexServer')
+logger = logging.getLogger('TRexServer')
+
+class CTRexServer(object):
+ """This class defines the server side of the RESTfull interaction with TRex"""
+ DEFAULT_TREX_PATH = '/auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.55/' #'/auto/proj-pcube-b/apps/PL-b/tools/nightly/trex_latest'
+ TREX_START_CMD = './t-rex-64'
+ DEFAULT_FILE_PATH = '/tmp/trex_files/'
+
+ def __init__(self, trex_path, trex_files_path, trex_host='0.0.0.0', trex_daemon_port=8090, trex_zmq_port=4500, trex_nice=-19):
+ """
+ Parameters
+ ----------
+ trex_host : str
+ a string of the TRex ip address or hostname.
+ default value: machine hostname as fetched from socket.gethostname()
+ trex_daemon_port : int
+ the port number on which the trex-daemon server can be reached
+ default value: 8090
+ trex_zmq_port : int
+ the port number on which trex's zmq module will interact with daemon server
+ default value: 4500
+ nice: int
+ priority of the TRex process
+
+        Instantiates a TRex server object that listens for client requests on the daemon port.
+ """
+ self.TREX_PATH = os.path.abspath(os.path.dirname(trex_path+'/'))
+ self.trex_files_path = os.path.abspath(os.path.dirname(trex_files_path+'/'))
+ self.__check_trex_path_validity()
+ self.__check_files_path_validity()
+ self.trex = CTRex()
+ self.trex_version = None
+ self.trex_host = trex_host
+ self.trex_daemon_port = trex_daemon_port
+ self.trex_zmq_port = trex_zmq_port
+ self.trex_server_path = "http://{hostname}:{port}".format( hostname = trex_host, port = trex_daemon_port )
+ self.start_lock = threading.Lock()
+ self.__reservation = None
+        self.zmq_monitor = ZmqMonitorSession(self.trex, self.trex_zmq_port) # initiate a single ZMQ monitor thread for server usage
+ self.trex_nice = int(trex_nice)
+ if self.trex_nice < -20 or self.trex_nice > 19:
+ err = "Parameter 'nice' should be integer in range [-20, 19]"
+ print(err)
+ logger.error(err)
+ raise Exception(err)
+
+ def add(self, x, y):
+ logger.info("Processing add function. Parameters are: {0}, {1} ".format( x, y ))
+ return x + y
+ # return Fault(-10, "")
+
+ def push_file (self, filename, bin_data):
+ logger.info("Processing push_file() command.")
+ try:
+ filepath = os.path.join(self.trex_files_path, os.path.basename(filename))
+ with open(filepath, 'wb') as f:
+ f.write(binascii.a2b_base64(bin_data))
+ logger.info("push_file() command finished. File is saved as %s" % filepath)
+ return True
+ except IOError as inst:
+ logger.error("push_file method failed. " + str(inst))
+ return False
+
+ def connectivity_check (self):
+ logger.info("Processing connectivity_check function.")
+ return True
+
+ def start(self):
+ """This method fires up the daemon server based on initialized parameters of the class"""
+ # initialize the server instance with given resources
+ register_socket('trex_daemon_server')
+ try:
+ print("Firing up TRex REST daemon @ port {trex_port} ...\n".format( trex_port = self.trex_daemon_port ))
+ logger.info("Firing up TRex REST daemon @ port {trex_port} ...".format( trex_port = self.trex_daemon_port ))
+ logger.info("current working dir is: {0}".format(self.TREX_PATH) )
+ logger.info("current files dir is : {0}".format(self.trex_files_path) )
+ logger.debug("Starting TRex server. Registering methods to process.")
+ logger.info(self.get_trex_version(base64 = False))
+ self.server = SimpleJSONRPCServer( (self.trex_host, self.trex_daemon_port) )
+ except socket.error as e:
+ if e.errno == errno.EADDRINUSE:
+ logger.error("TRex server requested address already in use. Aborting server launching.")
+ print("TRex server requested address already in use. Aborting server launching.")
+ raise socket.error(errno.EADDRINUSE, "TRex daemon requested address already in use. "
+ "Server launch aborted. Please make sure no other process is "
+ "using the desired server properties.")
+ elif isinstance(e, socket.gaierror) and e.errno == -3:
+ # handling Temporary failure in name resolution exception
+ raise socket.gaierror(-3, "Temporary failure in name resolution.\n"
+ "Make sure provided hostname has DNS resolving.")
+ else:
+ raise
+
+ # set further functionality and peripherals to server instance
+ self.server.register_function(self.add)
+ self.server.register_function(self.cancel_reservation)
+ self.server.register_function(self.connectivity_check)
+ self.server.register_function(self.connectivity_check, 'check_connectivity') # alias
+ self.server.register_function(self.force_trex_kill)
+ self.server.register_function(self.get_file)
+ self.server.register_function(self.get_files_list)
+ self.server.register_function(self.get_files_path)
+ self.server.register_function(self.get_running_info)
+ self.server.register_function(self.get_running_status)
+ self.server.register_function(self.get_trex_cmds)
+ self.server.register_function(self.get_trex_config)
+ self.server.register_function(self.get_trex_daemon_log)
+ self.server.register_function(self.get_trex_log)
+ self.server.register_function(self.get_trex_version)
+ self.server.register_function(self.is_reserved)
+ self.server.register_function(self.is_running)
+ self.server.register_function(self.kill_all_trexes)
+ self.server.register_function(self.push_file)
+ self.server.register_function(self.reserve_trex)
+ self.server.register_function(self.start_trex)
+ self.server.register_function(self.stop_trex)
+ self.server.register_function(self.wait_until_kickoff_finish)
+ signal.signal(signal.SIGTSTP, self.stop_handler)
+ signal.signal(signal.SIGTERM, self.stop_handler)
+ try:
+ self.zmq_monitor.start()
+ self.server.serve_forever()
+ except KeyboardInterrupt:
+ logger.info("Daemon shutdown request detected." )
+ finally:
+ self.zmq_monitor.join() # close ZMQ monitor thread resources
+ self.server.shutdown()
+ #self.server.server_close()
+
+
+ # get files from Trex server and return their content (mainly for logs)
+ @staticmethod
+ def _pull_file(filepath):
+ try:
+ with open(filepath, 'rb') as f:
+ file_content = f.read()
+ return binascii.b2a_base64(file_content).decode(errors='replace')
+ except Exception as e:
+ err_str = "Can't get requested file %s: %s" % (filepath, e)
+ logger.error(err_str)
+ return Fault(-33, err_str)
+
+ # returns True if given path is under TRex package or under /tmp/trex_files
+ def _check_path_under_TRex_or_temp(self, path):
+ if not os.path.relpath(path, self.trex_files_path).startswith(os.pardir):
+ return True
+ if not os.path.relpath(path, self.TREX_PATH).startswith(os.pardir):
+ return True
+ return False
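+    # For example (hypothetical paths): relpath('/tmp/trex_files/a.yaml',
+    # '/tmp/trex_files') == 'a.yaml' (allowed), while relpath('/etc/passwd',
+    # '/tmp/trex_files') starts with '..' (rejected).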
+
+ # gets the file content encoded base64 either from /tmp/trex_files or TRex server dir
+ def get_file(self, filepath):
+ try:
+ logger.info("Processing get_file() command.")
+ if not self._check_path_under_TRex_or_temp(filepath):
+ raise Exception('Given path should be under current TRex package or /tmp/trex_files')
+ return self._pull_file(filepath)
+ except Exception as e:
+ err_str = "Can't get requested file %s: %s" % (filepath, e)
+ logger.error(err_str)
+ return Fault(-33, err_str)
+
+ # get tuple (dirs, files) with directories and files lists from given path (limited under TRex package or /tmp/trex_files)
+ def get_files_list(self, path):
+ try:
+ logger.info("Processing get_files_list() command, given path: %s" % path)
+ if not self._check_path_under_TRex_or_temp(path):
+ raise Exception('Given path should be under current TRex package or /tmp/trex_files')
+            return next(os.walk(path))[1:3]  # the next() built-in works under both Python 2 and 3
+ except Exception as e:
+ err_str = "Error processing get_files_list(): %s" % e
+ logger.error(err_str)
+ return Fault(-33, err_str)
+
+ # get Trex log /tmp/trex.txt
+ def get_trex_log(self):
+ logger.info("Processing get_trex_log() command.")
+ return self._pull_file('/tmp/trex.txt')
+
+ # get /etc/trex_cfg.yaml
+ def get_trex_config(self):
+ logger.info("Processing get_trex_config() command.")
+ return self._pull_file('/etc/trex_cfg.yaml')
+
+ # get daemon log /var/log/trex/trex_daemon_server.log
+ def get_trex_daemon_log (self):
+ logger.info("Processing get_trex_daemon_log() command.")
+ return self._pull_file('/var/log/trex/trex_daemon_server.log')
+
+ # get Trex version from ./t-rex-64 --help (last lines starting with "Version : ...")
+ def get_trex_version (self, base64 = True):
+ try:
+ logger.info("Processing get_trex_version() command.")
+ if not self.trex_version:
+ ret_code, stdout, stderr = run_command('./t-rex-64 --help', cwd = self.TREX_PATH)
+                search_result = re.search(r'\n\s*(Version\s*:.+)', stdout, re.DOTALL)
+ if not search_result:
+ raise Exception('Could not determine version from ./t-rex-64 --help')
+ self.trex_version = binascii.b2a_base64(search_result.group(1).encode(errors='replace'))
+ if base64:
+ return self.trex_version.decode(errors='replace')
+ else:
+ return binascii.a2b_base64(self.trex_version).decode(errors='replace')
+ except Exception as e:
+ err_str = "Can't get trex version, error: %s" % e
+ logger.error(err_str)
+ return Fault(-33, err_str)
+
+ def stop_handler (self, *args, **kwargs):
+ logger.info("Daemon STOP request detected.")
+ if self.is_running():
+ # in case TRex process is currently running, stop it before terminating server process
+ self.stop_trex(self.trex.get_seq())
+ sys.exit(0)
+
+ def assert_zmq_ok(self):
+ if self.trex.zmq_error:
+ raise Exception('ZMQ thread got error: %s' % self.trex.zmq_error)
+ if not self.zmq_monitor.is_alive():
+ if self.trex.get_status() != TRexStatus.Idle:
+ self.force_trex_kill()
+ raise Exception('ZMQ thread is dead.')
+
+ def is_running (self):
+ run_status = self.trex.get_status()
+ logger.info("Processing is_running() command. Running status is: {stat}".format(stat = run_status) )
+ if run_status==TRexStatus.Running:
+ return True
+ else:
+ return False
+
+ def is_reserved (self):
+ logger.info("Processing is_reserved() command.")
+ return bool(self.__reservation)
+
+ def get_running_status (self):
+ run_status = self.trex.get_status()
+ logger.info("Processing get_running_status() command. Running status is: {stat}".format(stat = run_status) )
+ return { 'state' : run_status.value, 'verbose' : self.trex.get_verbose_status() }
+
+ def get_files_path (self):
+ logger.info("Processing get_files_path() command." )
+ return self.trex_files_path
+
+ def reserve_trex (self, user):
+ if user == "":
+ logger.info("TRex reservation cannot apply to empty string user. Request denied.")
+ return Fault(-33, "TRex reservation cannot apply to empty string user. Request denied.")
+
+ with self.start_lock:
+ logger.info("Processing reserve_trex() command.")
+ if self.is_reserved():
+ if user == self.__reservation['user']:
+                    # return True if the same user is asking and already holds the reservation
+                    logger.info("The same user is asking and already holds the reservation. Re-reserving TRex.")
+ return True
+
+ logger.info("TRex is already reserved to another user ({res_user}), cannot reserve to another user.".format( res_user = self.__reservation['user'] ))
+ return Fault(-33, "TRex is already reserved to another user ({res_user}). Please make sure TRex is free before reserving it.".format(
+ res_user = self.__reservation['user']) ) # raise at client TRexInUseError
+ elif self.trex.get_status() != TRexStatus.Idle:
+ logger.info("TRex is currently running, cannot reserve TRex unless in Idle state.")
+ return Fault(-13, 'TRex is currently running, cannot reserve TRex unless in Idle state. Please try again when TRex run finished.') # raise at client TRexInUseError
+ else:
+ logger.info("TRex is now reserved for user ({res_user}).".format( res_user = user ))
+ self.__reservation = {'user' : user, 'since' : time.ctime()}
+ logger.debug("Reservation details: "+ str(self.__reservation))
+ return True
+
+ def cancel_reservation (self, user):
+ with self.start_lock:
+ logger.info("Processing cancel_reservation() command.")
+ if self.is_reserved():
+ if self.__reservation['user'] == user:
+ logger.info("TRex reservation to {res_user} has been canceled successfully.".format(res_user = self.__reservation['user']))
+ self.__reservation = None
+ return True
+ else:
+ logger.warning("TRex is reserved to different user than the provided one. Reservation wasn't canceled.")
+ return Fault(-33, "Cancel reservation request is available to the user that holds the reservation. Request denied") # raise at client TRexRequestDenied
+
+ else:
+ logger.info("TRex is not reserved to anyone. No need to cancel anything")
+ assert(self.__reservation is None)
+ return False
+
+ def start_trex(self, trex_cmd_options, user, block_to_success = True, timeout = 40, stateless = False, debug_image = False, trex_args = ''):
+ self.assert_zmq_ok()
+ with self.start_lock:
+ logger.info("Processing start_trex() command.")
+ if self.is_reserved():
+ # check if this is not the user to which TRex is reserved
+ if self.__reservation['user'] != user:
+ logger.info("TRex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user']))
+ return Fault(-33, "TRex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user'])) # raise at client TRexRequestDenied
+ elif self.trex.get_status() != TRexStatus.Idle:
+ logger.info("TRex is already taken, cannot create another run until done.")
+ return Fault(-13, '') # raise at client TRexInUseError
+
+ try:
+ server_cmd_data = self.generate_run_cmd(stateless = stateless, debug_image = debug_image, trex_args = trex_args, **trex_cmd_options)
+ self.zmq_monitor.first_dump = True
+ self.trex.start_trex(self.TREX_PATH, server_cmd_data)
+ logger.info("TRex session has been successfully initiated.")
+ if block_to_success:
+ # delay server response until TRex is at 'Running' state.
+ start_time = time.time()
+ trex_state = None
+ while (time.time() - start_time) < timeout :
+ trex_state = self.trex.get_status()
+ if trex_state != TRexStatus.Starting:
+ break
+ else:
+ time.sleep(0.5)
+ self.assert_zmq_ok()
+
+ # check for TRex run started normally
+ if trex_state == TRexStatus.Starting: # reached timeout
+ logger.warning("TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.")
+ return Fault(-12, 'TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.') # raise at client TRexWarning
+ elif trex_state == TRexStatus.Idle:
+ return Fault(-11, self.trex.get_verbose_status()) # raise at client TRexError
+
+ # reach here only if TRex is at 'Running' state
+ self.trex.gen_seq()
+ return self.trex.get_seq() # return unique seq number to client
+
+ except TypeError as e:
+ logger.error("TRex command generation failed, probably because either -f (traffic generation .yaml file) and -c (num of cores) was not specified correctly.\nReceived params: {params}".format( params = trex_cmd_options) )
+ raise TypeError('TRex -f (traffic generation .yaml file) and -c (num of cores) must be specified. %s' % e)
+
+
+ def stop_trex(self, seq):
+ logger.info("Processing stop_trex() command.")
+        if self.trex.get_seq() == seq:
+ logger.debug("Abort request legit since seq# match")
+ return self.trex.stop_trex()
+ else:
+            if self.trex.get_status() != TRexStatus.Idle:
+                logger.warning("Abort request is only allowed to the process that initiated the run. Request denied.")
+
+                return Fault(-33, 'Abort request is only allowed to the process that initiated the run. Request denied.') # raise at client TRexRequestDenied
+ else:
+ return False
+
+ def force_trex_kill (self):
+ logger.info("Processing force_trex_kill() command. --> Killing TRex session indiscriminately.")
+ return self.trex.stop_trex()
+
+ # returns list of tuples (pid, command line) of running TRex(es)
+ def get_trex_cmds(self):
+ logger.info('Processing get_trex_cmds() command.')
+ ret_code, stdout, stderr = run_command('ps -u root --format pid,comm,cmd')
+ if ret_code:
+ raise Exception('Failed to determine running processes, stderr: %s' % stderr)
+ trex_cmds_list = []
+ for line in stdout.splitlines():
+ pid, proc_name, full_cmd = line.strip().split(' ', 2)
+ pid = pid.strip()
+ full_cmd = full_cmd.strip()
+ if proc_name.find('t-rex-64') >= 0:
+ trex_cmds_list.append((pid, full_cmd))
+ return trex_cmds_list
+
+
+ # Silently tries to kill TRexes with given signal.
+ # Responsibility of client to verify with get_trex_cmds.
+ def kill_all_trexes(self, signal_name):
+ logger.info('Processing kill_all_trexes() command.')
+ trex_cmds_list = self.get_trex_cmds()
+ for pid, cmd in trex_cmds_list:
+ logger.info('Killing with signal %s process %s %s' % (signal_name, pid, cmd))
+ os.kill(int(pid), signal_name)
+
+
+ def wait_until_kickoff_finish (self, timeout = 40):
+ # block until TRex exits Starting state
+ logger.info("Processing wait_until_kickoff_finish() command.")
+ trex_state = None
+ start_time = time.time()
+ while (time.time() - start_time) < timeout :
+ self.assert_zmq_ok()
+ trex_state = self.trex.get_status()
+ if trex_state != TRexStatus.Starting:
+ return
+            time.sleep(0.1)
+ return Fault(-12, 'TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.') # raise at client TRexWarning
+
+ def get_running_info (self):
+ self.assert_zmq_ok()
+ logger.info("Processing get_running_info() command.")
+ return self.trex.get_running_info()
+
+
+ def generate_run_cmd (self, iom = 0, export_path="/tmp/trex.txt", stateless = False, debug_image = False, trex_args = '', **kwargs):
+ """ generate_run_cmd(self, iom, export_path, kwargs) -> str
+
+ Generates a custom running command for the kick-off of the TRex traffic generator.
+ Returns a tuple of command (string) and export path (string) to be issued on the trex server
+
+ Parameters
+ ----------
+ iom: int
+ 0 = don't print stats screen to log, 1 = print stats (can generate huge logs)
+ stateless: boolean
+ True = run as stateless, False = require -f and -d arguments
+ kwargs: dictionary
+ Dictionary of parameters for trex. For example: (c=1, nc=True, l_pkt_mode=3).
+ Notice that when sending command line parameters that has -, you need to replace it with _.
+ for example, to have on command line "--l-pkt-mode 3", you need to send l_pkt_mode=3
+ export_path : str
+ Full system path to which the results of the trex-run will be logged.
+
+ """
+ if 'results_file_path' in kwargs:
+ export_path = kwargs['results_file_path']
+ del kwargs['results_file_path']
+ if stateless:
+ kwargs['i'] = True
+
+ # adding additional options to the command
+ trex_cmd_options = ''
+ for key, value in kwargs.items():
+ tmp_key = key.replace('_','-').lstrip('-')
+ dash = ' -' if (len(key)==1) else ' --'
+ if value is True:
+ trex_cmd_options += (dash + tmp_key)
+ elif value is False:
+ continue
+ else:
+ trex_cmd_options += (dash + '{k} {val}'.format( k = tmp_key, val = value ))
+ if trex_args:
+ trex_cmd_options += ' %s' % trex_args
+
+ if not stateless:
+ if 'f' not in kwargs:
+ raise Exception('Argument -f should be specified in stateful command')
+ if 'd' not in kwargs:
+ raise Exception('Argument -d should be specified in stateful command')
+
+ cmd = "{nice}{run_command}{debug_image} --iom {io} {cmd_options} --no-key".format( # -- iom 0 disables the periodic log to the screen (not needed)
+ nice = '' if self.trex_nice == 0 else 'nice -n %s ' % self.trex_nice,
+ run_command = self.TREX_START_CMD,
+ debug_image = '-debug' if debug_image else '',
+ cmd_options = trex_cmd_options,
+ io = iom)
+
+ logger.info("TREX FULL COMMAND: {command}".format(command = cmd) )
+
+ return (cmd, export_path, kwargs.get('d', 0))
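+    # For example (hypothetical values), generate_run_cmd(f='cap2/dns.yaml', d=30, c=2)
+    # would produce roughly:
+    #   nice -n -19 ./t-rex-64 --iom 0 -f cap2/dns.yaml -d 30 -c 2 --no-key
+    # (option order follows dict iteration and may vary)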
+
+
+    def __check_trex_path_validity(self):
+        # check for executable existence
+        if not os.path.exists(self.TREX_PATH+'/t-rex-64'):
+            print("The provided TRex path does not contain an executable TRex file.\nPlease check the path and retry.")
+            logger.error("The provided TRex path does not contain an executable TRex file")
+            exit(-1)
+        # check for executable permissions
+        st = os.stat(self.TREX_PATH+'/t-rex-64')
+        if not bool(st.st_mode & (stat.S_IXUSR ) ):
+            print("The provided TRex path does not contain a TRex file with execution privileges.\nPlease check the file permissions and retry.")
+            logger.error("The provided TRex path does not contain a TRex file with execution privileges")
+            exit(-1)
+
+ def __check_files_path_validity(self):
+        # first, check for path existence. otherwise, try creating it with appropriate credentials
+ if not os.path.exists(self.trex_files_path):
+ try:
+ os.makedirs(self.trex_files_path, 0o660)
+ return
+ except os.error as inst:
+ print("The provided files path does not exist and cannot be created with needed access credentials using root user.\nPlease check the path's permissions and retry.")
+ logger.error("The provided files path does not exist and cannot be created with needed access credentials using root user.")
+ exit(-1)
+ elif os.access(self.trex_files_path, os.W_OK):
+ return
+ else:
+ print("The provided files path has insufficient access credentials for root user.\nPlease check the path's permissions and retry.")
+ logger.error("The provided files path has insufficient access credentials for root user")
+ exit(-1)
+
+class CTRex(object):
+ def __init__(self):
+ self.status = TRexStatus.Idle
+ self.verbose_status = 'TRex is Idle'
+ self.errcode = None
+ self.session = None
+ self.zmq_monitor = None
+ self.zmq_dump = None
+ self.zmq_error = None
+ self.seq = None
+ self.expect_trex = threading.Event()
+ self.encoder = JSONEncoder()
+
+ def get_status(self):
+ return self.status
+
+ def set_status(self, new_status):
+ self.status = new_status
+
+ def get_verbose_status(self):
+ return self.verbose_status
+
+ def set_verbose_status(self, new_status):
+ self.verbose_status = new_status
+
+ def gen_seq (self):
+ self.seq = randrange(1,1000)
+
+ def get_seq (self):
+ return self.seq
+
+ def get_running_info (self):
+ if self.status == TRexStatus.Running:
+ return self.encoder.encode(self.zmq_dump)
+ else:
+ logger.info("TRex isn't running. Running information isn't available.")
+ if self.status == TRexStatus.Idle:
+            if self.errcode is not None: # some error occurred
+                logger.info("TRex is in Idle state, with errors. Returning fault.")
+                return Fault(self.errcode, self.verbose_status) # raise at client the relevant exception, depending on the reason the error occurred
+ else:
+ logger.info("TRex is in Idle state, no errors. returning {}")
+ return u'{}'
+
+ return Fault(-12, self.verbose_status) # raise at client TRexWarning, indicating TRex is back to Idle state or still in Starting state
+
+ def stop_trex(self):
+ if self.status == TRexStatus.Idle:
+ # TRex isn't running, nothing to abort
+ logger.info("TRex isn't running. No need to stop anything.")
+ if self.errcode is not None: # some error occurred, notify client despite TRex already stopped
+                return Fault(self.errcode, self.verbose_status) # raise at client the relevant exception, depending on the reason the error occurred
+ return False
+ else:
+ # handle stopping TRex's run
+ self.session.join()
+ logger.info("TRex session has been successfully aborted.")
+ return True
+
+ def start_trex(self, trex_launch_path, trex_cmd):
+ self.set_status(TRexStatus.Starting)
+ logger.info("TRex running state changed to 'Starting'.")
+ self.set_verbose_status('TRex is starting (data is not available yet)')
+
+ self.errcode = None
+ self.session = AsynchronousTRexSession(self, trex_launch_path, trex_cmd)
+ self.session.start()
+ self.expect_trex.set()
+# self.zmq_monitor= ZmqMonitorSession(self, zmq_port)
+# self.zmq_monitor.start()
+
+
+
+def generate_trex_parser ():
+ default_path = os.path.abspath(os.path.join(outer_packages.CURRENT_PATH, os.pardir, os.pardir, os.pardir))
+ default_files_path = os.path.abspath(CTRexServer.DEFAULT_FILE_PATH)
+
+ parser = ArgumentParser(description = 'Run server application for TRex traffic generator',
+ formatter_class = RawTextHelpFormatter,
+ usage = """
+trex_daemon_server [options]
+""" )
+
+ parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0')
+ parser.add_argument("-p", "--daemon-port", type=int, default = 8090, metavar="PORT", dest="daemon_port",
+ help="Select port on which the daemon runs.\nDefault port is 8090.", action="store")
+ parser.add_argument("-z", "--zmq-port", dest="zmq_port", type=int,
+ action="store", help="Select port on which the ZMQ module listens to TRex.\nDefault port is 4500.", metavar="PORT",
+ default = 4500)
+ parser.add_argument("-t", "--trex-path", dest="trex_path",
+ action="store", help="Specify the compiled TRex directory from which TRex would run.\nDefault path is: {def_path}.".format( def_path = default_path ),
+ metavar="PATH", default = default_path )
+ parser.add_argument("-f", "--files-path", dest="files_path",
+ action="store", help="Specify a path to directory on which pushed files will be saved at.\nDefault path is: {def_path}.".format( def_path = default_files_path ),
+ metavar="PATH", default = default_files_path )
+ parser.add_argument("--trex-host", dest="trex_host",
+ action="store", help="Specify a hostname to be registered as the TRex server.\n"
+ "Default is to bind all IPs using '0.0.0.0'.",
+ metavar="HOST", default = '0.0.0.0')
+ parser.add_argument('-n', '--nice', dest='nice', action="store", default = -19, type = int,
+ help="Determine the priority TRex process [-20, 19] (lower = higher priority)\nDefault is -19.")
+ return parser
+
+trex_parser = generate_trex_parser()
+
+def do_main_program ():
+
+ args = trex_parser.parse_args()
+ server = CTRexServer(trex_path = args.trex_path, trex_files_path = args.files_path,
+ trex_host = args.trex_host, trex_daemon_port = args.daemon_port,
+ trex_zmq_port = args.zmq_port, trex_nice = args.nice)
+ server.start()
+
+
+if __name__ == "__main__":
+ do_main_program()
+
diff --git a/scripts/automation/trex_control_plane/server/zipmsg.py b/scripts/automation/trex_control_plane/server/zipmsg.py
new file mode 100644
index 00000000..397ada16
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/zipmsg.py
@@ -0,0 +1,32 @@
+import zlib
+import struct
+
+class ZippedMsg:
+
+ MSG_COMPRESS_THRESHOLD = 256
+ MSG_COMPRESS_HEADER_MAGIC = 0xABE85CEA
+
+ def check_threshold (self, msg):
+ return len(msg) >= self.MSG_COMPRESS_THRESHOLD
+
+ def compress (self, msg):
+ # compress
+ compressed = zlib.compress(msg)
+ new_msg = struct.pack(">II", self.MSG_COMPRESS_HEADER_MAGIC, len(msg)) + compressed
+ return new_msg
+
+
+ def decompress (self, msg):
+ if len(msg) < 8:
+ return None
+
+ t = struct.unpack(">II", msg[:8])
+ if (t[0] != self.MSG_COMPRESS_HEADER_MAGIC):
+ return None
+
+ x = zlib.decompress(msg[8:])
+ if len(x) != t[1]:
+ return None
+
+ return x
+
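+# Illustrative round trip (a sketch):
+#   z = ZippedMsg()
+#   data = b'x' * 1000
+#   assert z.check_threshold(data)
+#   assert z.decompress(z.compress(data)) == data
+# The 8-byte header packs the magic and the uncompressed length as big-endian u32s.
+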
diff --git a/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py b/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py
new file mode 100755
index 00000000..f559ebc1
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py
@@ -0,0 +1,86 @@
+#!/router/bin/python
+
+import os
+import outer_packages
+import zmq
+import threading
+import logging
+import CCustomLogger
+import zipmsg
+from json import JSONDecoder
+from common.trex_status_e import TRexStatus
+
+# setup the logger
+CCustomLogger.setup_custom_logger('TRexServer')
+logger = logging.getLogger('TRexServer')
+
+
+class ZmqMonitorSession(threading.Thread):
+ def __init__(self, trexObj , zmq_port):
+ super(ZmqMonitorSession, self).__init__()
+ self.stoprequest = threading.Event()
+ self.first_dump = True
+ self.zmq_port = zmq_port
+ self.zmq_publisher = "tcp://localhost:{port}".format(port=self.zmq_port)
+ self.trexObj = trexObj
+ self.expect_trex = self.trexObj.expect_trex # used to signal if TRex is expected to run and if data should be considered
+ self.decoder = JSONDecoder()
+ self.zipped = zipmsg.ZippedMsg()
+ logger.info("ZMQ monitor initialization finished")
+
+ def run(self):
+ try:
+ self.context = zmq.Context()
+ self.socket = self.context.socket(zmq.SUB)
+ logger.info("ZMQ monitor started listening @ {pub}".format(pub=self.zmq_publisher))
+ self.socket.connect(self.zmq_publisher)
+ self.socket.setsockopt(zmq.SUBSCRIBE, b'')
+
+ while not self.stoprequest.is_set():
+ try:
+ zmq_dump = self.socket.recv() # This call is BLOCKING until data received!
+ if self.expect_trex.is_set():
+ self.parse_and_update_zmq_dump(zmq_dump)
+ logger.debug("ZMQ dump received on socket, and saved to trexObject.")
+ except Exception as e:
+ if self.stoprequest.is_set():
+ # allow this exception since it comes from ZMQ monitor termination
+ pass
+ else:
+ logger.error("ZMQ monitor thrown an exception. Received exception: {ex}".format(ex=e))
+ raise
+ except Exception as e:
+ logger.error('ZMQ monitor error: %s' % e)
+ self.trexObj.zmq_error = e
+
+ def join(self, timeout=None):
+ self.stoprequest.set()
+ logger.debug("Handling termination of ZMQ monitor thread")
+ self.socket.close()
+ self.context.term()
+ logger.info("ZMQ monitor resources has been freed.")
+ super(ZmqMonitorSession, self).join(timeout)
+
+ def parse_and_update_zmq_dump(self, zmq_dump):
+ unzipped = self.zipped.decompress(zmq_dump)
+ if unzipped:
+ zmq_dump = unzipped
+ dict_obj = self.decoder.decode(zmq_dump.decode(errors = 'replace'))
+
+ if type(dict_obj) is not dict:
+ raise Exception('Expected ZMQ dump of type dict, got: %s' % type(dict_obj))
+
+ # add to trex_obj zmq latest dump, based on its 'name' header
+ if dict_obj != {}:
+ self.trexObj.zmq_dump[dict_obj['name']] = dict_obj
+ if self.first_dump:
+                # change TRexStatus from Starting to Running once the first ZMQ dump is obtained and parsed successfully
+ self.first_dump = False
+ self.trexObj.set_status(TRexStatus.Running)
+ self.trexObj.set_verbose_status("TRex is Running")
+ logger.info("First ZMQ dump received and successfully parsed. TRex running state changed to 'Running'.")
+
+
+if __name__ == "__main__":
+ pass
+
diff --git a/scripts/automation/trex_control_plane/stf/examples/stf_example.py b/scripts/automation/trex_control_plane/stf/examples/stf_example.py
new file mode 100755
index 00000000..f57435bf
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stf/examples/stf_example.py
@@ -0,0 +1,54 @@
+import argparse
+import stf_path
+from trex_stf_lib.trex_client import CTRexClient
+from pprint import pprint
+
+# sample TRex stateful run
+# assuming server daemon is running.
+
+def minimal_stateful_test(server):
+ print('Connecting to %s' % server)
+ trex_client = CTRexClient(server)
+
+ print('Connected, start TRex')
+ trex_client.start_trex(
+ c = 1,
+ m = 700,
+ f = 'cap2/http_simple.yaml',
+ d = 30,
+ l = 1000,
+ )
+
+ print('Sample until end')
+ result = trex_client.sample_to_run_finish()
+
+ print('Test results:')
+ print(result)
+
+ print('TX by ports:')
+    tx_pkts_dict = result.get_last_value('trex-global.data', 'opackets-*')
+    print(' | '.join(['%s: %s' % (k.split('-')[-1], tx_pkts_dict[k]) for k in sorted(tx_pkts_dict.keys())]))
+
+ print('RX by ports:')
+    rx_pkts_dict = result.get_last_value('trex-global.data', 'ipackets-*')
+    print(' | '.join(['%s: %s' % (k.split('-')[-1], rx_pkts_dict[k]) for k in sorted(rx_pkts_dict.keys())]))
+
+ print('CPU utilization:')
+ print(result.get_value_list('trex-global.data.m_cpu_util'))
+
+    # Dump of the *latest* result sample; uncomment to see it all
+ #print('Latest result dump:')
+ #pprint(result.get_latest_dump())
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description="Example for TRex Stateful, assuming server daemon is running.")
+ parser.add_argument('-s', '--server',
+ dest='server',
+ help='Remote trex address',
+ default='127.0.0.1',
+ type = str)
+ args = parser.parse_args()
+
+ minimal_stateful_test(args.server)
+
diff --git a/scripts/automation/trex_control_plane/stf/examples/stf_path.py b/scripts/automation/trex_control_plane/stf/examples/stf_path.py
new file mode 100755
index 00000000..bb401148
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stf/examples/stf_path.py
@@ -0,0 +1,4 @@
+import sys
+
+# FIXME: set this to the right path for trex_stf_lib
+sys.path.insert(0, "../")
diff --git a/scripts/automation/trex_control_plane/stf/trex_stf_lib/CCustomLogger.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/CCustomLogger.py
new file mode 100755
index 00000000..ecf7d519
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/CCustomLogger.py
@@ -0,0 +1,100 @@
+
+import sys
+import os
+import logging
+
+
+def setup_custom_logger(name, log_path = None):
+    # first make sure the path is available
+# if log_path is None:
+# log_path = os.getcwd()+'/trex_log.log'
+# else:
+# directory = os.path.dirname(log_path)
+# if not os.path.exists(directory):
+# os.makedirs(directory)
+ logging.basicConfig(level = logging.INFO,
+ format = '%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s',
+ datefmt = '%m-%d %H:%M')
+# filename= log_path,
+# filemode= 'w')
+#
+# # define a Handler which writes INFO messages or higher to the sys.stderr
+# consoleLogger = logging.StreamHandler()
+# consoleLogger.setLevel(logging.ERROR)
+# # set a format which is simpler for console use
+# formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
+# # tell the handler to use this format
+# consoleLogger.setFormatter(formatter)
+#
+# # add the handler to the logger
+# logging.getLogger(name).addHandler(consoleLogger)
+
+def setup_daemon_logger (name, log_path = None):
+    # first make sure the path is available
+ logging.basicConfig(level = logging.INFO,
+ format = '%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s',
+ datefmt = '%m-%d %H:%M',
+ filename= log_path,
+ filemode= 'w')
+
+class CustomLogger(object):
+
+ def __init__(self, log_filename):
+ # Store the original stdout and stderr
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ self.stdout_fd = os.dup(sys.stdout.fileno())
+ self.devnull = os.open('/dev/null', os.O_WRONLY)
+ self.log_file = open(log_filename, 'w')
+ self.silenced = False
+ self.pending_log_file_prints = 0
+
+ # silence all prints from stdout
+ def silence(self):
+ os.dup2(self.devnull, sys.stdout.fileno())
+ self.silenced = True
+
+ # restore stdout status
+ def restore(self):
+ sys.stdout.flush()
+ sys.stderr.flush()
+ # Restore normal stdout
+ os.dup2(self.stdout_fd, sys.stdout.fileno())
+ self.silenced = False
+
+    # print a message to the log (both stdout and the log file)
+ def log(self, text, force = False, newline = True):
+ self.log_file.write((text + "\n") if newline else text)
+ self.pending_log_file_prints += 1
+
+ if (self.pending_log_file_prints >= 10):
+ self.log_file.flush()
+ self.pending_log_file_prints = 0
+
+ self.console(text, force, newline)
+
+ # print a message to the console alone
+ def console(self, text, force = False, newline = True):
+ _text = (text + "\n") if newline else text
+ # if we are silenced and not forced - go home
+ if self.silenced and not force:
+ return
+
+        if self.silenced:
+            os.write(self.stdout_fd, _text.encode())  # os.write expects bytes under Python 3
+ else:
+ sys.stdout.write(_text)
+
+ sys.stdout.flush()
+
+ # flush
+ def flush(self):
+ sys.stdout.flush()
+ self.log_file.flush()
+
+    def __exit__(self, type, value, traceback):
+        sys.stdout.flush()
+        self.log_file.flush()
+        os.close(self.devnull)
+        self.log_file.close()  # a file object is closed with close(), not os.close()
diff --git a/scripts/automation/trex_control_plane/stf/trex_stf_lib/__init__.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/__init__.py
new file mode 100755
index 00000000..5a1da046
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/__init__.py
@@ -0,0 +1 @@
+__all__ = ["trex_status_e", "trex_exceptions"]
diff --git a/scripts/automation/trex_control_plane/stf/trex_stf_lib/external_packages.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/external_packages.py
new file mode 100755
index 00000000..7353c397
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/external_packages.py
@@ -0,0 +1,28 @@
+#!/router/bin/python
+
+import sys
+import os
+
+CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory
+PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, os.pardir, os.pardir, 'external_libs'))
+
+CLIENT_UTILS_MODULES = ['yaml-3.11'
+ ]
+
+def import_common_modules():
+ # must be in a higher priority
+ sys.path.insert(0, PATH_TO_PYTHON_LIB)
+ sys.path.append(ROOT_PATH)
+ import_module_list(CLIENT_UTILS_MODULES)
+
+
+def import_module_list(modules_list):
+    assert(isinstance(modules_list, list))
+    for p in modules_list:
+        full_path = os.path.join(PATH_TO_PYTHON_LIB, p)
+        sys.path.insert(1, full_path)
+
+import_common_modules()
+
diff --git a/scripts/automation/trex_control_plane/stf/trex_stf_lib/general_utils.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/general_utils.py
new file mode 100755
index 00000000..d2521f02
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/general_utils.py
@@ -0,0 +1,95 @@
+#!/router/bin/python
+
+import sys
+import site
+import string
+import random
+import os
+
+try:
+ import pwd
+except ImportError:
+ import getpass
+ pwd = None
+
+using_python_3 = sys.version_info.major == 3
+
+
+def user_input():
+ if using_python_3:
+ return input()
+ else:
+ # using python version 2
+ return raw_input()
+
+def get_current_user():
+ if pwd:
+ return pwd.getpwuid(os.geteuid()).pw_name
+ else:
+ return getpass.getuser()
+
+def import_module_list_by_path (modules_list):
+ assert(isinstance(modules_list, list))
+ for full_path in modules_list:
+ site.addsitedir(full_path)
+
+def find_path_to_pardir (pardir, base_path = os.getcwd() ):
+ """
+ Finds the absolute path for some parent dir `pardir`, starting from base_path
+
+ The request is only valid if the stop initiator is the same client as the TRex run initiator.
+
+ :parameters:
+ pardir : str
+ name of an upper-level directory to which we want to find an absolute path for
+ base_path : str
+ a full (usually nested) path from which we want to find a parent folder.
+
+ default value : **current working dir**
+
+ :return:
+ string representation of the full path to
+
+ """
+ components = base_path.split(os.sep)
+ return str.join(os.sep, components[:components.index(pardir)+1])
+
+
+def random_id_gen(length=8):
+ """
+ A generator for creating a random chars id of specific length
+
+ :parameters:
+ length : int
+ the desired length of the generated id
+
+ default: 8
+
+ :return:
+ a random id with each next() request.
+ """
+ id_chars = string.ascii_lowercase + string.digits
+ while True:
+ return_id = ''
+ for i in range(length):
+ return_id += random.choice(id_chars)
+ yield return_id
+
+def id_count_gen():
+ """
+ A generator for creating an increasing id for objects, starting from 0
+
+ :parameters:
+ None
+
+ :return:
+ an id (unsigned int) with each next() request.
+ """
+ return_id = 0
+ while True:
+ yield return_id
+ return_id += 1
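+
+# Illustrative usage (a sketch):
+#   rand_ids = random_id_gen(6)
+#   print(next(rand_ids))                  # e.g. 'a3k9x1'
+#   counter = id_count_gen()
+#   print(next(counter), next(counter))    # 0 1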
+
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/trex_control_plane/stf/trex_stf_lib/outer_packages.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/outer_packages.py
new file mode 100755
index 00000000..f8d50ce6
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/outer_packages.py
@@ -0,0 +1,30 @@
+#!/router/bin/python
+
+import sys
+import os
+
+
+CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+PACKAGE_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir, os.pardir, 'external_libs'))
+SCRIPTS_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir, os.pardir, os.pardir, os.pardir, 'external_libs'))
+
+CLIENT_MODULES = ['enum34-1.0.4',
+ 'jsonrpclib-pelix-0.2.5',
+# 'termstyle',
+# 'yaml-3.11'
+ ]
+
+
+def import_module_list(ext_libs_path):
+ for p in CLIENT_MODULES:
+ full_path = os.path.join(ext_libs_path, p)
+ if not os.path.exists(full_path):
+ raise Exception('Library %s is absent in path %s' % (p, ext_libs_path))
+ sys.path.insert(1, full_path)
+
+if os.path.exists(PACKAGE_PATH):
+ import_module_list(PACKAGE_PATH)
+elif os.path.exists(SCRIPTS_PATH):
+ import_module_list(SCRIPTS_PATH)
+else:
+ raise Exception('Could not find external libs in path: %s' % [PACKAGE_PATH, SCRIPTS_PATH])
diff --git a/scripts/automation/trex_control_plane/stf/trex_stf_lib/text_opts.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/text_opts.py
new file mode 100755
index 00000000..78a0ab1f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/text_opts.py
@@ -0,0 +1,192 @@
+import json
+import re
+
+try:                  # 'unicode' exists only under Python 2; alias it for Python 3
+    unicode
+except NameError:
+    unicode = str
+
+TEXT_CODES = {'bold': {'start': '\x1b[1m',
+ 'end': '\x1b[22m'},
+ 'cyan': {'start': '\x1b[36m',
+ 'end': '\x1b[39m'},
+ 'blue': {'start': '\x1b[34m',
+ 'end': '\x1b[39m'},
+ 'red': {'start': '\x1b[31m',
+ 'end': '\x1b[39m'},
+ 'magenta': {'start': '\x1b[35m',
+ 'end': '\x1b[39m'},
+ 'green': {'start': '\x1b[32m',
+ 'end': '\x1b[39m'},
+ 'yellow': {'start': '\x1b[33m',
+ 'end': '\x1b[39m'},
+ 'underline': {'start': '\x1b[4m',
+ 'end': '\x1b[24m'}}
+
+class TextCodesStripper:
+    keys = [re.escape(v['start']) for k,v in TEXT_CODES.items()]
+    keys += [re.escape(v['end']) for k,v in TEXT_CODES.items()]
+ pattern = re.compile("|".join(keys))
+
+ @staticmethod
+ def strip (s):
+ return re.sub(TextCodesStripper.pattern, '', s)
+
+def format_num (size, suffix = "", compact = True, opts = ()):
+ txt = "NaN"
+
+    if isinstance(size, str):
+ return "N/A"
+
+ u = ''
+
+ if compact:
+ for unit in ['','K','M','G','T','P']:
+ if abs(size) < 1000.0:
+ u = unit
+ break
+ size /= 1000.0
+
+ if isinstance(size, float):
+ txt = "%3.2f" % (size)
+ else:
+ txt = "{:,}".format(size)
+
+ if u or suffix:
+ txt += " {:}{:}".format(u, suffix)
+
+ if isinstance(opts, tuple):
+ return format_text(txt, *opts)
+ else:
+ return format_text(txt, (opts))
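+
+# Example (sketch): format_num(1234567, suffix = 'pps') -> '1.23 Mpps', while
+# format_num(1234567, compact = False) -> '1,234,567' and any str input -> 'N/A'.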
+
+
+
+def format_time (t_sec):
+ if t_sec < 0:
+ return "infinite"
+
+ if t_sec < 1:
+ # low numbers
+ for unit in ['ms', 'usec', 'ns']:
+ t_sec *= 1000.0
+ if t_sec >= 1.0:
+ return '{:,.2f} [{:}]'.format(t_sec, unit)
+
+ return "NaN"
+
+ else:
+ # seconds
+ if t_sec < 60.0:
+ return '{:,.2f} [{:}]'.format(t_sec, 'sec')
+
+ # minutes
+ t_sec /= 60.0
+ if t_sec < 60.0:
+ return '{:,.2f} [{:}]'.format(t_sec, 'minutes')
+
+ # hours
+ t_sec /= 60.0
+ if t_sec < 24.0:
+ return '{:,.2f} [{:}]'.format(t_sec, 'hours')
+
+ # days
+ t_sec /= 24.0
+ return '{:,.2f} [{:}]'.format(t_sec, 'days')
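+
+# Example (sketch): format_time(0.0005) -> '500.00 [usec]',
+# format_time(90) -> '1.50 [minutes]', format_time(-1) -> 'infinite'.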
+
+
+def format_percentage (size):
+ return "%0.2f %%" % (size)
+
+def bold(text):
+ return text_attribute(text, 'bold')
+
+
+def cyan(text):
+ return text_attribute(text, 'cyan')
+
+
+def blue(text):
+ return text_attribute(text, 'blue')
+
+
+def red(text):
+ return text_attribute(text, 'red')
+
+
+def magenta(text):
+ return text_attribute(text, 'magenta')
+
+
+def green(text):
+ return text_attribute(text, 'green')
+
+def yellow(text):
+ return text_attribute(text, 'yellow')
+
+def underline(text):
+ return text_attribute(text, 'underline')
+
+
+def text_attribute(text, attribute):
+ if isinstance(text, str):
+ return "{start}{txt}{stop}".format(start=TEXT_CODES[attribute]['start'],
+ txt=text,
+ stop=TEXT_CODES[attribute]['end'])
+ elif isinstance(text, unicode):
+ return u"{start}{txt}{stop}".format(start=TEXT_CODES[attribute]['start'],
+ txt=text,
+ stop=TEXT_CODES[attribute]['end'])
+ else:
+ raise Exception("not a string")
+
+
+FUNC_DICT = {'blue': blue,
+ 'bold': bold,
+ 'green': green,
+ 'yellow': yellow,
+ 'cyan': cyan,
+ 'magenta': magenta,
+ 'underline': underline,
+ 'red': red}
+
+
+def format_text(text, *args):
+ return_string = text
+ for i in args:
+ func = FUNC_DICT.get(i)
+ if func:
+ return_string = func(return_string)
+
+ return return_string
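+
+# Example (sketch): format_text('ERROR', 'red', 'bold') wraps the string with
+# the red and bold ANSI codes from TEXT_CODES; unknown attribute names are
+# silently ignored, and TextCodesStripper.strip() removes the codes again.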
+
+
+def format_threshold (value, red_zone, green_zone):
+ if value >= red_zone[0] and value <= red_zone[1]:
+ return format_text("{0}".format(value), 'red')
+
+ if value >= green_zone[0] and value <= green_zone[1]:
+ return format_text("{0}".format(value), 'green')
+
+ return "{0}".format(value)
+
+# pretty print for JSON
+def pretty_json (json_str, use_colors = True):
+ pretty_str = json.dumps(json.loads(json_str), indent = 4, separators=(',', ': '), sort_keys = True)
+
+ if not use_colors:
+ return pretty_str
+
+ try:
+ # int numbers
+ pretty_str = re.sub(r'([ ]*:[ ]+)(\-?[1-9][0-9]*[^.])',r'\1{0}'.format(blue(r'\2')), pretty_str)
+ # float
+ pretty_str = re.sub(r'([ ]*:[ ]+)(\-?[1-9][0-9]*\.[0-9]+)',r'\1{0}'.format(magenta(r'\2')), pretty_str)
+ # strings
+ pretty_str = re.sub(r'([ ]*:[ ]+)("[^"]*")',r'\1{0}'.format(red(r'\2')), pretty_str)
+ pretty_str = re.sub(r"('[^']*')", r'{0}\1{1}'.format(TEXT_CODES['magenta']['start'],
+ TEXT_CODES['red']['start']), pretty_str)
+ except :
+ pass
+
+ return pretty_str
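+
+# Example (sketch): pretty_json('{"name": "p0", "rate": 12.5}') returns an
+# indented, key-sorted dump with ints colored blue, floats magenta and quoted
+# strings red (pass use_colors = False for plain output).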
+
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py
new file mode 100755
index 00000000..e9d2b8a0
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py
@@ -0,0 +1,1561 @@
+#!/router/bin/python
+
+# internal libs
+import sys
+import os
+import socket
+import errno
+import time
+import re
+import copy
+import binascii
+from distutils.util import strtobool
+from collections import deque, OrderedDict
+from json import JSONDecoder
+import traceback
+import signal
+
+try:
+ from . import outer_packages
+ from .trex_status_e import TRexStatus
+ from .trex_exceptions import *
+ from .trex_exceptions import exception_handler
+ from .general_utils import *
+except Exception as e: # is __main__
+ import outer_packages
+ from trex_status_e import TRexStatus
+ from trex_exceptions import *
+ from trex_exceptions import exception_handler
+ from general_utils import *
+
+# external libs
+import jsonrpclib
+from jsonrpclib import ProtocolError, AppError
+from enum import Enum
+
+
+
+class CTRexClient(object):
+ """
+ This class defines the client side of the RESTful interaction with TRex
+ """
+
+ def __init__(self, trex_host, max_history_size = 100, filtered_latency_amount = 0.001, trex_daemon_port = 8090, master_daemon_port = 8091, trex_zmq_port = 4500, verbose = False, debug_image = False, trex_args = ''):
+ """
+ Instantiate a TRex client object and connect it to the listening daemon server
+
+ :parameters:
+ trex_host : str
+ a string of the TRex ip address or hostname.
+ max_history_size : int
+ a number to set the maximum history size of a single TRex run. Each sampling adds a new item to history.
+
+ default value : **100**
+
+ filtered_latency_amount : float
+ Ignore the highest latency for this fraction of packets (by default, consider only the lowest 99.9% of measurements).
+
+ default value : **0.001**
+
+ trex_daemon_port : int
+ the port number on which the trex-daemon server can be reached
+
+ default value: **8090**
+ master_daemon_port : int
+ the port number on which the master-daemon server can be reached
+
+ default value: **8091**
+ trex_zmq_port : int
+ the port number on which TRex's ZMQ module interacts with the daemon server
+
+ default value: **4500**
+ verbose : bool
+ enables verbose output for supported class methods.
+
+ default value : **False**
+ trex_args : string
+ additional arguments passed to TRex. For example, "-w 3 --no-watchdog"
+
+ :raises:
+ socket errors, in case server could not be reached.
+
+ """
+ try:
+ self.trex_host = socket.gethostbyname(trex_host)
+ except: # give it another try
+ self.trex_host = socket.gethostbyname(trex_host)
+ self.trex_daemon_port = trex_daemon_port
+ self.master_daemon_port = master_daemon_port
+ self.trex_zmq_port = trex_zmq_port
+ self.seq = None
+ self._last_sample = time.time()
+ self.__default_user = get_current_user()
+ self.verbose = verbose
+ self.result_obj = CTRexResult(max_history_size, filtered_latency_amount)
+ self.decoder = JSONDecoder()
+ self.history = jsonrpclib.history.History()
+ self.master_daemon_path = "http://{hostname}:{port}/".format( hostname = self.trex_host, port = master_daemon_port )
+ self.master_daemon = jsonrpclib.Server(self.master_daemon_path, history = self.history)
+ self.trex_server_path = "http://{hostname}:{port}/".format( hostname = self.trex_host, port = trex_daemon_port )
+ self.server = jsonrpclib.Server(self.trex_server_path, history = self.history)
+ self.debug_image = debug_image
+ self.trex_args = trex_args
+ self.sample_to_run_finish = self.sample_until_finish # alias for legacy
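+
+ # Typical stateful flow (a sketch; the host name, profile path and 'm'
+ # multiplier below are illustrative placeholders, not values mandated by
+ # this API):
+ # client = CTRexClient('trex-host.example.com')
+ # client.start_trex(f = 'cap2/dns.yaml', d = 60, m = 10)
+ # result = client.sample_until_finish()
+ # print(result.get_drop_rate())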
+
+
+ def add (self, x, y):
+ ''' Simple sanity check against the TRex daemon server: adds two numbers remotely '''
+ try:
+ return self.server.add(x,y)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def start_trex (self, f, d, block_to_success = True, timeout = 40, user = None, trex_development = False, **trex_cmd_options):
+ """
+ Request to start a TRex run on server in stateful mode.
+
+ :parameters:
+ f : str
+ a path (on server) for the injected traffic data (.yaml file)
+ d : int
+ the desired duration of the test; must be at least 30 seconds.
+ block_to_success : bool
+ determines whether this method blocks until TRex changes state from 'Starting' to either 'Idle' or 'Running'
+
+ default value : **True**
+ timeout : int
+ maximum time (in seconds) to wait in blocking state until TRex changes state from 'Starting' to either 'Idle' or 'Running'
+
+ default value: **40**
+ user : str
+ the identity of the run issuer.
+ trex_cmd_options : key, val
+ sets desired TRex options using key=val syntax, separated by commas.
+ For keys with no value, pass key=True
+
+ :return:
+ **True** on success
+
+ :raises:
+ + :exc:`ValueError`, in case 'd' parameter inserted with wrong value.
+ + :exc:`trex_exceptions.TRexError`, in case one of the trex_cmd_options raised an exception at server.
+ + :exc:`trex_exceptions.TRexInUseError`, in case TRex is already taken.
+ + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex is reserved for a user other than the one trying to start TRex.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ user = user or self.__default_user
+ try:
+ d = int(d)
+ if d < 30 and not trex_development: # test duration should be at least 30 seconds, unless trex_development flag is specified.
+ raise ValueError
+ except ValueError:
+ raise ValueError("'d' parameter must be an integer specifying how long TRex runs, and must be at least 30 seconds.")
+
+ trex_cmd_options.update( {'f' : f, 'd' : d} )
+ if not trex_cmd_options.get('l'):
+ self.result_obj.latency_checked = False
+ if 'k' in trex_cmd_options:
+ timeout += int(trex_cmd_options['k']) # during 'k' seconds TRex stays in 'Starting' state
+
+ self.result_obj.clear_results()
+ try:
+ issue_time = time.time()
+ retval = self.server.start_trex(trex_cmd_options, user, block_to_success, timeout, False, self.debug_image, self.trex_args)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ if retval != 0:
+ self.seq = retval # update seq num only on successful submission
+ return True
+ else: # TRex has been started by another user
+ raise TRexInUseError('TRex is already being used by another user or process. Try again once TRex is back in IDLE state.')
+
+
+ def start_stateless(self, block_to_success = True, timeout = 40, user = None, **trex_cmd_options):
+ """
+ Request to start a TRex run on server in stateless mode.
+
+ :parameters:
+ block_to_success : bool
+ determines whether this method blocks until TRex changes state from 'Starting' to either 'Idle' or 'Running'
+
+ default value : **True**
+ timeout : int
+ maximum time (in seconds) to wait in blocking state until TRex changes state from 'Starting' to either 'Idle' or 'Running'
+
+ default value: **40**
+ user : str
+ the identity of the run issuer.
+ trex_cmd_options : key, val
+ sets desired TRex options using key=val syntax, separated by commas.
+ For keys with no value, pass key=True
+
+ :return:
+ **True** on success
+
+ :raises:
+ + :exc:`trex_exceptions.TRexError`, in case one of the trex_cmd_options raised an exception at server.
+ + :exc:`trex_exceptions.TRexInUseError`, in case TRex is already taken.
+ + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex is reserved for a user other than the one trying to start TRex.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ user = user or self.__default_user
+ retval = self.server.start_trex(trex_cmd_options, user, block_to_success, timeout, True, self.debug_image, self.trex_args)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ if retval != 0:
+ self.seq = retval # update seq num only on successful submission
+ return True
+ else: # TRex has been started by another user
+ raise TRexInUseError('TRex is already being used by another user or process. Try again once TRex is back in IDLE state.')
+
+
+ def stop_trex (self):
+ """
+ Request to stop a TRex run on server.
+
+ The request is only valid if the stop initiator is the same client as the TRex run initiator.
+
+ :parameters:
+ None
+
+ :return:
+ + **True** on successful termination
+ + **False** if request issued but TRex wasn't running.
+
+ :raises:
+ + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex is running but was started by another user.
+ + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed TRex run (unexpected termination).
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ return self.server.stop_trex(self.seq)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def force_kill (self, confirm = True):
+ """
+ Force killing of running TRex process (if exists) on the server.
+
+ .. tip:: This method is a safety method and **overrides any running or reserved resources**, and as such isn't designed to be used on a regular basis.
+ Always consider using :func:`trex_client.CTRexClient.stop_trex` instead.
+
+ At the end of this method, TRex returns to IDLE state with no reservation.
+
+ :parameters:
+ confirm : bool
+ Prompt for user confirmation before terminating the TRex session
+
+ :return:
+ + **True** on successful termination
+ + **False** otherwise.
+
+ :raises:
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ if confirm:
+ prompt = "WARNING: This will terminate active TRex session indiscriminately.\nAre you sure? "
+ sys.stdout.write('%s [y/n]\n' % prompt)
+ while True:
+ try:
+ if strtobool(user_input().lower()):
+ break
+ else:
+ return
+ except ValueError:
+ sys.stdout.write('Please respond with \'y\' or \'n\'.\n')
+ try:
+ return self.server.force_trex_kill()
+ except AppError as err:
+ # Silence any kind of application errors- by design
+ return False
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def kill_all_trexes(self, timeout = 15):
+ """
+ Kills running TRex processes (if any) on the server, not only those owned by the current daemon.
+ Raises exception upon error killing.
+
+ :return:
+ + **True** if processes killed/not running
+ + **False** otherwise.
+
+ """
+ try:
+ poll_rate = 0.1
+ # try Ctrl+C, usual kill, -9
+ for signal_name in [signal.SIGINT, signal.SIGTERM, signal.SIGKILL]:
+ self.server.kill_all_trexes(signal_name)
+ for i in range(int(timeout / poll_rate)):
+ if not self.get_trex_cmds():
+ return True
+ time.sleep(poll_rate)
+ if self.get_trex_cmds():
+ return False
+ return True
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ finally:
+ self.prompt_verbose_data()
+
+
+ def get_trex_cmds(self):
+ """
+ Gets list of running TRex pids and command lines.
+ Can be used to verify if any TRex is running.
+
+ :return:
+ List of tuples (pid, command) of running TRexes
+ """
+ try:
+ return self.server.get_trex_cmds()
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ finally:
+ self.prompt_verbose_data()
+
+
+ def get_trex_path(self):
+ '''
+ Returns TRex path on server
+ '''
+ try:
+ return str(self.master_daemon.get_trex_path())
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ finally:
+ self.prompt_verbose_data()
+
+
+ def wait_until_kickoff_finish(self, timeout = 40):
+ """
+ Block the client application until TRex changes state from 'Starting' to either 'Idle' or 'Running'
+
+ :parameters:
+ timeout : int
+ maximum time (in seconds) to wait in blocking state until TRex changes state from 'Starting' to either 'Idle' or 'Running'
+
+ :return:
+ + **True** on successful termination
+ + **False** if request issued but TRex wasn't running.
+
+ :raises:
+ + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed TRex run (unexpected termination).
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ .. note:: Exceptions are thrown only when start_trex did not block in the first place, i.e. the `block_to_success` parameter was set to `False`
+
+ """
+
+ try:
+ return self.server.wait_until_kickoff_finish(timeout)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def is_running (self, dump_out = False):
+ """
+ Poll for TRex running status.
+
+ If TRex is running, a history item will be added into result_obj and processed.
+
+ .. tip:: This method is especially useful for iterating until TRex run is finished.
+
+ :parameters:
+ dump_out : dict
+ if passed, the pointer object is cleared and the latest dump stored in it.
+
+ :return:
+ + **True** if TRex is running.
+ + **False** if TRex is not running.
+
+ :raises:
+ + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed TRex run (unexpected termination).
+ + :exc:`TypeError`, in case of a JSON stream decoding error.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ res = self.get_running_info()
+ if res == {}:
+ return False
+ if dump_out is not False and isinstance(dump_out, dict): # save received dump to given 'dump_out' pointer
+ dump_out.clear()
+ dump_out.update(res)
+ return True
+ except TRexWarning as err:
+ if err.code == -12: # TRex is either still at 'Starting' state or in Idle state, however NO error occurred
+ return False
+ except TRexException:
+ raise
+ except ProtocolError as err:
+ raise
+ #is printed by self.get_running_info()
+ #finally:
+ # self.prompt_verbose_data()
+
+ def is_idle (self):
+ """
+ Poll for TRex running status, check if TRex is in Idle state.
+
+ :parameters:
+ None
+
+ :return:
+ + **True** if TRex is idle.
+ + **False** if TRex is starting or running.
+
+ :raises:
+ + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed TRex run (unexpected termination).
+ + :exc:`TypeError`, in case of a JSON stream decoding error.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ if self.get_running_status()['state'] == TRexStatus.Idle:
+ return True
+ return False
+ except TRexException:
+ raise
+ except ProtocolError as err:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def get_trex_files_path (self):
+ """
+ Fetches the local path in which files are stored when pushed to TRex server from client.
+
+ :parameters:
+ None
+
+ :return:
+ string representation of the desired path
+
+ .. note:: The returned path represents a path on the TRex server **local machine**
+
+ :raises:
+ ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ return (self.server.get_files_path() + '/')
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def get_running_status (self):
+ """
+ Fetches the current TRex status.
+
+ If available, verbose data will accompany the state itself.
+
+ :parameters:
+ None
+
+ :return:
+ dictionary with 'state' and 'verbose' keys.
+
+ :raises:
+ ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ res = self.server.get_running_status()
+ res['state'] = TRexStatus(res['state'])
+ return res
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def get_running_info (self):
+ """
+ Performs a single poll of TRex running data and processes it into the result object (named `result_obj`).
+
+ .. tip:: This method will throw an exception if TRex isn't running. Always consider using :func:`trex_client.CTRexClient.is_running` which handles a single poll operation in a safer manner.
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the most updated data dump from TRex.
+
+ :raises:
+ + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed TRex run (unexpected termination).
+ + :exc:`TypeError`, in case of a JSON stream decoding error.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ if not self.is_query_relevance():
+ # if queried more often than the minimum sampling interval allows, return the last known data without contacting the server
+ return self.result_obj.get_latest_dump()
+ else:
+ try:
+ latest_dump = self.decoder.decode( self.server.get_running_info() ) # latest dump is not a dict, but json string. decode it.
+ self.result_obj.update_result_data(latest_dump)
+ return latest_dump
+ except TypeError as inst:
+ raise TypeError('JSON-RPC data decoding failed. Check out incoming JSON stream.')
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def sample_until_condition (self, condition_func, time_between_samples = 1):
+ """
+ Automatically sets ongoing sampling of TRex data, with sampling rate described by time_between_samples.
+
+ On each fetched dump, the condition_func is applied to the result object, and if it returns True, the sampling stops.
+
+ :parameters:
+ condition_func : function
+ function that operates on result_obj and checks if a condition has been met
+
+ .. note:: `condition_func` is applied on a `CTRexResult` object. Make sure to design a relevant method.
+ time_between_samples : int
+ determines the time between each sample of the server
+
+ default value : **1**
+
+ :return:
+ the first result object (see :class:`CTRexResult` for further details) of the TRex run on which the condition has been met.
+
+ :raises:
+ + :exc:`UserWarning`, in case the condition_func method condition hasn't been met
+ + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed TRex run (unexpected termination).
+ + :exc:`TypeError`, in case of a JSON stream decoding error.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+ + :exc:`Exception`, in case the condition_func suffered from any kind of exception
+
+ """
+ # make sure TRex is running. raise exceptions here if any
+ self.wait_until_kickoff_finish()
+ try:
+ while self.is_running():
+ results = self.get_result_obj()
+ if condition_func(results):
+ # if condition satisfied, stop TRex and return result object
+ self.stop_trex()
+ return results
+ time.sleep(time_between_samples)
+ except TRexWarning:
+ # means we're back to Idle state, and didn't meet our condition
+ raise UserWarning("TRex results condition wasn't met during TRex run.")
+ except Exception:
+ # this could come from provided method 'condition_func'
+ raise
+
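+ # Example (sketch): stop the run once warm-up completes, i.e. once the
+ # actual TX rate reaches the expected one:
+ # res = client.sample_until_condition(lambda r: r.is_done_warmup())
+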
+ def sample_until_finish (self, time_between_samples = 1):
+ """
+ Automatically samples TRex data with sampling rate described by time_between_samples until TRex run finishes.
+
+ :parameters:
+ time_between_samples : int
+ determines the time between each sample of the server
+
+ default value : **1**
+
+ :return:
+ the latest result object (see :class:`CTRexResult` for further details) with sampled data.
+
+ :raises:
+ + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed TRex run (unexpected termination).
+ + :exc:`TypeError`, in case of a JSON stream decoding error.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ self.wait_until_kickoff_finish()
+
+ try:
+ while self.is_running():
+ time.sleep(time_between_samples)
+ except TRexWarning:
+ pass
+ results = self.get_result_obj()
+ return results
+
+ def sample_x_seconds (self, sample_time, time_between_samples = 1):
+ """
+ Automatically sets ongoing sampling of TRex data for sample_time seconds, with sampling rate described by time_between_samples.
+ Does not stop the TRex afterwards!
+
+ .. tip:: Useful for changing the device (Router, ASA etc.) configuration after given time.
+
+ :parameters:
+ sample_time : int
+ sample the TRex this number of seconds
+
+ time_between_samples : int
+ determines the time between each sample of the server
+
+ default value : **1**
+
+ :return:
+ the first result object (see :class:`CTRexResult` for further details) of the TRex run after given sample_time.
+
+ :raises:
+ + :exc:`UserWarning`, in case the TRex run ended before the sample_time duration
+ + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed TRex run (unexpected termination).
+ + :exc:`TypeError`, in case of a JSON stream decoding error.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ # make sure TRex is running. raise exceptions here if any
+ self.wait_until_kickoff_finish()
+ start_time = time.time()
+ end_time = start_time + sample_time
+ while self.is_running():
+ if time.time() < end_time:
+ time.sleep(time_between_samples)
+ else:
+ return self.get_result_obj()
+ elapsed_time = time.time() - start_time
+ raise UserWarning("TRex has stopped at %s seconds (before expected %s seconds)\nTry increasing test duration or decreasing sample_time" % (elapsed_time, sample_time))
+
+ def get_result_obj (self, copy_obj = True):
+ """
+ Returns the result object of the trex_client's instance.
+
+ By default, returns a **copy** of the objects (so that changes to the original object are masked).
+
+ :parameters:
+ copy_obj : bool
+ False means that a reference to the original (possibly changing) object is passed
+
+ default value : **True**
+
+ :return:
+ the latest result object (see :class:`CTRexResult` for further details) with sampled data.
+
+ """
+ if copy_obj:
+ return copy.deepcopy(self.result_obj)
+ else:
+ return self.result_obj
+
+ def is_reserved (self):
+ """
+ Checks if TRex is currently reserved to any user or not.
+
+ :parameters:
+ None
+
+ :return:
+ + **True** if TRex is reserved.
+ + **False** otherwise.
+
+ :raises:
+ ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ return self.server.is_reserved()
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def get_trex_daemon_log (self):
+ """
+ Get TRex daemon log.
+
+ :return:
+ String representation of TRex daemon log
+
+ :raises:
+ + :exc:`trex_exceptions.TRexRequestDenied`, in case file could not be read.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ res = binascii.a2b_base64(self.server.get_trex_daemon_log())
+ if type(res) is bytes:
+ return res.decode()
+ return res
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def get_trex_log (self):
+ """
+ Get TRex CLI output log
+
+ :return:
+ String representation of TRex log
+
+ :raises:
+ + :exc:`trex_exceptions.TRexRequestDenied`, in case file could not be fetched at server side.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ res = binascii.a2b_base64(self.server.get_trex_log())
+ if type(res) is bytes:
+ return res.decode()
+ return res
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def get_trex_version (self):
+ """
+ Get TRex version details.
+
+ :return:
+ TRex details (Version, User, Date, Uuid, Git SHA) as an ordered dictionary
+
+ :raises:
+ + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex version could not be determined.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+ + General Exception, in case one of the keys is missing in the response
+ """
+
+ try:
+ version_dict = OrderedDict()
+ res = binascii.a2b_base64(self.server.get_trex_version())
+ if type(res) is bytes:
+ res = res.decode()
+ result_lines = res.split('\n')
+ for line in result_lines:
+ if not line:
+ continue
+ key, value = line.strip().split(':', 1)
+ version_dict[key.strip()] = value.strip()
+ for key in ('Version', 'User', 'Date', 'Uuid', 'Git SHA'):
+ if key not in version_dict:
+ raise Exception('get_trex_version: got server response without key: {0}'.format(key))
+ return version_dict
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def reserve_trex (self, user = None):
+ """
+ Reserves the usage of TRex to a certain user.
+
+ While TRex is reserved, no other user can reserve it or start new TRex runs.
+
+ :parameters:
+ user : str
+ a username of the desired owner of TRex
+
+ default: currently logged-in user
+
+ :return:
+ **True** if reservation made successfully
+
+ :raises:
+ + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex is reserved for a user other than the one trying to make the reservation.
+ + :exc:`trex_exceptions.TRexInUseError`, in case TRex is currently running.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ username = user or self.__default_user
+ try:
+ return self.server.reserve_trex(user = username)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def cancel_reservation (self, user = None):
+ """
+ Cancels a current reservation of TRex to a certain user.
+
+ When TRex is reserved, no other user can start new TRex runs.
+
+
+ :parameters:
+ user : str
+ a username of the desired owner of TRex
+
+ default: currently logged-in user
+
+ :return:
+ + **True** if reservation canceled successfully,
+ + **False** if there was no reservation at all.
+
+ :raises:
+ + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex is reserved for a user other than the one trying to cancel the reservation.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+
+ username = user or self.__default_user
+ try:
+ return self.server.cancel_reservation(user = username)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def get_files_list (self, path):
+ """
+ Gets a list of dirs and files, either from /tmp/trex_files or from a path relative to the TRex server.
+
+ :parameters:
+ path : str
+ a path to directory to read.
+
+ :return:
+ Tuple: list of dirs and list of files in given path
+
+ :raises:
+ + :exc:`trex_exceptions.TRexRequestDenied`, in case the server denied access to the given path.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+
+ try:
+ return self.server.get_files_list(path)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def get_file(self, filepath):
+ """
+ Gets the content of a file, as a bytes string, from /tmp/trex_files or from the TRex server directory.
+
+ :parameters:
+ filepath : str
+ a path to a file at server.
+ it can be either relative to the TRex server or an absolute path starting with /tmp/trex_files
+
+ :return:
+ Content of the file
+
+ :raises:
+ + :exc:`trex_exceptions.TRexRequestDenied`, in case the server denied access to the given path.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+ """
+
+ try:
+ return binascii.a2b_base64(self.server.get_file(filepath))
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def push_files (self, filepaths):
+ """
+ Pushes a file (or a list of files) to be stored locally on the server.
+
+ :parameters:
+ filepaths : str or list
+ a path to a file to be pushed to server.
+ if a list of paths is passed, all of them will be pushed to the server
+
+ :return:
+ + **True** if file(s) copied successfully.
+ + **False** otherwise.
+
+ :raises:
+ + :exc:`IOError`, in case specified file wasn't found or could not be accessed.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ paths_list = None
+ if isinstance(filepaths, str):
+ paths_list = [filepaths]
+ elif isinstance(filepaths, list):
+ paths_list = filepaths
+ else:
+ raise TypeError("filepaths argument must be of type str or list")
+
+ for filepath in paths_list:
+ try:
+ if not os.path.exists(filepath):
+ raise IOError(errno.ENOENT, "The requested `{fname}` file wasn't found. Operation aborted.".format(
+ fname = filepath) )
+ else:
+ filename = os.path.basename(filepath)
+ with open(filepath, 'rb') as f:
+ file_content = f.read()
+ self.server.push_file(filename, binascii.b2a_base64(file_content).decode())
+ finally:
+ self.prompt_verbose_data()
+ return True
+
+ def is_query_relevance(self):
+ """
+ Checks whether the minimum interval between two consecutive server queries (for live running data) has passed.
+
+ .. note:: The allowed minimum time between each two consecutive samples is 0.5 seconds.
+
+ :parameters:
+ None
+
+ :return:
+ + **True** if more than 0.5 seconds have passed since the last server query.
+ + **False** otherwise.
+
+ """
+ cur_time = time.time()
+ if cur_time-self._last_sample < 0.5:
+ return False
+ else:
+ self._last_sample = cur_time
+ return True
+
+ def call_server_method_safely (self, method_to_call):
+ try:
+ return method_to_call()
+ except socket.error as e:
+ if e.errno == errno.ECONNREFUSED:
+ raise SocketError(errno.ECONNREFUSED, "Connection to TRex daemon server was refused. Please make sure the server is up.")
+
+ def check_server_connectivity (self):
+ """
+ Checks TRex daemon server for connectivity.
+ """
+ try:
+ socket.gethostbyname(self.trex_host)
+ return self.server.connectivity_check()
+ except socket.gaierror as e:
+ raise socket.gaierror(e.errno, "Could not resolve server hostname. Please make sure the hostname was entered correctly.")
+ except socket.error as e:
+ if e.errno == errno.ECONNREFUSED:
+ raise socket.error(errno.ECONNREFUSED, "Connection to TRex daemon server was refused. Please make sure the server is up.")
+ finally:
+ self.prompt_verbose_data()
+
+
+ def master_add(self, x, y):
+ ''' Sanity check for Master daemon '''
+ try:
+ return self.master_daemon.add(x,y)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ finally:
+ self.prompt_verbose_data()
+
+
+ def check_master_connectivity (self):
+ '''
+ Check Master daemon for connectivity.
+ Return True upon success
+ '''
+ try:
+ socket.gethostbyname(self.trex_host)
+ return self.master_daemon.check_connectivity()
+ except socket.gaierror as e:
+ raise socket.gaierror(e.errno, "Could not resolve server hostname. Please make sure the hostname was entered correctly.")
+ except socket.error as e:
+ if e.errno == errno.ECONNREFUSED:
+ raise socket.error(errno.ECONNREFUSED, "Connection to Master daemon was refused. Please make sure the server is up.")
+ finally:
+ self.prompt_verbose_data()
+
+ def is_trex_daemon_running(self):
+ '''
+ Check if TRex server daemon is running.
+ Returns True/False
+ '''
+ try:
+ return self.master_daemon.is_trex_daemon_running()
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ finally:
+ self.prompt_verbose_data()
+
+ def restart_trex_daemon(self):
+ '''
+ Restart TRex server daemon. Useful after update.
+ Will not fail if daemon is initially stopped.
+ '''
+ try:
+ return self.master_daemon.restart_trex_daemon()
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ finally:
+ self.prompt_verbose_data()
+
+ def start_trex_daemon(self):
+ '''
+ Start TRex server daemon.
+
+ :return:
+ + **True** if success.
+ + **False** if TRex server daemon already running.
+ '''
+ try:
+ return self.master_daemon.start_trex_daemon()
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ finally:
+ self.prompt_verbose_data()
+
+ def stop_trex_daemon(self):
+ '''
+ Stop TRex server daemon.
+
+ :return:
+ + **True** if success.
+ + **False** if TRex server daemon was not running.
+ '''
+ try:
+ return self.master_daemon.stop_trex_daemon()
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ finally:
+ self.prompt_verbose_data()
+
+ def prompt_verbose_data(self):
+ """
+ This method prints any available verbose data, but only if the `verbose` option has been turned on.
+ """
+ if self.verbose:
+ print ('\n')
+ print ("(*) JSON-RPC request:", self.history.request)
+ print ("(*) JSON-RPC response:", self.history.response)
+
+ def __verbose_print(self, print_str):
+ """
+ This private method prints the `print_str` string only in case self.verbose flag is turned on.
+
+ :parameters:
+ print_str : str
+ a string to be printed
+
+ :returns:
+ None
+ """
+ if self.verbose:
+ print (print_str)
+
+
+
+ def _handle_AppError_exception(self, err):
+ """
+ This private method triggers the TRex dedicated exception generation in case a general ProtocolError has been raised.
+ """
+ # handle known exceptions based on known error codes.
+ # if error code is not known, raise ProtocolError
+ exc = exception_handler.gen_exception(err)
+ exc.__cause__ = None # remove "During handling of the above exception, another exception occurred:" in Python3.3+
+ raise exc
+
+
+class CTRexResult(object):
+ """
+ A class containing all results received from TRex.
+
+ On top of containing the results, this class offers easier data access and extended results processing options
+ """
+ def __init__(self, max_history_size, filtered_latency_amount = 0.001):
+ """
+ Instantiate a TRex result object
+
+ :parameters:
+ max_history_size : int
+ A number to set the maximum history size of a single TRex run. Each sampling adds a new item to history.
+ filtered_latency_amount : float
+ Ignore the highest latency for this fraction of packets (by default, take into account only the lowest 99.9% of measurements).
+
+ """
+ self._history = deque(maxlen = max_history_size)
+ self.clear_results()
+ self.latency_checked = True
+ self.filtered_latency_amount = filtered_latency_amount
+
+ def __repr__(self):
+ return ("Is valid history? {arg}\n".format( arg = self.is_valid_hist() ) +
+ "Done warmup? {arg}\n".format( arg = self.is_done_warmup() ) +
+ "Expected tx rate: {arg}\n".format( arg = self.get_expected_tx_rate() ) +
+ "Current tx rate: {arg}\n".format( arg = self.get_current_tx_rate() ) +
+ "Maximum latency: {arg}\n".format( arg = self.get_max_latency() ) +
+ "Average latency: {arg}\n".format( arg = self.get_avg_latency() ) +
+ "Average window latency: {arg}\n".format( arg = self.get_avg_window_latency() ) +
+ "Total drops: {arg}\n".format( arg = self.get_total_drops() ) +
+ "Drop rate: {arg}\n".format( arg = self.get_drop_rate() ) +
+ "History size so far: {arg}\n".format( arg = len(self._history) ) )
+
+ def get_expected_tx_rate (self):
+ """
+ Fetches the expected TX rate in various units representation
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the expected TX rate, where the key is the measurement units, and the value is the measurement value.
+
+ """
+ return self._expected_tx_rate
+
+ def get_current_tx_rate (self):
+ """
+ Fetches the current TX rate in various units representation
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the current TX rate, where the key is the measurement units, and the value is the measurement value.
+
+ """
+ return self._current_tx_rate
+
+ def get_max_latency (self):
+ """
+ Fetches the maximum latency measured on each of the interfaces
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the maximum latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value.
+
+ """
+ return self._max_latency
+
+ def get_avg_latency (self):
+ """
+ Fetches the average latency measured on each of the interfaces from the start of TRex run
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the average latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value.
+
+ The `all` key represents the average of all interfaces' average
+
+ """
+ return self._avg_latency
+
+ def get_avg_window_latency (self):
+ """
+ Fetches the average latency measured on each of the interfaces, computed over all the samples currently stored in the window.
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the average latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value.
+
+ The `all` key represents the average of all interfaces' average
+
+ """
+ return self._avg_window_latency
+
+ def get_total_drops (self):
+ """
+ Fetches the total number of drops identified from the moment TRex run began.
+
+ :parameters:
+ None
+
+ :return:
+ total drops count (as int)
+
+ """
+ return self._total_drops
+
+ def get_drop_rate (self):
+ """
+ Fetches the most recent drop rate in pkts/sec units.
+
+ :parameters:
+ None
+
+ :return:
+ current drop rate (as float)
+
+ """
+ return self._drop_rate
+
+ def is_valid_hist (self):
+ """
+ Checks if the result object contains valid data.
+
+ :parameters:
+ None
+
+ :return:
+ + **True** if history is valid.
+ + **False** otherwise.
+
+ """
+ return self.valid
+
+ def set_valid_hist (self, valid_stat = True):
+ """
+ Sets the result object's validity status.
+
+ :parameters:
+ valid_stat : bool
+ defines the validity status
+
+ default value : **True**
+
+ :return:
+ None
+
+ """
+ self.valid = valid_stat
+
+ def is_done_warmup (self):
+ """
+ Checks if TRex latest results TX-rate indicates that TRex has reached its expected TX-rate.
+
+ :parameters:
+ None
+
+ :return:
+ + **True** if expected TX-rate has been reached.
+ + **False** otherwise.
+
+ """
+ return self._done_warmup
+
+ def get_last_value (self, tree_path_to_key, regex = None):
+ """
+ A dynamic getter from the latest sampled data item stored in the result object.
+
+ :parameters:
+ tree_path_to_key : str
+ defines a path to desired data.
+
+ .. tip:: | Use '.' to enter one level deeper in dictionary hierarchy.
+ | Use '[i]' to access the i'th indexed object of an array.
+
+ regex : regex
+ apply a regex to filter results out from a multiple results set.
+
+ Filter applies only on keys of dictionary type.
+
+ default value : **None**
+
+ :return:
+ + a list of values relevant to the specified path
+ + None if no results were fetched or the history isn't valid.
+
+ """
+ if not self.is_valid_hist():
+ return None
+ else:
+ return CTRexResult.__get_value_by_path(self._history[-1], tree_path_to_key, regex)
+
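+ # Example (sketch, assuming a dump shaped like the TRex global stats):
+ # with history ending in {'trex-global': {'data': {'m_tx_bps': 1e9}}},
+ # get_last_value('trex-global.data.m_tx_bps') returns 1e9, and
+ # get_last_value('trex-global.data', regex = 'm_tx_\w+') returns the
+ # matching {'m_tx_bps': 1e9} sub-dict.
+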
+ def get_value_list (self, tree_path_to_key, regex = None, filter_none = True):
+ """
+ A dynamic getter from all sampled data items stored in the result object.
+
+ :parameters:
+ tree_path_to_key : str
+ defines a path to desired data.
+
+ .. tip:: | Use '.' to enter one level deeper in dictionary hierarchy.
+ | Use '[i]' to access the i'th indexed object of an array.
+
+ regex : regex
+ apply a regex to filter results out from a multiple results set.
+
+ Filter applies only on keys of dictionary type.
+
+ default value : **None**
+
+ filter_none : bool
+ specify if None results should be filtered out or not.
+
+ default value : **True**
+
+ :return:
+ + a list of values relevant to the specified path. Each item on the list refers to a single server sample.
+ + None if no results were fetched or the history isn't valid.
+ """
+
+ if not self.is_valid_hist():
+ return None
+ else:
+ raw_list = list( map(lambda x: CTRexResult.__get_value_by_path(x, tree_path_to_key, regex), self._history) )
+ if filter_none:
+ return list (filter(lambda x: x!=None, raw_list) )
+ else:
+ return raw_list
+
+ def get_latest_dump(self):
+ """
+ A getter to the latest sampled data item stored in the result object.
+
+ :parameters:
+ None
+
+ :return:
+ + a dictionary of the latest data item
+ + an empty dictionary if history is empty.
+
+ """
+ if len(self._history):
+ return self._history[-1]
+ return {}
+
+ def get_ports_count(self):
+ """
+ Returns number of ports based on TRex result
+
+ :return:
+ + number of ports in TRex result
+ + -1 if history is empty.
+ """
+
+ if not len(self._history):
+ return -1
+ return len(self.get_last_value('trex-global.data', 'opackets-\d+'))
+
+
+ def update_result_data (self, latest_dump):
+ """
+ Integrates a `latest_dump` dictionary into the CTRexResult object.
+
+ :parameters:
+ latest_dump : dict
+ a dictionary with the items desired to be integrated into the object history and stats
+
+ :return:
+ None
+
+ """
+ # add latest dump to history
+ if latest_dump != {}:
+ self._history.append(latest_dump)
+ if not self.valid:
+ self.valid = True
+
+ # parse important fields and calculate averages and others
+ if self._expected_tx_rate is None:
+ # get the expected data only once since it doesn't change
+ self._expected_tx_rate = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data", "m_tx_expected_\w+")
+
+ self._current_tx_rate = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data", "m_tx_(?!expected_)\w+")
+ if not self._done_warmup and self._expected_tx_rate is not None:
+ # check for up to 2% change between expected and actual
+ if (self._current_tx_rate['m_tx_bps'] > 0.98 * self._expected_tx_rate['m_tx_expected_bps']):
+ self._done_warmup = True
+ latest_dump['warmup_barrier'] = True
+
+ # handle latency data
+ if self.latency_checked:
+ # fix the server-side 'latecny' typo by aliasing the misspelled keys in place
+ if 'trex-latecny-v2' in latest_dump and 'trex-latency-v2' not in latest_dump:
+ latest_dump['trex-latency-v2'] = latest_dump['trex-latecny-v2']
+ if 'trex-latecny' in latest_dump and 'trex-latency' not in latest_dump:
+ latest_dump['trex-latency'] = latest_dump['trex-latecny']
+
+ latency_per_port = self.get_last_value("trex-latency-v2.data", "port-")
+ self._max_latency = self.__get_filtered_max_latency(latency_per_port, self.filtered_latency_amount)
+ avg_latency = self.get_last_value("trex-latency.data", "avg-")
+ self._avg_latency = CTRexResult.__avg_all_and_rename_keys(avg_latency)
+ avg_win_latency_list = self.get_value_list("trex-latency.data", "avg-")
+ self._avg_window_latency = CTRexResult.__calc_latency_win_stats(avg_win_latency_list)
+
+ tx_pkts = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data.m_total_tx_pkts")
+ rx_pkts = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data.m_total_rx_pkts")
+ if tx_pkts is not None and rx_pkts is not None:
+ self._total_drops = tx_pkts - rx_pkts
+ self._drop_rate = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data.m_rx_drop_bps")
+
+ def clear_results (self):
+ """
+ Clears all results and sets the history's validity to `False`
+
+ :parameters:
+ None
+
+ :return:
+ None
+
+ """
+ self.valid = False
+ self._done_warmup = False
+ self._expected_tx_rate = None
+ self._current_tx_rate = None
+ self._max_latency = None
+ self._avg_latency = None
+ self._avg_window_latency = None
+ self._total_drops = None
+ self._drop_rate = None
+ self._history.clear()
+
+ @staticmethod
+ def __get_value_by_path (dct, tree_path, regex = None):
+ try:
+ for i, p in re.findall(r'(\d+)|([\w|-]+)', tree_path):
+ dct = dct[p or int(i)]
+ if regex is not None and isinstance(dct, dict):
+ res = {}
+ for key,val in dct.items():
+ match = re.match(regex, key)
+ if match:
+ res[key]=val
+ return res
+ else:
+ return dct
+ except (KeyError, TypeError):
+ return None
+
+ @staticmethod
+ def __calc_latency_win_stats (latency_win_list):
+ res = {'all' : None }
+ port_dict = {'all' : []}
+ list( map(lambda x: CTRexResult.__update_port_dict(x, port_dict), latency_win_list) )
+
+ # finally, calculate averages for each list
+ res['all'] = float("%.3f" % (sum(port_dict['all'])/float(len(port_dict['all']))) )
+ port_dict.pop('all')
+ for port, avg_list in port_dict.items():
+ res[port] = float("%.3f" % (sum(avg_list)/float(len(avg_list))) )
+
+ return res
+
+ @staticmethod
+ def __update_port_dict (src_avg_dict, dest_port_dict):
+ all_list = src_avg_dict.values()
+ dest_port_dict['all'].extend(all_list)
+ for key, val in src_avg_dict.items():
+ reg_res = re.match("avg-(\d+)", key)
+ if reg_res:
+ tmp_key = "port"+reg_res.group(1)
+ if tmp_key in dest_port_dict:
+ dest_port_dict[tmp_key].append(val)
+ else:
+ dest_port_dict[tmp_key] = [val]
+
+ @staticmethod
+ def __avg_all_and_rename_keys (src_dict):
+ res = {}
+ all_list = src_dict.values()
+ res['all'] = float("%.3f" % (sum(all_list)/float(len(all_list))) )
+ for key, val in src_dict.items():
+ reg_res = re.match("avg-(\d+)", key)
+ if reg_res:
+ tmp_key = "port"+reg_res.group(1)
+ res[tmp_key] = val # don't touch original fields values
+ return res
+
+ @staticmethod
+ def __get_filtered_max_latency (src_dict, filtered_latency_amount = 0.001):
+ result = {}
+ if src_dict:
+ for port, data in src_dict.items():
+ if not port.startswith('port-'):
+ continue
+ max_port = 'max-%s' % port[5:]
+ res = data['hist']
+ if not len(res['histogram']):
+ result[max_port] = 0
+ continue
+ result[max_port] = 5 # if the sum below never reaches the filtered amount, fall back to this value
+ sum_high = 0.0
+ # walk the histogram from the highest bucket down, accumulating packets
+ # until the filtered fraction is reached
+ for elem in reversed(res['histogram']):
+ sum_high += elem['val']
+ if sum_high >= filtered_latency_amount * res['cnt']:
+ # approximate the middle of the histogram bucket (e.g. key 200 -> 250)
+ result[max_port] = elem['key'] + int('5' + repr(elem['key'])[2:])
+ break
+ return result
+
+
+ # history iterator after warmup period
+ def _get_steady_state_history_iterator(self):
+ if not self.is_done_warmup():
+ raise Exception('Warm-up period not finished')
+ for index, res in enumerate(self._history):
+ if 'warmup_barrier' in res:
+ for steady_state_index in range(index, max(index, len(self._history) - 1)):
+ yield self._history[steady_state_index]
+ return
+ for index in range(len(self._history) - 1):
+ yield self._history[index]
+
+
+ def get_avg_steady_state_value(self, tree_path_to_key):
+ '''
+ Gets average value after warmup period.
+ For example: <result object>.get_avg_steady_state_value('trex-global.data.m_tx_bps')
+ Usually more accurate than latest history value.
+
+ :parameters:
+ tree_path_to_key : str
+ defines a path to desired data.
+
+ :return:
+ average value at steady state
+
+ :raises:
+ Exception in case steady state period was not reached or tree_path_to_key was not found in result.
+ '''
+ values_arr = [self.__get_value_by_path(res, tree_path_to_key) for res in self._get_steady_state_history_iterator()]
+ values_arr = list(filter(lambda x: x is not None, values_arr))
+ if not values_arr:
+ raise Exception('All the keys are None, probably wrong tree_path_to_key: %s' % tree_path_to_key)
+ return sum(values_arr) / float(len(values_arr))
+
+
+if __name__ == "__main__":
+ c = CTRexClient('127.0.0.1')
+ print('restarting daemon')
+ c.restart_trex_daemon()
+ print('kill any running')
+ c.kill_all_trexes()
+ print('start')
+ c.start_stateless()
+ print('sleep')
+ time.sleep(5)
+ print('done')
+
diff --git a/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_daemon_server.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_daemon_server.py
new file mode 100755
index 00000000..9784d42a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_daemon_server.py
@@ -0,0 +1,79 @@
+#!/usr/bin/python
+
+import outer_packages
+import daemon
+from trex_server import do_main_program, trex_parser
+import CCustomLogger
+
+import logging
+import time
+import sys
+import os, errno
+import grp
+import signal
+from daemon import runner
+from extended_daemon_runner import ExtendedDaemonRunner
+import lockfile
+import errno
+
+class TRexServerApp(object):
+ def __init__(self):
+ TRexServerApp.create_working_dirs()
+ self.stdin_path = '/dev/null'
+ self.stdout_path = '/dev/tty' # All standard prints will come up from this source.
+ self.stderr_path = "/var/log/trex/trex_daemon_server.log" # All log messages will come up from this source
+ self.pidfile_path = '/var/run/trex/trex_daemon_server.pid'
+ self.pidfile_timeout = 5 # timeout in seconds
+
+ def run(self):
+ do_main_program()
+
+
+ @staticmethod
+ def create_working_dirs():
+ if not os.path.exists('/var/log/trex'):
+ os.mkdir('/var/log/trex')
+ if not os.path.exists('/var/run/trex'):
+ os.mkdir('/var/run/trex')
+
+
+
+def main ():
+
+ trex_app = TRexServerApp()
+
+ # setup the logger
+ default_log_path = '/var/log/trex/trex_daemon_server.log'
+
+ try:
+ CCustomLogger.setup_daemon_logger('TRexServer', default_log_path)
+ logger = logging.getLogger('TRexServer')
+ logger.setLevel(logging.INFO)
+ formatter = logging.Formatter("%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s")
+ handler = logging.FileHandler("/var/log/trex/trex_daemon_server.log")
+ logger.addHandler(handler)
+ except EnvironmentError as e:
+ if e.errno == errno.EACCES: # catching permission denied error
+ print("Launching user must have sudo privileges in order to run TRex daemon.\nTerminating daemon process.")
+ exit(-1)
+ else:
+ raise # any other environment error is unexpected; don't swallow it silently
+
+ daemon_runner = ExtendedDaemonRunner(trex_app, trex_parser)
+
+ #This ensures that the logger file handle does not get closed during daemonization
+ daemon_runner.daemon_context.files_preserve=[handler.stream]
+
+ try:
+ if not set(['start', 'stop']).isdisjoint(set(sys.argv)):
+ print "Logs are saved at: {log_path}".format( log_path = default_log_path )
+ daemon_runner.do_action()
+
+ except lockfile.LockTimeout as inst:
+ logger.error(inst)
+ print(inst)
+ print("""
+ Please try again once the timeout has been reached.
+ If this error continues, consider killing the process manually and restarting the daemon.""")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_exceptions.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_exceptions.py
new file mode 100755
index 00000000..89134e7f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_exceptions.py
@@ -0,0 +1,141 @@
+#!/router/bin/python
+
+#from rpc_exceptions import RPCExceptionHandler, WrappedRPCError
+
+from jsonrpclib import Fault, ProtocolError, AppError
+
+class RPCError(Exception):
+ """
+ This is the general RPC error exception class from which :exc:`trex_exceptions.TRexException` inherits.
+
+ Every exception in this class carries an error format that follows the JSON-RPC convention: code, message and data.
+
+ """
+ def __init__(self, code, message, remote_data = None):
+ self.code = code
+ self.msg = message or self._default_message
+ self.data = remote_data
+ self.args = (code, self.msg, remote_data)
+
+ def __str__(self):
+ return self.__repr__()
+
+ def __repr__(self):
+ if self.args[2] is not None:
+ return u"[errcode:%s] %s. Extended data: %s" % self.args
+ else:
+ return u"[errcode:%s] %s" % self.args[:2]
+
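+# Example (sketch): RPCError(-11, 'bad input', {'param': 'd'}) prints as
+# "[errcode:-11] bad input. Extended data: {'param': 'd'}".
+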
+class TRexException(RPCError):
+ """
+ This is the most general TRex exception.
+
+ All exceptions inheriting from this class have an error code and a default message which describes the most common use case of the error.
+
+ This exception isn't raised directly; it is used only when an error unrelated to ProtocolError occurs and can't be resolved to any of the derived exceptions.
+
+ """
+ code = -10
+ _default_message = 'TRex encountered an unexpected error. please contact TRex dev team.'
+ # api_name = 'TRex'
+
+class TRexError(TRexException):
+ """
+ Indicates a general TRex run error.
+
+ Raised when a TRex run fails due to wrong input parameters, or due to reachability issues.
+ """
+ code = -11
+ _default_message = 'TRex run failed due to wrong input parameters, or due to reachability issues.'
+
+class TRexWarning(TRexException):
+ """ Indicates a warning from TRex server. When this exception raises it normally used to indicate required data isn't ready yet """
+ code = -12
+ _default_message = 'TRex is starting (data is not available yet).'
+
+class TRexRequestDenied(TRexException):
+ """ Indicates the desired reques was denied by the server """
+ code = -33
+ _default_message = 'TRex desired request denied because the requested resource is already taken. Try again once TRex is back in IDLE state.'
+
+class TRexInUseError(TRexException):
+ """
+ Indicates that TRex is currently in use
+
+ """
+ code = -13
+ _default_message = 'TRex is already being used by another user or process. Try again once TRex is back in IDLE state.'
+
+class TRexRunFailedError(TRexException):
+ """ Indicates that TRex has failed due to some reason. This Exception is used when TRex process itself terminates due to unknown reason """
+ code = -14
+ _default_message = ''
+
+class TRexIncompleteRunError(TRexException):
+ """
+ Indicates that TRex has failed for some reason.
+ This exception is used when the TRex process itself terminated with an error fault, or was terminated by an external intervention in the OS.
+
+ """
+ code = -15
+ _default_message = 'TRex run was terminated unexpectedly by outer process or by the hosting OS'
+
+EXCEPTIONS = [TRexException, TRexError, TRexWarning, TRexInUseError, TRexRequestDenied, TRexRunFailedError, TRexIncompleteRunError]
+
+class CExceptionHandler(object):
+ """
+ CExceptionHandler is responsible for generating TRex API related exceptions in client side.
+ """
+ def __init__(self, exceptions):
+ """
+ Instantiate a CExceptionHandler object
+
+ :parameters:
+
+ exceptions : list
+ a list of all TRex acceptable exception objects.
+
+ default list:
+ - :exc:`trex_exceptions.TRexException`
+ - :exc:`trex_exceptions.TRexError`
+ - :exc:`trex_exceptions.TRexWarning`
+ - :exc:`trex_exceptions.TRexInUseError`
+ - :exc:`trex_exceptions.TRexRequestDenied`
+ - :exc:`trex_exceptions.TRexRunFailedError`
+ - :exc:`trex_exceptions.TRexIncompleteRunError`
+
+ """
+ if isinstance(exceptions, type):
+ exceptions = [ exceptions, ]
+ self.exceptions = exceptions
+ self.exceptions_dict = dict((e.code, e) for e in self.exceptions)
+
+ def gen_exception (self, err):
+ """
+ Generates an exception based on a general ProtocolError exception object `err`.
+
+ :parameters:
+
+ err : exception
+ a ProtocolError exception raised by :class:`trex_client.CTRexClient` class
+
+ :return:
+ A TRex exception from the exception list defined in class creation.
+
+        If no such exception is found, a general TRexException is returned
+
+ """
+ code, message, data = err
+ try:
+ exp = self.exceptions_dict[code]
+ return exp(exp.code, message, data)
+ except KeyError:
+            # fall back to TRexException when an unknown application error is raised
+ return TRexException(err)
+
+
+exception_handler = CExceptionHandler( EXCEPTIONS )
+
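+# Usage sketch (illustrative, not part of the original commit): resolving a
+# server error tuple of (code, message, data) into a typed exception.
+#
+#   err = (-12, 'TRex is starting', None)      # hypothetical ProtocolError args
+#   exc = exception_handler.gen_exception(err)
+#   isinstance(exc, TRexWarning)               # True - code -12 maps to it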
diff --git a/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_status.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_status.py
new file mode 100644
index 00000000..8f2859d1
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_status.py
@@ -0,0 +1,8 @@
+#!/router/bin/python
+
+# define the states a TRex instance can be in during its lifetime
+# TRexStatus = Enum('TRexStatus', 'Idle Starting Running')
+
+IDLE = 1
+STARTING = 2
+RUNNING = 3
diff --git a/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_status_e.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_status_e.py
new file mode 100755
index 00000000..79a25acc
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_status_e.py
@@ -0,0 +1,11 @@
+#!/router/bin/python
+
+try:
+ from . import outer_packages
+except ImportError:
+ import outer_packages
+from enum import Enum
+
+
+# define the states a TRex instance can be in during its lifetime
+TRexStatus = Enum('TRexStatus', 'Idle Starting Running')
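+# Example (illustrative): TRexStatus.Idle.value == 1 and
+# TRexStatus.Running.value == 3, matching the integer constants in trex_status.py.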
diff --git a/scripts/automation/trex_control_plane/stl/console/__init__.py b/scripts/automation/trex_control_plane/stl/console/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/console/__init__.py
diff --git a/scripts/automation/trex_control_plane/stl/console/stl_path.py b/scripts/automation/trex_control_plane/stl/console/stl_path.py
new file mode 100644
index 00000000..f15c666e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/console/stl_path.py
@@ -0,0 +1,7 @@
+import sys, os
+
+# FIXME: point this at the right path for trex_stl_lib
+sys.path.insert(0, "../")
+
+STL_PROFILES_PATH = os.path.join(os.pardir, 'profiles')
+
diff --git a/scripts/automation/trex_control_plane/stl/console/trex_console.py b/scripts/automation/trex_control_plane/stl/console/trex_console.py
new file mode 100755
index 00000000..b23b5f1f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/console/trex_console.py
@@ -0,0 +1,889 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Dan Klein, Itay Marom
+Cisco Systems, Inc.
+
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+from __future__ import print_function
+
+import subprocess
+import cmd
+import json
+import ast
+import argparse
+import random
+import readline
+import string
+import os
+import sys
+import tty, termios
+
+try:
+ import stl_path
+except ImportError:
+ from . import stl_path
+from trex_stl_lib.api import *
+
+from trex_stl_lib.utils.text_opts import *
+from trex_stl_lib.utils.common import user_input, get_current_user
+from trex_stl_lib.utils import parsing_opts
+
+try:
+ import trex_tui
+except ImportError:
+ from . import trex_tui
+
+from functools import wraps
+
+__version__ = "2.0"
+
+# console custom logger
+class ConsoleLogger(LoggerApi):
+ def __init__ (self):
+ self.prompt_redraw = None
+
+ def write (self, msg, newline = True):
+ if newline:
+ print(msg)
+ else:
+ print(msg, end=' ')
+
+ def flush (self):
+ sys.stdout.flush()
+
+ # override this for the prompt fix
+ def async_log (self, msg, level = LoggerApi.VERBOSE_REGULAR, newline = True):
+ self.log(msg, level, newline)
+ if ( (self.level >= LoggerApi.VERBOSE_REGULAR) and self.prompt_redraw ):
+ self.prompt_redraw()
+ self.flush()
+
+
+def set_window_always_on_top (title):
+    # we need the GDK module; if not available - ignore this command
+ try:
+ if sys.version_info < (3,0):
+ from gtk import gdk
+ else:
+ #from gi.repository import Gdk as gdk
+ return
+
+ except ImportError:
+ return
+
+ # search the window and set it as above
+ root = gdk.get_default_root_window()
+
+ for id in root.property_get('_NET_CLIENT_LIST')[2]:
+ w = gdk.window_foreign_new(id)
+ if w:
+ name = w.property_get('WM_NAME')[2]
+ if name == title:
+ w.set_keep_above(True)
+ gdk.window_process_all_updates()
+ break
+
+
+class TRexGeneralCmd(cmd.Cmd):
+ def __init__(self):
+ cmd.Cmd.__init__(self)
+ # configure history behaviour
+ self._history_file_dir = "/tmp/trex/console/"
+ self._history_file = self.get_history_file_full_path()
+ readline.set_history_length(100)
+ # load history, if any
+ self.load_console_history()
+
+
+ def get_console_identifier(self):
+ return self.__class__.__name__
+
+ def get_history_file_full_path(self):
+ return "{dir}{filename}.hist".format(dir=self._history_file_dir,
+ filename=self.get_console_identifier())
+
+ def load_console_history(self):
+ if os.path.exists(self._history_file):
+ readline.read_history_file(self._history_file)
+ return
+
+ def save_console_history(self):
+ if not os.path.exists(self._history_file_dir):
+ # make the directory available for every user
+ try:
+ original_umask = os.umask(0)
+ os.makedirs(self._history_file_dir, mode = 0o777)
+ finally:
+ os.umask(original_umask)
+
+
+ # os.mknod(self._history_file)
+ readline.write_history_file(self._history_file)
+ return
+
+ def print_history (self):
+
+ length = readline.get_current_history_length()
+
+ for i in range(1, length + 1):
+ cmd = readline.get_history_item(i)
+ print("{:<5} {:}".format(i, cmd))
+
+ def get_history_item (self, index):
+ length = readline.get_current_history_length()
+ if index > length:
+ print(format_text("please select an index between {0} and {1}".format(0, length)))
+ return None
+
+ return readline.get_history_item(index)
+
+
+ def emptyline(self):
+ """Called when an empty line is entered in response to the prompt.
+
+        This override ensures that when an empty line is entered, **nothing happens**.
+ """
+ return
+
+ def completenames(self, text, *ignored):
+ """
+        This override appends a space to each completed command name.
+ """
+ dotext = 'do_'+text
+ return [a[3:]+' ' for a in self.get_names() if a.startswith(dotext)]
+
+
+#
+# main console object
+class TRexConsole(TRexGeneralCmd):
+ """Trex Console"""
+
+ def __init__(self, stateless_client, verbose = False):
+
+ self.stateless_client = stateless_client
+
+ TRexGeneralCmd.__init__(self)
+
+ self.tui = trex_tui.TrexTUI(stateless_client)
+ self.terminal = None
+
+ self.verbose = verbose
+
+ self.intro = "\n-=TRex Console v{ver}=-\n".format(ver=__version__)
+ self.intro += "\nType 'help' or '?' for supported actions\n"
+
+ self.postcmd(False, "")
+
+
+ ################### internal section ########################
+
+ def prompt_redraw (self):
+ self.postcmd(False, "")
+ sys.stdout.write("\n" + self.prompt + readline.get_line_buffer())
+ sys.stdout.flush()
+
+
+ def verify_connected(f):
+ @wraps(f)
+ def wrap(*args):
+ inst = args[0]
+ func_name = f.__name__
+ if func_name.startswith("do_"):
+ func_name = func_name[3:]
+
+ if not inst.stateless_client.is_connected():
+ print(format_text("\n'{0}' cannot be executed on offline mode\n".format(func_name), 'bold'))
+ return
+
+ ret = f(*args)
+ return ret
+
+ return wrap
+
+
+ def get_console_identifier(self):
+ return "{context}_{server}".format(context=get_current_user(),
+ server=self.stateless_client.get_connection_info()['server'])
+
+ def register_main_console_methods(self):
+ main_names = set(self.trex_console.get_names()).difference(set(dir(self.__class__)))
+ for name in main_names:
+ for prefix in 'do_', 'help_', 'complete_':
+ if name.startswith(prefix):
+ self.__dict__[name] = getattr(self.trex_console, name)
+
+ def precmd(self, line):
+ # before doing anything, save history snapshot of the console
+ # this is done before executing the command in case of ungraceful application exit
+ self.save_console_history()
+
+ lines = line.split(';')
+ try:
+ for line in lines:
+ stop = self.onecmd(line)
+ stop = self.postcmd(stop, line)
+ if stop:
+ return "quit"
+
+ return ""
+ except STLError as e:
+ print(e)
+ return ''
+
+
+ def postcmd(self, stop, line):
+ self.prompt = self.stateless_client.generate_prompt(prefix = 'trex')
+ return stop
+
+
+ def default(self, line):
+ print("'{0}' is an unrecognized command. type 'help' or '?' for a list\n".format(line))
+
+ @staticmethod
+ def tree_autocomplete(text):
+ dir = os.path.dirname(text)
+ if dir:
+ path = dir
+ else:
+ path = "."
+
+
+ start_string = os.path.basename(text)
+
+ targets = []
+
+ for x in os.listdir(path):
+ if x.startswith(start_string):
+ y = os.path.join(path, x)
+ if os.path.isfile(y):
+ targets.append(x + ' ')
+ elif os.path.isdir(y):
+ targets.append(x + '/')
+
+ return targets
+
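+    # Example (illustrative): with a file 'udp_1pkt.py' and a directory
+    # 'profiles/' in the current dir, tree_autocomplete('ud') -> ['udp_1pkt.py ']
+    # and tree_autocomplete('pro') -> ['profiles/']; the trailing space ends the
+    # completion while the slash lets it continue into the directory.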
+
+ ####################### shell commands #######################
+ @verify_connected
+ def do_ping (self, line):
+ '''Ping the server\n'''
+ self.stateless_client.ping_line(line)
+
+
+ @verify_connected
+ def do_shutdown (self, line):
+ '''Sends the server a shutdown request\n'''
+ self.stateless_client.shutdown_line(line)
+
+ # set verbose on / off
+ def do_verbose(self, line):
+ '''Shows or set verbose mode\n'''
+ if line == "":
+ print("\nverbose is " + ("on\n" if self.verbose else "off\n"))
+
+ elif line == "on":
+ self.verbose = True
+ self.stateless_client.set_verbose("high")
+ print(format_text("\nverbose set to on\n", 'green', 'bold'))
+
+ elif line == "off":
+ self.verbose = False
+ self.stateless_client.set_verbose("normal")
+ print(format_text("\nverbose set to off\n", 'green', 'bold'))
+
+ else:
+ print(format_text("\nplease specify 'on' or 'off'\n", 'bold'))
+
+ # show history
+ def help_history (self):
+ self.do_history("-h")
+
+ def do_shell (self, line):
+ self.do_history(line)
+
+ def do_push (self, line):
+ '''Push a local PCAP file\n'''
+ self.stateless_client.push_line(line)
+
+ def help_push (self):
+ self.do_push("-h")
+
+ def do_portattr (self, line):
+ '''Change/show port(s) attributes\n'''
+ self.stateless_client.set_port_attr_line(line)
+
+ def help_portattr (self):
+ self.do_portattr("-h")
+
+ @verify_connected
+ def do_map (self, line):
+ '''Maps ports topology\n'''
+ ports = self.stateless_client.get_acquired_ports()
+ if not ports:
+ print("No ports acquired\n")
+ return
+
+ with self.stateless_client.logger.supress():
+ table = stl_map_ports(self.stateless_client, ports = ports)
+
+
+ print(format_text('\nAcquired ports topology:\n', 'bold', 'underline'))
+
+ # bi-dir ports
+ print(format_text('Bi-directional ports:\n','underline'))
+ for port_a, port_b in table['bi']:
+ print("port {0} <--> port {1}".format(port_a, port_b))
+
+ print("")
+
+ # unknown ports
+ print(format_text('Mapping unknown:\n','underline'))
+ for port in table['unknown']:
+ print("port {0}".format(port))
+ print("")
+
+
+
+
+ def do_history (self, line):
+ '''Manage the command history\n'''
+
+ item = parsing_opts.ArgumentPack(['item'],
+ {"nargs": '?',
+ 'metavar': 'item',
+ 'type': parsing_opts.check_negative,
+                                          'help': "a history item index",
+ 'default': 0})
+
+ parser = parsing_opts.gen_parser(self.stateless_client,
+ "history",
+ self.do_history.__doc__,
+ item)
+
+ opts = parser.parse_args(line.split())
+ if opts is None:
+ return
+
+ if opts.item == 0:
+ self.print_history()
+ else:
+ cmd = self.get_history_item(opts.item)
+            if cmd is None:
+ return
+
+ print("Executing '{0}'".format(cmd))
+
+ return self.onecmd(cmd)
+
+
+
+ ############### connect
+ def do_connect (self, line):
+ '''Connects to the server and acquire ports\n'''
+
+ self.stateless_client.connect_line(line)
+
+ def help_connect (self):
+ self.do_connect("-h")
+
+ def do_disconnect (self, line):
+ '''Disconnect from the server\n'''
+
+ self.stateless_client.disconnect_line(line)
+
+
+ @verify_connected
+ def do_acquire (self, line):
+ '''Acquire ports\n'''
+
+ self.stateless_client.acquire_line(line)
+
+
+ @verify_connected
+ def do_release (self, line):
+ '''Release ports\n'''
+ self.stateless_client.release_line(line)
+
+ def do_reacquire (self, line):
+ '''reacquire all the ports under your logged user name'''
+ self.stateless_client.reacquire_line(line)
+
+ def help_acquire (self):
+ self.do_acquire("-h")
+
+ def help_release (self):
+ self.do_release("-h")
+
+ def help_reacquire (self):
+ self.do_reacquire("-h")
+
+ ############### start
+
+ def complete_start(self, text, line, begidx, endidx):
+ s = line.split()
+ l = len(s)
+
+ file_flags = parsing_opts.get_flags(parsing_opts.FILE_PATH)
+
+ if (l > 1) and (s[l - 1] in file_flags):
+ return TRexConsole.tree_autocomplete("")
+
+ if (l > 2) and (s[l - 2] in file_flags):
+ return TRexConsole.tree_autocomplete(s[l - 1])
+
+ complete_push = complete_start
+
+ @verify_connected
+ def do_start(self, line):
+ '''Start selected traffic in specified port(s) on TRex\n'''
+
+ self.stateless_client.start_line(line)
+
+
+
+ def help_start(self):
+ self.do_start("-h")
+
+ ############# stop
+ @verify_connected
+ def do_stop(self, line):
+ '''stops port(s) transmitting traffic\n'''
+
+ self.stateless_client.stop_line(line)
+
+ def help_stop(self):
+ self.do_stop("-h")
+
+ ############# update
+ @verify_connected
+ def do_update(self, line):
+        '''update speed of port(s) currently transmitting traffic\n'''
+
+ self.stateless_client.update_line(line)
+
+ def help_update (self):
+ self.do_update("-h")
+
+ ############# pause
+ @verify_connected
+ def do_pause(self, line):
+ '''pause port(s) transmitting traffic\n'''
+
+ self.stateless_client.pause_line(line)
+
+ ############# resume
+ @verify_connected
+ def do_resume(self, line):
+ '''resume port(s) transmitting traffic\n'''
+
+ self.stateless_client.resume_line(line)
+
+
+
+ ########## reset
+ @verify_connected
+ def do_reset (self, line):
+ '''force stop all ports\n'''
+ self.stateless_client.reset_line(line)
+
+
+ ######### validate
+ @verify_connected
+ def do_validate (self, line):
+ '''validates port(s) stream configuration\n'''
+
+ self.stateless_client.validate_line(line)
+
+
+ @verify_connected
+ def do_stats(self, line):
+ '''Fetch statistics from TRex server by port\n'''
+ self.stateless_client.show_stats_line(line)
+
+
+ def help_stats(self):
+ self.do_stats("-h")
+
+ @verify_connected
+ def do_streams(self, line):
+        '''Fetch stream information from TRex server by port\n'''
+ self.stateless_client.show_streams_line(line)
+
+
+ def help_streams(self):
+ self.do_streams("-h")
+
+ @verify_connected
+ def do_clear(self, line):
+ '''Clear cached local statistics\n'''
+ self.stateless_client.clear_stats_line(line)
+
+
+ def help_clear(self):
+ self.do_clear("-h")
+
+
+ def help_events (self):
+ self.do_events("-h")
+
+ def do_events (self, line):
+        '''shows events received from server\n'''
+ self.stateless_client.get_events_line(line)
+
+
+ def complete_profile(self, text, line, begidx, endidx):
+ return self.complete_start(text,line, begidx, endidx)
+
+ def do_profile (self, line):
+ '''shows information about a profile'''
+ self.stateless_client.show_profile_line(line)
+
+ # tui
+ @verify_connected
+ def do_tui (self, line):
+ '''Shows a graphical console\n'''
+ parser = parsing_opts.gen_parser(self.stateless_client,
+ "tui",
+ self.do_tui.__doc__,
+ parsing_opts.XTERM,
+ parsing_opts.LOCKED)
+
+ opts = parser.parse_args(line.split())
+
+ if not opts:
+ return opts
+ if opts.xterm:
+ if not os.path.exists('/usr/bin/xterm'):
+ print(format_text("XTERM does not exists on this machine", 'bold'))
+ return
+
+ info = self.stateless_client.get_connection_info()
+
+ exe = './trex-console --top -t -q -s {0} -p {1} --async_port {2}'.format(info['server'], info['sync_port'], info['async_port'])
+ cmd = ['/usr/bin/xterm', '-geometry', '{0}x{1}'.format(self.tui.MIN_COLS, self.tui.MIN_ROWS), '-sl', '0', '-title', 'trex_tui', '-e', exe]
+
+ # detach child
+ self.terminal = subprocess.Popen(cmd, preexec_fn = os.setpgrp)
+
+ return
+
+
+ try:
+ with self.stateless_client.logger.supress():
+ self.tui.show(self.stateless_client, self.save_console_history, locked = opts.locked)
+
+ except self.tui.ScreenSizeException as e:
+ print(format_text(str(e) + "\n", 'bold'))
+
+
+ def help_tui (self):
+ do_tui("-h")
+
+
+ # quit function
+ def do_quit(self, line):
+ '''Exit the client\n'''
+ return True
+
+
+ def do_help (self, line):
+        '''Shows this help screen\n'''
+ if line:
+ try:
+ func = getattr(self, 'help_' + line)
+ except AttributeError:
+ try:
+ doc = getattr(self, 'do_' + line).__doc__
+ if doc:
+ self.stdout.write("%s\n"%str(doc))
+ return
+ except AttributeError:
+ pass
+ self.stdout.write("%s\n"%str(self.nohelp % (line,)))
+ return
+ func()
+ return
+
+ print("\nSupported Console Commands:")
+ print("----------------------------\n")
+
+ cmds = [x[3:] for x in self.get_names() if x.startswith("do_")]
+ hidden = ['EOF', 'q', 'exit', 'h', 'shell']
+ for cmd in cmds:
+ if cmd in hidden:
+ continue
+
+ try:
+ doc = getattr(self, 'do_' + cmd).__doc__
+ if doc:
+ help = str(doc)
+ else:
+ help = "*** Undocumented Function ***\n"
+ except AttributeError:
+ help = "*** Undocumented Function ***\n"
+
+            l = help.splitlines()
+            print("{:<30} {:<30}".format(cmd + " - ", l[0]))
+
+    # a custom cmdloop wrapper
+ def start(self):
+ while True:
+ try:
+ self.cmdloop()
+ break
+ except KeyboardInterrupt as e:
+ if not readline.get_line_buffer():
+ raise KeyboardInterrupt
+ else:
+ print("")
+ self.intro = None
+ continue
+
+ if self.terminal:
+ self.terminal.kill()
+
+ # aliases
+ do_exit = do_EOF = do_q = do_quit
+ do_h = do_history
+
+
+# run a script of commands
+def run_script_file (filename, stateless_client):
+
+    stateless_client.logger.log(format_text("\nRunning script file '{0}'...".format(filename), 'bold'))
+
+ with open(filename) as f:
+ script_lines = f.readlines()
+
+ cmd_table = {}
+
+ # register all the commands
+ cmd_table['start'] = stateless_client.start_line
+ cmd_table['stop'] = stateless_client.stop_line
+ cmd_table['reset'] = stateless_client.reset_line
+
+ for index, line in enumerate(script_lines, start = 1):
+ line = line.strip()
+ if line == "":
+ continue
+ if line.startswith("#"):
+ continue
+
+ sp = line.split(' ', 1)
+ cmd = sp[0]
+ if len(sp) == 2:
+ args = sp[1]
+ else:
+ args = ""
+
+ stateless_client.logger.log(format_text("Executing line {0} : '{1}'\n".format(index, line)))
+
+        if cmd not in cmd_table:
+ print("\n*** Error at line {0} : '{1}'\n".format(index, line))
+ stateless_client.logger.log(format_text("unknown command '{0}'\n".format(cmd), 'bold'))
+ return False
+
+ cmd_table[cmd](args)
+
+ stateless_client.logger.log(format_text("\n[Done]", 'bold'))
+
+ return True
+
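+# Example batch file (illustrative; the flags shown are hypothetical):
+#
+#   # warm up the ports
+#   start -f stl/udp_1pkt.py -m 1mpps
+#   stop -a
+#   reset
+#
+# Blank lines and '#' comments are skipped; every other line must begin with
+# one of the registered commands ('start', 'stop' or 'reset').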
+
+#
+def is_valid_file(filename):
+ if not os.path.isfile(filename):
+ raise argparse.ArgumentTypeError("The file '%s' does not exist" % filename)
+
+ return filename
+
+
+
+def setParserOptions():
+ parser = argparse.ArgumentParser(prog="trex_console.py")
+
+ parser.add_argument("-s", "--server", help = "TRex Server [default is localhost]",
+ default = "localhost",
+ type = str)
+
+ parser.add_argument("-p", "--port", help = "TRex Server Port [default is 4501]\n",
+ default = 4501,
+ type = int)
+
+ parser.add_argument("--async_port", help = "TRex ASync Publisher Port [default is 4500]\n",
+ default = 4500,
+ dest='pub',
+ type = int)
+
+ parser.add_argument("-u", "--user", help = "User Name [default is currently logged in user]\n",
+ default = get_current_user(),
+ type = str)
+
+ parser.add_argument("-v", "--verbose", dest="verbose",
+ action="store_true", help="Switch ON verbose option. Default is: OFF.",
+ default = False)
+
+
+ group = parser.add_mutually_exclusive_group()
+
+ group.add_argument("-a", "--acquire", dest="acquire",
+ nargs = '+',
+ type = int,
+                       help="Acquire ports on connect. Default is all available ports",
+ default = None)
+
+ group.add_argument("-r", "--readonly", dest="readonly",
+ action="store_true",
+ help="Starts console in a read only mode",
+ default = False)
+
+
+ parser.add_argument("-f", "--force", dest="force",
+ action="store_true",
+ help="Force acquire the requested ports",
+ default = False)
+
+ parser.add_argument("--batch", dest="batch",
+ nargs = 1,
+ type = is_valid_file,
+ help = "Run the console in a batch mode with file",
+ default = None)
+
+ parser.add_argument("-t", "--tui", dest="tui",
+ action="store_true", help="Starts with TUI mode",
+ default = False)
+
+ parser.add_argument("-x", "--xtui", dest="xtui",
+ action="store_true", help="Starts with XTERM TUI mode",
+ default = False)
+
+ parser.add_argument("--top", dest="top",
+ action="store_true", help="Set the window as always on top",
+ default = False)
+
+ parser.add_argument("-q", "--quiet", dest="quiet",
+ action="store_true", help="Starts with all outputs suppressed",
+ default = False)
+
+ return parser
+
+# a simple info printed on log on
+def show_intro (logger, c):
+ x = c.get_server_system_info()
+ ver = c.get_server_version().get('version', 'N/A')
+
+ # find out which NICs the server has
+ port_types = {}
+ for port in x['ports']:
+ if 'supp_speeds' in port:
+ speed = max(port['supp_speeds']) // 1000
+ else:
+ speed = port['speed']
+ key = (speed, port.get('description', port['driver']))
+ if key not in port_types:
+ port_types[key] = 0
+ port_types[key] += 1
+
+ port_line = ''
+ for k, v in port_types.items():
+ port_line += "{0} x {1}Gbps @ {2}\t".format(v, k[0], k[1])
+
+ logger.log(format_text("\nServer Info:\n", 'underline'))
+ logger.log("Server version: {:>}".format(format_text(ver, 'bold')))
+ logger.log("Server CPU: {:>}".format(format_text("{:>} x {:>}".format(x.get('dp_core_count'), x.get('core_type')), 'bold')))
+ logger.log("Ports count: {:>}".format(format_text(port_line, 'bold')))
+
+
+def main():
+ parser = setParserOptions()
+ options = parser.parse_args()
+
+ if options.xtui:
+ options.tui = True
+
+ # always on top
+ if options.top:
+ set_window_always_on_top('trex_tui')
+
+
+ # Stateless client connection
+ if options.quiet:
+ verbose_level = LoggerApi.VERBOSE_QUIET
+ elif options.verbose:
+ verbose_level = LoggerApi.VERBOSE_HIGH
+ else:
+ verbose_level = LoggerApi.VERBOSE_REGULAR
+
+ # Stateless client connection
+ logger = ConsoleLogger()
+ stateless_client = STLClient(username = options.user,
+ server = options.server,
+ sync_port = options.port,
+ async_port = options.pub,
+ verbose_level = verbose_level,
+ logger = logger)
+
+ # TUI or no acquire will give us READ ONLY mode
+ try:
+ stateless_client.connect()
+ except STLError as e:
+ logger.log("Log:\n" + format_text(e.brief() + "\n", 'bold'))
+ return
+
+ if not options.tui and not options.readonly:
+ try:
+ # acquire all ports
+ stateless_client.acquire(options.acquire, force = options.force)
+ except STLError as e:
+ logger.log("Log:\n" + format_text(e.brief() + "\n", 'bold'))
+
+ logger.log("\n*** Failed to acquire all required ports ***\n")
+ return
+
+ if options.readonly:
+ logger.log(format_text("\nRead only mode - only few commands will be available", 'bold'))
+
+ show_intro(logger, stateless_client)
+
+
+ # a script mode
+ if options.batch:
+ cont = run_script_file(options.batch[0], stateless_client)
+ if not cont:
+ return
+
+ # console
+ try:
+ console = TRexConsole(stateless_client, options.verbose)
+ logger.prompt_redraw = console.prompt_redraw
+
+ # TUI
+ if options.tui:
+ console.do_tui("-x" if options.xtui else "-l")
+
+ else:
+ console.start()
+
+ except KeyboardInterrupt as e:
+ print("\n\n*** Caught Ctrl + C... Exiting...\n\n")
+
+ finally:
+ with stateless_client.logger.supress():
+ stateless_client.disconnect(stop_traffic = False)
+
+if __name__ == '__main__':
+
+ main()
+
diff --git a/scripts/automation/trex_control_plane/stl/console/trex_root_path.py b/scripts/automation/trex_control_plane/stl/console/trex_root_path.py
new file mode 100755
index 00000000..de4ec03b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/console/trex_root_path.py
@@ -0,0 +1,15 @@
+#!/router/bin/python
+
+import os
+import sys
+
+def add_root_to_path ():
+ """adds trex_control_plane root dir to script path, up to `depth` parent dirs"""
+ root_dirname = 'trex_control_plane'
+ file_path = os.path.dirname(os.path.realpath(__file__))
+
+ components = file_path.split(os.sep)
+ sys.path.append( str.join(os.sep, components[:components.index(root_dirname)+1]) )
+ return
+
+add_root_to_path()
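+# Example (illustrative): for this file under
+# .../scripts/automation/trex_control_plane/stl/console/, the function appends
+# '.../scripts/automation/trex_control_plane' to sys.path, so imports relative
+# to the control plane root resolve regardless of the working directory.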
diff --git a/scripts/automation/trex_control_plane/stl/console/trex_tui.py b/scripts/automation/trex_control_plane/stl/console/trex_tui.py
new file mode 100644
index 00000000..d7db6d30
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/console/trex_tui.py
@@ -0,0 +1,1250 @@
+from __future__ import print_function
+
+import termios
+import sys
+import os
+import time
+import threading
+
+from collections import OrderedDict, deque
+from texttable import ansi_len
+
+
+import datetime
+import readline
+
+
+if sys.version_info > (3,0):
+ from io import StringIO
+else:
+ from cStringIO import StringIO
+
+from trex_stl_lib.utils.text_opts import *
+from trex_stl_lib.utils import text_tables
+from trex_stl_lib import trex_stl_stats
+from trex_stl_lib.utils.filters import ToggleFilter
+
+class TUIQuit(Exception):
+ pass
+
+
+# for STL exceptions
+from trex_stl_lib.api import *
+
+def ascii_split (s):
+ output = []
+
+ lines = s.split('\n')
+ for elem in lines:
+ if ansi_len(elem) > 0:
+ output.append(elem)
+
+ return output
+
+class SimpleBar(object):
+ def __init__ (self, desc, pattern):
+ self.desc = desc
+ self.pattern = pattern
+ self.pattern_len = len(pattern)
+ self.index = 0
+
+ def show (self, buffer):
+ if self.desc:
+ print(format_text("{0} {1}".format(self.desc, self.pattern[self.index]), 'bold'), file = buffer)
+ else:
+ print(format_text("{0}".format(self.pattern[self.index]), 'bold'), file = buffer)
+
+ self.index = (self.index + 1) % self.pattern_len
+
+
+# base type of a panel
+class TrexTUIPanel(object):
+ def __init__ (self, mng, name):
+
+ self.mng = mng
+ self.name = name
+ self.stateless_client = mng.stateless_client
+ self.is_graph = False
+
+ def show (self, buffer):
+ raise NotImplementedError("must implement this")
+
+ def get_key_actions (self):
+ raise NotImplementedError("must implement this")
+
+
+ def get_name (self):
+ return self.name
+
+
+# dashboard panel
+class TrexTUIDashBoard(TrexTUIPanel):
+
+ FILTER_ACQUIRED = 1
+ FILTER_ALL = 2
+
+ def __init__ (self, mng):
+ super(TrexTUIDashBoard, self).__init__(mng, "dashboard")
+
+ self.ports = self.stateless_client.get_all_ports()
+
+ self.key_actions = OrderedDict()
+
+ self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}
+ self.key_actions['p'] = {'action': self.action_pause, 'legend': 'pause', 'show': True, 'color': 'red'}
+ self.key_actions['r'] = {'action': self.action_resume, 'legend': 'resume', 'show': True, 'color': 'blue'}
+
+ self.key_actions['o'] = {'action': self.action_show_owned, 'legend': 'owned ports', 'show': True}
+ self.key_actions['n'] = {'action': self.action_reset_view, 'legend': 'reset view', 'show': True}
+ self.key_actions['a'] = {'action': self.action_show_all, 'legend': 'all ports', 'show': True}
+
+ # register all the ports to the toggle action
+ for port_id in self.ports:
+ self.key_actions[str(port_id)] = {'action': self.action_toggle_port(port_id), 'legend': 'port {0}'.format(port_id), 'show': False}
+
+
+ self.toggle_filter = ToggleFilter(self.ports)
+
+ if self.stateless_client.get_acquired_ports():
+ self.action_show_owned()
+ else:
+ self.action_show_all()
+
+
+ def get_showed_ports (self):
+ return self.toggle_filter.filter_items()
+
+
+ def show (self, buffer):
+ stats = self.stateless_client._get_formatted_stats(self.get_showed_ports())
+ # print stats to screen
+ for stat_type, stat_data in stats.items():
+ text_tables.print_table_with_header(stat_data.text_table, stat_type, buffer = buffer)
+
+
+ def get_key_actions (self):
+ allowed = OrderedDict()
+
+
+ allowed['n'] = self.key_actions['n']
+ allowed['o'] = self.key_actions['o']
+ allowed['a'] = self.key_actions['a']
+ for i in self.ports:
+ allowed[str(i)] = self.key_actions[str(i)]
+
+
+ if self.get_showed_ports():
+ allowed['c'] = self.key_actions['c']
+
+ # if not all ports are acquired - no operations
+ if not (set(self.get_showed_ports()) <= set(self.stateless_client.get_acquired_ports())):
+ return allowed
+
+ # if any/some ports can be resumed
+ if set(self.get_showed_ports()) & set(self.stateless_client.get_paused_ports()):
+ allowed['r'] = self.key_actions['r']
+
+ # if any/some ports are transmitting - support those actions
+ if set(self.get_showed_ports()) & set(self.stateless_client.get_transmitting_ports()):
+ allowed['p'] = self.key_actions['p']
+
+
+ return allowed
+
+
+ ######### actions
+ def action_pause (self):
+ try:
+ rc = self.stateless_client.pause(ports = self.get_showed_ports())
+ except STLError:
+ pass
+
+ return ""
+
+
+
+ def action_resume (self):
+ try:
+ self.stateless_client.resume(ports = self.get_showed_ports())
+ except STLError:
+ pass
+
+ return ""
+
+
+ def action_reset_view (self):
+ self.toggle_filter.reset()
+ return ""
+
+ def action_show_owned (self):
+ self.toggle_filter.reset()
+ self.toggle_filter.toggle_items(*self.stateless_client.get_acquired_ports())
+ return ""
+
+ def action_show_all (self):
+ self.toggle_filter.reset()
+ self.toggle_filter.toggle_items(*self.stateless_client.get_all_ports())
+ return ""
+
+ def action_clear (self):
+ self.stateless_client.clear_stats(self.toggle_filter.filter_items())
+ return "cleared all stats"
+
+
+ def action_toggle_port(self, port_id):
+ def action_toggle_port_x():
+ self.toggle_filter.toggle_item(port_id)
+ return ""
+
+ return action_toggle_port_x
+
+
+
+# streams stats
+class TrexTUIStreamsStats(TrexTUIPanel):
+ def __init__ (self, mng):
+ super(TrexTUIStreamsStats, self).__init__(mng, "sstats")
+
+ self.key_actions = OrderedDict()
+
+ self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}
+
+
+ def show (self, buffer):
+ stats = self.stateless_client._get_formatted_stats(port_id_list = None, stats_mask = trex_stl_stats.SS_COMPAT)
+ # print stats to screen
+ for stat_type, stat_data in stats.items():
+ text_tables.print_table_with_header(stat_data.text_table, stat_type, buffer = buffer)
+ pass
+
+
+ def get_key_actions (self):
+ return self.key_actions
+
+ def action_clear (self):
+ self.stateless_client.flow_stats.clear_stats()
+
+ return ""
+
+
+# latency stats
+class TrexTUILatencyStats(TrexTUIPanel):
+ def __init__ (self, mng):
+ super(TrexTUILatencyStats, self).__init__(mng, "lstats")
+ self.key_actions = OrderedDict()
+ self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}
+ self.key_actions['h'] = {'action': self.action_toggle_histogram, 'legend': 'histogram toggle', 'show': True}
+ self.is_histogram = False
+
+
+ def show (self, buffer):
+ if self.is_histogram:
+ stats = self.stateless_client._get_formatted_stats(port_id_list = None, stats_mask = trex_stl_stats.LH_COMPAT)
+ else:
+ stats = self.stateless_client._get_formatted_stats(port_id_list = None, stats_mask = trex_stl_stats.LS_COMPAT)
+ # print stats to screen
+ for stat_type, stat_data in stats.items():
+ if stat_type == 'latency_statistics':
+ untouched_header = ' (usec)'
+ else:
+ untouched_header = ''
+ text_tables.print_table_with_header(stat_data.text_table, stat_type, untouched_header = untouched_header, buffer = buffer)
+
+ def get_key_actions (self):
+ return self.key_actions
+
+ def action_toggle_histogram (self):
+ self.is_histogram = not self.is_histogram
+ return ""
+
+ def action_clear (self):
+ self.stateless_client.latency_stats.clear_stats()
+ return ""
+
+
+# utilization stats
+class TrexTUIUtilizationStats(TrexTUIPanel):
+ def __init__ (self, mng):
+ super(TrexTUIUtilizationStats, self).__init__(mng, "ustats")
+ self.key_actions = {}
+
+ def show (self, buffer):
+ stats = self.stateless_client._get_formatted_stats(port_id_list = None, stats_mask = trex_stl_stats.UT_COMPAT)
+ # print stats to screen
+ for stat_type, stat_data in stats.items():
+ text_tables.print_table_with_header(stat_data.text_table, stat_type, buffer = buffer)
+
+ def get_key_actions (self):
+ return self.key_actions
+
+
+# log
+class TrexTUILog():
+ def __init__ (self):
+ self.log = []
+
+ def add_event (self, msg):
+ self.log.append("[{0}] {1}".format(str(datetime.datetime.now().time()), msg))
+
+ def show (self, buffer, max_lines = 4):
+
+ cut = len(self.log) - max_lines
+ if cut < 0:
+ cut = 0
+
+ print(format_text("\nLog:", 'bold', 'underline'), file = buffer)
+
+ for msg in self.log[cut:]:
+ print(msg, file = buffer)
+
+
+# a predicate to wrap function as a bool
+class Predicate(object):
+ def __init__ (self, func):
+ self.func = func
+
+ def __nonzero__ (self):
+ return True if self.func() else False
+ def __bool__ (self):
+ return True if self.func() else False
+
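+# Usage sketch (illustrative): unlike a plain bool captured once at
+# construction time, the wrapped callable is re-evaluated on every truth test.
+#
+#   locked = [True]
+#   p = Predicate(lambda: not locked[0])
+#   bool(p)            # False
+#   locked[0] = False
+#   bool(p)            # True - reflects the new state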
+
+# Panels manager (contains server panels)
+class TrexTUIPanelManager():
+ def __init__ (self, tui):
+ self.tui = tui
+ self.stateless_client = tui.stateless_client
+ self.ports = self.stateless_client.get_all_ports()
+ self.locked = False
+
+ self.panels = {}
+ self.panels['dashboard'] = TrexTUIDashBoard(self)
+ self.panels['sstats'] = TrexTUIStreamsStats(self)
+ self.panels['lstats'] = TrexTUILatencyStats(self)
+ self.panels['ustats'] = TrexTUIUtilizationStats(self)
+
+ self.key_actions = OrderedDict()
+
+ # we allow console only when ports are acquired
+ self.key_actions['ESC'] = {'action': self.action_none, 'legend': 'console', 'show': Predicate(lambda : not self.locked)}
+
+ self.key_actions['q'] = {'action': self.action_none, 'legend': 'quit', 'show': True}
+ self.key_actions['d'] = {'action': self.action_show_dash, 'legend': 'dashboard', 'show': True}
+ self.key_actions['s'] = {'action': self.action_show_sstats, 'legend': 'streams', 'show': True}
+ self.key_actions['l'] = {'action': self.action_show_lstats, 'legend': 'latency', 'show': True}
+ self.key_actions['u'] = {'action': self.action_show_ustats, 'legend': 'util', 'show': True}
+
+
+ # start with dashboard
+ self.main_panel = self.panels['dashboard']
+
+ # log object
+ self.log = TrexTUILog()
+
+ self.generate_legend()
+
+ self.conn_bar = SimpleBar('status: ', ['|','/','-','\\'])
+ self.dis_bar = SimpleBar('status: ', ['X', ' '])
+ self.show_log = False
+
+
+ def generate_legend (self):
+
+ self.legend = "\n{:<12}".format("browse:")
+
+ for k, v in self.key_actions.items():
+ if v['show']:
+ x = "'{0}' - {1}, ".format(k, v['legend'])
+ if v.get('color'):
+ self.legend += "{:}".format(format_text(x, v.get('color')))
+ else:
+ self.legend += "{:}".format(x)
+
+
+ self.legend += "\n{:<12}".format(self.main_panel.get_name() + ":")
+
+ for k, v in self.main_panel.get_key_actions().items():
+ if v['show']:
+ x = "'{0}' - {1}, ".format(k, v['legend'])
+
+ if v.get('color'):
+ self.legend += "{:}".format(format_text(x, v.get('color')))
+ else:
+ self.legend += "{:}".format(x)
+
+
+ def print_connection_status (self, buffer):
+ if self.tui.get_state() == self.tui.STATE_ACTIVE:
+ self.conn_bar.show(buffer = buffer)
+ else:
+ self.dis_bar.show(buffer = buffer)
+
+ def print_legend (self, buffer):
+ print(format_text(self.legend, 'bold'), file = buffer)
+
+
+ # on window switch or turn on / off of the TUI we call this
+ def init (self, show_log = False, locked = False):
+ self.show_log = show_log
+ self.locked = locked
+ self.generate_legend()
+
+ def show (self, show_legend, buffer):
+ self.main_panel.show(buffer)
+ self.print_connection_status(buffer)
+
+ if show_legend:
+ self.generate_legend()
+ self.print_legend(buffer)
+
+ if self.show_log:
+ self.log.show(buffer)
+
+
+ def handle_key (self, ch):
+ # check for the manager registered actions
+ if ch in self.key_actions:
+ msg = self.key_actions[ch]['action']()
+
+ # check for main panel actions
+ elif ch in self.main_panel.get_key_actions():
+ msg = self.main_panel.get_key_actions()[ch]['action']()
+
+ else:
+ return False
+
+ self.generate_legend()
+ return True
+
+ #if msg == None:
+ # return False
+ #else:
+ # if msg:
+ # self.log.add_event(msg)
+ # return True
+
+
+ # actions
+
+ def action_none (self):
+ return None
+
+ def action_show_dash (self):
+ self.main_panel = self.panels['dashboard']
+ self.init(self.show_log)
+ return ""
+
+ def action_show_port (self, port_id):
+ def action_show_port_x ():
+ self.main_panel = self.panels['port {0}'.format(port_id)]
+ self.init()
+ return ""
+
+ return action_show_port_x
+
+
+ def action_show_sstats (self):
+ self.main_panel = self.panels['sstats']
+ self.init(self.show_log)
+ return ""
+
+
+ def action_show_lstats (self):
+ self.main_panel = self.panels['lstats']
+ self.init(self.show_log)
+ return ""
+
+ def action_show_ustats(self):
+ self.main_panel = self.panels['ustats']
+ self.init(self.show_log)
+ return ""
+
+
+
+# ScreenBuffer is a class designed to
+# avoid inline delays when reprinting the screen
+class ScreenBuffer():
+ def __init__ (self, redraw_cb):
+ self.snapshot = ''
+ self.lock = threading.Lock()
+
+ self.redraw_cb = redraw_cb
+ self.update_flag = False
+
+
+ def start (self):
+ self.active = True
+ self.t = threading.Thread(target = self.__handler)
+ self.t.setDaemon(True)
+ self.t.start()
+
+ def stop (self):
+ self.active = False
+ self.t.join()
+
+
+ # request an update
+ def update (self):
+ self.update_flag = True
+
+ # fetch the screen, return None if no new screen exists yet
+ def get (self):
+
+ if not self.snapshot:
+ return None
+
+ # we have a snapshot - fetch it
+ with self.lock:
+ x = self.snapshot
+ self.snapshot = None
+ return x
+
+
+ def __handler (self):
+
+ while self.active:
+ if self.update_flag:
+ self.__redraw()
+
+ time.sleep(0.01)
+
+ # redraw the next screen
+ def __redraw (self):
+ buffer = StringIO()
+
+ self.redraw_cb(buffer)
+
+ with self.lock:
+ self.snapshot = buffer
+ self.update_flag = False
+
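+# Usage sketch (illustrative): the UI thread requests a frame and polls for
+# the result, so a slow redraw callback never blocks key handling.
+#
+#   sb = ScreenBuffer(render_cb)   # render_cb(buffer) writes into a StringIO
+#   sb.start()
+#   sb.update()                    # mark that a fresh frame is wanted
+#   frame = sb.get()               # the rendered StringIO, or None if not ready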
+# a policer class that prevents too-frequent redraws -
+# it filters fast bursts of redraw requests
+class RedrawPolicer():
+ def __init__ (self, rate):
+ self.ts = 0
+ self.marked = False
+ self.rate = rate
+ self.force = False
+
+ def mark_for_redraw (self, force = False):
+ self.marked = True
+ if force:
+ self.force = True
+
+ def should_redraw (self):
+ dt = time.time() - self.ts
+ return self.force or (self.marked and (dt > self.rate))
+
+ def reset (self, restart = False):
+ self.ts = time.time()
+ self.marked = restart
+ self.force = False
+
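+# Usage sketch (illustrative): a burst of marks inside the rate window
+# collapses into a single redraw.
+#
+#   policer = RedrawPolicer(0.5)
+#   policer.mark_for_redraw()
+#   policer.mark_for_redraw()          # coalesced with the previous mark
+#   if policer.should_redraw():        # at most ~twice a second unless forced
+#       policer.reset()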
+
+# shows a textual top style window
+class TrexTUI():
+
+ STATE_ACTIVE = 0
+ STATE_LOST_CONT = 1
+ STATE_RECONNECT = 2
+ is_graph = False
+
+ MIN_ROWS = 50
+ MIN_COLS = 111
+
+
+ class ScreenSizeException(Exception):
+ def __init__ (self, cols, rows):
+ msg = "TUI requires console screen size of at least {0}x{1}, current is {2}x{3}".format(TrexTUI.MIN_COLS,
+ TrexTUI.MIN_ROWS,
+ cols,
+ rows)
+ super(TrexTUI.ScreenSizeException, self).__init__(msg)
+
+
+ def __init__ (self, stateless_client):
+ self.stateless_client = stateless_client
+
+ self.tui_global_lock = threading.Lock()
+ self.pm = TrexTUIPanelManager(self)
+ self.sb = ScreenBuffer(self.redraw_handler)
+
+ def redraw_handler (self, buffer):
+ # this is executed by the screen buffer - should be protected against TUI commands
+ with self.tui_global_lock:
+ self.pm.show(show_legend = self.async_keys.is_legend_mode(), buffer = buffer)
+
+ def clear_screen (self, lines = 50):
+ # reposition the cursor
+ sys.stdout.write("\x1b[0;0H")
+
+ # clear all lines
+ for i in range(lines):
+ sys.stdout.write("\x1b[0K")
+ if i < (lines - 1):
+ sys.stdout.write("\n")
+
+ # reposition the cursor
+ sys.stdout.write("\x1b[0;0H")
+
+
+
+ def show (self, client, save_console_history, show_log = False, locked = False):
+
+ rows, cols = os.popen('stty size', 'r').read().split()
+ if (int(rows) < TrexTUI.MIN_ROWS) or (int(cols) < TrexTUI.MIN_COLS):
+ raise self.ScreenSizeException(rows = rows, cols = cols)
+
+ with AsyncKeys(client, save_console_history, self.tui_global_lock, locked) as async_keys:
+ sys.stdout.write("\x1bc")
+ self.async_keys = async_keys
+ self.show_internal(show_log, locked)
+
+
+
+ def show_internal (self, show_log, locked):
+
+ self.pm.init(show_log, locked)
+
+ self.state = self.STATE_ACTIVE
+
+ # create print policers
+ self.full_redraw = RedrawPolicer(0.5)
+ self.keys_redraw = RedrawPolicer(0.05)
+ self.full_redraw.mark_for_redraw()
+
+
+ try:
+ self.sb.start()
+
+ while True:
+ # draw and handle user input
+ status = self.async_keys.tick(self.pm)
+
+ # prepare the next frame
+ self.prepare(status)
+ time.sleep(0.01)
+ self.draw_screen()
+
+ with self.tui_global_lock:
+ self.handle_state_machine()
+
+ except TUIQuit:
+ print("\nExiting TUI...")
+
+ finally:
+ self.sb.stop()
+
+ print("")
+
+
+
+ # handle state machine
+ def handle_state_machine (self):
+ # regular state
+ if self.state == self.STATE_ACTIVE:
+            # if no connectivity - move to lost connectivity
+ if not self.stateless_client.async_client.is_alive():
+ self.stateless_client._invalidate_stats(self.pm.ports)
+ self.state = self.STATE_LOST_CONT
+
+
+ # lost connectivity
+ elif self.state == self.STATE_LOST_CONT:
+ # got it back
+ if self.stateless_client.async_client.is_alive():
+ # move to state reconnect
+ self.state = self.STATE_RECONNECT
+
+
+ # restored connectivity - try to reconnect
+ elif self.state == self.STATE_RECONNECT:
+
+ try:
+ self.stateless_client.connect()
+ self.stateless_client.acquire()
+ self.state = self.STATE_ACTIVE
+ except STLError:
+ self.state = self.STATE_LOST_CONT
+
+
+ # logic before printing
+ def prepare (self, status):
+ if status == AsyncKeys.STATUS_REDRAW_ALL:
+ self.full_redraw.mark_for_redraw(force = True)
+
+ elif status == AsyncKeys.STATUS_REDRAW_KEYS:
+ self.keys_redraw.mark_for_redraw()
+
+ if self.full_redraw.should_redraw():
+ self.sb.update()
+ self.full_redraw.reset(restart = True)
+
+ return
+
+
+ # draw once
+ def draw_screen (self):
+
+ # check for screen buffer's new screen
+ x = self.sb.get()
+
+ # we have a new screen to draw
+ if x:
+ self.clear_screen()
+
+ self.async_keys.draw(x)
+ sys.stdout.write(x.getvalue())
+ sys.stdout.flush()
+
+ # maybe we need to redraw the keys
+ elif self.keys_redraw.should_redraw():
+ sys.stdout.write("\x1b[4A")
+ self.async_keys.draw(sys.stdout)
+ sys.stdout.flush()
+
+ # reset the policer for next time
+ self.keys_redraw.reset()
+
+
+
+
+ def get_state (self):
+ return self.state
+
+
+class TokenParser(object):
+ def __init__ (self, seq):
+ self.buffer = list(seq)
+
+ def pop (self):
+ return self.buffer.pop(0)
+
+
+ def peek (self):
+ if not self.buffer:
+ return None
+ return self.buffer[0]
+
+ def next_token (self):
+ if not self.peek():
+ return None
+
+ token = self.pop()
+
+ # special chars
+ if token == '\x1b' and self.peek() == '[':
+ token += self.pop()
+ if self.peek():
+ token += self.pop()
+
+ return token
+
+ def parse (self):
+ tokens = []
+
+ while True:
+ token = self.next_token()
+            if token is None:
+ break
+ tokens.append(token)
+
+ return tokens
+
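+# Example (illustrative): escape sequences are kept as single tokens, so
+# TokenParser('a\x1b[Ab').parse() yields ['a', '\x1b[A', 'b'].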
+
+# handles async IO
+class AsyncKeys:
+
+ MODE_LEGEND = 1
+ MODE_CONSOLE = 2
+
+ STATUS_NONE = 0
+ STATUS_REDRAW_KEYS = 1
+ STATUS_REDRAW_ALL = 2
+
+ def __init__ (self, client, save_console_history, tui_global_lock, locked = False):
+ self.tui_global_lock = tui_global_lock
+
+ self.engine_console = AsyncKeysEngineConsole(self, client, save_console_history)
+ self.engine_legend = AsyncKeysEngineLegend(self)
+ self.locked = locked
+
+        # choose the initial engine based on the locked flag
+        self.engine = self.engine_legend if locked else self.engine_console
+
+ def __enter__ (self):
+ # init termios
+ self.old_settings = termios.tcgetattr(sys.stdin)
+ new_settings = termios.tcgetattr(sys.stdin)
+ new_settings[3] = new_settings[3] & ~(termios.ECHO | termios.ICANON) # lflags
+ new_settings[6][termios.VMIN] = 0 # cc
+ new_settings[6][termios.VTIME] = 0 # cc
+ termios.tcsetattr(sys.stdin, termios.TCSADRAIN, new_settings)
+
+ # huge buffer - no print without flush
+ sys.stdout = open('/dev/stdout', 'w', TrexTUI.MIN_COLS * TrexTUI.MIN_COLS * 2)
+ return self
+
+ def __exit__ (self, type, value, traceback):
+ termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.old_settings)
+
+ # restore sys.stdout
+ sys.stdout.close()
+ sys.stdout = sys.__stdout__
+
+
+ def is_legend_mode (self):
+ return self.engine.get_type() == AsyncKeys.MODE_LEGEND
+
+ def is_console_mode (self):
+        return self.engine.get_type() == AsyncKeys.MODE_CONSOLE
+
+ def switch (self):
+ if self.is_legend_mode():
+ self.engine = self.engine_console
+ else:
+ self.engine = self.engine_legend
+
+
+    # parse the buffer into manageable tokens
+ def parse_tokens (self, seq):
+
+ tokens = []
+ chars = list(seq)
+
+ while chars:
+ token = chars.pop(0)
+
+ # special chars
+ if token == '\x1b' and chars[0] == '[':
+ token += chars.pop(0)
+ token += chars.pop(0)
+
+ tokens.append(token)
+
+ return tokens
+
+ def handle_token (self, token, pm):
+ # ESC for switch
+ if token == '\x1b':
+ if not self.locked:
+ self.switch()
+ return self.STATUS_REDRAW_ALL
+
+ # EOF (ctrl + D)
+ if token == '\x04':
+ raise TUIQuit()
+
+ # pass tick to engine
+ return self.engine.tick(token, pm)
+
+
+ def tick (self, pm):
+ rc = self.STATUS_NONE
+
+ # fetch the stdin buffer
+ seq = os.read(sys.stdin.fileno(), 1024).decode()
+ if not seq:
+ return self.STATUS_NONE
+
+ # parse all the tokens from the buffer
+ tokens = TokenParser(seq).parse()
+
+ # process them
+ for token in tokens:
+ token_rc = self.handle_token(token, pm)
+ rc = max(rc, token_rc)
+
+
+ return rc
+
+
+ def draw (self, buffer):
+ self.engine.draw(buffer)
+
+
+
+# Legend engine
+class AsyncKeysEngineLegend:
+    def __init__ (self, async_keys):
+        # renamed from 'async' - a reserved keyword as of Python 3.7
+        self.async_keys = async_keys
+
+ def get_type (self):
+        return self.async_keys.MODE_LEGEND
+
+ def tick (self, seq, pm):
+
+ if seq == 'q':
+ raise TUIQuit()
+
+ # ignore escapes
+ if len(seq) > 1:
+ return AsyncKeys.STATUS_NONE
+
+ rc = pm.handle_key(seq)
+ return AsyncKeys.STATUS_REDRAW_ALL if rc else AsyncKeys.STATUS_NONE
+
+ def draw (self, buffer):
+ pass
+
+
+# console engine
+class AsyncKeysEngineConsole:
+    def __init__ (self, async_keys, client, save_console_history):
+        self.async_keys = async_keys
+ self.lines = deque(maxlen = 100)
+
+ self.generate_prompt = client.generate_prompt
+ self.save_console_history = save_console_history
+
+ self.ac = {'start' : client.start_line,
+ 'stop' : client.stop_line,
+ 'pause' : client.pause_line,
+ 'clear' : client.clear_stats_line,
+ 'push' : client.push_line,
+ 'resume' : client.resume_line,
+ 'reset' : client.reset_line,
+ 'update' : client.update_line,
+ 'connect' : client.connect_line,
+ 'disconnect' : client.disconnect_line,
+ 'acquire' : client.acquire_line,
+ 'release' : client.release_line,
+ 'quit' : self.action_quit,
+ 'q' : self.action_quit,
+ 'exit' : self.action_quit,
+ 'help' : self.action_help,
+ '?' : self.action_help}
+
+        # fetch readline history and add the relevant entries
+        # (readline history items are 1-indexed)
+        for i in range(1, readline.get_current_history_length() + 1):
+            cmd = readline.get_history_item(i)
+ if cmd and cmd.split()[0] in self.ac:
+ self.lines.appendleft(CmdLine(cmd))
+
+ # new line
+ self.lines.appendleft(CmdLine(''))
+ self.line_index = 0
+ self.last_status = ''
+
+ def action_quit (self, _):
+ raise TUIQuit()
+
+ def action_help (self, _):
+ return ' '.join([format_text(cmd, 'bold') for cmd in self.ac.keys()])
+
+ def get_type (self):
+        return self.async_keys.MODE_CONSOLE
+
+
+ def handle_escape_char (self, seq):
+ # up
+ if seq == '\x1b[A':
+ self.line_index = min(self.line_index + 1, len(self.lines) - 1)
+
+ # down
+ elif seq == '\x1b[B':
+ self.line_index = max(self.line_index - 1, 0)
+
+ # left
+ elif seq == '\x1b[D':
+ self.lines[self.line_index].go_left()
+
+ # right
+ elif seq == '\x1b[C':
+ self.lines[self.line_index].go_right()
+
+ # del
+ elif seq == '\x1b[3~':
+ self.lines[self.line_index].del_key()
+
+ # home
+ elif seq == '\x1b[H':
+ self.lines[self.line_index].home_key()
+
+ # end
+ elif seq == '\x1b[F':
+ self.lines[self.line_index].end_key()
+ return True
+
+ # unknown key
+ else:
+ return AsyncKeys.STATUS_NONE
+
+ return AsyncKeys.STATUS_REDRAW_KEYS
+
+
+ def tick (self, seq, _):
+
+ # handle escape chars
+ if len(seq) > 1:
+ return self.handle_escape_char(seq)
+
+ # handle each char
+ for ch in seq:
+ return self.handle_single_key(ch)
+
+
+
+ def handle_single_key (self, ch):
+ # newline
+ if ch == '\n':
+ self.handle_cmd()
+
+ # backspace
+ elif ch == '\x7f':
+ self.lines[self.line_index].backspace()
+
+ # TAB
+ elif ch == '\t':
+ tokens = self.lines[self.line_index].get().split()
+ if not tokens:
+ return
+
+ if len(tokens) == 1:
+ self.handle_tab_names(tokens[0])
+ else:
+ self.handle_tab_files(tokens)
+
+
+ # simple char
+ else:
+ self.lines[self.line_index] += ch
+
+ return AsyncKeys.STATUS_REDRAW_KEYS
+
+
+ # handle TAB key for completing function names
+ def handle_tab_names (self, cur):
+ matching_cmds = [x for x in self.ac if x.startswith(cur)]
+
+ common = os.path.commonprefix([x for x in self.ac if x.startswith(cur)])
+ if common:
+ if len(matching_cmds) == 1:
+ self.lines[self.line_index].set(common + ' ')
+ self.last_status = ''
+ else:
+ self.lines[self.line_index].set(common)
+                self.last_status = 'ambiguous: '+ ' '.join([format_text(cmd, 'bold') for cmd in matching_cmds])
+
+
+ # handle TAB for completing filenames
+ def handle_tab_files (self, tokens):
+
+ # only commands with files
+ if tokens[0] not in {'start', 'push'}:
+ return
+
+        # '-f' with no parameters - no partial and use current dir
+ if tokens[-1] == '-f':
+ partial = ''
+ d = '.'
+
+ # got a partial path
+ elif tokens[-2] == '-f':
+ partial = tokens.pop()
+
+ # check for dirs
+ dirname, basename = os.path.dirname(partial), os.path.basename(partial)
+ if os.path.isdir(dirname):
+ d = dirname
+ partial = basename
+ else:
+ d = '.'
+ else:
+ return
+
+ # fetch all dirs and files matching wildcard
+ files = []
+ for x in os.listdir(d):
+ if os.path.isdir(os.path.join(d, x)):
+ files.append(x + '/')
+ elif x.endswith( ('.py', 'yaml', 'pcap', 'cap', 'erf') ):
+ files.append(x)
+
+ # dir might not have the files
+ if not files:
+            self.last_status = format_text('no loadable files under path', 'bold')
+ return
+
+
+ # find all the matching files
+ matching_files = [x for x in files if x.startswith(partial)] if partial else files
+
+ # do we have a longer common than partial ?
+ common = os.path.commonprefix([x for x in files if x.startswith(partial)])
+ if not common:
+ common = partial
+
+        tokens.append(os.path.join(d, common) if d != '.' else common)
+
+ # reforge the line
+ newline = ' '.join(tokens)
+
+ if len(matching_files) == 1:
+ if os.path.isfile(tokens[-1]):
+ newline += ' '
+
+ self.lines[self.line_index].set(newline)
+ self.last_status = ''
+ else:
+ self.lines[self.line_index].set(newline)
+ self.last_status = ' '.join([format_text(f, 'bold') for f in matching_files[:5]])
+ if len(matching_files) > 5:
+ self.last_status += ' ... [{0} more matches]'.format(len(matching_files) - 5)
+
+
+
+ def split_cmd (self, cmd):
+ s = cmd.split(' ', 1)
+ op = s[0]
+ param = s[1] if len(s) == 2 else ''
+ return op, param
+
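+    # Example (illustrative): split_cmd('start -f a.py -m 1') returns
+    # ('start', '-f a.py -m 1'), while a bare 'stop' returns ('stop', '').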
+
+ def handle_cmd (self):
+
+ cmd = self.lines[self.line_index].get().strip()
+ if not cmd:
+ return
+
+ op, param = self.split_cmd(cmd)
+
+ func = self.ac.get(op)
+ if func:
+            with self.async_keys.tui_global_lock:
+ func_rc = func(param)
+
+ # take out the empty line
+ empty_line = self.lines.popleft()
+ assert(empty_line.ro_line == '')
+
+ if not self.lines or self.lines[0].ro_line != cmd:
+ self.lines.appendleft(CmdLine(cmd))
+
+ # back in
+ self.lines.appendleft(empty_line)
+ self.line_index = 0
+ readline.add_history(cmd)
+ self.save_console_history()
+
+ # back to readonly
+ for line in self.lines:
+ line.invalidate()
+
+ assert(self.lines[0].modified == False)
+ color = None
+ if not func:
+ self.last_status = "unknown command: '{0}'".format(format_text(cmd.split()[0], 'bold'))
+ else:
+ # internal commands
+ if isinstance(func_rc, str):
+ self.last_status = func_rc
+
+ # RC response
+ else:
+ # success
+ if func_rc:
+ self.last_status = format_text("[OK]", 'green')
+ # errors
+ else:
+ err_msgs = ascii_split(str(func_rc))
+ self.last_status = format_text(err_msgs[0], 'red')
+ if len(err_msgs) > 1:
+                        self.last_status += " [{0} more error messages]".format(len(err_msgs) - 1)
+ color = 'red'
+
+
+
+ # trim too long lines
+ if ansi_len(self.last_status) > TrexTUI.MIN_COLS:
+ self.last_status = format_text(self.last_status[:TrexTUI.MIN_COLS] + "...", color, 'bold')
+
+
+ def draw (self, buffer):
+ buffer.write("\nPress 'ESC' for navigation panel...\n")
+ buffer.write("status: \x1b[0K{0}\n".format(self.last_status))
+ buffer.write("\n{0}\x1b[0K".format(self.generate_prompt(prefix = 'tui')))
+ self.lines[self.line_index].draw(buffer)
+
+
+# a readline alike command line - can be modified during edit
+class CmdLine(object):
+ def __init__ (self, line):
+ self.ro_line = line
+ self.w_line = None
+ self.modified = False
+ self.cursor_index = len(line)
+
+ def get (self):
+ if self.modified:
+ return self.w_line
+ else:
+ return self.ro_line
+
+ def set (self, line, cursor_pos = None):
+ self.w_line = line
+ self.modified = True
+
+ if cursor_pos is None:
+ self.cursor_index = len(self.w_line)
+ else:
+ self.cursor_index = cursor_pos
+
+
+ def __add__ (self, other):
+ assert(0)
+
+
+ def __str__ (self):
+ return self.get()
+
+
+ def __iadd__ (self, other):
+
+ self.set(self.get()[:self.cursor_index] + other + self.get()[self.cursor_index:],
+ cursor_pos = self.cursor_index + len(other))
+
+ return self
+
+
+ def backspace (self):
+ if self.cursor_index == 0:
+ return
+
+ self.set(self.get()[:self.cursor_index - 1] + self.get()[self.cursor_index:],
+ self.cursor_index - 1)
+
+
+ def del_key (self):
+ if self.cursor_index == len(self.get()):
+ return
+
+ self.set(self.get()[:self.cursor_index] + self.get()[self.cursor_index + 1:],
+ self.cursor_index)
+
+ def home_key (self):
+ self.cursor_index = 0
+
+ def end_key (self):
+ self.cursor_index = len(self.get())
+
+ def invalidate (self):
+ self.modified = False
+ self.w_line = None
+ self.cursor_index = len(self.ro_line)
+
+ def go_left (self):
+ self.cursor_index = max(0, self.cursor_index - 1)
+
+ def go_right (self):
+ self.cursor_index = min(len(self.get()), self.cursor_index + 1)
+
+ def draw (self, buffer):
+ buffer.write(self.get())
+ buffer.write('\b' * (len(self.get()) - self.cursor_index))
+
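+# Usage sketch (illustrative): edits are copy-on-write, so the original
+# history line survives until invalidate() restores it.
+#
+#   line = CmdLine('strt')
+#   line.go_left(); line.go_left()   # cursor now sits before the 'r'
+#   line += 'a'                      # line.get() -> 'start'
+#   line.invalidate()                # line.get() -> 'strt' again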
diff --git a/scripts/automation/trex_control_plane/stl/examples/hlt_udp_simple.py b/scripts/automation/trex_control_plane/stl/examples/hlt_udp_simple.py
new file mode 100644
index 00000000..1f754f0a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/hlt_udp_simple.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+
+"""
+Sample HLTAPI application (for loopback)
+Connect to TRex
+Send UDP packets of a specific length
+Each direction has its own IP range
+"""
+
+import sys
+import argparse
+import stl_path
+from trex_stl_lib.api import *
+from trex_stl_lib.trex_stl_hltapi import *
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(usage="""
+ Connect to TRex and send bidirectional continuous traffic
+
+ examples:
+
+ hlt_udp_simple.py --server <hostname/ip>
+
+ hlt_udp_simple.py -s 300 -d 30 -rate_pps 5000000 --src <MAC> --dst <MAC>
+
+ then run the simulator on the output
+    ./stl-sim -f example.yaml -o a.pcap  ==> a.pcap includes the packets
+
+ """,
+ description="Example for TRex HLTAPI",
+                                     epilog=" based on hhaim's stl_run_udp_simple example")
+
+ parser.add_argument("--server",
+ dest="server",
+ help='Remote trex address',
+ default="127.0.0.1",
+ type = str)
+
+ parser.add_argument("-s", "--frame-size",
+ dest="frame_size",
+ help='L2 frame size in bytes without FCS',
+ default=60,
+ type = int,)
+
+ parser.add_argument('-d','--duration',
+ dest='duration',
+                        help='duration in seconds',
+ default=10,
+ type = int,)
+
+ parser.add_argument('--rate-pps',
+ dest='rate_pps',
+ help='speed in pps',
+ default="100")
+
+ parser.add_argument('--src',
+ dest='src_mac',
+ help='src MAC',
+ default='00:50:56:b9:de:75')
+
+ parser.add_argument('--dst',
+ dest='dst_mac',
+ help='dst MAC',
+ default='00:50:56:b9:34:f3')
+
+    args = parser.parse_args()
+
+ hltapi = CTRexHltApi()
+ print('Connecting to TRex')
+ res = hltapi.connect(device = args.server, port_list = [0, 1], reset = True, break_locks = True)
+ check_res(res)
+ ports = list(res['port_handle'].values())
+ if len(ports) < 2:
+ error('Should have at least 2 ports for this test')
+ print('Connected, acquired ports: %s' % ports)
+
+ print('Creating traffic')
+
+ res = hltapi.traffic_config(mode = 'create', bidirectional = True,
+ port_handle = ports[0], port_handle2 = ports[1],
+ frame_size = args.frame_size,
+ mac_src = args.src_mac, mac_dst = args.dst_mac,
+ mac_src2 = args.dst_mac, mac_dst2 = args.src_mac,
+ l3_protocol = 'ipv4',
+ ip_src_addr = '10.0.0.1', ip_src_mode = 'increment', ip_src_count = 254,
+ ip_dst_addr = '8.0.0.1', ip_dst_mode = 'increment', ip_dst_count = 254,
+ l4_protocol = 'udp',
+ udp_dst_port = 12, udp_src_port = 1025,
+ rate_pps = args.rate_pps,
+ )
+ check_res(res)
+
+ print('Starting traffic')
+ res = hltapi.traffic_control(action = 'run', port_handle = ports[:2])
+ check_res(res)
+ wait_with_progress(args.duration)
+
+ print('Stopping traffic')
+ res = hltapi.traffic_control(action = 'stop', port_handle = ports[:2])
+ check_res(res)
+
+ res = hltapi.traffic_stats(mode = 'aggregate', port_handle = ports[:2])
+ check_res(res)
+ print_brief_stats(res)
+
+ print('Removing all streams from port 0')
+ res = hltapi.traffic_config(mode = 'remove', port_handle = ports[0], stream_id = 'all')
+ check_res(res)
+
+ res = hltapi.cleanup_session(port_handle = 'all')
+ check_res(res)
+
+ print('Done')
diff --git a/scripts/automation/trex_control_plane/stl/examples/rpc_proxy_server.py b/scripts/automation/trex_control_plane/stl/examples/rpc_proxy_server.py
new file mode 100755
index 00000000..ad2697d8
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/rpc_proxy_server.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+
+import argparse
+import traceback
+import logging
+import sys
+import os
+import json
+import socket
+from functools import partial
+logging.basicConfig(level = logging.FATAL) # keep quiet
+
+import stl_path
+from trex_stl_lib.api import *
+from trex_stl_lib.trex_stl_hltapi import CTRexHltApi, HLT_OK, HLT_ERR
+
+# ext libs
+ext_libs = os.path.join(os.pardir, os.pardir, os.pardir, os.pardir, 'external_libs') # usual package path
+if not os.path.exists(ext_libs):
+ ext_libs = os.path.join(os.pardir, os.pardir, 'external_libs') # client package path
+sys.path.append(os.path.join(ext_libs, 'jsonrpclib-pelix-0.2.5'))
+from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+import yaml
+
+# TODO: refactor this to class
+
+native_client = None
+hltapi_client = None
+
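+# convention: every proxy call returns a [status, payload] pair -
+# [True, result] on success, [False, error string] on failure
+# (clients unwrap it, e.g. verify() in using_rpc_proxy.py)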
+def OK(res = True):
+ return [True, res]
+
+def ERR(res = 'Unknown error'):
+ return [False, res]
+
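+# round-trip through JSON and YAML to turn unicode strings into native str
+# where possible, e.g. {u'key': [u'val', 1]} -> {'key': ['val', 1]} on Python 2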
+def deunicode_json(data):
+ return yaml.safe_load(json.dumps(data))
+
+
+### Server functions ###
+
+def add(a, b): # for sanity checks
+ try:
+ return OK(a + b)
+ except:
+ return ERR(traceback.format_exc())
+
+def check_connectivity():
+ return OK()
+
+def native_proxy_init(force = False, *args, **kwargs):
+ global native_client
+ if native_client and not force:
+ return ERR('Native Client is already initiated')
+ try:
+ native_client = STLClient(*args, **kwargs)
+ return OK('Native Client initiated')
+ except:
+ return ERR(traceback.format_exc())
+
+def native_proxy_del():
+ global native_client
+ native_client = None
+ return OK()
+
+def hltapi_proxy_init(force = False, *args, **kwargs):
+ global hltapi_client
+ if hltapi_client and not force:
+ return HLT_ERR('HLTAPI Client is already initiated')
+ try:
+ hltapi_client = CTRexHltApi(*args, **kwargs)
+ return HLT_OK()
+ except:
+ return HLT_ERR(traceback.format_exc())
+
+def hltapi_proxy_del():
+ global hltapi_client
+ hltapi_client = None
+ return HLT_OK()
+
+# any method not listed above can be called by passing its name here
+def native_method(func_name, *args, **kwargs):
+ try:
+ func = getattr(native_client, func_name)
+ return OK(func(*deunicode_json(args), **deunicode_json(kwargs)))
+ except:
+ return ERR(traceback.format_exc())
+
+# any HLTAPI method can be called by passing its name here
+def hltapi_method(func_name, *args, **kwargs):
+ try:
+ func = getattr(hltapi_client, func_name)
+ return func(*deunicode_json(args), **deunicode_json(kwargs))
+ except:
+ return HLT_ERR(traceback.format_exc())
+
+### /Server functions ###
+
+
+def run_server(port = 8095):
+ native_methods = [
+ 'acquire',
+ 'connect',
+ 'disconnect',
+ 'get_stats',
+ 'get_warnings',
+ 'push_remote',
+ 'reset',
+ 'wait_on_traffic',
+ ]
+ hltapi_methods = [
+ 'connect',
+ 'cleanup_session',
+ 'interface_config',
+ 'traffic_config',
+ 'traffic_control',
+ 'traffic_stats',
+ ]
+
+ try:
+ register_socket('trex_stl_rpc_proxy')
+ server = SimpleJSONRPCServer(('0.0.0.0', port))
+ server.register_function(add)
+ server.register_function(check_connectivity)
+ server.register_function(native_proxy_init)
+ server.register_function(native_proxy_del)
+ server.register_function(hltapi_proxy_init)
+ server.register_function(hltapi_proxy_del)
+ server.register_function(native_method)
+ server.register_function(hltapi_method)
+
+ for method in native_methods:
+ server.register_function(partial(native_method, method), method)
+ for method in hltapi_methods:
+ if method in native_methods: # collision in names
+ method_hlt_name = 'hlt_%s' % method
+ else:
+ method_hlt_name = method
+ server.register_function(partial(hltapi_method, method), method_hlt_name)
+ server.register_function(server.funcs.keys, 'get_methods') # should be last
+ print('Started Stateless RPC proxy at port %s' % port)
+ server.serve_forever()
+ except KeyboardInterrupt:
+ print('Done')
+
+# provides a unique way to detect an already-running instance of the process
+def register_socket(tag):
+ global foo_socket # Without this our lock gets garbage collected
+ foo_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
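+ # binding to a name that starts with '\0' uses the Linux abstract socket
+ # namespace: no filesystem entry is created and the lock dies with the process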
+ try:
+ foo_socket.bind('\0%s' % tag)
+ print('Got the socket lock for tag %s.' % tag)
+ except socket.error:
+ print('Error: process with tag %s is already running.' % tag)
+ sys.exit(-1)
+
+### Main ###
+
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description = 'Runs TRex Stateless RPC proxy for usage with any language client.')
+ parser.add_argument('-p', '--port', type=int, default = 8095, dest='port', action = 'store',
+ help = 'Select port on which the stl rpc proxy will run.\nDefault is 8095.')
+ kwargs = vars(parser.parse_args())
+ run_server(**kwargs)
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows.py b/scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows.py
new file mode 100644
index 00000000..9977fa3e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows.py
@@ -0,0 +1,118 @@
+import stl_path
+from trex_stl_lib.api import *
+
+import time
+import json
+
+# simple packet creation
+def create_pkt (size, direction):
+
+ ip_range = {'src': {'start': "10.0.0.1", 'end': "10.0.0.254"},
+ 'dst': {'start': "8.0.0.1", 'end': "8.0.0.254"}}
+
+ if (direction == 0):
+ src = ip_range['src']
+ dst = ip_range['dst']
+ else:
+ src = ip_range['dst']
+ dst = ip_range['src']
+
+ vm = [
+ # src
+ STLVmFlowVar(name="src",min_value=src['start'],max_value=src['end'],size=4,op="inc"),
+ STLVmWrFlowVar(fv_name="src",pkt_offset= "IP.src"),
+
+ # dst
+ STLVmFlowVar(name="dst",min_value=dst['start'],max_value=dst['end'],size=4,op="inc"),
+ STLVmWrFlowVar(fv_name="dst",pkt_offset= "IP.dst"),
+
+ # checksum
+ STLVmFixIpv4(offset = "IP")
+ ]
+
+
+ base = Ether()/IP()/UDP()
+ pad = max(0, size-len(base)) * 'x'
+
+ return STLPktBuilder(pkt = base/pad,
+ vm = vm)
+
+
+def simple_burst (port_a, port_b, pkt_size, rate):
+
+
+ # create client
+ c = STLClient()
+ passed = True
+
+ try:
+ # turn this on for some information
+ #c.set_verbose("high")
+
+ # create two streams
+ s1 = STLStream(packet = create_pkt(pkt_size, 0),
+ mode = STLTXCont(pps = 100))
+
+ # second stream with a phase of 1ms (inter stream gap)
+ s2 = STLStream(packet = create_pkt(pkt_size, 1),
+ isg = 1000,
+ mode = STLTXCont(pps = 100))
+
+
+ # connect to server
+ c.connect()
+
+ # prepare our ports
+ c.reset(ports = [port_a, port_b])
+
+ # add both streams to ports
+ c.add_streams(s1, ports = [port_a])
+ c.add_streams(s2, ports = [port_b])
+
+ # clear the stats before injecting
+ c.clear_stats()
+
+ # scale the traffic linearly to whatever rate is given
+ print("Running {:} on ports {:}, {:} for 10 seconds...".format(rate, port_a, port_b))
+ c.start(ports = [port_a, port_b], mult = rate, duration = 10)
+
+ # block until done
+ c.wait_on_traffic(ports = [port_a, port_b])
+
+ # read the stats after the test
+ stats = c.get_stats()
+
+ print(json.dumps(stats[port_a], indent = 4, separators=(',', ': '), sort_keys = True))
+ print(json.dumps(stats[port_b], indent = 4, separators=(',', ': '), sort_keys = True))
+
+ lost_a = stats[port_a]["opackets"] - stats[port_b]["ipackets"]
+ lost_b = stats[port_b]["opackets"] - stats[port_a]["ipackets"]
+
+ print("\npackets lost from {0} --> {1}: {2} pkts".format(port_a, port_b, lost_a))
+ print("packets lost from {0} --> {1}: {2} pkts".format(port_b, port_a, lost_b))
+
+ if c.get_warnings():
+ print("\n\n*** test had warnings ****\n\n")
+ for w in c.get_warnings():
+ print(w)
+
+ if (lost_a == 0) and (lost_b == 0) and not c.get_warnings():
+ passed = True
+ else:
+ passed = False
+
+ except STLError as e:
+ passed = False
+ print(e)
+
+ finally:
+ c.disconnect()
+
+ if passed:
+ print("\nTest has passed :-)\n")
+ else:
+ print("\nTest has failed :-(\n")
+
+# run the tests
+simple_burst(0, 3, 64, "10gbps")
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_flow_latency_stats.py b/scripts/automation/trex_control_plane/stl/examples/stl_flow_latency_stats.py
new file mode 100644
index 00000000..d8a99479
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_flow_latency_stats.py
@@ -0,0 +1,144 @@
+# Example showing how to define a stream for latency measurement, and how to parse the latency information
+
+import stl_path
+from trex_stl_lib.api import *
+
+import time
+import pprint
+
+def rx_example (tx_port, rx_port, burst_size, pps):
+
+ print("\nGoing to inject {0} packets on port {1} - checking RX stats on port {2}\n".format(burst_size, tx_port, rx_port))
+
+ # create client
+ c = STLClient()
+ passed = True
+
+ try:
+ pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/'at_least_16_bytes_payload_needed')
+ total_pkts = burst_size
+ s1 = STLStream(name = 'rx',
+ packet = pkt,
+ flow_stats = STLFlowLatencyStats(pg_id = 5),
+ mode = STLTXSingleBurst(total_pkts = total_pkts,
+ pps = pps))
+
+ # connect to server
+ c.connect()
+
+ # prepare our ports
+ c.reset(ports = [tx_port, rx_port])
+
+ # add both streams to ports
+ c.add_streams([s1], ports = [tx_port])
+
+ print("\nInjecting {0} packets on port {1}\n".format(total_pkts, tx_port))
+
+ rc = rx_iteration(c, tx_port, rx_port, total_pkts, pkt.get_pkt_len())
+ if not rc:
+ passed = False
+
+ except STLError as e:
+ passed = False
+ print(e)
+
+ finally:
+ c.disconnect()
+
+ if passed:
+ print("\nTest passed :-)\n")
+ else:
+ print("\nTest failed :-(\n")
+
+# RX one iteration
+def rx_iteration (c, tx_port, rx_port, total_pkts, pkt_len):
+
+ c.clear_stats()
+
+ c.start(ports = [tx_port])
+ c.wait_on_traffic(ports = [tx_port])
+
+ stats = c.get_stats()
+ flow_stats = stats['flow_stats'].get(5)
+ global_lat_stats = stats['latency']
+ lat_stats = global_lat_stats.get(5)
+ if not flow_stats:
+ print("no flow stats available")
+ return False
+ if not lat_stats:
+ print("no latency stats available")
+ return False
+
+ tx_pkts = flow_stats['tx_pkts'].get(tx_port, 0)
+ tx_bytes = flow_stats['tx_bytes'].get(tx_port, 0)
+ rx_pkts = flow_stats['rx_pkts'].get(rx_port, 0)
+ drops = lat_stats['err_cntrs']['dropped']
+ ooo = lat_stats['err_cntrs']['out_of_order']
+ dup = lat_stats['err_cntrs']['dup']
+ sth = lat_stats['err_cntrs']['seq_too_high']
+ stl = lat_stats['err_cntrs']['seq_too_low']
+ old_flow = global_lat_stats['global']['old_flow']
+ bad_hdr = global_lat_stats['global']['bad_hdr']
+ lat = lat_stats['latency']
+ jitter = lat['jitter']
+ avg = lat['average']
+ tot_max = lat['total_max']
+ tot_min = lat['total_min']
+ last_max = lat['last_max']
+ hist = lat ['histogram']
+
+ if c.get_warnings():
+ print("\n\n*** test had warnings ****\n\n")
+ for w in c.get_warnings():
+ print(w)
+ return False
+
+ print('Error counters: dropped:{0}, ooo:{1} dup:{2} seq too high:{3} seq too low:{4}'.format(drops, ooo, dup, sth, stl))
+ if old_flow:
+ print ('Packets arriving too late after flow stopped: {0}'.format(old_flow))
+ if bad_hdr:
+ print ('Latency packets with corrupted info: {0}'.format(bad_hdr))
+ print('Latency info:')
+ print(" Maximum latency(usec): {0}".format(tot_max))
+ print(" Minimum latency(usec): {0}".format(tot_min))
+ print(" Maximum latency in last sampling period (usec): {0}".format(last_max))
+ print(" Average latency(usec): {0}".format(avg))
+ print(" Jitter(usec): {0}".format(jitter))
+ print(" Latency distribution histogram:")
+ l = sorted(hist.keys()) # dict.keys() returns a view with no sort() on Python 3
+ for sample in l:
+ range_start = sample
+ if range_start == 0:
+ range_end = 10
+ else:
+ range_end = range_start + pow(10, (len(str(range_start))-1))
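+ # bucket width grows with the value: e.g. bucket 200 spans 200-300 usec,
+ # bucket 1000 spans 1000-2000 usec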
+ val = hist[sample]
+ print (" Packets with latency between {0} and {1}:{2} ".format(range_start, range_end, val))
+
+ if tx_pkts != total_pkts:
+ print("TX pkts mismatch - got: {0}, expected: {1}".format(tx_pkts, total_pkts))
+ pprint.pprint(flow_stats)
+ return False
+ else:
+ print("TX pkts match - {0}".format(tx_pkts))
+
+ if tx_bytes != (total_pkts * (pkt_len + 4)): # +4 for ethernet CRC
+ print("TX bytes mismatch - got: {0}, expected: {1}".format(tx_bytes, (total_pkts * pkt_len)))
+ pprint.pprint(flow_stats)
+ return False
+ else:
+ print("TX bytes match - {0}".format(tx_bytes))
+
+ if rx_pkts != total_pkts:
+ print("RX pkts mismatch - got: {0}, expected: {1}".format(rx_pkts, total_pkts))
+ pprint.pprint(flow_stats)
+ return False
+ else:
+ print("RX pkts match - {0}".format(rx_pkts))
+
+ return True
+
+# run the tests
+rx_example(tx_port = 0, rx_port = 1, burst_size = 1000, pps = 1000)
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py b/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py
new file mode 100644
index 00000000..3c630ece
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py
@@ -0,0 +1,110 @@
+# Example showing how to define a stream for getting per-flow statistics, and how to parse the received statistics
+
+import stl_path
+from trex_stl_lib.api import *
+
+import time
+import pprint
+
+def rx_example (tx_port, rx_port, burst_size, bw):
+
+ print("\nGoing to inject {0} packets on port {1} - checking RX stats on port {2}\n".format(burst_size, tx_port, rx_port))
+
+ # create client
+ c = STLClient()
+ passed = True
+
+ try:
+ pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()/'a_payload_example')
+ total_pkts = burst_size
+ s1 = STLStream(name = 'rx',
+ packet = pkt,
+ flow_stats = STLFlowStats(pg_id = 5),
+ mode = STLTXSingleBurst(total_pkts = total_pkts,
+ percentage = bw))
+
+ # connect to server
+ c.connect()
+
+ # prepare our ports
+ c.reset(ports = [tx_port, rx_port])
+
+ # add stream to port
+ c.add_streams([s1], ports = [tx_port])
+
+ print("\ngoing to inject {0} packets on port {1}\n".format(total_pkts, tx_port))
+
+ rc = rx_iteration(c, tx_port, rx_port, total_pkts, s1.get_pkt_len())
+ if not rc:
+ passed = False
+
+ except STLError as e:
+ passed = False
+ print(e)
+
+ finally:
+ c.disconnect()
+
+ if passed:
+ print("\nTest passed :-)\n")
+ else:
+ print("\nTest failed :-(\n")
+
+# RX one iteration
+def rx_iteration (c, tx_port, rx_port, total_pkts, pkt_len):
+ ret = True
+
+ c.clear_stats()
+
+ c.start(ports = [tx_port])
+ c.wait_on_traffic(ports = [tx_port])
+
+ global_flow_stats = c.get_stats()['flow_stats']
+ flow_stats = global_flow_stats.get(5)
+ if not flow_stats:
+ print("no flow stats available")
+ return False
+
+ tx_pkts = flow_stats['tx_pkts'].get(tx_port, 0)
+ tx_bytes = flow_stats['tx_bytes'].get(tx_port, 0)
+ rx_pkts = flow_stats['rx_pkts'].get(rx_port, 0)
+
+ if c.get_warnings():
+ print("\n\n*** test had warnings ****\n\n")
+ for w in c.get_warnings():
+ print(w)
+ return False
+
+ if tx_pkts != total_pkts:
+ print("TX pkts mismatch - got: {0}, expected: {1}".format(tx_pkts, total_pkts))
+ pprint.pprint(flow_stats)
+ ret = False
+ else:
+ print("TX pkts match - {0}".format(tx_pkts))
+
+ if tx_bytes != (total_pkts * pkt_len):
+ print("TX bytes mismatch - got: {0}, expected: {1}".format(tx_bytes, (total_pkts * pkt_len)))
+ pprint.pprint(flow_stats)
+ ret = False
+ else:
+ print("TX bytes match - {0}".format(tx_bytes))
+
+ if rx_pkts != total_pkts:
+ print("RX pkts mismatch - got: {0}, expected: {1}".format(rx_pkts, total_pkts))
+ pprint.pprint(flow_stats)
+ ret = False
+ else:
+ print("RX pkts match - {0}".format(rx_pkts))
+
+
+ for field in ['rx_err', 'tx_err']:
+ for port in global_flow_stats['global'][field].keys():
+ if global_flow_stats['global'][field][port] != 0:
+ print ("\n{0} on port {1}: {2} - You should consider increasing rx_delay_ms value in wait_on_traffic"
+ .format(field, port, global_flow_stats['global'][field][port]))
+
+ return ret
+
+# run the tests
+rx_example(tx_port = 0, rx_port = 1, burst_size = 500, bw = 50)
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_imix.py b/scripts/automation/trex_control_plane/stl/examples/stl_imix.py
new file mode 100644
index 00000000..875186ba
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_imix.py
@@ -0,0 +1,126 @@
+import stl_path
+from trex_stl_lib.api import *
+
+import time
+import json
+from pprint import pprint
+import argparse
+import sys
+import os
+
+# IMIX test
+# it maps the ports to sides,
+# then it loads a predefined profile 'IMIX'
+# and attaches it to both sides, injecting
+# at a certain rate for some time;
+# finally it checks that all packets arrived
+def imix_test (server, mult):
+
+
+ # create client
+ c = STLClient(server = server)
+
+ passed = True
+
+
+ try:
+
+ # connect to server
+ c.connect()
+
+ # take all the ports
+ c.reset()
+
+
+ # map ports - identify the routes
+ table = stl_map_ports(c)
+
+ dir_0 = [x[0] for x in table['bi']]
+ dir_1 = [x[1] for x in table['bi']]
+
+ print("Mapped ports to sides {0} <--> {1}".format(dir_0, dir_1))
+
+ # load IMIX profile
+ profile_file = os.path.join(stl_path.STL_PROFILES_PATH, 'imix.py')
+ profile = STLProfile.load_py(profile_file)
+ streams = profile.get_streams()
+
+ # add both streams to ports
+ c.add_streams(streams, ports = dir_0)
+ c.add_streams(streams, ports = dir_1)
+
+ # clear the stats before injecting
+ c.clear_stats()
+
+ # choose rate and start traffic for 10 seconds
+ duration = 10
+ print("Injecting {0} <--> {1} on total rate of '{2}' for {3} seconds".format(dir_0, dir_1, mult, duration))
+
+ c.start(ports = (dir_0 + dir_1), mult = mult, duration = duration, total = True)
+
+ # block until done
+ c.wait_on_traffic(ports = (dir_0 + dir_1))
+
+ # read the stats after the test
+ stats = c.get_stats()
+
+ # use this for debug info on all the stats
+ #pprint(stats)
+
+ # sum dir 0
+ dir_0_opackets = sum([stats[i]["opackets"] for i in dir_0])
+ dir_0_ipackets = sum([stats[i]["ipackets"] for i in dir_0])
+
+ # sum dir 1
+ dir_1_opackets = sum([stats[i]["opackets"] for i in dir_1])
+ dir_1_ipackets = sum([stats[i]["ipackets"] for i in dir_1])
+
+
+ lost_0 = dir_0_opackets - dir_1_ipackets
+ lost_1 = dir_1_opackets - dir_0_ipackets
+
+ print("\nPackets injected from {0}: {1:,}".format(dir_0, dir_0_opackets))
+ print("Packets injected from {0}: {1:,}".format(dir_1, dir_1_opackets))
+
+ print("\npackets lost from {0} --> {1}: {2:,} pkts".format(dir_0, dir_0, lost_0))
+ print("packets lost from {0} --> {1}: {2:,} pkts".format(dir_1, dir_1, lost_1))
+
+ if c.get_warnings():
+ print("\n\n*** test had warnings ****\n\n")
+ for w in c.get_warnings():
+ print(w)
+
+ if (lost_0 <= 0) and (lost_1 <= 0) and not c.get_warnings(): # less or equal because we might have incoming arps etc.
+ passed = True
+ else:
+ passed = False
+
+
+ except STLError as e:
+ passed = False
+ print(e)
+ sys.exit(1)
+
+ finally:
+ c.disconnect()
+
+ if passed:
+ print("\nTest has passed :-)\n")
+ else:
+ print("\nTest has failed :-(\n")
+
+parser = argparse.ArgumentParser(description="Example for TRex Stateless, sending IMIX traffic")
+parser.add_argument('-s', '--server',
+ dest='server',
+ help='Remote trex address',
+ default='127.0.0.1',
+ type = str)
+parser.add_argument('-m', '--mult',
+ dest='mult',
+ help='Multiplier of traffic, see Stateless help for more info',
+ default='30%',
+ type = str)
+args = parser.parse_args()
+
+# run the tests
+imix_test(args.server, args.mult)
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir.py b/scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir.py
new file mode 100644
index 00000000..956b910a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir.py
@@ -0,0 +1,113 @@
+import stl_path
+from trex_stl_lib.api import *
+
+import os
+import time
+import json
+from pprint import pprint
+import argparse
+
+# IMIX test
+# it maps the ports to sides,
+# then it loads a predefined profile 'IMIX'
+# and attaches it to both sides, injecting
+# at a certain rate for some time;
+# finally it checks that all packets arrived
+def imix_test (server):
+
+
+ # create client
+ c = STLClient(server = server)
+ passed = True
+
+
+ try:
+
+ # connect to server
+ c.connect()
+
+ # take all the ports
+ c.reset()
+
+ dir_0 = [0]
+ dir_1 = [1]
+
+ print "Mapped ports to sides {0} <--> {1}".format(dir_0, dir_1)
+
+ # load IMIX profile
+ profile_file = os.path.join(stl_path.STL_PROFILES_PATH, 'imix.py')
+ profile1 = STLProfile.load_py(profile_file, direction=0)
+ profile2 = STLProfile.load_py(profile_file, direction=1)
+ stream1 = profile1.get_streams()
+ stream2 = profile2.get_streams()
+
+ # add both streams to ports
+ c.add_streams(stream1, ports = dir_0)
+ c.add_streams(stream2, ports = dir_1)
+
+ # clear the stats before injecting
+ c.clear_stats()
+
+ # choose rate and start traffic for 30 seconds at a total rate of 30%
+ duration = 30
+ mult = "30%"
+ print "Injecting {0} <--> {1} on total rate of '{2}' for {3} seconds".format(dir_0, dir_1, mult, duration)
+
+ c.start(ports = (dir_0 + dir_1), mult = mult, duration = duration, total = True)
+
+ # block until done
+ c.wait_on_traffic(ports = (dir_0 + dir_1))
+
+ # read the stats after the test
+ stats = c.get_stats()
+
+ # use this for debug info on all the stats
+ pprint(stats)
+
+ # sum dir 0
+ dir_0_opackets = sum([stats[i]["opackets"] for i in dir_0])
+ dir_0_ipackets = sum([stats[i]["ipackets"] for i in dir_0])
+
+ # sum dir 1
+ dir_1_opackets = sum([stats[i]["opackets"] for i in dir_1])
+ dir_1_ipackets = sum([stats[i]["ipackets"] for i in dir_1])
+
+
+ lost_0 = dir_0_opackets - dir_1_ipackets
+ lost_1 = dir_1_opackets - dir_0_ipackets
+
+ print "\nPackets injected from {0}: {1:,}".format(dir_0, dir_0_opackets)
+ print "Packets injected from {0}: {1:,}".format(dir_1, dir_1_opackets)
+
+ print "\npackets lost from {0} --> {1}: {2:,} pkts".format(dir_0, dir_1, lost_0)
+ print "packets lost from {0} --> {1}: {2:,} pkts".format(dir_1, dir_0, lost_1)
+
+ if (lost_0 <= 0) and (lost_1 <= 0): # less or equal because we might have incoming arps etc.
+ passed = True
+ else:
+ passed = False
+
+
+ except STLError as e:
+ passed = False
+ print(e)
+
+ finally:
+ c.disconnect()
+
+ if passed:
+ print "\nTest has passed :-)\n"
+ else:
+ print "\nTest has failed :-(\n"
+
+parser = argparse.ArgumentParser(description="Example for TRex Stateless, sending IMIX traffic")
+parser.add_argument('-s', '--server',
+ dest='server',
+ help='Remote trex address',
+ default='127.0.0.1',
+ type = str)
+args = parser.parse_args()
+
+# run the tests
+imix_test(args.server)
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_path.py b/scripts/automation/trex_control_plane/stl/examples/stl_path.py
new file mode 100644
index 00000000..f190aab1
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_path.py
@@ -0,0 +1,7 @@
+import sys, os
+
+# FIXME: point this to the right path for trex_stl_lib
+sys.path.insert(0, "../")
+
+STL_PROFILES_PATH = os.path.join(os.pardir, os.pardir, os.pardir, os.pardir, 'stl')
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_pcap.py b/scripts/automation/trex_control_plane/stl/examples/stl_pcap.py
new file mode 100644
index 00000000..98af6134
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_pcap.py
@@ -0,0 +1,117 @@
+import stl_path
+from trex_stl_lib.api import *
+import argparse
+import sys
+
+
+def packet_hook_generator (remove_fcs, vlan_id):
+
+ def packet_hook (packet):
+ packet = Ether(packet)
+
+ if 0 <= vlan_id <= 4095: # VLAN ID is a 12-bit field (0-4095)
+ packet_l3 = packet.payload
+ packet = Ether() / Dot1Q(vlan = vlan_id) / packet_l3
+
+ if remove_fcs and packet.lastlayer().name == 'Padding':
+ packet.lastlayer().underlayer.remove_payload()
+
+ return str(packet)
+
+ return packet_hook
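+
+# the hook receives each packet's raw bytes and must return (possibly
+# modified) raw bytes; push_pcap applies it to every packet before sending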
+
+
+def inject_pcap (pcap_file, server, port, loop_count, ipg_usec, use_vm, remove_fcs, vlan_id):
+
+ # create client
+ c = STLClient(server = server)
+
+ if remove_fcs or vlan_id >= 0: # vlan_id default of -1 means 'no VLAN'
+ packet_hook = packet_hook_generator(remove_fcs, vlan_id)
+ else:
+ packet_hook = None
+
+ try:
+
+ vm = STLIPRange(dst = {'start': '10.0.0.1', 'end': '10.0.0.254', 'step' : 1}) if use_vm else None
+
+ c.connect()
+ c.reset(ports = [port])
+
+ c.clear_stats()
+ c.push_pcap(pcap_file,
+ ipg_usec = ipg_usec,
+ count = loop_count,
+ vm = vm,
+ packet_hook = packet_hook)
+
+ c.wait_on_traffic()
+
+
+ stats = c.get_stats()
+ opackets = stats[port]['opackets']
+ print("{0} packets were Tx on port {1}\n".format(opackets, port))
+
+ except STLError as e:
+ print(e)
+ sys.exit(1)
+
+ finally:
+ c.disconnect()
+
+
+def setParserOptions():
+ parser = argparse.ArgumentParser(prog="stl_pcap.py")
+
+ parser.add_argument("-f", "--file", help = "pcap file to inject",
+ dest = "pcap",
+ required = True,
+ type = str)
+
+ parser.add_argument("-s", "--server", help = "TRex server address",
+ dest = "server",
+ default = 'localhost',
+ type = str)
+
+ parser.add_argument("-p", "--port", help = "port to inject on",
+ dest = "port",
+ required = True,
+ type = int)
+
+ parser.add_argument("-n", "--number", help = "How many times to inject pcap [default is 1, 0 means forever]",
+ dest = "loop_count",
+ default = 1,
+ type = int)
+
+ parser.add_argument("-i", help = "IPG in usec",
+ dest = "ipg",
+ default = 10.0,
+ type = float)
+
+ parser.add_argument("-x", help = "Iterate over IP dest",
+ dest = "use_vm",
+ default = False,
+ action = "store_true")
+
+ parser.add_argument("-r", "--remove-fcs", help = "Remove FCS if exists. Limited by Scapy capabilities.",
+ dest = "remove",
+ default = False,
+ action = "store_true")
+
+ parser.add_argument("-v", "--vlan", help = "Add VLAN header with this ID. Limited by Scapy capabilities.",
+ dest = "vlan",
+ default = -1,
+ type = int)
+
+ return parser
+
+def main ():
+ parser = setParserOptions()
+ options = parser.parse_args()
+
+ inject_pcap(options.pcap, options.server, options.port, options.loop_count, options.ipg, options.use_vm, options.remove, options.vlan)
+
+# inject pcap
+if __name__ == '__main__':
+ main()
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_pcap_remote.py b/scripts/automation/trex_control_plane/stl/examples/stl_pcap_remote.py
new file mode 100644
index 00000000..c47eee31
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_pcap_remote.py
@@ -0,0 +1,123 @@
+import stl_path
+from trex_stl_lib.api import *
+import argparse
+import sys
+import os
+import time
+
+
+def inject_pcap (c, pcap_file, port, loop_count, ipg_usec, duration):
+
+ pcap_file = os.path.abspath(pcap_file)
+
+ c.reset(ports = [port])
+ c.push_remote(pcap_file, ports = [port], ipg_usec = ipg_usec, speedup = 1.0, count = loop_count, duration = duration)
+ # assume 100 seconds is enough; increase the timeout if needed
+ c.wait_on_traffic(ports = [port], timeout = 100)
+
+ stats = c.get_stats()
+ opackets = stats[port]['opackets']
+
+ return opackets
+ #print("{0} packets were Tx on port {1}\n".format(opackets, port))
+
+
+
+def setParserOptions():
+ parser = argparse.ArgumentParser(prog="stl_pcap.py")
+
+ parser.add_argument("-f", "--file", help = "pcap file to inject",
+ dest = "pcap",
+ required = True,
+ type = str)
+
+ parser.add_argument("-s", "--server", help = "TRex server address",
+ dest = "server",
+ default = 'localhost',
+ type = str)
+
+ parser.add_argument("-p", "--port", help = "port to inject on",
+ dest = "port",
+ required = True,
+ type = int)
+
+ parser.add_argument("-n", "--number", help = "How many times to inject pcap [default is 1, 0 means forever]",
+ dest = "loop_count",
+ default = 1,
+ type = int)
+
+ parser.add_argument("-i", help = "IPG in usec",
+ dest = "ipg",
+ default = None,
+ type = float)
+
+ parser.add_argument("-d", help = "duration in seconds",
+ dest = "duration",
+ default = -1,
+ type = float)
+
+ return parser
+
+def sizeof_fmt(num, suffix='B'):
+ for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
+ if abs(num) < 1024.0:
+ return "%3.1f%s%s" % (num, unit, suffix)
+ num /= 1024.0
+ return "%.1f%s%s" % (num, 'Yi', suffix)
+
+
+def read_txt_file (filename):
+
+ with open(filename) as f:
+ lines = f.readlines()
+
+ caps = []
+ for raw in lines:
+ raw = raw.rstrip()
+ if not raw or raw.startswith('#'): # skip empty and comment lines
+ continue
+ ext=os.path.splitext(raw)[1]
+ if ext not in ['.cap', '.pcap', '.erf']:
+ # skip unknown format
+ continue
+
+ caps.append(raw)
+
+ return caps
+
+
+def start (args):
+
+ parser = setParserOptions()
+ options = parser.parse_args(args)
+
+ ext = os.path.splitext(options.pcap)[1]
+ if ext == '.txt':
+ caps = read_txt_file(options.pcap)
+ elif ext in ['.cap', '.pcap']:
+ caps = [options.pcap]
+ else:
+ print("unknown file extension for file {0}".format(options.pcap))
+ return
+
+ c = STLClient(server = options.server)
+ try:
+ c.connect()
+ for i, cap in enumerate(caps, start = 1):
+ before = time.time()
+ print ("{:} CAP {:} @ {:} - ".format(i, cap, sizeof_fmt(os.path.getsize(cap)))),
+ injected = inject_pcap(c, cap, options.port, options.loop_count, options.ipg, options.duration)
+ print("took {:.2f} seconds for {:} packets").format(time.time() - before, injected)
+
+ except STLError as e:
+ print(e)
+ return
+
+ finally:
+ c.disconnect()
+
+def main ():
+ start(sys.argv[1:])
+
+# inject pcap
+if __name__ == '__main__':
+ main()
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_profile.py b/scripts/automation/trex_control_plane/stl/examples/stl_profile.py
new file mode 100644
index 00000000..16d5238e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_profile.py
@@ -0,0 +1,58 @@
+import stl_path
+from trex_stl_lib.api import *
+
+import os
+import time
+
+def simple ():
+
+ # create client
+ #verbose_level = LoggerApi.VERBOSE_HIGH
+ c = STLClient(verbose_level = LoggerApi.VERBOSE_REGULAR)
+ passed = True
+
+ try:
+ # connect to server
+ c.connect()
+
+ my_ports=[0,1]
+
+ # prepare our ports
+ c.reset(ports = my_ports)
+ profile_file = os.path.join(stl_path.STL_PROFILES_PATH, 'hlt', 'udp_1pkt_simple.py')
+
+ try:
+ profile = STLProfile.load(profile_file)
+ except STLError as e:
+ print(format_text("\nError while loading profile '{0}'\n".format(profile_file), 'bold'))
+ print(e.brief() + "\n")
+ return
+
+ print(profile.dump_to_yaml())
+
+ c.remove_all_streams(my_ports)
+
+
+ c.add_streams(profile.get_streams(), ports = my_ports)
+
+ c.start(ports = [0, 1], mult = "5mpps", duration = 10)
+
+ # block until done
+ c.wait_on_traffic(ports = [0, 1])
+
+
+ except STLError as e:
+ passed = False
+ print(e)
+
+ finally:
+ c.disconnect()
+
+ if passed:
+ print("\nTest has passed :-)\n")
+ else:
+ print("\nTest has failed :-(\n")
+
+
+# run the tests
+simple()
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_run_udp_simple.py b/scripts/automation/trex_control_plane/stl/examples/stl_run_udp_simple.py
new file mode 100644
index 00000000..d06414e4
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_run_udp_simple.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+import sys
+import argparse
+"""
+Sample API application:
+Connects to TRex
+Sends UDP packets of a specific length
+Each direction has its own IP range
+Compares RX packets to TX packets, assuming the ports are in loopback
+
+"""
+
+import stl_path
+from trex_stl_lib.api import *
+
+H_VER = "trex-x v0.1 "
+
+class t_global(object):
+ args=None;
+
+
+import time
+import json
+import string
+
+def generate_payload(length):
+ word = ''
+ alphabet_size = len(string.ascii_letters) # string.letters exists only on Python 2
+ for i in range(length):
+ word += string.ascii_letters[(i % alphabet_size)]
+ return word
+
+# simple packet creation
+def create_pkt (frame_size = 9000, direction=0):
+
+ ip_range = {'src': {'start': "10.0.0.1", 'end': "10.0.0.254"},
+ 'dst': {'start': "8.0.0.1", 'end': "8.0.0.254"}}
+
+ if (direction == 0):
+ src = ip_range['src']
+ dst = ip_range['dst']
+ else:
+ src = ip_range['dst']
+ dst = ip_range['src']
+
+ vm = [
+ # src
+ STLVmFlowVar(name="src",min_value=src['start'],max_value=src['end'],size=4,op="inc"),
+ STLVmWrFlowVar(fv_name="src",pkt_offset= "IP.src"),
+
+ # dst
+ STLVmFlowVar(name="dst",min_value=dst['start'],max_value=dst['end'],size=4,op="inc"),
+ STLVmWrFlowVar(fv_name="dst",pkt_offset= "IP.dst"),
+
+ # checksum
+ STLVmFixIpv4(offset = "IP")
+ ]
+
+ pkt_base = Ether(src="00:00:00:00:00:01",dst="00:00:00:00:00:02")/IP()/UDP(dport=12,sport=1025)
+ pyld_size = frame_size - len(pkt_base)
+ pkt_pyld = generate_payload(pyld_size)
+
+ return STLPktBuilder(pkt = pkt_base/pkt_pyld,
+ vm = vm)
+
+
+def simple_burst (duration = 10, frame_size = 9000, speed = '1gbps'):
+
+ if (frame_size < 60):
+ frame_size = 60
+
+ pkt_dir_0 = create_pkt (frame_size, 0)
+
+ pkt_dir_1 = create_pkt (frame_size, 1)
+
+ # create client
+ c = STLClient(server = t_global.args.ip)
+
+ passed = True
+
+ try:
+ # turn this on for some information
+ #c.set_verbose("high")
+
+ # create two streams
+ s1 = STLStream(packet = pkt_dir_0,
+ mode = STLTXCont(pps = 100))
+
+ # second stream with a phase of 1ms (inter stream gap)
+ s2 = STLStream(packet = pkt_dir_1,
+ isg = 1000,
+ mode = STLTXCont(pps = 100))
+
+ if t_global.args.debug:
+ STLStream.dump_to_yaml ("example.yaml", [s1,s2]) # export to YAML so you can run it on simulator ./stl-sim -f example.yaml -o o.pcap
+
+ # connect to server
+ c.connect()
+
+ # prepare our ports (my machine has 0 <--> 1 with static route)
+ c.reset(ports = [0, 1])
+
+ # add both streams to ports
+ c.add_streams(s1, ports = [0])
+ c.add_streams(s2, ports = [1])
+
+ # clear the stats before injecting
+ c.clear_stats()
+
+ # choose rate and start traffic for the requested duration
+ print("Running {0} on ports 0, 1 for {2} seconds, UDP frame size {1} (with FCS)...".format(speed, frame_size+4, duration))
+ c.start(ports = [0, 1], mult = speed, duration = duration)
+
+ # block until done
+ c.wait_on_traffic(ports = [0, 1])
+
+ # read the stats after the test
+ stats = c.get_stats()
+
+ #print stats
+ print(json.dumps(stats[0], indent = 4, separators=(',', ': '), sort_keys = True))
+ print(json.dumps(stats[1], indent = 4, separators=(',', ': '), sort_keys = True))
+
+ lost_a = stats[0]["opackets"] - stats[1]["ipackets"]
+ lost_b = stats[1]["opackets"] - stats[0]["ipackets"]
+
+ print("\npackets lost from 0 --> 1: {0} pkts".format(lost_a))
+ print("packets lost from 1 --> 0: {0} pkts".format(lost_b))
+
+ if (lost_a == 0) and (lost_b == 0):
+ passed = True
+ else:
+ passed = False
+
+ except STLError as e:
+ passed = False
+ print(e)
+
+ finally:
+ c.disconnect()
+
+ if passed:
+ print("\nPASSED\n")
+ else:
+ print("\nFAILED\n")
+
+def process_options ():
+ parser = argparse.ArgumentParser(usage="""
+ connect to TRex and send burst of packets
+
+ examples
+
+ stl_run_udp_simple.py -s 9001
+
+ stl_run_udp_simple.py -s 9000 -d 2
+
+ stl_run_udp_simple.py -s 3000 -d 3 -m 10mbps
+
+ stl_run_udp_simple.py -s 3000 -d 3 -m 10mbps --debug
+
+ then run the simulator on the output
+ ./stl-sim -f example.yaml -o a.pcap ==> a.pcap includes the packets
+
+ """,
+ description="example for TRex api",
+ epilog=" written by hhaim");
+
+ parser.add_argument("-s", "--frame-size",
+ dest="frame_size",
+ help='L2 frame size in bytes without FCS',
+ default=60,
+ type = int,
+ )
+
+ parser.add_argument("--ip",
+ dest="ip",
+ help='remote TRex IP (default: local)',
+ default="127.0.0.1",
+ type = str
+ )
+
+
+ parser.add_argument('-d','--duration',
+ dest='duration',
+ help='duration in seconds',
+ default=10,
+ type = int,
+ )
+
+
+ parser.add_argument('-m','--multiplier',
+ dest='mul',
+ help='speed in gbps/pps, for example 1gbps, 1mbps, 1mpps',
+ default="1mbps"
+ )
+
+ parser.add_argument('--debug',
+ action='store_true',
+ help='print debug info')
+
+ parser.add_argument('--version', action='version',
+ version=H_VER )
+
+ t_global.args = parser.parse_args()
+ print(t_global.args)
+
+
+
+def main():
+ process_options ()
+ simple_burst(duration = t_global.args.duration,
+ frame_size = t_global.args.frame_size,
+ speed = t_global.args.mul
+ )
+
+if __name__ == "__main__":
+ main()
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_simple_burst.py b/scripts/automation/trex_control_plane/stl/examples/stl_simple_burst.py
new file mode 100644
index 00000000..4bd9fd4c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_simple_burst.py
@@ -0,0 +1,71 @@
+import stl_path
+from trex_stl_lib.api import *
+
+import time
+
+def simple_burst (port_a, port_b, pkt_size, burst_size, rate):
+
+ # create client
+ c = STLClient()
+ passed = True
+
+ try:
+ pkt_base = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()
+ pad = max(0, pkt_size - len(pkt_base)) * 'x'
+ pkt = STLPktBuilder(pkt = pkt_base / pad)
+
+ # create two bursts and link them
+ s1 = STLStream(name = 'A',
+ packet = pkt,
+ mode = STLTXSingleBurst(total_pkts = burst_size),
+ next = 'B')
+
+ s2 = STLStream(name = 'B',
+ self_start = False,
+ packet = pkt,
+ mode = STLTXSingleBurst(total_pkts = burst_size))
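+
+ # 'next' plus self_start=False chains the bursts: 'B' starts only when
+ # 'A' ends, so each start yields burst_size * 2 streams * 2 ports packets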
+
+ # connect to server
+ c.connect()
+
+ # prepare our ports
+ c.reset(ports = [port_a, port_b])
+
+ # add both streams to ports
+ stream_ids = c.add_streams([s1, s2], ports = [port_a, port_b])
+
+ # run 5 times
+ for i in range(1, 6):
+ c.clear_stats()
+ c.start(ports = [port_a, port_b], mult = rate)
+ c.wait_on_traffic(ports = [port_a, port_b])
+
+ stats = c.get_stats()
+ ipackets = stats['total']['ipackets']
+
+ print("Test iteration {0} - Packets Received: {1} ".format(i, ipackets))
+ # two streams X 2 ports
+ if (ipackets != (burst_size * 2 * 2)):
+ passed = False
+
+ except STLError as e:
+ passed = False
+ print(e)
+
+ finally:
+ c.disconnect()
+
+ if c.get_warnings():
+ print("\n\n*** test had warnings ****\n\n")
+ for w in c.get_warnings():
+ print(w)
+
+ if passed and not c.get_warnings():
+ print("\nTest has passed :-)\n")
+ else:
+ print("\nTest has failed :-(\n")
+
+
+# run the tests
+simple_burst(0, 3, 256, 50000, "80%")
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py b/scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py
new file mode 100644
index 00000000..1d4ef250
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py
@@ -0,0 +1,60 @@
+import stl_path
+from trex_stl_lib.api import *
+
+import os
+import time
+
+def simple ():
+
+ # create client
+ #verbose_level = LoggerApi.VERBOSE_HIGH
+ c = STLClient(verbose_level = LoggerApi.VERBOSE_REGULAR)
+ passed = True
+
+ try:
+ # connect to server
+ c.connect()
+
+ my_ports=[0,1]
+
+ # prepare our ports
+ c.reset(ports = my_ports)
+
+ print((" is connected {0}".format(c.is_connected())))
+
+ print((" number of ports {0}".format(c.get_port_count())))
+ print((" acquired_ports {0}".format(c.get_acquired_ports())))
+ # port stats
+ print(c.get_stats(my_ports))
+ # port info
+ print(c.get_port_info(my_ports))
+
+ c.ping()
+ profile_file = os.path.join(stl_path.STL_PROFILES_PATH, 'udp_1pkt_simple.py')
+
+ print("start")
+ c.start_line(" -f %s -m 10mpps --port 0 1 " % profile_file)
+ time.sleep(2)
+ c.pause_line("--port 0 1")
+ time.sleep(2)
+ c.resume_line("--port 0 1")
+ time.sleep(2)
+ c.update_line("--port 0 1 -m 5mpps")
+ time.sleep(2)
+ c.stop_line("--port 0 1")
+
+ except STLError as e:
+ passed = False
+ print(e)
+
+ finally:
+ c.disconnect()
+
+ if passed:
+ print("\nTest has passed :-)\n")
+ else:
+ print("\nTest has failed :-(\n")
+
+
+# run the tests
+simple()
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_simple_pin_core.py b/scripts/automation/trex_control_plane/stl/examples/stl_simple_pin_core.py
new file mode 100644
index 00000000..6e3d5f7f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_simple_pin_core.py
@@ -0,0 +1,72 @@
+import stl_path
+from trex_stl_lib.api import *
+
+import time
+
+def simple_burst (port_a, port_b, pkt_size, burst_size, rate):
+
+ # create client
+ c = STLClient()
+ passed = True
+
+ try:
+ pkt_base = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()
+ pad = max(0, pkt_size - len(pkt_base)) * 'x'
+ pkt = STLPktBuilder(pkt = pkt_base / pad)
+
+ # create two bursts and link them
+ s1 = STLStream(name = 'A',
+ packet = pkt,
+ mode = STLTXSingleBurst(total_pkts = burst_size),
+ next = 'B')
+
+ s2 = STLStream(name = 'B',
+ self_start = False,
+ packet = pkt,
+ mode = STLTXSingleBurst(total_pkts = burst_size))
+
+ # connect to server
+ c.connect()
+
+ # prepare our ports
+ c.reset(ports = [port_a, port_b])
+
+ # add both streams to ports
+ stream_ids = c.add_streams([s1, s2], ports = [port_a, port_b])
+
+ # run 5 times
+ for i in range(1, 6):
+ c.clear_stats()
+ # pin cores to ports for better performance
+ c.start(ports = [port_a, port_b], mult = rate, core_mask = STLClient.CORE_MASK_PIN)
+ c.wait_on_traffic(ports = [port_a, port_b])
+
+ stats = c.get_stats()
+ ipackets = stats['total']['ipackets']
+
+ print("Test iteration {0} - Packets Received: {1} ".format(i, ipackets))
+ # two streams X 2 ports
+ if (ipackets != (burst_size * 2 * 2)):
+ passed = False
+
+ except STLError as e:
+ passed = False
+ print(e)
+
+ finally:
+ c.disconnect()
+
+ if c.get_warnings():
+ print("\n\n*** test had warnings ****\n\n")
+ for w in c.get_warnings():
+ print(w)
+
+ if passed and not c.get_warnings():
+ print("\nTest has passed :-)\n")
+ else:
+ print("\nTest has failed :-(\n")
+
+
+# run the tests
+simple_burst(0, 3, 256, 50000, "80%")
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/using_rpc_proxy.py b/scripts/automation/trex_control_plane/stl/examples/using_rpc_proxy.py
new file mode 100755
index 00000000..d2fcdff3
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/using_rpc_proxy.py
@@ -0,0 +1,149 @@
+#!/router/bin/python
+
+import argparse
+import sys
+import os
+from time import sleep
+from pprint import pprint
+
+# ext libs
+ext_libs = os.path.join(os.pardir, os.pardir, os.pardir, os.pardir, 'external_libs')
+sys.path.append(os.path.join(ext_libs, 'jsonrpclib-pelix-0.2.5'))
+import jsonrpclib
+
+def fail(msg):
+ print(msg)
+ sys.exit(1)
+
+def verify(res):
+ if not res[0]:
+ fail(res[1])
+ return res
+
+def verify_hlt(res):
+ if res['status'] == 0:
+ fail(res['log'])
+ return res
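+
+# native proxy calls return [status, result] pairs (unwrapped by verify);
+# HLTAPI calls return a dict with 'status' and 'log' keys (checked by verify_hlt)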
+
+### Main ###
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description = 'Use of Stateless through rpc_proxy. (Can be implemented in any language)')
+ parser.add_argument('-s', '--server', type=str, default = 'localhost', dest='server', action = 'store',
+ help = 'Address of rpc proxy.')
+ parser.add_argument('-p', '--port', type=int, default = 8095, dest='port', action = 'store',
+ help = 'Port of rpc proxy.\nDefault is 8095.')
+ parser.add_argument('--master_port', type=int, default = 8091, dest='master_port', action = 'store',
+ help = 'Port of Master daemon.\nDefault is 8091.')
+ args = parser.parse_args()
+
+ server = jsonrpclib.Server('http://%s:%s' % (args.server, args.port))
+ master = jsonrpclib.Server('http://%s:%s' % (args.server, args.master_port))
+
+# Connecting
+
+ try:
+ print('Connecting to STL RPC proxy server')
+ server.check_connectivity()
+ print('Connected')
+ except Exception as e:
+ print('Could not connect to STL RPC proxy server: %s\nTrying to start it from Master daemon.' % e)
+ try:
+ master.check_connectivity()
+ master.start_stl_rpc_proxy()
+ print('Started')
+ except Exception as e:
+ print('Could not start it from Master daemon. Error: %s' % e)
+ sys.exit(-1)
+
+
+# Native API
+
+ print('Initializing Native Client')
+ verify(server.native_proxy_init(server = args.server, force = True))
+
+ print('Connecting to TRex server')
+ verify(server.connect())
+
+ print('Resetting all ports')
+ verify(server.reset())
+
+ print('Getting ports info')
+ res = verify(server.native_method(func_name = 'get_port_info'))
+ print('Ports info is: %s' % res[1])
+ ports = [port['index'] for port in res[1]]
+
+ print('Sending pcap to ports %s' % ports)
+ verify(server.push_remote(pcap_filename = 'stl/sample.pcap'))
+ sleep(3)
+
+ print('Getting stats')
+ res = verify(server.get_stats())
+ pprint(res[1])
+
+ print('Resetting all ports')
+ verify(server.reset())
+
+ imix_path_1 = '../../../../stl/imix.py'
+ imix_path_2 = '../../stl/imix.py'
+ if os.path.exists(imix_path_1):
+ imix_path = imix_path_1
+ elif os.path.exists(imix_path_2):
+ imix_path = imix_path_2
+ else:
+ print('Could not find path of imix profile, skipping')
+ imix_path = None
+
+ if imix_path:
+ print('Adding profile %s' % imix_path)
+ verify(server.native_method(func_name = 'add_profile', filename = imix_path))
+
+ print('Start traffic for 5 sec')
+ verify(server.native_method('start'))
+ sleep(5)
+
+ print('Getting stats')
+ res = verify(server.get_stats())
+ pprint(res[1])
+
+ print('Resetting all ports')
+ verify(server.reset())
+
+ print('Deleting Native Client instance')
+ verify(server.native_proxy_del())
+
+# HLTAPI
+
+ print('Initializing HLTAPI Client')
+ verify_hlt(server.hltapi_proxy_init(force = True))
+ print('HLTAPI Client initiated')
+
+ print('HLTAPI connect')
+ verify_hlt(server.hlt_connect(device = args.server, port_list = ports, reset = True, break_locks = True))
+
+ print('Creating traffic')
+ verify_hlt(server.traffic_config(
+ mode = 'create', bidirectional = True,
+ port_handle = ports[0], port_handle2 = ports[1],
+ frame_size = 100,
+ l3_protocol = 'ipv4',
+ ip_src_addr = '10.0.0.1', ip_src_mode = 'increment', ip_src_count = 254,
+ ip_dst_addr = '8.0.0.1', ip_dst_mode = 'increment', ip_dst_count = 254,
+ l4_protocol = 'udp',
+ udp_dst_port = 12, udp_src_port = 1025,
+ rate_percent = 10, ignore_macs = True,
+ ))
+
+ print('Starting traffic for 5 sec')
+ verify_hlt(server.traffic_control(action = 'run', port_handle = ports[:2]))
+
+ sleep(5)
+ print('Stopping traffic')
+ verify_hlt(server.traffic_control(action = 'stop', port_handle = ports[:2]))
+
+ print('Getting stats')
+ res = verify_hlt(server.traffic_stats(mode = 'aggregate', port_handle = ports[:2]))
+ pprint(res)
+
+ print('Deleting HLTAPI Client instance')
+ verify_hlt(server.hltapi_proxy_del())
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py
new file mode 100755
index 00000000..91257596
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py
@@ -0,0 +1,798 @@
+
+import os
+import sys
+stl_pathname = os.path.abspath(os.path.join(os.pardir, os.pardir))
+sys.path.append(stl_pathname)
+
+from trex_stl_lib.api import *
+import tempfile
+import hashlib
+import base64
+import numbers
+import inspect
+import json
+from pprint import pprint
+
+# add some layers as an example
+# need to test more
+from scapy.layers.dns import *
+from scapy.layers.dhcp import *
+from scapy.layers.ipsec import *
+from scapy.layers.netflow import *
+from scapy.layers.sctp import *
+from scapy.layers.tftp import *
+
+from scapy.contrib.mpls import *
+from scapy.contrib.igmp import *
+from scapy.contrib.igmpv3 import *
+
+
+
+
+#additional_stl_udp_pkts = os.path.abspath(os.path.join(os.pardir,os.pardir,os.pardir,os.pardir, os.pardir,'stl'))
+#sys.path.append(additional_stl_udp_pkts)
+#from udp_1pkt_vxlan import VXLAN
+#sys.path.remove(additional_stl_udp_pkts)
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from io import StringIO
+
+
+
+
+class Scapy_service_api():
+
+ def get_version_handler(self,client_v_major,client_v_minor):
+ """ get_version_handler(self,client_v_major,client_v_minor)
+
+ Gives the client a handler to connect to and use the server API
+
+ Parameters
+ ----------
+ client_v_major - major number of api version on the client side
+ client_v_minor - minor number of api version on the client side
+
+ Returns
+ -------
+ Handler(string) to provide when using server api
+ """
+ pass
+ def get_all(self,client_v_handler):
+ """ get_all(self,client_v_handler)
+
+ Sends all the protocols and fields that Scapy Service supports.
+ It also sends the md5 of the Protocol DB and Fields DB, used to check whether the DBs are up to date
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ Dictionary (of protocol DB and scapy fields DB)
+
+ Raises
+ ------
+ Raises an exception when a DB error occurs (i.e a layer is not loaded properly and has missing components)
+ """
+ pass
+
+ def check_update_of_dbs(self,client_v_handler,db_md5,field_md5):
+ """ check_update_of_dbs(self,client_v_handler,db_md5,field_md5)
+ Checks if the Scapy Service running on the server has a newer version of the databases than the client has
+
+ Parameters
+ ----------
+ db_md5 - The md5 that was delivered with the protocol database that the client owns, when first received at the client
+ field_md5 - The md5 that was delivered with the fields database that the client owns, when first received at the client
+
+ Returns
+ -------
+ True/False according to the databases' versions (determined by their md5)
+
+ Raises
+ ------
+ Raises an exception (ScapyException) when protocol DB/Fields DB is not up to date
+ """
+ pass
+
+
+ def build_pkt(self,client_v_handler,pkt_model_descriptor):
+ """ build_pkt(self,client_v_handler,pkt_model_descriptor) -> Dictionary (of Offsets,Show2 and Buffer)
+
+ Performs calculations on the given packet and returns results for that packet.
+
+ Parameters
+ ----------
+ pkt_model_descriptor - an array of dictionaries describing a network packet
+
+ Returns
+ -------
+ - The packets offsets: each field in every layer is mapped inside the Offsets Dictionary
+ - The Show2: A description of each field and its value in every layer of the packet
+ - The Buffer: The Hexdump of packet encoded in base64
+
+ Raises
+ ------
+ Raises an exception when the Scapy string format is illegal, contains a syntax error, a non-supported
+ protocol, etc.
+ """
+ pass
+
+
+ def get_tree(self,client_v_handler):
+ """ get_tree(self) -> Dictionary describing an example of hierarchy in layers
+
+ Scapy service holds a tree of layers that can be stacked to a recommended packet
+ according to the hierarchy
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ Returns an example hierarchy tree of layers that can be stacked to a packet
+
+ Raises
+ ------
+ None
+ """
+ pass
+
+ def reconstruct_pkt(self,client_v_handler,binary_pkt,model_descriptor):
+ """ reconstruct_pkt(self,client_v_handler,binary_pkt)
+
+ Builds a valid Scapy packet by applying changes to the binary packet, and returns all the information returned by build_pkt
+
+ Parameters
+ ----------
+ Source packet in binary_pkt, formatted in "base64" encoding
+ List of changes in model_descriptor
+
+ Returns
+ -------
+ All data provided in build_pkt:
+ show2 - detailed description of the packet
+ buffer - the packet presented in binary
+ offsets - the offset[in bytes] of each field in the packet
+
+ """
+ pass
+
+ def read_pcap(self,client_v_handler,pcap_base64):
+ """ read_pcap(self,client_v_handler,pcap_base64)
+
+ Parses pcap file contents and returns an array with build_pkt information for each packet
+
+ Parameters
+ ----------
+ binary pcap file in base64 encoding
+
+ Returns
+ -------
+ Array of build_pkt(packet)
+ """
+ pass
+
+ def write_pcap(self,client_v_handler,packets_base64):
+ """ write_pcap(self,client_v_handler,packets_base64)
+
+ Writes binary packets to pcap file
+
+ Parameters
+ ----------
+ array of binary packets in base64 encoding
+
+ Returns
+ -------
+ binary pcap file in base64 encoding
+ """
+ pass
+
+ def get_definitions(self,client_v_handler, def_filter):
+ """ get_definitions(self,client_v_handler, def_filter)
+
+ Returns protocols and fields metadata of scapy service
+
+ Parameters
+ ----------
+ def_filter - array of protocol names
+
+ Returns
+ -------
+ definitions for protocols
+ """
+ pass
+
+ def get_payload_classes(self,client_v_handler, pkt_model_descriptor):
+ """ get_payload_classes(self,client_v_handler, pkt_model_descriptor)
+
+ Returns an array of protocol classes, which normally can be used as a payload
+
+ Parameters
+ ----------
+ pkt_model_descriptor - see build_pkt
+
+ Returns
+ -------
+ array of supported protocol classes
+ """
+ pass
+
+def is_python(version):
+ return version == sys.version_info[0]
+
+def is_number(obj):
+ return isinstance(obj, numbers.Number)
+
+def is_string(obj):
+ return type(obj) == str or type(obj).__name__ == 'unicode' # python3 doesn't have unicode type
+
+def is_ascii_str(strval):
+ return strval and all(ord(ch) < 128 for ch in strval)
+
+def is_ascii_bytes(buf):
+ return buf and all(byte < 128 for byte in buf)
+
+def is_ascii(obj):
+ if is_bytes3(obj):
+ return is_ascii_bytes(obj)
+ else:
+ return is_ascii_str(obj)
+
+def is_bytes3(obj):
+ # checks if obj is exactly bytes (always False for python2)
+ return is_python(3) and type(obj) == bytes
+
+def str_to_bytes(strval):
+ return strval.encode("utf8")
+
+def bytes_to_str(buf):
+ return buf.decode("utf8")
+
+def b64_to_bytes(payload_base64):
+ # get bytes from base64 string(unicode)
+ return base64.b64decode(payload_base64)
+
+def bytes_to_b64(buf):
+ # bytes to base64 string(unicode)
+ return base64.b64encode(buf).decode('ascii')
+
+def get_sample_field_val(scapy_layer, fieldId):
+ # get some sample value for the field, to determine the value type
+ # use random or serialized value if default value is None
+ field_desc, current_val = scapy_layer.getfield_and_val(fieldId)
+ if current_val is not None:
+ return current_val
+ try:
+ # try to get some random value to determine type
+ return field_desc.randval()._fix()
+ except:
+ pass
+ try:
+ # try to serialize/deserialize
+ ltype = type(scapy_layer)
+ pkt = ltype(bytes(ltype()))
+ return pkt.getfieldval(fieldId)
+ except:
+ pass
+
+class ScapyException(Exception): pass
+class Scapy_service(Scapy_service_api):
+
+#----------------------------------------------------------------------------------------------------
+ class ScapyFieldDesc:
+ def __init__(self,FieldName,regex='empty'):
+ self.FieldName = FieldName
+ self.regex = regex
+ #default values - should be changed when needed, or added to the constructor
+ self.string_input =""
+ self.string_input_mex_len = 1
+ self.integer_input = 0
+ self.integer_input_min = 0
+ self.integer_input_max = 1
+ self.input_array = []
+ self.input_list_max_len = 1
+
+ def stringRegex(self):
+ return self.regex
+#----------------------------------------------------------------------------------------------------
+ def __init__(self):
+ self.Raw = {'Raw':''}
+ self.high_level_protocols = ['Raw']
+ self.transport_protocols = {'TCP':self.Raw,'UDP':self.Raw}
+ self.network_protocols = {'IP':self.transport_protocols ,'ARP':''}
+ self.low_level_protocols = { 'Ether': self.network_protocols }
+ self.regexDB= {'MACField' : self.ScapyFieldDesc('MACField','^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$'),
+ 'IPField' : self.ScapyFieldDesc('IPField','^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$')}
+ self.all_protocols = self._build_lib()
+ self.protocol_tree = {'ALL':{'Ether':{'ARP':{},'IP':{'TCP':{'RAW':'payload'},'UDP':{'RAW':'payload'}}}}}
+ self.version_major = '1'
+ self.version_minor = '01'
+ self.server_v_hashed = self._generate_version_hash(self.version_major,self.version_minor)
+
+
+ def _all_protocol_structs(self):
+ old_stdout = sys.stdout
+ sys.stdout = mystdout = StringIO()
+ ls()
+ sys.stdout = old_stdout
+ all_protocol_data= mystdout.getvalue()
+ return all_protocol_data
+
+ def _protocol_struct(self,protocol):
+ if '_' in protocol:
+ return []
+ if protocol != '':
+ if protocol not in self.all_protocols:
+ return 'protocol not supported'
+ protocol = eval(protocol)
+ old_stdout = sys.stdout
+ sys.stdout = mystdout = StringIO()
+ ls(protocol)
+ sys.stdout = old_stdout
+ protocol_data= mystdout.getvalue()
+ return protocol_data
+
+ def _build_lib(self):
+ lib = self._all_protocol_structs()
+ lib = lib.splitlines()
+ all_protocols=[]
+ for entry in lib:
+ entry = entry.split(':')
+ all_protocols.append(entry[0].strip())
+ del all_protocols[-1]
+ return all_protocols
+
+ def _parse_description_line(self,line):
+ line_arr = [x.strip() for x in re.split(': | = ',line)]
+ return tuple(line_arr)
+
+ def _parse_entire_description(self,description):
+ description = description.split('\n')
+ description_list = [self._parse_description_line(x) for x in description]
+ del description_list[-1]
+ return description_list
+
+ def _get_protocol_details(self,p_name):
+ protocol_str = self._protocol_struct(p_name)
+ if protocol_str=='protocol not supported':
+ return 'protocol not supported'
+ if len(protocol_str) == 0:
+ return []
+ tupled_protocol = self._parse_entire_description(protocol_str)
+ return tupled_protocol
+
+ def _value_from_dict(self, val):
+ # allows building python objects from json
+ if type(val) == type({}):
+ value_type = val['vtype']
+ if value_type == 'EXPRESSION':
+ return eval(val['expr'], {})
+ elif value_type == 'BYTES': # bytes payload (e.g. Raw.load)
+ return b64_to_bytes(val['base64'])
+ elif value_type == 'OBJECT':
+ return val['value']
+ else:
+ return val # it's better to specify type explicitly
+ elif type(val) == type([]):
+ return [self._value_from_dict(v) for v in val]
+ else:
+ return val
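+
+ # Example value dicts accepted above (illustrative, matching the vtype handling):
+ # {"vtype": "EXPRESSION", "expr": "[1, 2, 3]"} -> [1, 2, 3]
+ # {"vtype": "BYTES", "base64": "aGk="} -> b'hi'
+ # lists are converted element by element; any other value is returned as is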
+
+ def _field_value_from_def(self, layer, fieldId, val):
+ field_desc = layer.get_field(fieldId)
+ sample_val = get_sample_field_val(layer, fieldId)
+ # extensions for field values
+ if type(val) == type({}):
+ value_type = val['vtype']
+ if value_type == 'UNDEFINED': # clear field value
+ return None
+ elif value_type == 'RANDOM': # random field value
+ return field_desc.randval()
+ elif value_type == 'MACHINE': # internal machine field repr
+ return field_desc.m2i(layer, b64_to_bytes(val['base64']))
+ if is_number(sample_val) and is_string(val):
+ # human value: guess the type and convert to the internal value.
+ # setfieldval seems to do this already for some fields,
+ # but does not convert decimal/hex strings (e.g. 0x123) to int/long
+ val = str(val) # unicode -> str(ascii)
+ # parse str to int/long as a decimal or hex
+ val_constructor = type(sample_val)
+ if len(val) == 0:
+ return None
+ elif re.match(r"^0x[\da-f]+$", val, flags=re.IGNORECASE): # hex
+ return val_constructor(val, 16)
+ elif re.match(r"^\d+L?$", val): # base10
+ return val_constructor(val)
+ # generate recursive field-independent values
+ return self._value_from_dict(val)
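+
+ # Illustrative conversions for a numeric field (assumed inputs):
+ # "0x1b" -> 27 (hex string), "443" -> 443 (decimal string), "" -> None (clears the field)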
+
+ def _print_tree(self):
+ pprint(self.protocol_tree)
+
+ def _get_all_db(self):
+ db = {}
+ for pro in self.all_protocols:
+ details = self._get_protocol_details(pro)
+ db[pro] = details
+ return db
+
+ def _get_all_fields(self):
+ fields = []
+ for pro in self.all_protocols:
+ details = self._get_protocol_details(pro)
+ for detail in details:
+ if len(detail) == 3:
+ fields.append(detail[1])
+ uniqueFields = list(set(fields))
+ fieldDict = {}
+ for f in uniqueFields:
+ if f in self.regexDB:
+ fieldDict[f] = self.regexDB[f].stringRegex()
+ else:
+ fieldDict[f] = self.ScapyFieldDesc(f).stringRegex()
+ return fieldDict
+
+ def _fully_define(self,pkt):
+ # returns scapy object with all fields initialized
+ rootClass = type(pkt)
+ full_pkt = rootClass(bytes(pkt))
+ full_pkt.build() # this trick initializes offset
+ return full_pkt
+
+ def _bytes_to_value(self, payload_bytes):
+ # generates struct with a value
+ return { "vtype": "BYTES", "base64": bytes_to_b64(payload_bytes) }
+
+ def _pkt_to_field_tree(self,pkt):
+ pkt.build()
+ result = []
+ pcap_struct = self._fully_define(pkt) # structure, which will appear in pcap binary
+ while pkt:
+ layer_id = type(pkt).__name__ # Scapy classname
+ layer_full = self._fully_define(pkt) # current layer recreated from binary to get auto-calculated vals
+ real_layer_id = type(pcap_struct).__name__ if pcap_struct else None
+ valid_struct = True # shows if packet is mapped correctly to the binary representation
+ if not pcap_struct:
+ valid_struct = False
+ elif not issubclass(type(pkt), type(pcap_struct)) and not issubclass(type(pcap_struct), type(pkt)):
+ # structure mismatch. no need to go deeper in pcap_struct
+ valid_struct = False
+ pcap_struct = None
+ fields = []
+ for field_desc in pkt.fields_desc:
+ field_id = field_desc.name
+ ignored = field_id not in layer_full.fields
+ offset = field_desc.offset
+ protocol_offset = pkt.offset
+ field_sz = field_desc.get_size_bytes()
+ # some values are unavailable in pkt (the original model);
+ # in that case, fall back to the fully built layer below
+ fieldval = pkt.getfieldval(field_id)
+ pkt_fieldval_defined = is_string(fieldval) or is_number(fieldval) or is_bytes3(fieldval)
+ if not pkt_fieldval_defined:
+ fieldval = layer_full.getfieldval(field_id)
+ value = None
+ hvalue = None
+ value_base64 = None
+ if is_python(3) and is_bytes3(fieldval):
+ value = self._bytes_to_value(fieldval)
+ if is_ascii_bytes(fieldval):
+ hvalue = bytes_to_str(fieldval)
+ else:
+ # can't be shown as ascii.
+ # this buffer may also not be unicode-compatible (we can still try to convert)
+ value = self._bytes_to_value(fieldval)
+ hvalue = '<binary>'
+ elif not is_string(fieldval):
+ # value as is. this can be int, long, or a custom object (list/dict)
+ # "nice" human value: i2repr(string) would add quotes, so strings get special handling below
+ hvalue = field_desc.i2repr(pkt, fieldval)
+
+ if is_number(fieldval):
+ value = fieldval
+ if is_string(hvalue) and re.match(r"^\d+L$", hvalue):
+ hvalue = hvalue[:-1] # chop the trailing L of a long decimal number (Python 2)
+ else:
+ # fieldval is an object (class / list / dict).
+ # generic serialization/deserialization is needed to rebuild the packet from the packet tree;
+ # some classes can not be mapped to json, but we can serialize them
+ # as a python eval expression, as base64 value bytes, or as the field's machine-internal value (m2i)
+ value = {"vtype": "EXPRESSION", "expr": hvalue}
+ if is_python(3) and is_string(fieldval):
+ hvalue = value = fieldval
+ if is_python(2) and is_string(fieldval):
+ if is_ascii(fieldval):
+ hvalue = value = fieldval
+ else:
+ # python2 non-ascii byte buffers
+ # payload contains non-ascii chars, which
+ # sometimes can not be passed as unicode strings
+ value = self._bytes_to_value(fieldval)
+ hvalue = '<binary>'
+ if field_desc.name == 'load':
+ # show Padding (and possibly similar classes) as Raw
+ layer_id = 'Raw'
+ field_sz = len(pkt)
+ value = self._bytes_to_value(fieldval)
+ field_data = {
+ "id": field_id,
+ "value": value,
+ "hvalue": hvalue,
+ "offset": offset,
+ "length": field_sz
+ }
+ if ignored:
+ field_data["ignored"] = ignored
+ fields.append(field_data)
+ layer_data = {
+ "id": layer_id,
+ "offset": pkt.offset,
+ "fields": fields,
+ "real_id": real_layer_id,
+ "valid_structure": valid_struct,
+ }
+ result.append(layer_data)
+ pkt = pkt.payload
+ if pcap_struct:
+ pcap_struct = pcap_struct.payload or None
+ return result
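+
+ # Sketch of one resulting layer entry (illustrative shape only):
+ # {"id": "Ether", "offset": 0, "real_id": "Ether", "valid_structure": True,
+ # "fields": [{"id": "dst", "value": ..., "hvalue": ..., "offset": 0, "length": 6}, ...]}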
+
+#input: container
+#output: md5 encoded in base64
+ def _get_md5(self,container):
+ container = json.dumps(container)
+ m = hashlib.md5()
+ m.update(str_to_bytes(container))
+ res_md5 = bytes_to_b64(m.digest())
+ return res_md5
+
+ def get_version(self):
+ return {'built_by':'itraviv','version':self.version_major+'.'+self.version_minor}
+
+ def supported_methods(self,method_name='all'):
+ if method_name=='all':
+ methods = {}
+ for f in dir(Scapy_service):
+ if f[0]=='_':
+ continue
+ if inspect.ismethod(eval('Scapy_service.'+f)):
+ param_list = inspect.getargspec(eval('Scapy_service.'+f))[0]
+ del param_list[0] #deleting the parameter "self" that appears in every method
+ #because the server automatically operates on an instance,
+ #and this can cause confusion
+ methods[f] = (len(param_list), param_list)
+ return methods
+ if method_name in dir(Scapy_service):
+ return True
+ return False
+
+ def _generate_version_hash(self,v_major,v_minor):
+ v_for_hash = v_major+v_minor+v_major+v_minor
+ m = hashlib.md5()
+ m.update(str_to_bytes(v_for_hash))
+ return bytes_to_b64(m.digest())
+
+ def _generate_invalid_version_error(self):
+ error_desc1 = "Provided version handler does not correspond to the server's version.\nUpdate client to latest version.\nServer version:"+self.version_major+"."+self.version_minor
+ return error_desc1
+
+ def _verify_version_handler(self,client_v_handler):
+ return (self.server_v_hashed == client_v_handler)
+
+ def _parse_packet_dict(self,layer,scapy_layers,scapy_layer_names):
+ class_index = scapy_layer_names.index(layer['id'])
+ class_p = scapy_layers[class_index] # class pointer
+ scapy_layer = class_p()
+ if isinstance(scapy_layer, Raw):
+ scapy_layer.load = str_to_bytes("dummy")
+ if 'fields' in layer:
+ self._modify_layer(scapy_layer, layer['fields'])
+ return scapy_layer
+
+ def _packet_model_to_scapy_packet(self,data):
+ layers = Packet.__subclasses__()
+ layer_names = [ layer.__name__ for layer in layers]
+ base_layer = self._parse_packet_dict(data[0],layers,layer_names)
+ for i in range(1,len(data),1):
+ packet_layer = self._parse_packet_dict(data[i],layers,layer_names)
+ base_layer = base_layer/packet_layer
+ return base_layer
+
+ def _pkt_data(self,pkt):
+ if pkt is None:
+ return {'data': [], 'binary': None}
+ data = self._pkt_to_field_tree(pkt)
+ binary = bytes_to_b64(bytes(pkt))
+ res = {'data': data, 'binary': binary}
+ return res
+
+#--------------------------------------------API implementation-------------
+ def get_tree(self,client_v_handler):
+ if not (self._verify_version_handler(client_v_handler)):
+ raise ScapyException(self._generate_invalid_version_error())
+ return self.protocol_tree
+
+ def get_version_handler(self,client_v_major,client_v_minor):
+ v_handle = self._generate_version_hash(client_v_major,client_v_minor)
+ return v_handle
+
+# pkt_descriptor in packet model format (dictionary)
+ def build_pkt(self,client_v_handler,pkt_model_descriptor):
+ if not (self._verify_version_handler(client_v_handler)):
+ raise ScapyException(self._generate_invalid_version_error())
+ pkt = self._packet_model_to_scapy_packet(pkt_model_descriptor)
+ return self._pkt_data(pkt)
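+
+ # Illustrative call (assumes a valid client version handler):
+ # build_pkt(handler, [{"id": "Ether"}, {"id": "IP"}, {"id": "TCP"}])
+ # -> {'data': [<layer field trees>], 'binary': '<base64 packet bytes>'}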
+
+ # @deprecated. to be removed
+ def get_all(self,client_v_handler):
+ if not (self._verify_version_handler(client_v_handler)):
+ raise ScapyException(self._generate_invalid_version_error())
+ fields=self._get_all_fields()
+ db=self._get_all_db()
+ fields_md5 = self._get_md5(fields)
+ db_md5 = self._get_md5(db)
+ res = {}
+ res['db'] = db
+ res['fields'] = fields
+ res['db_md5'] = db_md5
+ res['fields_md5'] = fields_md5
+ return res
+
+ def _is_packet_class(self, pkt_class):
+ # returns true for final Packet classes. skips aliases and metaclasses
+ return issubclass(pkt_class, Packet) and pkt_class.name and pkt_class.fields_desc
+
+ def _getDummyPacket(self, pkt_class):
+ if issubclass(pkt_class, Raw):
+ # need to have some payload. otherwise won't appear in the binary chunk
+ return pkt_class(load=str_to_bytes("dummy"))
+ else:
+ return pkt_class()
+
+
+ def _get_payload_classes(self, pkt):
+ # tries to find which payload subclasses are allowed.
+ # this can take a long time, since it tries to build packets with all subclasses (O(N))
+ pkt_class = type(pkt)
+ allowed_subclasses = []
+ for pkt_subclass in conf.layers:
+ if self._is_packet_class(pkt_subclass):
+ try:
+ pkt_w_payload = pkt_class() / self._getDummyPacket(pkt_subclass)
+ recreated_pkt = pkt_class(bytes(pkt_w_payload))
+ if type(recreated_pkt.lastlayer()) is pkt_subclass:
+ allowed_subclasses.append(pkt_subclass)
+ except Exception as e:
+ # no action needed on failure, just silently skip
+ pass
+ return allowed_subclasses
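+
+ # Illustrative result (depends on the scapy layers that are loaded):
+ # for an Ether() packet this would typically include IP, Dot1Q and ARP, since
+ # rebuilding Ether()/IP() from its bytes keeps IP as the last layer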
+
+ def _get_fields_definition(self, pkt_class):
+ fields = []
+ for field_desc in pkt_class.fields_desc:
+ field_data = {
+ "id": field_desc.name,
+ "name": field_desc.name
+ }
+ if isinstance(field_desc, EnumField):
+ try:
+ field_data["values_dict"] = field_desc.s2i
+ except Exception:
+ # MultiEnumField doesn't have s2i. needs better handling
+ pass
+ fields.append(field_data)
+ return fields
+
+ def get_definitions(self,client_v_handler, def_filter):
+ # def_filter is an array of classnames or None
+ all_classes = Packet.__subclasses__() # as an alternative to conf.layers
+ if def_filter:
+ all_classes = [c for c in all_classes if c.__name__ in def_filter]
+ protocols = []
+ for pkt_class in all_classes:
+ if self._is_packet_class(pkt_class):
+ # enumerate all non-abstract Packet classes
+ protocols.append({
+ "id": pkt_class.__name__,
+ "name": pkt_class.name,
+ "fields": self._get_fields_definition(pkt_class)
+ })
+ res = {"protocols": protocols}
+ return res
+
+ def get_payload_classes(self,client_v_handler, pkt_model_descriptor):
+ pkt = self._packet_model_to_scapy_packet(pkt_model_descriptor)
+ return [c.__name__ for c in self._get_payload_classes(pkt)]
+
+#input in string encoded base64
+ def check_update_of_dbs(self,client_v_handler,db_md5,field_md5):
+ if not (self._verify_version_handler(client_v_handler)):
+ raise ScapyException(self._generate_invalid_version_error())
+ fields=self._get_all_fields()
+ db=self._get_all_db()
+ current_db_md5 = self._get_md5(db)
+ current_field_md5 = self._get_md5(fields)
+ res = []
+ if (field_md5 == current_field_md5):
+ if (db_md5 == current_db_md5):
+ return True
+ else:
+ raise ScapyException("Protocol DB is not up to date")
+ else:
+ raise ScapyException("Fields DB is not up to date")
+
+ def _modify_layer(self, scapy_layer, fields):
+ for field in fields:
+ fieldId = str(field['id'])
+ fieldval = self._field_value_from_def(scapy_layer, fieldId, field['value'])
+ if fieldval is not None:
+ scapy_layer.setfieldval(fieldId, fieldval)
+ else:
+ scapy_layer.delfieldval(fieldId)
+
+ def _is_last_layer(self, layer):
+ # can be used to check that a layer has no payload.
+ # if true, layer.payload is likely NoPayload()
+ return layer is layer.lastlayer()
+
+#input of binary_pkt must be encoded in base64
+ def reconstruct_pkt(self,client_v_handler,binary_pkt,model_descriptor):
+ pkt_bin = b64_to_bytes(binary_pkt)
+ scapy_pkt = Ether(pkt_bin)
+ if not model_descriptor:
+ model_descriptor = []
+ for depth in range(len(model_descriptor)):
+ model_layer = model_descriptor[depth]
+ if model_layer.get('delete') is True:
+ # slice packet from the current item
+ if depth == 0:
+ scapy_pkt = None
+ break
+ else:
+ scapy_pkt[depth-1].payload = None
+ break
+ if depth > 0 and self._is_last_layer(scapy_pkt[depth-1]):
+ # insert new layer(s) from json definition
+ remaining_definitions = model_descriptor[depth:]
+ pkt_to_append = self._packet_model_to_scapy_packet(remaining_definitions)
+ scapy_pkt = scapy_pkt / pkt_to_append
+ break
+ # modify fields of existing stack items
+ scapy_layer = scapy_pkt[depth]
+ if model_layer['id'] != type(scapy_layer).__name__:
+ # TODO: support replacing payload, instead of breaking
+ raise ScapyException("Protocol id inconsistent")
+ if 'fields' in model_layer:
+ self._modify_layer(scapy_layer, model_layer['fields'])
+ return self._pkt_data(scapy_pkt)
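+
+ # Illustrative model_descriptor values (assumed inputs):
+ # [{"id": "Ether"}, {"id": "IP", "fields": [{"id": "ttl", "value": 32}]}] - edit a field in place
+ # [{"id": "Ether"}, {"id": "IP", "delete": True}] - truncate the packet after Ether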
+
+ def read_pcap(self,client_v_handler,pcap_base64):
+ pcap_bin = b64_to_bytes(pcap_base64)
+ pcap = []
+ res_packets = []
+ with tempfile.NamedTemporaryFile(mode='w+b') as tmpPcap:
+ tmpPcap.write(pcap_bin)
+ tmpPcap.flush()
+ pcap = rdpcap(tmpPcap.name)
+ for scapy_packet in pcap:
+ res_packets.append(self._pkt_data(scapy_packet))
+ return res_packets
+
+ def write_pcap(self,client_v_handler,packets_base64):
+ packets = [Ether(b64_to_bytes(pkt_b64)) for pkt_b64 in packets_base64]
+ pcap_bin = None
+ with tempfile.NamedTemporaryFile(mode='r+b') as tmpPcap:
+ wrpcap(tmpPcap.name, packets)
+ pcap_bin = tmpPcap.read()
+ return bytes_to_b64(pcap_bin)
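+
+ # Illustrative pcap round trip (assumed usage):
+ # pcap_b64 = write_pcap(handler, [bytes_to_b64(bytes(Ether()/IP()))])
+ # packets = read_pcap(handler, pcap_b64) # -> [{'data': [...], 'binary': ...}]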
+
+
+
+
+#---------------------------------------------------------------------------
+
+
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_client.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_client.py
new file mode 100644
index 00000000..18d32272
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_client.py
@@ -0,0 +1,116 @@
+
+import sys
+import os
+python2_zmq_path = os.path.abspath(os.path.join(os.pardir,os.pardir,os.pardir,os.pardir,
+ os.pardir,'external_libs','pyzmq-14.5.0','python2','fedora18','64bit'))
+sys.path.append(python2_zmq_path)
+
+import zmq
+import json
+from argparse import *
+from pprint import pprint
+
+class Scapy_server_wrapper():
+ def __init__(self,dest_scapy_port=5555,server_ip_address='localhost'):
+ self.server_ip_address = server_ip_address
+ self.context = zmq.Context()
+ self.socket = self.context.socket(zmq.REQ)
+ self.dest_scapy_port =dest_scapy_port
+ self.socket.connect("tcp://"+str(self.server_ip_address)+":"+str(self.dest_scapy_port))
+
+ def call_method(self,method_name,method_params):
+ json_rpc_req = { "jsonrpc":"2.0","method": method_name ,"params": method_params, "id":"1"}
+ request = json.dumps(json_rpc_req)
+ self.socket.send_string(request)
+ # Get the reply.
+ message = self.socket.recv_string()
+ message_parsed = json.loads(message)
+ if 'result' in message_parsed.keys():
+ result = message_parsed['result']
+ else:
+ result = {'error':message_parsed['error']}
+ return result
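+
+ # Illustrative JSON-RPC exchange handled above (assumed payloads):
+ # request: {"jsonrpc": "2.0", "method": "get_version", "params": [], "id": "1"}
+ # reply: {"jsonrpc": "2.0", "result": {"built_by": "itraviv", "version": "1.01"}, "id": "1"}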
+
+ def get_all(self):
+ return self.call_method('get_all',[])
+
+ def check_update(self,db_md5,field_md5):
+ result = self.call_method('check_update',[db_md5,field_md5])
+ if result is not True:
+ if 'error' in result:
+ if "Fields DB is not up to date" in result['error']['message']:
+ raise Exception("Fields DB is not up to date")
+ if "Protocol DB is not up to date" in result['error']['message']:
+ raise Exception("Protocol DB is not up to date")
+ return result
+
+ def build_pkt(self,pkt_descriptor):
+ return self.call_method('build_pkt',[pkt_descriptor])
+
+ def _get_all_pkt_offsets(self,pkt_desc):
+ return self.call_method('_get_all_pkt_offsets',[pkt_desc])
+
+ def _activate_console(self):
+ context = zmq.Context()
+ # Socket to talk to server
+ print('Connecting:')
+ socket = context.socket(zmq.REQ)
+ socket.connect("tcp://"+str(self.server_ip_address)+":"+str(self.dest_scapy_port))
+ try:
+ print('This is a simple console to communicate with Scapy server.\nInvoke supported_methods (with 1 parameter = all) to see supported commands\n')
+ while True:
+ command = raw_input("enter RPC command [enter quit to exit]:\n")
+ if (command == 'quit'):
+ break
+ parameter_num = 0
+ params = []
+ while True:
+ try:
+ parameter_num = int(raw_input('Enter number of parameters to command:\n'))
+ break
+ except Exception:
+ print('Invalid input. Try again')
+ for i in range(1,parameter_num+1,1):
+ print "input parameter %d:" % i
+ user_parameter = raw_input()
+ params.append(user_parameter)
+ pprint_output = raw_input('pprint the output [y/n]? ')
+ while pprint_output != 'y' and pprint_output != 'n':
+ pprint_output = raw_input('pprint the output [y/n]? ')
+ json_rpc_req = { "jsonrpc":"2.0","method": command ,"params":params, "id":"1"}
+ request = json.dumps(json_rpc_req)
+ print("Sending request in json format %s " % request)
+ socket.send(request)
+
+ # Get the reply.
+ message = socket.recv()
+ print ('received reply:')
+ parsed_message = json.loads(message)
+ if (pprint_output == 'y'):
+ pprint(parsed_message)
+ else:
+ print(message)
+ except KeyboardInterrupt:
+ print('Terminated By Ctrl+C')
+ finally:
+ socket.close()
+ context.destroy()
+
+
+
+if __name__=='__main__':
+ parser = ArgumentParser(description='Example of client module for Scapy server ')
+ parser.add_argument('-p','--dest-scapy-port',type=int, default = 4507, dest='dest_scapy_port',
+ help='Select the port this Scapy server client will send to.\n default is 4507\n',action='store')
+ parser.add_argument('-s','--server',type=str, default = 'localhost', dest='dest_scapy_ip',
+ help='Remote server IP address .\n default is localhost\n',action='store')
+ parser.add_argument('-c','--console',
+ help='Run simple client console for Scapy server.\nrun with \'-s\' and \'-p\' to determine IP and port of the server\n',
+ action='store_true',default = False)
+ args = parser.parse_args()
+ if (args.console):
+ s = Scapy_server_wrapper(args.dest_scapy_port,args.dest_scapy_ip)
+ sys.exit(s._activate_console())
+ else:
+ print('Scapy client: for an interactive console re-run with \'-c\'; otherwise import as a separate module.')
+
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_server.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_server.py
new file mode 100755
index 00000000..6489b36a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_server.py
@@ -0,0 +1,188 @@
+
+import time
+import sys
+import os
+import traceback
+
+stl_pathname = os.path.abspath(os.path.join(os.pardir, os.pardir))
+if stl_pathname not in sys.path:
+ sys.path.append(stl_pathname)
+from trex_stl_lib.api import *
+import zmq
+import inspect
+from scapy_service import *
+from argparse import *
+import socket
+import logging
+import logging.handlers
+
+
+class ParseException(Exception): pass
+class InvalidRequest(Exception): pass
+class MethodNotFound(Exception): pass
+class InvalidParams(Exception): pass
+
+class Scapy_wrapper:
+ def __init__(self):
+ self.scapy_master = Scapy_service()
+
+ def parse_req_msg(self,JSON_req):
+ try:
+ req = json.loads(JSON_req)
+ req_id='null'
+ if not isinstance(req, dict):
+ raise ParseException(req_id)
+ json_rpc_keys = ['jsonrpc','id','method']
+ if ((set(req.keys())!=set(json_rpc_keys)) and (set(req.keys())!=set(json_rpc_keys+['params']))) :
+ if 'id' in req.keys():
+ req_id = req['id']
+ raise InvalidRequest(req_id)
+ req_id = req['id']
+ if (req['method']=='shut_down'):
+ return 'shut_down',[],req_id
+ if not (self.scapy_master.supported_methods(req['method'])):
+ raise MethodNotFound(req_id)
+ scapy_method = eval("self.scapy_master."+req['method'])
+ arg_num_for_method = len(inspect.getargspec(scapy_method)[0])
+ if (arg_num_for_method>1) :
+ if not ('params' in req.keys()):
+ raise InvalidRequest(req_id)
+ params_len = len(req['params'])+1 # +1 because "self" is counted as a parameter in the method's args
+ if not (params_len==arg_num_for_method):
+ raise InvalidParams(req_id)
+ return req['method'],req['params'],req_id
+ else:
+ return req['method'],[],req_id
+ except ValueError:
+ raise ParseException(req_id)
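+
+ # Example of a request that parses successfully (illustrative handler value):
+ # '{"jsonrpc": "2.0", "id": "1", "method": "get_tree", "params": ["<v_handler>"]}'
+ # -> ('get_tree', ['<v_handler>'], '1')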
+
+ def create_error_response(self,error_code,error_msg,req_id):
+ return {"jsonrpc": "2.0", "error": {"code": error_code, "message": error_msg}, "id": req_id}
+
+ def create_success_response(self,result,req_id):
+ return {"jsonrpc": "2.0", "result": result, "id": req_id }
+
+ def get_exception(self):
+ return sys.exc_info()
+
+
+ def execute(self,method,params):
+ if len(params)>0:
+ result = eval('self.scapy_master.'+method+'(*'+str(params)+')')
+ else:
+ result = eval('self.scapy_master.'+method+'()')
+ return result
+
+
+ def error_handler(self,e,req_id):
+ response = []
+ try:
+ raise e
+ except ParseException as e:
+ response = self.create_error_response(-32700,'Parse error ',req_id)
+ except InvalidRequest as e:
+ response = self.create_error_response(-32600,'Invalid Request',req_id)
+ except MethodNotFound as e:
+ response = self.create_error_response(-32601,'Method not found',req_id)
+ except InvalidParams as e:
+ response = self.create_error_response(-32603,'Invalid params',req_id)
+ except SyntaxError as e:
+ response = self.create_error_response(-32097,'SyntaxError',req_id)
+ except Exception as e:
+ if hasattr(e,'message'):
+ response = self.create_error_response(-32098,'Scapy Server: '+str(e.message),req_id)
+ else:
+ response = self.create_error_response(-32096,'Scapy Server: Unknown Error',req_id)
+ finally:
+ return response
+
+class Scapy_server():
+ def __init__(self, args,port=4507):
+ self.scapy_wrapper = Scapy_wrapper()
+ self.port = port
+ self.context = zmq.Context()
+ self.socket = self.context.socket(zmq.REP)
+ self.socket.bind("tcp://*:"+str(port))
+ self.IP_address = socket.gethostbyname(socket.gethostname())
+ self.logger = logging.getLogger('scapy_logger')
+ self.logger.setLevel(logging.INFO)
+ console_h = logging.StreamHandler(sys.__stdout__)
+ formatter = logging.Formatter(fmt='%(asctime)s %(message)s',datefmt='%d-%m-%Y %H:%M:%S')
+ if args.log:
+ logfile_h = logging.FileHandler('scapy_server.log')
+ logfile_h.setLevel(logging.INFO)
+ logfile_h.setFormatter(formatter)
+ self.logger.addHandler(logfile_h)
+ if args.verbose:
+ console_h.setLevel(logging.INFO)
+ else:
+ console_h.setLevel(logging.WARNING)
+ console_h.setFormatter(formatter)
+ self.logger.addHandler(console_h)
+
+
+ def activate(self):
+ self.logger.info('***Scapy Server Started***')
+ self.logger.info('Listening on port: %d' % self.port)
+ self.logger.info('Server IP address: %s' % self.IP_address)
+ try:
+ while True:
+ message = self.socket.recv_string()
+ self.logger.info('Received Message: %s' % message)
+ try:
+ params = []
+ method=''
+ req_id = 'null'
+ method,params,req_id = self.scapy_wrapper.parse_req_msg(message)
+ if (method == 'shut_down'):
+ self.logger.info('Shut down by remote user')
+ result = 'Server shut down command received - server has shut down'
+ else:
+ result = self.scapy_wrapper.execute(method,params)
+ response = self.scapy_wrapper.create_success_response(result,req_id)
+ except Exception as e:
+ response = self.scapy_wrapper.error_handler(e,req_id)
+ self.logger.info('ERROR %s: %s',response['error']['code'], response['error']['message'])
+ self.logger.info('Exception info: %s' % traceback.format_exc())
+ finally:
+ try:
+ json_response = json.dumps(response)
+ self.logger.info('Sending Message: %s' % json_response)
+ except Exception as e:
+ # rare case when the json can not be serialized due to encoding issues
+ # object is not JSON serializable
+ self.logger.error('Unexpected Error: %s' % traceback.format_exc())
+ json_response = json.dumps(self.scapy_wrapper.error_handler(e,req_id))
+
+ # Send reply back to client
+ self.socket.send_string(json_response)
+ if (method == 'shut_down'):
+ break
+
+ except KeyboardInterrupt:
+ self.logger.info('Terminated by local user')
+
+ finally:
+ self.socket.close()
+ self.context.destroy()
+
+
+
+#port is the port number for the server to listen on
+def main(args,port):
+ s = Scapy_server(args,port)
+ s.activate()
+
+if __name__=='__main__':
+
+ parser = ArgumentParser(description=' Runs Scapy Server ')
+ parser.add_argument('-s','--scapy-port',type=int, default = 4507, dest='scapy_port',
+ help='Select the port the Scapy server will listen on.\n default is 4507.',action='store')
+ parser.add_argument('-v','--verbose',help='Print Client-Server Request-Reply information to console.',action='store_true',default = False)
+ parser.add_argument('-l','--log',help='Log all server activity to the log file scapy_server.log. The log does not discard older entries and the file size is not limited.',
+ action='store_true',default = False)
+ args = parser.parse_args()
+ port = args.scapy_port
+ sys.exit(main(args,port))
+
+
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py
new file mode 100644
index 00000000..17dd304a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py
@@ -0,0 +1,84 @@
+import os
+import sys
+import json
+import base64
+import inspect
+from inspect import getcallargs
+# add paths to scapy_service and trex_stl_lib.api
+sys.path.append(os.path.abspath(os.pardir))
+sys.path.append(os.path.abspath(os.path.join(os.pardir, os.pardir, os.pardir)))
+
+from scapy_service import *
+from scapy.all import *
+
+service = Scapy_service()
+v_handler = service.get_version_handler('1','01')
+
+def pretty_json(obj):
+ return json.dumps(obj, indent=4)
+
+def pprint(obj):
+ print(pretty_json(obj))
+
+def is_verbose():
+ return True
+
+def pass_result(result, *args):
+ # returns result unchanged, but can display debug info if enabled
+ if is_verbose():
+ fargs = (inspect.stack()[-1][4])
+ print(fargs[0])
+ pprint(result)
+ return result
+
+def pass_pkt(result):
+ # returns packet unchanged, but can display debug info if enabled
+ if is_verbose() and result is not None:
+ result.show2()
+ return result
+
+# utility functions for tests
+
+def layer_def(layerId, **layerfields):
+ # test helper method to generate JSON-like protocol definition object for scapy
+ # ex. { "id": "Ether", "fields": [ { "id": "dst", "value": "10:10:10:10:10:10" } ] }
+ res = { "id": layerId }
+ if layerfields:
+ res["fields"] = [ {"id": k, "value": v} for k,v in layerfields.items() ]
+ return res
+
+def get_version_handler():
+ return pass_result(service.get_version_handler("1", "01"))
+
+def build_pkt(model_def):
+ return pass_result(service.build_pkt(v_handler, model_def))
+
+def build_pkt_get_scapy(model_def):
+ return build_pkt_to_scapy(build_pkt(model_def))
+
+def reconstruct_pkt(bytes_b64, model_def):
+ return pass_result(service.reconstruct_pkt(v_handler, bytes_b64, model_def))
+
+def get_definitions(def_filter):
+ return pass_result(service.get_definitions(v_handler, def_filter))
+
+def get_payload_classes(def_filter):
+ return pass_result(service.get_payload_classes(v_handler, def_filter))
+
+def build_pkt_to_scapy(buildpkt_result):
+ return pass_pkt(Ether(b64_to_bytes(buildpkt_result['binary'])))
+
+def fields_to_map(field_array):
+ # [{id, value, hvalue, offset}, ...] to map id -> {value, hvalue, offset}
+ res = {}
+ if field_array:
+ for f in field_array:
+ res[ f["id"] ] = f
+ return res
+
+def adapt_json_protocol_fields(protocols_array):
+ # replaces layer.fields(array) with map for easier access in tests
+ for protocol in protocols_array:
+ # change the structure for easier access
+ if protocol.get("fields"):
+ protocol["fields"] = fields_to_map(protocol["fields"])
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py
new file mode 100644
index 00000000..9cd473d7
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py
@@ -0,0 +1,155 @@
+#
+# run with 'nosetests' utility
+
+import tempfile
+import re
+from basetest import *
+
+RE_MAC = "^([0-9A-Fa-f]{2}:){5}([0-9A-Fa-f]{2})$"
+
+TEST_MAC_1 = "10:10:10:10:10:10"
+# Test scapy structure
+TEST_PKT = Ether(dst=TEST_MAC_1)/IP(src='127.0.0.1')/TCP(sport=443)
+
+# Corresponding JSON-like structure
+TEST_PKT_DEF = [
+ layer_def("Ether", dst=TEST_MAC_1),
+ layer_def("IP", dst="127.0.0.1"),
+ layer_def("TCP", sport="443")
+ ]
+
+def test_build_pkt():
+ pkt = build_pkt_get_scapy(TEST_PKT_DEF)
+ assert(pkt[TCP].sport == 443)
+
+def test_build_invalid_structure_pkt():
+ ether_fields = {"dst": TEST_MAC_1, "type": "LOOP"}
+ pkt = build_pkt_get_scapy([
+ layer_def("Ether", **ether_fields),
+ layer_def("IP"),
+ layer_def("TCP", sport=8080)
+ ])
+ assert(pkt[Ether].dst == TEST_MAC_1)
+ assert(isinstance(pkt[Ether].payload, Raw))
+
+def test_reconstruct_pkt():
+ res = reconstruct_pkt(base64.b64encode(bytes(TEST_PKT)), None)
+ pkt = build_pkt_to_scapy(res)
+ assert(pkt[TCP].sport == 443)
+
+def test_layer_del():
+ modif = [
+ {"id": "Ether"},
+ {"id": "IP"},
+ {"id": "TCP", "delete": True},
+ ]
+ res = reconstruct_pkt(base64.b64encode(bytes(TEST_PKT)), modif)
+ pkt = build_pkt_to_scapy(res)
+ assert(not pkt[IP].payload)
+
+def test_layer_field_edit():
+ modif = [
+ {"id": "Ether"},
+ {"id": "IP"},
+ {"id": "TCP", "fields": [{"id": "dport", "value": 777}]},
+ ]
+ res = reconstruct_pkt(base64.b64encode(bytes(TEST_PKT)), modif)
+ pkt = build_pkt_to_scapy(res)
+ assert(pkt[TCP].dport == 777)
+ assert(pkt[TCP].sport == 443)
+
+def test_layer_add():
+ modif = [
+ {"id": "Ether"},
+ {"id": "IP"},
+ {"id": "TCP"},
+ {"id": "Raw", "fields": [{"id": "load", "value": "GET /helloworld HTTP/1.0\n\n"}]},
+ ]
+ res = reconstruct_pkt(base64.b64encode(bytes(TEST_PKT)), modif)
+ pkt = build_pkt_to_scapy(res)
+ assert("GET /helloworld" in str(pkt[TCP].payload.load))
+
+def test_build_Raw():
+ pkt = build_pkt_get_scapy([
+ layer_def("Ether"),
+ layer_def("IP"),
+ layer_def("TCP"),
+ layer_def("Raw", load={"vtype": "BYTES", "base64": bytes_to_b64(b"hi")})
+ ])
+ assert(pkt[Raw].load == b"hi")
+
+def test_get_all():
+ service.get_all(v_handler)
+
+def test_get_definitions_all():
+ def_classnames = [pdef['id'] for pdef in get_definitions(None)['protocols']]
+ assert("IP" in def_classnames)
+ assert("Dot1Q" in def_classnames)
+ assert("TCP" in def_classnames)
+
+def test_get_definitions_ether():
+ res = get_definitions(["Ether"])
+ assert(len(res['protocols']) == 1)
+ assert(res['protocols'][0]['id'] == "Ether")
+
+def test_get_payload_classes():
+ eth_payloads = get_payload_classes([{"id":"Ether"}])
+ assert("IP" in eth_payloads)
+ assert("Dot1Q" in eth_payloads)
+ assert("TCP" not in eth_payloads)
+
+def test_pcap_read_and_write():
+ pkts_to_write = [bytes_to_b64(bytes(TEST_PKT))]
+ pcap_b64 = service.write_pcap(v_handler, pkts_to_write)
+ array_pkt = service.read_pcap(v_handler, pcap_b64)
+ pkt = build_pkt_to_scapy(array_pkt[0])
+ assert(pkt[Ether].dst == TEST_MAC_1)
+
+def test_layer_default_value():
+ res = build_pkt([
+ layer_def("Ether", src={"vtype": "UNDEFINED"})
+ ])
+ ether_fields = fields_to_map(res['data'][0]['fields'])
+ assert(re.match(RE_MAC, ether_fields['src']['value']))
+
+def test_layer_random_value():
+ res = build_pkt([
+ layer_def("Ether", src={"vtype": "RANDOM"})
+ ])
+ ether_fields = fields_to_map(res['data'][0]['fields'])
+ assert(re.match(RE_MAC, ether_fields['src']['value']))
+
+def test_layer_wrong_structure():
+ payload = [
+ layer_def("Ether"),
+ layer_def("IP"),
+ layer_def("Raw", load="dummy"),
+ layer_def("Ether"),
+ layer_def("IP"),
+ ]
+ res = build_pkt(payload)
+ pkt = build_pkt_to_scapy(res)
+ assert(type(pkt[0]) is Ether)
+ assert(type(pkt[1]) is IP)
+ assert(isinstance(pkt[2], Raw))
+ assert(not pkt[2].payload)
+ model = res["data"]
+ assert(len(payload) == len(model))
+ # verify same protocol structure as in abstract model
+ # and all fields defined
+ for depth in range(len(payload)):
+ layer_model = model[depth]
+ layer_fields = fields_to_map(layer_model["fields"])
+ assert(payload[depth]["id"] == model[depth]["id"])
+ for field in layer_model["fields"]:
+ required_field_properties = ["value", "hvalue", "offset"]
+ for field_property in required_field_properties:
+ assert(field[field_property] is not None)
+ if (model[depth]["id"] == "Ether"):
+ assert(layer_fields["type"]["hvalue"] == "IPv4")
+ real_structure = [layer["real_id"] for layer in model]
+ valid_structure_flags = [layer["valid_structure"] for layer in model]
+ assert(real_structure == ["Ether", "IP", "Raw", None, None])
+ assert(valid_structure_flags == [True, True, True, False, False])
+
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/zmq_for_scapy_server_test.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/zmq_for_scapy_server_test.py
new file mode 100755
index 00000000..8f7f7b01
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/zmq_for_scapy_server_test.py
@@ -0,0 +1,14 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/__init__.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/__init__.py
new file mode 100644
index 00000000..c6e14df3
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/__init__.py
@@ -0,0 +1,7 @@
+import sys
+
+if sys.version_info < (2, 7):
+ print("\n**** TRex STL package requires Python version >= 2.7 ***\n")
+ exit(-1)
+
+from . import trex_stl_ext
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/api.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/api.py
new file mode 100644
index 00000000..bd95a20a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/api.py
@@ -0,0 +1,18 @@
+
+# client and exceptions
+from .trex_stl_exceptions import *
+from .trex_stl_client import STLClient, LoggerApi
+
+# streams
+from .trex_stl_streams import *
+
+# packet builder
+from .trex_stl_packet_builder_scapy import *
+from scapy.all import *
+
+
+# simulator
+from .trex_stl_sim import STLSim
+
+# std lib (various lib functions)
+from .trex_stl_std import *
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py
new file mode 100644
index 00000000..2c95844b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py
@@ -0,0 +1,440 @@
+#!/router/bin/python
+
+import json
+import threading
+import time
+import datetime
+import zmq
+import re
+import random
+
+from .trex_stl_jsonrpc_client import JsonRpcClient, BatchMessage
+
+from .utils.text_opts import *
+from .trex_stl_stats import *
+from .trex_stl_types import *
+from .utils.zipmsg import ZippedMsg
+
+# basic async stats class
+class CTRexAsyncStats(object):
+ def __init__ (self):
+ self.ref_point = None
+ self.current = {}
+ self.last_update_ts = datetime.datetime.now()
+
+ def update (self, snapshot):
+
+ #update
+ self.last_update_ts = datetime.datetime.now()
+
+ self.current = snapshot
+
+ if self.ref_point is None:
+ self.ref_point = self.current
+
+ def clear(self):
+ self.ref_point = self.current
+
+
+ def get(self, field, format=False, suffix=""):
+
+ if field not in self.current:
+ return "N/A"
+
+ if not format:
+ return self.current[field]
+ else:
+ return format_num(self.current[field], suffix)
+
+ def get_rel (self, field, format=False, suffix=""):
+ if field not in self.current:
+ return "N/A"
+
+ if not format:
+ return (self.current[field] - self.ref_point[field])
+ else:
+ return format_num(self.current[field] - self.ref_point[field], suffix)
+
+
+ # return true if new data has arrived in the past 2 seconds
+ def is_online (self):
+ delta_ms = (datetime.datetime.now() - self.last_update_ts).total_seconds() * 1000
+ return (delta_ms < 2000)
+
+# describes the general stats provided by TRex
+class CTRexAsyncStatsGeneral(CTRexAsyncStats):
+ def __init__ (self):
+ super(CTRexAsyncStatsGeneral, self).__init__()
+
+
+# per port stats
+class CTRexAsyncStatsPort(CTRexAsyncStats):
+ def __init__ (self):
+ super(CTRexAsyncStatsPort, self).__init__()
+
+ def get_stream_stats (self, stream_id):
+ return None
+
+# stats manager
+class CTRexAsyncStatsManager():
+ def __init__ (self):
+
+ self.general_stats = CTRexAsyncStatsGeneral()
+ self.port_stats = {}
+
+
+ def get_general_stats(self):
+ return self.general_stats
+
+ def get_port_stats (self, port_id):
+
+ if str(port_id) not in self.port_stats:
+ return None
+
+ return self.port_stats[str(port_id)]
+
+
+ def update(self, data):
+ self.__handle_snapshot(data)
+
+ def __handle_snapshot(self, snapshot):
+
+ general_stats = {}
+ port_stats = {}
+
+ # filter the values per port and general
+ for key, value in snapshot.items():
+
+ # match a pattern of ports
+ m = re.search('(.*)\-([0-8])', key)
+ if m:
+
+ port_id = m.group(2)
+ field_name = m.group(1)
+
+ if port_id not in port_stats:
+ port_stats[port_id] = {}
+
+ port_stats[port_id][field_name] = value
+
+ else:
+ # no port match - general stats
+ general_stats[key] = value
+
+ # update the general object with the snapshot
+ self.general_stats.update(general_stats)
+
+ # update all ports
+ for port_id, data in port_stats.items():
+
+ if port_id not in self.port_stats:
+ self.port_stats[port_id] = CTRexAsyncStatsPort()
+
+ self.port_stats[port_id].update(data)
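+
+ # Illustrative snapshot keys handled above (assumed field names):
+ # "opackets-0" -> port 0, field "opackets"; "m_cpu_util" -> general stats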
+
+
+
+
+
+class CTRexAsyncClient():
+ def __init__ (self, server, port, stateless_client):
+
+ self.port = port
+ self.server = server
+
+ self.stateless_client = stateless_client
+
+ self.event_handler = stateless_client.event_handler
+ self.logger = self.stateless_client.logger
+
+ self.raw_snapshot = {}
+
+ self.stats = CTRexAsyncStatsManager()
+
+ self.last_data_recv_ts = 0
+ self.async_barrier = None
+
+ self.monitor = AsyncUtil()
+
+ self.connected = False
+
+ self.zipped = ZippedMsg()
+
+ # connects the async channel
+ def connect (self):
+
+ if self.connected:
+ self.disconnect()
+
+ self.tr = "tcp://{0}:{1}".format(self.server, self.port)
+
+ # Socket to talk to server
+ self.context = zmq.Context()
+ self.socket = self.context.socket(zmq.SUB)
+
+
+ # before running the thread - mark as active
+ self.active = True
+ self.t = threading.Thread(target = self._run)
+
+ # kill this thread on exit and don't add it to the join list
+ self.t.setDaemon(True)
+ self.t.start()
+
+ self.connected = True
+
+ # first barrier - make sure async thread is up
+ rc = self.barrier()
+ if not rc:
+ self.disconnect()
+ return rc
+
+ # second barrier - sync all stats data as a baseline from the server
+ rc = self.barrier(baseline = True)
+ if not rc:
+ self.disconnect()
+ return rc
+
+ return RC_OK()
+
+
+
+
+ # disconnect
+ def disconnect (self):
+ if not self.connected:
+ return
+
+ # mark for join
+ self.active = False
+
+ # signal that the context was destroyed (exit the thread loop)
+ self.context.term()
+
+ # join
+ self.t.join()
+
+ # done
+ self.connected = False
+
+
+ # thread function
+ def _run (self):
+
+ # socket must be created on the same thread
+ self.socket.setsockopt(zmq.SUBSCRIBE, b'')
+ self.socket.setsockopt(zmq.RCVTIMEO, 5000)
+ self.socket.connect(self.tr)
+
+ got_data = False
+
+ self.monitor.reset()
+
+
+ while self.active:
+ try:
+
+ with self.monitor:
+ line = self.socket.recv()
+
+ self.monitor.on_recv_msg(line)
+
+ # try to decompress
+ unzipped = self.zipped.decompress(line)
+ if unzipped:
+ line = unzipped
+
+ line = line.decode()
+
+ self.last_data_recv_ts = time.time()
+
+ # signal once
+ if not got_data:
+ self.event_handler.on_async_alive()
+ got_data = True
+
+
+ # got a timeout - mark as not alive and retry
+ except zmq.Again:
+ # signal once
+ if got_data:
+ self.event_handler.on_async_dead()
+ got_data = False
+
+ continue
+
+ except zmq.ContextTerminated:
+ # outside thread signaled us to exit
+ assert(not self.active)
+ break
+
+ msg = json.loads(line)
+
+ name = msg['name']
+ data = msg['data']
+ type = msg['type']
+ baseline = msg.get('baseline', False)
+
+ self.raw_snapshot[name] = data
+
+ self.__dispatch(name, type, data, baseline)
+
+
+ # closing of socket must be from the same thread
+ self.socket.close(linger = 0)
+
+ def is_thread_alive (self):
+ return self.t.is_alive()
+
+ # did we get info in the last 3 seconds?
+ def is_alive (self):
+ if self.last_data_recv_ts is None:
+ return False
+
+ return ( (time.time() - self.last_data_recv_ts) < 3 )
+
+ def get_stats (self):
+ return self.stats
+
+ def get_raw_snapshot (self):
+ return self.raw_snapshot
+
+ # dispatch the message to the right place
+ def __dispatch (self, name, type, data, baseline):
+
+ # stats
+ if name == "trex-global":
+ self.event_handler.on_async_stats_update(data, baseline)
+
+ # events
+ elif name == "trex-event":
+ self.event_handler.on_async_event(type, data)
+
+ # barriers
+ elif name == "trex-barrier":
+ self.handle_async_barrier(type, data)
+
+ elif name == "flow_stats":
+ self.event_handler.on_async_rx_stats_event(data, baseline)
+
+ elif name == "latency_stats":
+ self.event_handler.on_async_latency_stats_event(data, baseline)
+
+ else:
+ pass
+
+
+ # async barrier handling routine
+ def handle_async_barrier (self, type, data):
+ if self.async_barrier['key'] == type:
+ self.async_barrier['ack'] = True
+
+
+ # block on barrier for async channel
+ def barrier(self, timeout = 5, baseline = False):
+
+ # set a random key
+ key = random.getrandbits(32)
+ self.async_barrier = {'key': key, 'ack': False}
+
+ # expiry time
+ expr = time.time() + timeout
+
+ while not self.async_barrier['ack']:
+
+ # inject
+ rc = self.stateless_client._transmit("publish_now", params = {'key' : key, 'baseline': baseline})
+ if not rc:
+ return rc
+
+ # fast loop
+ for i in range(0, 100):
+ if self.async_barrier['ack']:
+ break
+ time.sleep(0.001)
+
+ if time.time() > expr:
+ return RC_ERR("*** [subscriber] - timeout - no data flow from server at : " + self.tr)
+
+ return RC_OK()
+
+
+# a class to measure util. of async subscriber thread
+class AsyncUtil(object):
+
+ STATE_SLEEP = 1
+ STATE_AWAKE = 2
+
+ def __init__ (self):
+ self.reset()
+
+
+ def reset (self):
+ self.state = self.STATE_AWAKE
+ self.clock = time.time()
+
+ # reset the current interval
+ self.interval = {'ts': time.time(), 'total_sleep': 0, 'total_bits': 0}
+
+ # global counters
+ self.cpu_util = 0
+ self.bps = 0
+
+
+ def on_recv_msg (self, message):
+ self.interval['total_bits'] += len(message) * 8.0
+
+ self._tick()
+
+
+ def __enter__ (self):
+ assert(self.state == self.STATE_AWAKE)
+ self.state = self.STATE_SLEEP
+
+ self.sleep_start_ts = time.time()
+
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ assert(self.state == self.STATE_SLEEP)
+ self.state = self.STATE_AWAKE
+
+ # measure total sleep time for interval
+ self.interval['total_sleep'] += time.time() - self.sleep_start_ts
+
+ self._tick()
+
+ def _tick (self):
+ # how long has the current interval lasted
+ ts = time.time() - self.interval['ts']
+ if ts < 1:
+ return
+
+ # if tick is in the middle of sleep - add the interval and reset
+ if self.state == self.STATE_SLEEP:
+ self.interval['total_sleep'] += time.time() - self.sleep_start_ts
+ self.sleep_start_ts = time.time()
+
+ # add the interval
+ if self.interval['total_sleep'] > 0:
+ # calculate
+ self.cpu_util = self.cpu_util * 0.75 + (float(ts - self.interval['total_sleep']) / ts) * 0.25
+ self.interval['total_sleep'] = 0
+
+
+ if self.interval['total_bits'] > 0:
+ # calculate
+ self.bps = self.bps * 0.75 + ( self.interval['total_bits'] / ts ) * 0.25
+ self.interval['total_bits'] = 0
+
+ # reset the interval's clock
+ self.interval['ts'] = time.time()
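+
+ # The two counters above are exponential moving averages:
+ # new = 0.75 * old + 0.25 * sample, so each ~1s interval contributes a quarter of its weight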
+
+
+ def get_cpu_util (self):
+ self._tick()
+ return (self.cpu_util * 100)
+
+ def get_bps (self):
+ self._tick()
+ return (self.bps)
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
new file mode 100755
index 00000000..80a4c4dc
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
@@ -0,0 +1,3370 @@
+#!/router/bin/python
+
+# for API usage the path name must be full
+from .trex_stl_exceptions import *
+from .trex_stl_streams import *
+
+from .trex_stl_jsonrpc_client import JsonRpcClient, BatchMessage
+from . import trex_stl_stats
+
+from .trex_stl_port import Port
+from .trex_stl_types import *
+from .trex_stl_async_client import CTRexAsyncClient
+
+from .utils import parsing_opts, text_tables, common
+from .utils.common import list_intersect, list_difference, is_sub_list, PassiveTimer
+from .utils.text_opts import *
+from functools import wraps
+
+from collections import namedtuple
+from yaml import YAMLError
+import time
+import datetime
+import re
+import random
+import json
+import traceback
+
+############################ logger #############################
+############################ #############################
+############################ #############################
+
+# logger API for the client
+class LoggerApi(object):
+ # verbose levels
+ VERBOSE_QUIET = 0
+ VERBOSE_REGULAR = 1
+ VERBOSE_HIGH = 2
+
+ def __init__(self):
+ self.level = LoggerApi.VERBOSE_REGULAR
+
+ # implemented by specific logger
+ def write(self, msg, newline = True):
+ raise Exception("Implement this")
+
+ # implemented by specific logger
+ def flush(self):
+ raise Exception("Implement this")
+
+ def set_verbose (self, level):
+ if level not in range(self.VERBOSE_QUIET, self.VERBOSE_HIGH + 1):
+ raise ValueError("Invalid verbosity level provided for logger")
+
+ self.level = level
+
+ def get_verbose (self):
+ return self.level
+
+
+ def check_verbose (self, level):
+ return (self.level >= level)
+
+
+ # simple log message with verbose
+ def log (self, msg, level = VERBOSE_REGULAR, newline = True):
+ if not self.check_verbose(level):
+ return
+
+ self.write(msg, newline)
+
+ # logging that comes from async event
+ def async_log (self, msg, level = VERBOSE_REGULAR, newline = True):
+ self.log(msg, level, newline)
+
+
+ def pre_cmd (self, desc):
+ self.log(format_text('\n{:<60}'.format(desc), 'bold'), newline = False)
+ self.flush()
+
+ def post_cmd (self, rc):
+ if rc:
+ self.log(format_text("[SUCCESS]\n", 'green', 'bold'))
+ else:
+ self.log(format_text("[FAILED]\n", 'red', 'bold'))
+
+
+ def log_cmd (self, desc):
+ self.pre_cmd(desc)
+ self.post_cmd(True)
+
+
+ # suppress object getter
+ def supress (self):
+ class Supress(object):
+ def __init__ (self, logger):
+ self.logger = logger
+
+ def __enter__ (self):
+ self.saved_level = self.logger.get_verbose()
+ self.logger.set_verbose(LoggerApi.VERBOSE_QUIET)
+
+ def __exit__ (self, type, value, traceback):
+ self.logger.set_verbose(self.saved_level)
+
+ return Supress(self)
+
+
+
+# default logger - to stdout
+class DefaultLogger(LoggerApi):
+
+ def __init__ (self):
+ super(DefaultLogger, self).__init__()
+
+ def write (self, msg, newline = True):
+ if newline:
+ print(msg)
+ else:
+ sys.stdout.write(msg)
+
+ def flush (self):
+ sys.stdout.flush()
+
+
+############################ async event handler #############################
+############################ #############################
+############################ #############################
+
+# an event
+class Event(object):
+
+ def __init__ (self, origin, ev_type, msg):
+ self.origin = origin
+ self.ev_type = ev_type
+ self.msg = msg
+
+ self.ts = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
+
+ def __str__ (self):
+
+ prefix = "[{:^}][{:^}]".format(self.origin, self.ev_type)
+
+ return "{:<10} - {:18} - {:}".format(self.ts, prefix, format_text(self.msg, 'bold'))
+
+
+# handles different async events given to the client
+class EventsHandler(object):
+
+
+ def __init__ (self, client):
+ self.client = client
+ self.logger = self.client.logger
+
+ self.events = []
+
+ # public functions
+
+ def get_events (self, ev_type_filter = None):
+ if ev_type_filter:
+ return [ev for ev in self.events if ev.ev_type in listify(ev_type_filter)]
+ else:
+ return [ev for ev in self.events]
+
+
+ def clear_events (self):
+ self.events = []
+
+
+ def log_warning (self, msg, show = True):
+ self.__add_event_log('local', 'warning', msg, show)
+
+
+ # events called internally
+
+ def on_async_dead (self):
+ if self.client.connected:
+ msg = 'Lost connection to server'
+ self.__add_event_log('local', 'info', msg, True)
+ self.client.connected = False
+
+
+ def on_async_alive (self):
+ pass
+
+
+
+ def on_async_rx_stats_event (self, data, baseline):
+ self.client.flow_stats.update(data, baseline)
+
+ def on_async_latency_stats_event (self, data, baseline):
+ self.client.latency_stats.update(data, baseline)
+
+ # handles an async stats update from the subscriber
+ def on_async_stats_update(self, dump_data, baseline):
+ global_stats = {}
+ port_stats = {}
+
+ # filter the values per port and general
+ for key, value in dump_data.items():
+ # match a pattern of ports
+ m = re.search('(.*)\-(\d+)', key)
+ if m:
+ port_id = int(m.group(2))
+ field_name = m.group(1)
+ if port_id in self.client.ports:
+ if port_id not in port_stats:
+ port_stats[port_id] = {}
+ port_stats[port_id][field_name] = value
+ else:
+ continue
+ else:
+ # no port match - general stats
+ global_stats[key] = value
+
+ # update the general object with the snapshot
+ self.client.global_stats.update(global_stats, baseline)
+
+ # update all ports
+ for port_id, data in port_stats.items():
+ self.client.ports[port_id].port_stats.update(data, baseline)
+
+
+
+ # dispatcher for server async events (port started, port stopped, etc.)
+ def on_async_event (self, event_type, data):
+ # DP stopped
+ show_event = False
+
+ # port started
+ if (event_type == 0):
+ port_id = int(data['port_id'])
+ ev = "Port {0} has started".format(port_id)
+ self.__async_event_port_started(port_id)
+
+ # port stopped
+ elif (event_type == 1):
+ port_id = int(data['port_id'])
+ ev = "Port {0} has stopped".format(port_id)
+
+ # call the handler
+ self.__async_event_port_stopped(port_id)
+
+
+ # port paused
+ elif (event_type == 2):
+ port_id = int(data['port_id'])
+ ev = "Port {0} has paused".format(port_id)
+
+ # call the handler
+ self.__async_event_port_paused(port_id)
+
+ # port resumed
+ elif (event_type == 3):
+ port_id = int(data['port_id'])
+ ev = "Port {0} has resumed".format(port_id)
+
+ # call the handler
+ self.__async_event_port_resumed(port_id)
+
+ # port finished traffic
+ elif (event_type == 4):
+ port_id = int(data['port_id'])
+ ev = "Port {0} job done".format(port_id)
+
+ # call the handler
+ self.__async_event_port_job_done(port_id)
+ show_event = True
+
+ # port was acquired - maybe stolen...
+ elif (event_type == 5):
+ session_id = data['session_id']
+
+ port_id = int(data['port_id'])
+ who = data['who']
+ force = data['force']
+
+ # if we hold the port and it was not taken by this session - show it
+ if port_id in self.client.get_acquired_ports() and session_id != self.client.session_id:
+ show_event = True
+
+ # format the thief/us...
+ if session_id == self.client.session_id:
+ user = 'you'
+ elif who == self.client.username:
+ user = 'another session of yours'
+ else:
+ user = "'{0}'".format(who)
+
+ if force:
+ ev = "Port {0} was forcely taken by {1}".format(port_id, user)
+ else:
+ ev = "Port {0} was taken by {1}".format(port_id, user)
+
+ # call the handler in case it's not this session
+ if session_id != self.client.session_id:
+ self.__async_event_port_acquired(port_id, who)
+
+
+ # port was released
+ elif (event_type == 6):
+ port_id = int(data['port_id'])
+ who = data['who']
+ session_id = data['session_id']
+
+ if session_id == self.client.session_id:
+ user = 'you'
+ elif who == self.client.username:
+ user = 'another session of yours'
+ else:
+ user = "'{0}'".format(who)
+
+ ev = "Port {0} was released by {1}".format(port_id, user)
+
+ # call the handler in case it's not this session
+ if session_id != self.client.session_id:
+ self.__async_event_port_released(port_id)
+
+ elif (event_type == 7):
+ port_id = int(data['port_id'])
+ ev = "port {0} job failed".format(port_id)
+ show_event = True
+
+ # port attr changed
+ elif (event_type == 8):
+ port_id = int(data['port_id'])
+ if data['attr'] == self.client.ports[port_id].attr:
+ return # false alarm
+ old_info = self.client.ports[port_id].get_info()
+ self.__async_event_port_attr_changed(port_id, data['attr'])
+ new_info = self.client.ports[port_id].get_info()
+ ev = "port {0} attributes changed".format(port_id)
+ for key, old_val in old_info.items():
+ new_val = new_info[key]
+ if old_val != new_val:
+ ev += '\n {key}: {old} -> {new}'.format(
+ key = key,
+ old = old_val.lower() if type(old_val) is str else old_val,
+ new = new_val.lower() if type(new_val) is str else new_val)
+ show_event = True
+
+ # server stopped
+ elif (event_type == 100):
+ ev = "Server has stopped"
+ self.__async_event_server_stopped()
+ show_event = True
+
+
+ else:
+ # unknown event - ignore
+ return
+
+
+ self.__add_event_log('server', 'info', ev, show_event)
+
+
+ # private functions
+
+ # in rare cases, events may arrive for a non-existent port
+ # (server was re-run with different config)
+ def __async_event_port_job_done (self, port_id):
+ if port_id in self.client.ports:
+ self.client.ports[port_id].async_event_port_job_done()
+
+ def __async_event_port_stopped (self, port_id):
+ if port_id in self.client.ports:
+ self.client.ports[port_id].async_event_port_stopped()
+
+
+ def __async_event_port_started (self, port_id):
+ if port_id in self.client.ports:
+ self.client.ports[port_id].async_event_port_started()
+
+ def __async_event_port_paused (self, port_id):
+ if port_id in self.client.ports:
+ self.client.ports[port_id].async_event_port_paused()
+
+
+ def __async_event_port_resumed (self, port_id):
+ if port_id in self.client.ports:
+ self.client.ports[port_id].async_event_port_resumed()
+
+ def __async_event_port_acquired (self, port_id, who):
+ if port_id in self.client.ports:
+ self.client.ports[port_id].async_event_acquired(who)
+
+ def __async_event_port_released (self, port_id):
+ if port_id in self.client.ports:
+ self.client.ports[port_id].async_event_released()
+
+ def __async_event_server_stopped (self):
+ self.client.connected = False
+
+ def __async_event_port_attr_changed (self, port_id, attr):
+ if port_id in self.client.ports:
+ self.client.ports[port_id].async_event_port_attr_changed(attr)
+
+ # add event to log
+ def __add_event_log (self, origin, ev_type, msg, show = False):
+
+ event = Event(origin, ev_type, msg)
+ self.events.append(event)
+ if show:
+ self.logger.async_log("\n\n{0}".format(str(event)))
+
+
+
+
+
+############################ RPC layer #############################
+############################ #############################
+############################ #############################
+
+class CCommLink(object):
+ """Describes the connectivity of the stateless client method"""
+ def __init__(self, server="localhost", port=5050, virtual=False, client = None):
+ self.virtual = virtual
+ self.server = server
+ self.port = port
+ self.rpc_link = JsonRpcClient(self.server, self.port, client)
+
+ @property
+ def is_connected(self):
+ if not self.virtual:
+ return self.rpc_link.connected
+ else:
+ return True
+
+ def get_server (self):
+ return self.server
+
+ def get_port (self):
+ return self.port
+
+ def connect(self):
+ if not self.virtual:
+ return self.rpc_link.connect()
+
+ def disconnect(self):
+ if not self.virtual:
+ return self.rpc_link.disconnect()
+
+ def transmit(self, method_name, params = None, api_class = 'core'):
+ if self.virtual:
+ self._prompt_virtual_tx_msg()
+ _, msg = self.rpc_link.create_jsonrpc_v2(method_name, params, api_class)
+ print(msg)
+ return
+ else:
+ return self.rpc_link.invoke_rpc_method(method_name, params, api_class)
+
+ def transmit_batch(self, batch_list):
+ if self.virtual:
+ self._prompt_virtual_tx_msg()
+ print([msg
+ for _, msg in [self.rpc_link.create_jsonrpc_v2(command.method, command.params, command.api_class)
+ for command in batch_list]])
+ else:
+ batch = self.rpc_link.create_batch()
+ for command in batch_list:
+ batch.add(command.method, command.params, command.api_class)
+ # invoke the batch
+ return batch.invoke()
+
+ def _prompt_virtual_tx_msg(self):
+ print("Transmitting virtually over tcp://{server}:{port}".format(server=self.server,
+ port=self.port))
+
+
+
+############################ client #############################
+############################ #############################
+############################ #############################
+
+class STLClient(object):
+ """TRex Stateless client object - gives operations per TRex/user"""
+
+ # different modes for distributing traffic cores among ports
+ CORE_MASK_SPLIT = 1
+ CORE_MASK_PIN = 2
+
+ def __init__(self,
+ username = common.get_current_user(),
+ server = "localhost",
+ sync_port = 4501,
+ async_port = 4500,
+ verbose_level = LoggerApi.VERBOSE_QUIET,
+ logger = None,
+ virtual = False):
+ """
+ Configure the connection settings
+
+ :parameters:
+ username : string
+ the user name, for example imarom
+
+ server : string
+ the server name or ip
+
+ sync_port : int
+ the RPC port
+
+ async_port : int
+ the ASYNC port
+
+ .. code-block:: python
+
+ # Example
+
+ # connect to local TRex server
+ c = STLClient()
+
+ # connect to remote server trex-remote-server
+ c = STLClient(server = "trex-remote-server" )
+
+ c = STLClient(server = "10.0.0.10" )
+
+ # verbose mode
+ c = STLClient(server = "10.0.0.10", verbose_level = LoggerApi.VERBOSE_HIGH )
+
+ # change user name
+ c = STLClient(username = "root",server = "10.0.0.10", verbose_level = LoggerApi.VERBOSE_HIGH )
+
+ c.connect()
+
+ c.disconnect()
+
+ """
+
+ self.username = username
+
+ # init objects
+ self.ports = {}
+ self.server_version = {}
+ self.system_info = {}
+ self.session_id = random.getrandbits(32)
+ self.connected = False
+
+ # API classes
+ self.api_vers = [ {'type': 'core', 'major': 2, 'minor': 3 } ]
+ self.api_h = {'core': None}
+
+ # logger
+ self.logger = DefaultLogger() if not logger else logger
+
+ # initial verbose
+ self.logger.set_verbose(verbose_level)
+
+ # low level RPC layer
+ self.comm_link = CCommLink(server,
+ sync_port,
+ virtual,
+ self)
+
+ # async event handler manager
+ self.event_handler = EventsHandler(self)
+
+ # async subscriber level
+ self.async_client = CTRexAsyncClient(server,
+ async_port,
+ self)
+
+
+
+
+ # stats
+ self.connection_info = {"username": username,
+ "server": server,
+ "sync_port": sync_port,
+ "async_port": async_port,
+ "virtual": virtual}
+
+
+ self.global_stats = trex_stl_stats.CGlobalStats(self.connection_info,
+ self.server_version,
+ self.ports,
+ self.event_handler)
+
+ self.flow_stats = trex_stl_stats.CRxStats(self.ports)
+
+ self.latency_stats = trex_stl_stats.CLatencyStats(self.ports)
+
+ self.util_stats = trex_stl_stats.CUtilStats(self)
+
+ self.xstats = trex_stl_stats.CXStats(self)
+
+ self.stats_generator = trex_stl_stats.CTRexInfoGenerator(self.global_stats,
+ self.ports,
+ self.flow_stats,
+ self.latency_stats,
+ self.util_stats,
+ self.xstats,
+ self.async_client.monitor)
+
+
+
+
+ ############# private functions - used by the class itself ###########
+
+ # some preprocessing for port argument
+ def __ports (self, port_id_list):
+
+ # none means all
+ if port_id_list is None:
+ return range(0, self.get_port_count())
+
+ # always list
+ if isinstance(port_id_list, int):
+ port_id_list = [port_id_list]
+
+ if not isinstance(port_id_list, list):
+ raise ValueError("Bad port id list: {0}".format(port_id_list))
+
+ for port_id in port_id_list:
+ if not isinstance(port_id, int) or (port_id < 0) or (port_id >= self.get_port_count()):
+ raise ValueError("Bad port id {0}".format(port_id))
+
+ return port_id_list
+
+
+ # sync ports
+ def __sync_ports (self, port_id_list = None, force = False):
+ port_id_list = self.__ports(port_id_list)
+
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].sync())
+
+ return rc
+
+ # acquire ports, if port_list is none - get all
+ def __acquire (self, port_id_list = None, force = False, sync_streams = True):
+ port_id_list = self.__ports(port_id_list)
+
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].acquire(force, sync_streams))
+
+ return rc
+
+ # release ports
+ def __release (self, port_id_list = None):
+ port_id_list = self.__ports(port_id_list)
+
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].release())
+
+ return rc
+
+
+ def __add_streams(self, stream_list, port_id_list = None):
+
+ port_id_list = self.__ports(port_id_list)
+
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].add_streams(stream_list))
+
+ return rc
+
+
+
+ def __remove_streams(self, stream_id_list, port_id_list = None):
+
+ port_id_list = self.__ports(port_id_list)
+
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].remove_streams(stream_id_list))
+
+ return rc
+
+
+
+ def __remove_all_streams(self, port_id_list = None):
+ port_id_list = self.__ports(port_id_list)
+
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].remove_all_streams())
+
+ return rc
+
+
+ def __get_stream(self, stream_id, port_id, get_pkt = False):
+
+ return self.ports[port_id].get_stream(stream_id)
+
+
+ def __get_all_streams(self, port_id, get_pkt = False):
+
+ return self.ports[port_id].get_all_streams()
+
+
+ def __get_stream_id_list(self, port_id):
+
+ return self.ports[port_id].get_stream_id_list()
+
+
+ def __start (self,
+ multiplier,
+ duration,
+ port_id_list,
+ force,
+ core_mask):
+
+ port_id_list = self.__ports(port_id_list)
+
+ rc = RC()
+
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].start(multiplier,
+ duration,
+ force,
+ core_mask[port_id]))
+
+ return rc
+
+
+ def __resume (self, port_id_list = None, force = False):
+
+ port_id_list = self.__ports(port_id_list)
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].resume())
+
+ return rc
+
+ def __pause (self, port_id_list = None, force = False):
+
+ port_id_list = self.__ports(port_id_list)
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].pause())
+
+ return rc
+
+
+ def __stop (self, port_id_list = None, force = False):
+
+ port_id_list = self.__ports(port_id_list)
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].stop(force))
+
+ return rc
+
+
+ def __update (self, mult, port_id_list = None, force = False):
+
+ port_id_list = self.__ports(port_id_list)
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].update(mult, force))
+
+ return rc
+
+
+ def __push_remote (self, pcap_filename, port_id_list, ipg_usec, speedup, count, duration, is_dual):
+
+ port_id_list = self.__ports(port_id_list)
+ rc = RC()
+
+ for port_id in port_id_list:
+
+ # for dual, provide the slave handler as well
+ slave_handler = self.ports[port_id ^ 0x1].handler if is_dual else ""
+
+ rc.add(self.ports[port_id].push_remote(pcap_filename,
+ ipg_usec,
+ speedup,
+ count,
+ duration,
+ is_dual,
+ slave_handler))
+
+ return rc
+
+
+ def __validate (self, port_id_list = None):
+ port_id_list = self.__ports(port_id_list)
+
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].validate())
+
+ return rc
+
+
+ def __set_port_attr (self, port_id_list = None, attr_dict = None):
+
+ port_id_list = self.__ports(port_id_list)
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].set_attr(attr_dict))
+
+ return rc
+
+
+
+ # connect to server
+ def __connect(self):
+
+ # first disconnect if already connected
+ if self.is_connected():
+ self.__disconnect()
+
+ # clear this flag
+ self.connected = False
+
+ # connect sync channel
+ self.logger.pre_cmd("Connecting to RPC server on {0}:{1}".format(self.connection_info['server'], self.connection_info['sync_port']))
+ rc = self.comm_link.connect()
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ return rc
+
+
+ # API sync
+ rc = self._transmit("api_sync", params = {'api_vers': self.api_vers}, api_class = None)
+ if not rc:
+ return rc
+
+ # decode
+ for api in rc.data()['api_vers']:
+ self.api_h[ api['type'] ] = api['api_h']
+
+
+ # version
+ rc = self._transmit("get_version")
+ if not rc:
+ return rc
+
+ self.server_version = rc.data()
+ self.global_stats.server_version = rc.data()
+
+ # cache system info
+ rc = self._transmit("get_system_info")
+ if not rc:
+ return rc
+
+ self.system_info = rc.data()
+ self.global_stats.system_info = rc.data()
+
+ # cache supported commands
+ rc = self._transmit("get_supported_cmds")
+ if not rc:
+ return rc
+
+ self.supported_cmds = sorted(rc.data())
+
+ # create ports
+ for port_id in range(self.system_info["port_count"]):
+ info = self.system_info['ports'][port_id]
+
+ self.ports[port_id] = Port(port_id,
+ self.username,
+ self.comm_link,
+ self.session_id,
+ info)
+
+
+ # sync the ports
+ rc = self.__sync_ports()
+ if not rc:
+ return rc
+
+
+ # connect async channel
+ self.logger.pre_cmd("Connecting to publisher server on {0}:{1}".format(self.connection_info['server'], self.connection_info['async_port']))
+ rc = self.async_client.connect()
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ return rc
+
+ self.connected = True
+
+ return RC_OK()
+
+
+ # disconnect from server
+ def __disconnect(self, release_ports = True):
+ # release any previous acquired ports
+ if self.is_connected() and release_ports:
+ self.__release(self.get_acquired_ports())
+
+ self.comm_link.disconnect()
+ self.async_client.disconnect()
+
+ self.connected = False
+
+ return RC_OK()
+
+
+ # clear stats
+ def __clear_stats(self, port_id_list, clear_global, clear_flow_stats, clear_latency_stats, clear_xstats):
+
+ # we must be sync with the server
+ self.async_client.barrier()
+
+ for port_id in port_id_list:
+ self.ports[port_id].clear_stats()
+
+ if clear_global:
+ self.global_stats.clear_stats()
+
+ if clear_flow_stats:
+ self.flow_stats.clear_stats()
+
+ if clear_latency_stats:
+ self.latency_stats.clear_stats()
+
+ if clear_xstats:
+ self.xstats.clear_stats()
+
+ self.logger.log_cmd("Clearing stats on port(s) {0}:".format(port_id_list))
+
+ return RC_OK()
+
+
+ # get stats
+ def __get_stats (self, port_id_list):
+ stats = {}
+
+ stats['global'] = self.global_stats.get_stats()
+
+ total = {}
+ for port_id in port_id_list:
+ port_stats = self.ports[port_id].get_stats()
+ stats[port_id] = port_stats
+
+ for k, v in port_stats.items():
+ if not k in total:
+ total[k] = v
+ else:
+ total[k] += v
+
+ stats['total'] = total
+
+ stats['flow_stats'] = self.flow_stats.get_stats()
+ stats['latency'] = self.latency_stats.get_stats()
+
+ return stats
+
+
+ def __decode_core_mask (self, ports, core_mask):
+
+ # predefined modes
+ if isinstance(core_mask, int):
+ if core_mask not in [self.CORE_MASK_PIN, self.CORE_MASK_SPLIT]:
+ raise STLError("'core_mask' can be either CORE_MASK_PIN, CORE_MASK_SPLIT or a list of masks")
+
+ decoded_mask = {}
+ for port in ports:
+ # a pin mode was requested and we have
+ # the second port from the group in the start list
+ if (core_mask == self.CORE_MASK_PIN) and ( (port ^ 0x1) in ports ):
+ decoded_mask[port] = 0x55555555 if( port % 2) == 0 else 0xAAAAAAAA
+ else:
+ decoded_mask[port] = None
+
+ return decoded_mask
+
+ # list of masks
+ elif isinstance(core_mask, list):
+ if len(ports) != len(core_mask):
+ raise STLError("'core_mask' list must be the same length as 'ports' list")
+
+ decoded_mask = {}
+ for i, port in enumerate(ports):
+ decoded_mask[port] = core_mask[i]
+
+ return decoded_mask
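+
+ # A hypothetical sketch of the decoding results (illustrative values only):
+ # __decode_core_mask([0, 1], CORE_MASK_PIN) -> {0: 0x55555555, 1: 0xAAAAAAAA}
+ # __decode_core_mask([0], CORE_MASK_PIN) -> {0: None} (adjacent port not in the list)
+ # __decode_core_mask([0, 1], [0xF, 0xF0]) -> {0: 0xF, 1: 0xF0}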
+
+
+
+ ############ functions used by other classes but not users ##############
+
+ def _validate_port_list (self, port_id_list):
+ # listify a single int
+ if isinstance(port_id_list, int):
+ port_id_list = [port_id_list]
+
+ # should be a list
+ if not isinstance(port_id_list, list):
+ raise STLTypeError('port_id_list', type(port_id_list), list)
+
+ if not port_id_list:
+ raise STLError('No ports provided')
+
+ valid_ports = self.get_all_ports()
+ for port_id in port_id_list:
+ if not port_id in valid_ports:
+ raise STLError("Port ID '{0}' is not a valid port ID - valid values: {1}".format(port_id, valid_ports))
+
+ return port_id_list
+
+
+ # transmit request on the RPC link
+ def _transmit(self, method_name, params = None, api_class = 'core'):
+ return self.comm_link.transmit(method_name, params, api_class)
+
+ # transmit batch request on the RPC link
+ def _transmit_batch(self, batch_list):
+ return self.comm_link.transmit_batch(batch_list)
+
+ # stats
+ def _get_formatted_stats(self, port_id_list, stats_mask = trex_stl_stats.COMPACT):
+
+ stats_opts = common.list_intersect(trex_stl_stats.ALL_STATS_OPTS, stats_mask)
+
+ stats_obj = OrderedDict()
+ for stats_type in stats_opts:
+ stats_obj.update(self.stats_generator.generate_single_statistic(port_id_list, stats_type))
+
+ return stats_obj
+
+ def _get_streams(self, port_id_list, streams_mask=set()):
+
+ streams_obj = self.stats_generator.generate_streams_info(port_id_list, streams_mask)
+
+ return streams_obj
+
+
+ def _invalidate_stats (self, port_id_list):
+ for port_id in port_id_list:
+ self.ports[port_id].invalidate_stats()
+
+ self.global_stats.invalidate()
+ self.flow_stats.invalidate()
+
+ return RC_OK()
+
+
+ # remove all RX filters in a safe manner
+ def _remove_rx_filters (self, ports, rx_delay_ms):
+
+ # get the enabled RX ports
+ rx_ports = [port_id for port_id in ports if self.ports[port_id].has_rx_enabled()]
+
+ if not rx_ports:
+ return RC_OK()
+
+ # block while any RX-configured port has not yet had its delay expire
+ while any([not self.ports[port_id].has_rx_delay_expired(rx_delay_ms) for port_id in rx_ports]):
+ time.sleep(0.01)
+
+ # remove RX filters
+ rc = RC()
+ for port_id in rx_ports:
+ rc.add(self.ports[port_id].remove_rx_filters())
+
+ return rc
+
+
+ #################################
+ # ------ private methods ------ #
+ @staticmethod
+ def __get_mask_keys(ok_values={True}, **kwargs):
+ masked_keys = set()
+ for key, val in kwargs.items():
+ if val in ok_values:
+ masked_keys.add(key)
+ return masked_keys
+
+ @staticmethod
+ def __filter_namespace_args(namespace, ok_values):
+ return {k: v for k, v in namespace.__dict__.items() if k in ok_values}
+
+
+ # API decorator - double wrap because of argument
+ def __api_check(connected = True):
+
+ def wrap (f):
+ @wraps(f)
+ def wrap2(*args, **kwargs):
+ client = args[0]
+
+ func_name = f.__name__
+
+ # check connection
+ if connected and not client.is_connected():
+ raise STLStateError(func_name, 'disconnected')
+
+ try:
+ ret = f(*args, **kwargs)
+ except KeyboardInterrupt as e:
+ raise STLError("Interrupted by a keyboard signal (probably ctrl + c)")
+
+ return ret
+ return wrap2
+
+ return wrap
+
+
+
+ ############################ API #############################
+ ############################ #############################
+ ############################ #############################
+ def __enter__ (self):
+ self.connect()
+ self.acquire(force = True)
+ self.reset()
+ return self
+
+ def __exit__ (self, type, value, traceback):
+ if self.get_active_ports():
+ self.stop(self.get_active_ports())
+ self.disconnect()
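+
+ # A minimal usage sketch of the context manager (assuming a reachable server):
+ #
+ # with STLClient(server = '10.0.0.10') as c:
+ # ... # on enter: connect(), acquire(force = True), reset()
+ # # on exit: active ports are stopped and the client disconnects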
+
+ ############################ Getters #############################
+ ############################ #############################
+ ############################ #############################
+
+
+ # return verbose level of the logger
+ def get_verbose (self):
+ """
+ Get the verbose mode
+
+ :parameters:
+ none
+
+ :return:
+ The current verbose level
+
+ :raises:
+ None
+
+ """
+ return self.logger.get_verbose()
+
+ # is the client in read-only mode?
+ def is_all_ports_acquired (self):
+ """
+ is_all_ports_acquired
+
+ :parameters:
+ None
+
+ :return:
+ Returns True if all ports are acquired
+
+ :raises:
+ None
+
+ """
+
+ return (self.get_all_ports() == self.get_acquired_ports())
+
+
+ # is the client connected ?
+ def is_connected (self):
+ """
+
+ :parameters:
+ None
+
+ :return:
+ True if the client is connected to the server
+
+ :raises:
+ None
+
+ """
+
+ return self.connected and self.comm_link.is_connected
+
+
+ # get connection info
+ def get_connection_info (self):
+ """
+
+ :parameters:
+ None
+
+ :return:
+ Connection dict
+
+ :raises:
+ None
+
+ """
+
+ return self.connection_info
+
+
+ # get supported commands by the server
+ def get_server_supported_cmds(self):
+ """
+
+ :parameters:
+ None
+
+ :return:
+ List of commands supported by the server
+
+ :raises:
+ None
+
+ """
+
+ return self.supported_cmds
+
+ # get server version
+ def get_server_version(self):
+ """
+
+ :parameters:
+ None
+
+ :return:
+ Server version dict
+
+ :raises:
+ None
+
+ """
+
+ return self.server_version
+
+ # get server system info
+ def get_server_system_info(self):
+ """
+
+ :parameters:
+ None
+
+ :return:
+ Server system info dict
+
+ :raises:
+ None
+
+ """
+
+ return self.system_info
+
+ # get port count
+ def get_port_count(self):
+ """
+
+ :parameters:
+ None
+
+ :return:
+ Number of ports
+
+ :raises:
+ None
+
+ """
+
+ return len(self.ports)
+
+
+ # returns the port object
+ def get_port (self, port_id):
+ port = self.ports.get(port_id)
+ if port is not None:
+ return port
+ else:
+ raise STLArgumentError('port id', port_id, valid_values = self.get_all_ports())
+
+
+ # get all ports as IDs
+ def get_all_ports (self):
+ """
+
+ :parameters:
+ None
+
+ :return:
+ List of all port IDs
+
+ :raises:
+ None
+
+ """
+
+ return list(self.ports)
+
+ # get all acquired ports
+ def get_acquired_ports(self):
+ return [port_id
+ for port_id, port_obj in self.ports.items()
+ if port_obj.is_acquired()]
+
+ # get all active ports (TX or pause)
+ def get_active_ports(self, owned = True):
+ if owned:
+ return [port_id
+ for port_id, port_obj in self.ports.items()
+ if port_obj.is_active() and port_obj.is_acquired()]
+ else:
+ return [port_id
+ for port_id, port_obj in self.ports.items()
+ if port_obj.is_active()]
+
+
+ # get paused ports
+ def get_paused_ports (self, owned = True):
+ if owned:
+ return [port_id
+ for port_id, port_obj in self.ports.items()
+ if port_obj.is_paused() and port_obj.is_acquired()]
+ else:
+ return [port_id
+ for port_id, port_obj in self.ports.items()
+ if port_obj.is_paused()]
+
+
+ # get all TX ports
+ def get_transmitting_ports (self, owned = True):
+ if owned:
+ return [port_id
+ for port_id, port_obj in self.ports.items()
+ if port_obj.is_transmitting() and port_obj.is_acquired()]
+ else:
+ return [port_id
+ for port_id, port_obj in self.ports.items()
+ if port_obj.is_transmitting()]
+
+
+ # get stats
+ def get_stats (self, ports = None, sync_now = True):
+ """
+ Return dictionary containing statistics information gathered from the server.
+
+ :parameters:
+
+ ports - List of ports on which to retrieve stats.
+ If None, assume the request is for all acquired ports.
+
+ sync_now - Boolean - If True, issue a call to the server to get the latest stats and wait for the result to arrive. Otherwise, return the last stats saved in the client cache.
+ The downside of passing True is a slight delay (a few tens of msec) in getting the result. For practical use, the value should be True.
+ :return:
+ Statistics dictionary of dictionaries with the following format:
+
+ =============================== ===============
+ key Meaning
+ =============================== ===============
+ :ref:`numbers (0,1,..)<total>` Statistics per port number
+ :ref:`total <total>` Sum of port statistics
+ :ref:`flow_stats <flow_stats>` Per flow statistics
+ :ref:`global <global>` Global statistics
+ :ref:`latency <latency>` Per flow statistics regarding flow latency
+ =============================== ===============
+
+ Below is description of each of the inner dictionaries.
+
+ .. _total:
+
+ **total** and per port statistics contain dictionary with following format.
+
+ Most of the byte counters (unless specified otherwise) are at the L2 layer, including the Ethernet FCS; e.g. the minimum packet size is 64 bytes
+
+ =============================== ===============
+ key Meaning
+ =============================== ===============
+ ibytes Number of input bytes
+ ierrors Number of input errors
+ ipackets Number of input packets
+ obytes Number of output bytes
+ oerrors Number of output errors
+ opackets Number of output packets
+ rx_bps Receive bytes per second rate (L2 layer)
+ rx_pps Receive packet per second rate
+ tx_bps Transmit bytes per second rate (L2 layer)
+ tx_pps Transmit packet per second rate
+ =============================== ===============
+
+ .. _flow_stats:
+
+ **flow_stats** contains :ref:`global dictionary <flow_stats_global>`, and dictionaries per packet group id (pg id). See structures below.
+
+ **per pg_id flow stat** dictionaries have the following structure:
+
+ ================= ===============
+ key Meaning
+ ================= ===============
+ rx_bps Received bytes per second rate
+ rx_bps_l1 Received bytes per second rate, including layer one
+ rx_bytes Total number of received bytes
+ rx_pkts Total number of received packets
+ rx_pps Received packets per second
+ tx_bps Transmit bytes per second rate
+ tx_bps_l1 Transmit bytes per second rate, including layer one
+ tx_bytes Total number of sent bytes
+ tx_pkts Total number of sent packets
+ tx_pps Transmit packets per second rate
+ ================= ===============
+
+ .. _flow_stats_global:
+
+ **global flow stats** dictionary has the following structure:
+
+ ================= ===============
+ key Meaning
+ ================= ===============
+ rx_err Number of flow statistics packets received that we could not associate to any pg_id. This can happen if latency on the used setup is large. See :ref:`wait_on_traffic <wait_on_traffic>` rx_delay_ms parameter for details.
+ tx_err Number of flow statistics packets transmitted that we could not associate to any pg_id. This is never expected. If you see a value other than 0 here, please report it.
+ ================= ===============
+
+ .. _global:
+
+ **global**
+
+ ================= ===============
+ key Meaning
+ ================= ===============
+ bw_per_core Estimated byte rate TRex can support per core. This is calculated by extrapolation of the current rate and the load on the transmitting cores.
+ cpu_util Estimate of the average utilization percentage of the transmitting cores
+ queue_full Total number of packets transmitted while the NIC TX queue was full. The packets will be transmitted, eventually, but will create high CPU% due to polling the queue. This usually indicates that the rate we are trying to transmit is too high for this port.
+ rx_cpu_util Estimate of the utilization percentage of the core handling RX traffic. If this value is too high, latency streams may be dropped.
+ rx_drop_bps Received bytes per second drop rate
+ rx_bps Received bytes per second rate
+ rx_pps Received packets per second rate
+ tx_bps Transmit bytes per second rate
+ tx_pps Transmit packets per second rate
+ ================= ===============
+
+ .. _latency:
+
+ **latency** contains :ref:`global dictionary <lat_stats_global>`, and dictionaries per packet group id (pg id). Each one with the following structure.
+
+ **per pg_id latency stat** dictionaries have the following structure:
+
+ =========================== ===============
+ key Meaning
+ =========================== ===============
+ :ref:`err_cntrs<err-cntrs>` Counters describing errors that occurred with this pg id
+ :ref:`latency<lat_inner>` Information regarding packet latency
+ =========================== ===============
+
+ Following are the inner dictionaries of latency
+
+ .. _err-cntrs:
+
+ **err-cntrs**
+
+ ================= ===============
+ key Meaning (see better explanation below the table)
+ ================= ===============
+ dropped How many packets were dropped (estimation)
+ dup How many packets were duplicated.
+ out_of_order How many packets were received out of order.
+ seq_too_high Number of events in which a packet arrived with a sequence number that was too high.
+ seq_too_low Number of events in which a packet arrived with a sequence number that was too low.
+ ================= ===============
+
+ For calculating packet error events, we add a sequence number to each packet's payload. We decide what went wrong based only on the sequence number
+ of the last packet received and that of the previous packet. 'seq_too_low' and 'seq_too_high' count events we see. 'dup', 'out_of_order' and 'dropped'
+ are heuristics we apply to try to understand what happened. They will be accurate in common error scenarios.
+ We describe a few scenarios below to help understand this.
+
+ Scenario 1: Received packet with seq num 10, and another one with seq num 10. We increment 'dup' and 'seq_too_low' by 1.
+
+ Scenario 2: Received a packet with seq num 10 and then a packet with seq num 15. We assume 4 packets were dropped, and increment 'dropped' by 4, and 'seq_too_high' by 1.
+ We expect next packet to arrive with sequence number 16.
+
+ Scenario 2, continued: Received a packet with seq num 11. We increment 'seq_too_low' by 1. We increment 'out_of_order' by 1. We *decrement* 'dropped' by 1.
+ (We assume here that one of the packets we considered as dropped before, actually arrived out of order).
+
+
+ .. _lat_inner:
+
+ **latency**
+
+ ================= ===============
+ key Meaning
+ ================= ===============
+ average Average latency over the stream lifetime (usec). A low-pass filter is applied to the last window average. It is computed each sampling period by the following formula: <average> = <prev average>/2 + <last sampling period average>/2
+ histogram Dictionary describing logarithmic distribution histogram of packet latencies. Keys in the dictionary represent range of latencies (in usec). Values are the total number of packets received in this latency range. For example, an entry {100:13} would mean that we saw 13 packets with latency in the range between 100 and 200 usec.
+ jitter Jitter of latency samples, computed as described in :rfc:`3550#appendix-A.8`
+ last_max Maximum latency measured between last two data reads from server (0.5 sec window).
+ total_max Maximum latency measured over the stream lifetime (in usec).
+ total_min Minimum latency measured over the stream lifetime (in usec).
+ ================= ===============
+
+ .. _lat_stats_global:
+
+ **global latency stats** dictionary has the following structure:
+
+ ================= ===============
+ key Meaning
+ ================= ===============
+ old_flow Number of latency statistics packets received that we could not associate to any pg_id. This can happen if latency on the used setup is large. See :ref:`wait_on_traffic <wait_on_traffic>` rx_delay_ms parameter for details.
+ bad_hdr Number of latency packets received with bad latency data. This can happen because of garbage packets in the network, or if the DUT causes packet corruption.
+ ================= ===============
+
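+ A minimal usage sketch (assuming a connected client ``c`` with acquired ports):
+
+ .. code-block:: python
+
+ stats = c.get_stats()
+ print(stats['total']['tx_pps']) # aggregated TX packet rate
+ print(stats['global']['cpu_util']) # average utilization of TX cores
+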
+ :raises:
+ None
+
+ """
+ # by default use all acquired ports
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ # check async barrier
+ if not isinstance(sync_now, bool):
+ raise STLArgumentError('sync_now', sync_now)
+
+
+ # if the user requested a barrier - use it
+ if sync_now:
+ rc = self.async_client.barrier()
+ if not rc:
+ raise STLError(rc)
+
+ return self.__get_stats(ports)
+
+
+ def get_events (self, ev_type_filter = None):
+ """
+ returns all the logged events
+
+ :parameters:
+ ev_type_filter - 'info', 'warning' or a list of those
+ default: no filter
+
+ :return:
+ logged events
+
+ :raises:
+ None
+
+ """
+ return self.event_handler.get_events(ev_type_filter)
+
+
+ def get_warnings (self):
+ """
+ returns all the logged warning events
+
+ :parameters:
+ None
+
+ :return:
+ logged warning events
+
+ :raises:
+ None
+
+ """
+ return self.get_events(ev_type_filter = 'warning')
+
+
+ def get_info (self):
+ """
+ returns all the logged info events
+
+ :parameters:
+ None
+
+ :return:
+ logged info events
+
+ :raises:
+ None
+
+ """
+ return self.get_events(ev_type_filter = 'info')
+
+
+ # get port(s) info as a list of dicts
+ @__api_check(True)
+ def get_port_info (self, ports = None):
+
+ ports = ports if ports is not None else self.get_all_ports()
+ ports = self._validate_port_list(ports)
+
+ return [self.ports[port_id].get_info() for port_id in ports]
+
+
+ ############################ Commands #############################
+ ############################ #############################
+ ############################ #############################
+
+
+ def set_verbose (self, level):
+ """
+ Sets verbose level
+
+ :parameters:
+ level : str
+ "high"
+ "low"
+ "normal"
+
+ :raises:
+ None
+
+ """
+ modes = {'low' : LoggerApi.VERBOSE_QUIET, 'normal': LoggerApi.VERBOSE_REGULAR, 'high': LoggerApi.VERBOSE_HIGH}
+
+ if not level in modes.keys():
+ raise STLArgumentError('level', level)
+
+ self.logger.set_verbose(modes[level])
+
+
+ @__api_check(False)
+ def connect (self):
+ """
+
+ Connects to the TRex server
+
+ :parameters:
+ None
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ rc = self.__connect()
+ if not rc:
+ raise STLError(rc)
+
+
+ @__api_check(False)
+ def disconnect (self, stop_traffic = True, release_ports = True):
+ """
+ Disconnects from the server
+
+ :parameters:
+ stop_traffic : bool
+ Attempts to stop traffic before disconnecting.
+ release_ports : bool
+ Attempts to release all the acquired ports.
+
+ """
+
+ # try to stop ports but do nothing if not possible
+ if stop_traffic:
+ try:
+ self.stop()
+ except STLError:
+ pass
+
+
+ self.logger.pre_cmd("Disconnecting from server at '{0}':'{1}'".format(self.connection_info['server'],
+ self.connection_info['sync_port']))
+ rc = self.__disconnect(release_ports)
+ self.logger.post_cmd(rc)
+
+
+
+ @__api_check(True)
+ def acquire (self, ports = None, force = False, sync_streams = True):
+ """
+ Acquires ports for executing commands
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ force : bool
+ Force acquire the ports.
+
+ sync_streams: bool
+ sync with the server about the configured streams
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ # by default use all ports
+ ports = ports if ports is not None else self.get_all_ports()
+ ports = self._validate_port_list(ports)
+
+ if force:
+ self.logger.pre_cmd("Force acquiring ports {0}:".format(ports))
+ else:
+ self.logger.pre_cmd("Acquiring ports {0}:".format(ports))
+
+ rc = self.__acquire(ports, force, sync_streams)
+
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ # cleanup
+ self.__release(ports)
+ raise STLError(rc)
+
+
+ @__api_check(True)
+ def release (self, ports = None):
+ """
+ Release ports
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ self.logger.pre_cmd("Releasing ports {0}:".format(ports))
+ rc = self.__release(ports)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+ @__api_check(True)
+ def ping(self):
+ """
+ Pings the server
+
+ :parameters:
+ None
+
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ self.logger.pre_cmd("Pinging the server on '{0}' port '{1}': ".format(self.connection_info['server'],
+ self.connection_info['sync_port']))
+ rc = self._transmit("ping", api_class = None)
+
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+ @__api_check(True)
+ def server_shutdown (self, force = False):
+ """
+ Sends the server a request for total shutdown
+
+ :parameters:
+ force - shutdown server even if some ports are owned by another
+ user
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ self.logger.pre_cmd("Sending shutdown request for the server")
+
+ rc = self._transmit("shutdown", params = {'force': force, 'user': self.username})
+
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+
+ @__api_check(True)
+ def get_active_pgids(self):
+ """
+ Get active group IDs
+
+ :parameters:
+ None
+
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ self.logger.pre_cmd( "Getting active packet group ids")
+
+ rc = self._transmit("get_active_pgids")
+
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+ @__api_check(True)
+ def get_util_stats(self):
+ """
+ Get utilization stats:
+ History of TRex CPU utilization per thread (list of lists)
+ MBUFs memory consumption per CPU socket.
+
+ :parameters:
+ None
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+ self.logger.pre_cmd('Getting Utilization stats')
+ return self.util_stats.get_stats()
+
+ @__api_check(True)
+ def get_xstats(self, port_id):
+ """
+ Get extended stats of port: all the counters as dict.
+
+ :parameters:
+ port_id: int
+
+ :returns:
+ Dict mapping counter names to uint64 values. Actual keys may vary per NIC.
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+ self.logger.pre_cmd('Getting xstats')
+ return self.xstats.get_stats(port_id)
+
+
+ @__api_check(True)
+ def reset(self, ports = None):
+ """
+ Force acquire ports, stop the traffic, remove all streams and clear stats
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+
+ ports = ports if ports is not None else self.get_all_ports()
+ ports = self._validate_port_list(ports)
+
+ # force take the port and ignore any streams on it
+ self.acquire(ports, force = True, sync_streams = False)
+ self.stop(ports, rx_delay_ms = 0)
+ self.remove_all_streams(ports)
+ self.clear_stats(ports)
+
+
+ @__api_check(True)
+ def remove_all_streams (self, ports = None):
+ """
+ remove all streams from port(s)
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ self.logger.pre_cmd("Removing all streams from port(s) {0}:".format(ports))
+ rc = self.__remove_all_streams(ports)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+
+ @__api_check(True)
+ def add_streams (self, streams, ports = None):
+ """
+ Add a list of streams to port(s)
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+ streams: list
+ Streams to attach (or profile)
+
+ :returns:
+ List of stream IDs in order of the stream list
+
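+ A minimal usage sketch (assuming a connected client ``c`` and STLStream objects ``s1``, ``s2`` built elsewhere):
+
+ .. code-block:: python
+
+ stream_ids = c.add_streams(s1, ports = [0]) # a single stream
+ stream_ids = c.add_streams([s1, s2], ports = [0, 1]) # a list of streams
+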
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ if isinstance(streams, STLProfile):
+ streams = streams.get_streams()
+
+ # transform single stream
+ if not isinstance(streams, list):
+ streams = [streams]
+
+ # check streams
+ if not all([isinstance(stream, STLStream) for stream in streams]):
+ raise STLArgumentError('streams', streams)
+
+ self.logger.pre_cmd("Attaching {0} streams to port(s) {1}:".format(len(streams), ports))
+ rc = self.__add_streams(streams, ports)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+ # return the stream IDs
+ return rc.data()
+
+ @__api_check(True)
+ def add_profile(self, filename, ports = None, **kwargs):
+ """ | Add streams from profile by its type. Supported types are:
+ | .py
+ | .yaml
+ | .pcap file that converted to profile automatically
+
+ :parameters:
+ filename : string
+ filename (with path) of the profile
+ ports : list
+ list of ports to add the profile (default: all acquired)
+ kwargs : dict
+ forward those key-value pairs to the profile (tunables)
+
+ :returns:
+ List of stream IDs in order of the stream list
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ validate_type('filename', filename, basestring)
+ profile = STLProfile.load(filename, **kwargs)
+ return self.add_streams(profile.get_streams(), ports)
+
+
+ @__api_check(True)
+ def remove_streams (self, stream_id_list, ports = None):
+ """
+ Remove a list of streams from ports
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+ stream_id_list: list
+ Stream id list to remove
+
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ # transform single stream
+ if not isinstance(stream_id_list, list):
+ stream_id_list = [stream_id_list]
+
+ # check streams
+ for stream_id in stream_id_list:
+ validate_type('stream_id', stream_id, int)
+
+ # remove streams
+ self.logger.pre_cmd("Removing {0} streams from port(s) {1}:".format(len(stream_id_list), ports))
+ rc = self.__remove_streams(stream_id_list, ports)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+
+
+ @__api_check(True)
+ def start (self,
+ ports = None,
+ mult = "1",
+ force = False,
+ duration = -1,
+ total = False,
+ core_mask = CORE_MASK_SPLIT):
+ """
+ Start traffic on port(s)
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ mult : str
+ Multiplier in a form of pps, bps, or line util in %
+ Examples: "5kpps", "10gbps", "85%", "32mbps"
+
+ force : bool
+ If the ports are not in stopped mode or do not have sufficient bandwidth for the traffic, determines whether to stop the current traffic and force start.
+ True: Force start
+ False: Do not force start
+
+ duration : int
+ Limit the run time (seconds)
+ -1 = unlimited
+
+ total : bool
+ Determines whether to divide the configured bandwidth among the ports, or to duplicate the bandwidth for each port.
+ True: Divide bandwidth among the ports
+ False: Duplicate
+
+ core_mask: CORE_MASK_SPLIT, CORE_MASK_PIN or a list of masks (one per port)
+ Determine the allocation of cores per port
+ In CORE_MASK_SPLIT all the traffic will be divided equally between all the cores
+ associated with each port
+ In CORE_MASK_PIN, for each dual-port group (a pair that shares the same cores),
+ the cores will be split, with half pinned to each port
+
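+ A minimal usage sketch (assuming a connected client ``c`` with streams attached to the acquired ports):
+
+ .. code-block:: python
+
+ # 85% line utilization on ports 0 and 1 for 10 seconds,
+ # with cores pinned per port
+ c.start(ports = [0, 1], mult = "85%", duration = 10,
+ core_mask = STLClient.CORE_MASK_PIN)
+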
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ validate_type('mult', mult, basestring)
+ validate_type('force', force, bool)
+ validate_type('duration', duration, (int, float))
+ validate_type('total', total, bool)
+ validate_type('core_mask', core_mask, (int, list))
+
+ # verify link status
+ ports_link_down = [port_id for port_id in ports if self.ports[port_id].attr.get('link',{}).get('up') == False]
+ if not force and ports_link_down:
+ raise STLError("Port(s) %s - link DOWN - check the connection or specify 'force'" % ports_link_down)
+
+ #########################
+ # decode core mask argument
+ decoded_mask = self.__decode_core_mask(ports, core_mask)
+ #######################
+
+ # verify multiplier
+ mult_obj = parsing_opts.decode_multiplier(mult,
+ allow_update = False,
+ divide_count = len(ports) if total else 1)
+ if not mult_obj:
+ raise STLArgumentError('mult', mult)
+
+
+ # verify ports are stopped or force stop them
+ active_ports = list(set(self.get_active_ports()).intersection(ports))
+ if active_ports:
+ if not force:
+ raise STLError("Port(s) {0} are active - please stop them or specify 'force'".format(active_ports))
+ else:
+ rc = self.stop(active_ports)
+ if not rc:
+ raise STLError(rc)
+
+
+ # start traffic
+ self.logger.pre_cmd("Starting traffic on port(s) {0}:".format(ports))
+ rc = self.__start(mult_obj, duration, ports, force, decoded_mask)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+
+ @__api_check(True)
+ def stop (self, ports = None, rx_delay_ms = 10):
+ """
+ Stop port(s)
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ rx_delay_ms : int
+ Time to wait until RX filters are removed.
+ This value should reflect the time it takes
+ packets which were transmitted to arrive
+ at the destination.
+ After this time the RX filters will be removed
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ if ports is None:
+ ports = self.get_active_ports()
+ if not ports:
+ return
+
+ ports = self._validate_port_list(ports)
+
+ self.logger.pre_cmd("Stopping traffic on port(s) {0}:".format(ports))
+ rc = self.__stop(ports)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+ # remove any RX filters
+ rc = self._remove_rx_filters(ports, rx_delay_ms = rx_delay_ms)
+ if not rc:
+ raise STLError(rc)
+
+
+ @__api_check(True)
+ def update (self, ports = None, mult = "1", total = False, force = False):
+ """
+ Update traffic on port(s)
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ mult : str
+ Multiplier in a form of pps, bps, or line util in %
+ Can also specify +/-
+ Examples: "5kpps+", "10gbps-", "85%", "32mbps", "20%+"
+
+ force : bool
+ If the ports are not in stopped mode or do not have sufficient bandwidth for the traffic, determines whether to stop the current traffic and force start.
+ True: Force start
+ False: Do not force start
+
+ total : bool
+ Determines whether to divide the configured bandwidth among the ports, or to duplicate the bandwidth for each port.
+ True: Divide bandwidth among the ports
+ False: Duplicate
+
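+ A minimal usage sketch (assuming traffic is already active on client ``c``):
+
+ .. code-block:: python
+
+ c.update(mult = "10gbps") # set an absolute rate
+ c.update(mult = "20%+") # increase the current rate by 20%
+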
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+
+ ports = ports if ports is not None else self.get_active_ports()
+ ports = self._validate_port_list(ports)
+
+ validate_type('mult', mult, basestring)
+ validate_type('force', force, bool)
+ validate_type('total', total, bool)
+
+ # verify multiplier
+ mult_obj = parsing_opts.decode_multiplier(mult,
+ allow_update = True,
+ divide_count = len(ports) if total else 1)
+ if not mult_obj:
+ raise STLArgumentError('mult', mult)
+
+
+ # call low level functions
+ self.logger.pre_cmd("Updating traffic on port(s) {0}:".format(ports))
+ rc = self.__update(mult_obj, ports, force)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+
+
+ @__api_check(True)
+ def pause (self, ports = None):
+ """
+ Pause traffic on port(s). Works only for ports that are active, and only if all streams are in Continuous mode.
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+
+ ports = ports if ports is not None else self.get_transmitting_ports()
+ ports = self._validate_port_list(ports)
+
+ self.logger.pre_cmd("Pausing traffic on port(s) {0}:".format(ports))
+ rc = self.__pause(ports)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+ @__api_check(True)
+ def resume (self, ports = None):
+ """
+ Resume traffic on port(s)
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+
+ ports = ports if ports is not None else self.get_paused_ports()
+ ports = self._validate_port_list(ports)
+
+
+ self.logger.pre_cmd("Resume traffic on port(s) {0}:".format(ports))
+ rc = self.__resume(ports)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+
+ @__api_check(True)
+ def push_remote (self,
+ pcap_filename,
+ ports = None,
+ ipg_usec = None,
+ speedup = 1.0,
+ count = 1,
+ duration = -1,
+ is_dual = False):
+ """
+ Push a PCAP file that resides on the server;
+ the path must be a full path accessible to the server
+
+ :parameters:
+ pcap_filename : str
+ PCAP file name in full path and accessible to the server
+
+ ports : list
+ Ports on which to execute the command
+
+ ipg_usec : float
+ Inter-packet gap in microseconds
+
+ speedup : float
+ A factor by which to adjust the IPG; effectively IPG = IPG / speedup
+
+ count: int
+ How many times to transmit the capture
+
+ duration: float
+ Limit runtime by duration in seconds
+
+ is_dual: bool
+ Inject from both directions.
+ Requires an ERF file with metadata for direction.
+ Also requires that all the ports be in master mode,
+ with their adjacent ports as slaves
+
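+ A minimal usage sketch (assuming a connected client ``c``; the server-side path below is illustrative):
+
+ .. code-block:: python
+
+ # replay a server-side capture on port 0, looped 3 times
+ c.push_remote('/tmp/cap.pcap', ports = [0], count = 3)
+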
+ :raises:
+ + :exc:`STLError`
+
+ """
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ validate_type('pcap_filename', pcap_filename, basestring)
+ validate_type('ipg_usec', ipg_usec, (float, int, type(None)))
+ validate_type('speedup', speedup, (float, int))
+ validate_type('count', count, int)
+ validate_type('duration', duration, (float, int))
+ validate_type('is_dual', is_dual, bool)
+
+ # for dual mode check that all are masters
+ if is_dual:
+ if not pcap_filename.endswith('erf'):
+ raise STLError("dual mode: only ERF format is supported for dual mode")
+
+ for port in ports:
+ master = port
+ slave = port ^ 0x1
+
+ if slave in ports:
+ raise STLError("dual mode: cannot provide adjacent ports ({0}, {1}) in a batch".format(master, slave))
+
+ if not slave in self.get_acquired_ports():
+ raise STLError("dual mode: adjacent port {0} must be owned during dual mode".format(slave))
+
+
+ self.logger.pre_cmd("Pushing remote PCAP on port(s) {0}:".format(ports))
+ rc = self.__push_remote(pcap_filename, ports, ipg_usec, speedup, count, duration, is_dual)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+
+ @__api_check(True)
+ def push_pcap (self,
+ pcap_filename,
+ ports = None,
+ ipg_usec = None,
+ speedup = 1.0,
+ count = 1,
+ duration = -1,
+ force = False,
+ vm = None,
+ packet_hook = None,
+ is_dual = False):
+ """
+ Push a local PCAP to the server.
+ This is equivalent to loading a PCAP file into a profile
+ and attaching the profile to port(s).
+
+ The file size is limited to 1 MB
+
+ :parameters:
+ pcap_filename : str
+ PCAP filename (accessible locally)
+
+ ports : list
+ Ports on which to execute the command
+
+ ipg_usec : float
+ Inter-packet gap in microseconds
+
+ speedup : float
+ A factor by which to adjust the IPG; effectively IPG = IPG / speedup
+
+ count: int
+ How many times to transmit the capture
+
+ duration: float
+ Limit runtime by duration in seconds
+
+ force: bool
+ Ignore file size limit - push any file size to the server
+
+ vm: list of VM instructions
+ VM instructions to apply for every packet
+
+ packet_hook : Callable or function
+ Will be applied to every packet
+
+ is_dual: bool
+ Inject from both directions.
+ Requires an ERF file with metadata for direction.
+ Also requires that all the ports be in master mode,
+ with their adjacent ports as slaves
+
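+ A minimal usage sketch (assuming a connected client ``c``; the local file name below is illustrative):
+
+ .. code-block:: python
+
+ # replay the capture twice at double speed on port 0
+ c.push_pcap('cap.pcap', ports = [0], speedup = 2.0, count = 2)
+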
+ :raises:
+ + :exc:`STLError`
+
+ """
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ validate_type('pcap_filename', pcap_filename, basestring)
+ validate_type('ipg_usec', ipg_usec, (float, int, type(None)))
+ validate_type('speedup', speedup, (float, int))
+ validate_type('count', count, int)
+ validate_type('duration', duration, (float, int))
+ validate_type('vm', vm, (list, type(None)))
+ validate_type('is_dual', is_dual, bool)
+
+
+ # no support for > 1MB PCAP - use push remote
+ if not force and os.path.getsize(pcap_filename) > (1024 * 1024):
+ raise STLError("PCAP size of {:} is too big for local push - consider using remote push or provide 'force'".format(format_num(os.path.getsize(pcap_filename), suffix = 'B')))
+
+ if is_dual:
+ for port in ports:
+ master = port
+ slave = port ^ 0x1
+
+ if slave in ports:
+ raise STLError("dual mode: cannot provide adjacent ports ({0}, {1}) in a batch".format(master, slave))
+
+ if not slave in self.get_acquired_ports():
+ raise STLError("dual mode: adjacent port {0} must be owned during dual mode".format(slave))
+
+ # regular push
+ if not is_dual:
+
+ # create the profile from the PCAP
+ try:
+ self.logger.pre_cmd("Converting '{0}' to streams:".format(pcap_filename))
+ profile = STLProfile.load_pcap(pcap_filename,
+ ipg_usec,
+ speedup,
+ count,
+ vm = vm,
+ packet_hook = packet_hook)
+ self.logger.post_cmd(RC_OK())
+ except STLError as e:
+ self.logger.post_cmd(RC_ERR(e))
+ raise
+
+
+ self.remove_all_streams(ports = ports)
+ id_list = self.add_streams(profile.get_streams(), ports)
+
+ return self.start(ports = ports, duration = duration)
+
+ else:
+
+ # create a dual profile
+ split_mode = 'MAC'
+
+ try:
+ self.logger.pre_cmd("Analyzing '{0}' for dual ports based on {1}:".format(pcap_filename, split_mode))
+ profile_a, profile_b = STLProfile.load_pcap(pcap_filename,
+ ipg_usec,
+ speedup,
+ count,
+ vm = vm,
+ packet_hook = packet_hook,
+ split_mode = split_mode)
+
+ self.logger.post_cmd(RC_OK())
+
+ except STLError as e:
+ self.logger.post_cmd(RC_ERR(e))
+ raise
+
+ all_ports = ports + [p ^ 0x1 for p in ports]
+
+ self.remove_all_streams(ports = all_ports)
+
+ for port in ports:
+ master = port
+ slave = port ^ 0x1
+
+ self.add_streams(profile_a.get_streams(), master)
+ self.add_streams(profile_b.get_streams(), slave)
+
+ return self.start(ports = all_ports, duration = duration)
+
+
+
+
+
+ @__api_check(True)
+ def validate (self, ports = None, mult = "1", duration = -1, total = False):
+ """
+ Validate port(s) configuration
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ mult : str
+ Multiplier in a form of pps, bps, or line util in %
+ Examples: "5kpps", "10gbps", "85%", "32mbps"
+
+ duration : int
+ Limit the run time (seconds)
+ -1 = unlimited
+
+ total : bool
+ Determines whether to divide the configured bandwidth among the ports, or to duplicate the bandwidth for each port.
+ True: Divide bandwidth among the ports
+ False: Duplicate
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ validate_type('mult', mult, basestring)
+ validate_type('duration', duration, (int, float))
+ validate_type('total', total, bool)
+
+
+ # verify multiplier
+ mult_obj = parsing_opts.decode_multiplier(mult,
+ allow_update = True,
+ divide_count = len(ports) if total else 1)
+ if not mult_obj:
+ raise STLArgumentError('mult', mult)
+
+ self.logger.pre_cmd("Validating streams on port(s) {0}:".format(ports))
+ rc = self.__validate(ports)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+ for port in ports:
+ self.ports[port].print_profile(mult_obj, duration)
+
+
+ @__api_check(False)
+ def clear_stats (self, ports = None, clear_global = True, clear_flow_stats = True, clear_latency_stats = True, clear_xstats = True):
+ """
+ Clear stats on port(s)
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ clear_global : bool
+ Clear the global stats
+
+ clear_flow_stats : bool
+ Clear the flow stats
+
+ clear_latency_stats : bool
+ Clear the latency stats
+
+ clear_xstats : bool
+ Clear the extended stats
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ ports = ports if ports is not None else self.get_all_ports()
+ ports = self._validate_port_list(ports)
+
+ # verify clear global
+ if not isinstance(clear_global, bool):
+ raise STLArgumentError('clear_global', clear_global)
+
+ rc = self.__clear_stats(ports, clear_global, clear_flow_stats, clear_latency_stats, clear_xstats)
+ if not rc:
+ raise STLError(rc)
+
+
+
+ @__api_check(True)
+ def is_traffic_active (self, ports = None):
+ """
+ Return whether the specified port(s) have active traffic
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ return set(self.get_active_ports()).intersection(ports)
+
+
+
+ @__api_check(True)
+ def wait_on_traffic (self, ports = None, timeout = None, rx_delay_ms = 10):
+ """
+ .. _wait_on_traffic:
+
+ Block until traffic on specified port(s) has ended
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ timeout : int
+ timeout in seconds
+ default will be blocking
+
+ rx_delay_ms : int
+ Time to wait (in milliseconds) after last packet was sent, until RX filters used for
+ measuring flow statistics and latency are removed.
+ This value should reflect the time it takes packets which were transmitted to arrive
+ at the destination.
+ After this time, RX filters will be removed, and packets arriving for the per-flow statistics feature and latency flows will be counted as errors.
+
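+ A minimal usage sketch (assuming a connected client ``c`` with streams attached to port 0):
+
+ .. code-block:: python
+
+ c.start(ports = [0], mult = "1kpps", duration = 10)
+ c.wait_on_traffic(ports = [0], timeout = 30)
+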
+ :raises:
+ + :exc:`STLTimeoutError` - in case timeout has expired
+ + :exc:`STLError`
+
+ """
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+
+ timer = PassiveTimer(timeout)
+
+ # wait while any of the required ports are active
+ while set(self.get_active_ports()).intersection(ports):
+
+ # make sure ASYNC thread is still alive - otherwise we will be stuck forever
+ if not self.async_client.is_thread_alive():
+ raise STLError("subscriber thread is dead")
+
+ time.sleep(0.01)
+ if timer.has_expired():
+ raise STLTimeoutError(timeout)
+
+ # remove any RX filters
+ rc = self._remove_rx_filters(ports, rx_delay_ms = rx_delay_ms)
+ if not rc:
+ raise STLError(rc)
+
+
+ @__api_check(True)
+ def set_port_attr (self, ports = None, promiscuous = None, link_up = None, led_on = None, flow_ctrl = None):
+ """
+ Set port attributes
+
+ :parameters:
+ promiscuous - True or False
+ link_up - True or False
+ led_on - True or False
+ flow_ctrl - 0: disable all, 1: enable tx side, 2: enable rx side, 3: full enable
+
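+ A minimal usage sketch (assuming a connected client ``c`` with acquired ports):
+
+ .. code-block:: python
+
+ # enable promiscuous mode and turn on the LED on port 0
+ c.set_port_attr(ports = [0], promiscuous = True, led_on = True)
+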
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ # check arguments
+ validate_type('promiscuous', promiscuous, (bool, type(None)))
+ validate_type('link_up', link_up, (bool, type(None)))
+ validate_type('led_on', led_on, (bool, type(None)))
+ validate_type('flow_ctrl', flow_ctrl, (int, type(None)))
+
+ # build attributes
+ attr_dict = {}
+ if promiscuous is not None:
+ attr_dict['promiscuous'] = {'enabled': promiscuous}
+ if link_up is not None:
+ attr_dict['link_status'] = {'up': link_up}
+ if led_on is not None:
+ attr_dict['led_status'] = {'on': led_on}
+ if flow_ctrl is not None:
+ attr_dict['flow_ctrl_mode'] = {'mode': flow_ctrl}
+
+ # no attributes to set
+ if not attr_dict:
+ return
+
+ self.logger.pre_cmd("Applying attributes on port(s) {0}:".format(ports))
+ rc = self.__set_port_attr(ports, attr_dict)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+ def clear_events (self):
+ """
+ Clear all events
+
+ :parameters:
+ None
+
+ :raises:
+ None
+
+ """
+ self.event_handler.clear_events()
+
+
+ ############################ Line #############################
+ ############################ Commands #############################
+ ############################ #############################
+
+ # console decorator
+ def __console(f):
+ @wraps(f)
+ def wrap(*args):
+ client = args[0]
+
+ time1 = time.time()
+
+ try:
+ rc = f(*args)
+ except STLError as e:
+ client.logger.log("Log:\n" + format_text(e.brief() + "\n", 'bold'))
+ return RC_ERR(e.brief())
+
+ # if got true - print time
+ if rc:
+ delta = time.time() - time1
+ client.logger.log(format_time(delta) + "\n")
+
+ return rc
+
+ return wrap
+
+ @__console
+ def ping_line (self, line):
+ '''pings the server'''
+ self.ping()
+ return RC_OK()
+
+ @__console
+ def shutdown_line (self, line):
+ '''shut down the server'''
+ parser = parsing_opts.gen_parser(self,
+ "shutdown",
+ self.shutdown_line.__doc__,
+ parsing_opts.FORCE)
+
+ opts = parser.parse_args(line.split())
+ if not opts:
+ return opts
+
+ self.server_shutdown(force = opts.force)
+ return RC_OK()
+
+ @__console
+ def connect_line (self, line):
+ '''Connects to the TRex server and acquires ports'''
+ parser = parsing_opts.gen_parser(self,
+ "connect",
+ self.connect_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.FORCE)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_all_ports())
+ if not opts:
+ return opts
+
+ self.connect()
+ self.acquire(ports = opts.ports, force = opts.force)
+
+ return RC_OK()
+
+
+ @__console
+ def acquire_line (self, line):
+ '''Acquire ports\n'''
+
+ # define a parser
+ parser = parsing_opts.gen_parser(self,
+ "acquire",
+ self.acquire_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.FORCE)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_all_ports())
+ if not opts:
+ return opts
+
+ # filter out all the already owned ports
+ ports = list_difference(opts.ports, self.get_acquired_ports())
+ if not ports:
+ msg = "acquire - all of port(s) {0} are already acquired".format(opts.ports)
+ self.logger.log(format_text(msg, 'bold'))
+ return RC_ERR(msg)
+
+ self.acquire(ports = ports, force = opts.force)
+
+ return RC_OK()
+
+
+ #
+ @__console
+ def release_line (self, line):
+ '''Release ports\n'''
+
+ parser = parsing_opts.gen_parser(self,
+ "release",
+ self.release_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports())
+ if not opts:
+ return opts
+
+ ports = list_intersect(opts.ports, self.get_acquired_ports())
+ if not ports:
+ if not opts.ports:
+ msg = "release - no acquired ports"
+ self.logger.log(format_text(msg, 'bold'))
+ return RC_ERR(msg)
+ else:
+ msg = "release - none of port(s) {0} are acquired".format(opts.ports)
+ self.logger.log(format_text(msg, 'bold'))
+ return RC_ERR(msg)
+
+
+ self.release(ports = ports)
+
+ return RC_OK()
+
+
+ @__console
+ def reacquire_line (self, line):
+ '''reacquire all the ports under your username which are not acquired by your session'''
+
+ parser = parsing_opts.gen_parser(self,
+ "reacquire",
+ self.reacquire_line.__doc__)
+
+ opts = parser.parse_args(line.split())
+ if not opts:
+ return opts
+
+ # find all the un-owned ports under your username
+ my_unowned_ports = list_difference([k for k, v in self.ports.items() if v.get_owner() == self.username], self.get_acquired_ports())
+ if not my_unowned_ports:
+ msg = "reacquire - no unowned ports under '{0}'".format(self.username)
+ self.logger.log(msg)
+ return RC_ERR(msg)
+
+ self.acquire(ports = my_unowned_ports, force = True)
+ return RC_OK()
+
+
+ @__console
+ def disconnect_line (self, line):
+ self.disconnect()
+
+
+ @__console
+ def reset_line (self, line):
+ '''Reset ports - if no ports are provided all acquired ports will be reset'''
+
+ parser = parsing_opts.gen_parser(self,
+ "reset",
+ self.reset_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True)
+ if not opts:
+ return opts
+
+ self.reset(ports = opts.ports)
+
+ return RC_OK()
+
+
+
+ @__console
+ def start_line (self, line):
+ '''Start selected traffic on specified ports on TRex\n'''
+ # define a parser
+ parser = parsing_opts.gen_parser(self,
+ "start",
+ self.start_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.TOTAL,
+ parsing_opts.FORCE,
+ parsing_opts.FILE_PATH,
+ parsing_opts.DURATION,
+ parsing_opts.TUNABLES,
+ parsing_opts.MULTIPLIER_STRICT,
+ parsing_opts.DRY_RUN,
+ parsing_opts.CORE_MASK_GROUP)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True)
+ if not opts:
+ return opts
+
+ # core mask
+ if opts.core_mask is not None:
+ core_mask = opts.core_mask
+ else:
+ core_mask = self.CORE_MASK_PIN if opts.pin_cores else self.CORE_MASK_SPLIT
+
+ # just for sanity - will be checked on the API as well
+ self.__decode_core_mask(opts.ports, core_mask)
+
+ active_ports = list_intersect(self.get_active_ports(), opts.ports)
+ if active_ports:
+ if not opts.force:
+ msg = "Port(s) {0} are active - please stop them or add '--force'\n".format(active_ports)
+ self.logger.log(format_text(msg, 'bold'))
+ return RC_ERR(msg)
+ else:
+ self.stop(active_ports)
+
+
+ # process tunables
+ if type(opts.tunables) is dict:
+ tunables = opts.tunables
+ else:
+ tunables = {}
+
+
+ # remove all streams
+ self.remove_all_streams(opts.ports)
+
+ # pack the profile
+ try:
+ for port in opts.ports:
+
+ profile = STLProfile.load(opts.file[0],
+ direction = tunables.get('direction', port % 2),
+ port_id = port,
+ **tunables)
+
+ self.add_streams(profile.get_streams(), ports = port)
+
+ except STLError as e:
+ error = 'Unknown error.'
+ # keep the last non-empty error line; don't shadow the console 'line' argument
+ for err_line in e.brief().split('\n'):
+ if err_line:
+ error = err_line
+ msg = format_text("\nError loading profile '{0}'".format(opts.file[0]), 'bold')
+ self.logger.log(msg + '\n')
+ self.logger.log(e.brief() + "\n")
+ return RC_ERR("%s: %s" % (msg, error))
+
+
+ if opts.dry:
+ self.validate(opts.ports, opts.mult, opts.duration, opts.total)
+ else:
+
+ self.start(opts.ports,
+ opts.mult,
+ opts.force,
+ opts.duration,
+ opts.total,
+ core_mask)
+
+ return RC_OK()
+
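+ # An illustrative console invocation (assuming the conventional short
+ # flags for the file, multiplier, port and duration options listed above):
+ #
+ # trex> start -f stl/imix.py -m 1mpps -p 0 1 -d 30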
+
+
+ @__console
+ def stop_line (self, line):
+ '''Stop active traffic on specified ports on TRex\n'''
+ parser = parsing_opts.gen_parser(self,
+ "stop",
+ self.stop_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_active_ports(), verify_acquired = True)
+ if not opts:
+ return opts
+
+
+ # find the relevant ports
+ ports = list_intersect(opts.ports, self.get_active_ports())
+ if not ports:
+ if not opts.ports:
+ msg = 'stop - no active ports'
+ else:
+ msg = 'stop - no active traffic on ports {0}'.format(opts.ports)
+
+ self.logger.log(msg)
+ return RC_ERR(msg)
+
+ # call API
+ self.stop(ports)
+
+ return RC_OK()
+
+
+ @__console
+ def update_line (self, line):
+ '''Update the rate of currently active traffic on port(s)\n'''
+ parser = parsing_opts.gen_parser(self,
+ "update",
+ self.update_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.MULTIPLIER,
+ parsing_opts.TOTAL,
+ parsing_opts.FORCE)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_active_ports(), verify_acquired = True)
+ if not opts:
+ return opts
+
+
+ # find the relevant ports
+ ports = list_intersect(opts.ports, self.get_active_ports())
+ if not ports:
+ if not opts.ports:
+ msg = 'update - no active ports'
+ else:
+ msg = 'update - no active traffic on ports {0}'.format(opts.ports)
+
+ self.logger.log(msg)
+ return RC_ERR(msg)
+
+ self.update(ports, opts.mult, opts.total, opts.force)
+
+ return RC_OK()
+
+
+ @__console
+ def pause_line (self, line):
+ '''Pause active traffic on specified ports on TRex\n'''
+ parser = parsing_opts.gen_parser(self,
+ "pause",
+ self.pause_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_transmitting_ports(), verify_acquired = True)
+ if not opts:
+ return opts
+
+ # check for already paused case
+ if opts.ports and is_sub_list(opts.ports, self.get_paused_ports()):
+ msg = 'pause - all of port(s) {0} are already paused'.format(opts.ports)
+ self.logger.log(msg)
+ return RC_ERR(msg)
+
+ # find the relevant ports
+ ports = list_intersect(opts.ports, self.get_transmitting_ports())
+ if not ports:
+ if not opts.ports:
+ msg = 'pause - no transmitting ports'
+ else:
+ msg = 'pause - none of ports {0} are transmitting'.format(opts.ports)
+
+ self.logger.log(msg)
+ return RC_ERR(msg)
+
+ self.pause(ports)
+
+ return RC_OK()
+
+
+ @__console
+ def resume_line (self, line):
+ '''Resume active traffic on specified ports on TRex\n'''
+ parser = parsing_opts.gen_parser(self,
+ "resume",
+ self.resume_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_paused_ports(), verify_acquired = True)
+ if not opts:
+ return opts
+
+ # find the relevant ports
+ ports = list_intersect(opts.ports, self.get_paused_ports())
+ if not ports:
+ if not opts.ports:
+ msg = 'resume - no paused ports'
+ else:
+ msg = 'resume - none of ports {0} are paused'.format(opts.ports)
+
+ self.logger.log(msg)
+ return RC_ERR(msg)
+
+
+ self.resume(ports)
+
+ # a truthy RC makes the __console decorator print the elapsed time
+ return RC_OK()
+
+
+ @__console
+ def clear_stats_line (self, line):
+ '''Clear cached local statistics\n'''
+ # define a parser
+ parser = parsing_opts.gen_parser(self,
+ "clear",
+ self.clear_stats_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL)
+
+ opts = parser.parse_args(line.split())
+
+ if not opts:
+ return opts
+
+ self.clear_stats(opts.ports)
+
+ return RC_OK()
+
+
+ @__console
+ def show_stats_line (self, line):
+ '''Get statistics from TRex server by port\n'''
+ # define a parser
+ parser = parsing_opts.gen_parser(self,
+ "stats",
+ self.show_stats_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.STATS_MASK)
+
+ opts = parser.parse_args(line.split())
+
+ if not opts:
+ return opts
+
+ # determine stats mask
+ mask = self.__get_mask_keys(**self.__filter_namespace_args(opts, trex_stl_stats.ALL_STATS_OPTS))
+ if not mask:
+ # no filter was given - default to the compact view
+ mask = trex_stl_stats.COMPACT
+
+ stats_opts = common.list_intersect(trex_stl_stats.ALL_STATS_OPTS, mask)
+
+ stats = self._get_formatted_stats(opts.ports, mask)
+
+
+ # print stats to screen
+ for stat_type, stat_data in stats.items():
+ text_tables.print_table_with_header(stat_data.text_table, stat_type)
+
+
+ @__console
+ def show_streams_line(self, line):
+ '''Get stream statistics from TRex server by port\n'''
+ # define a parser
+ parser = parsing_opts.gen_parser(self,
+ "streams",
+ self.show_streams_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.STREAMS_MASK)
+
+ opts = parser.parse_args(line.split())
+
+ if not opts:
+ return opts
+
+ streams = self._get_streams(opts.ports, set(opts.streams))
+ if not streams:
+ self.logger.log(format_text("No streams found with desired filter.\n", "bold", "magenta"))
+
+ else:
+ # print stats to screen
+ for stream_hdr, port_streams_data in streams.items():
+ text_tables.print_table_with_header(port_streams_data.text_table,
+ header= stream_hdr.split(":")[0] + ":",
+ untouched_header= stream_hdr.split(":")[1])
+
+
+
+
+ @__console
+ def validate_line (self, line):
+ '''Validates port(s) stream configuration\n'''
+
+ parser = parsing_opts.gen_parser(self,
+ "validate",
+ self.validate_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL)
+
+ opts = parser.parse_args(line.split())
+ if not opts:
+ return opts
+
+ self.validate(opts.ports)
+
+
+
+
+ @__console
+ def push_line (self, line):
+ '''Push a pcap file '''
+
+ parser = parsing_opts.gen_parser(self,
+ "push",
+ self.push_line.__doc__,
+ parsing_opts.FILE_PATH,
+ parsing_opts.REMOTE_FILE,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.COUNT,
+ parsing_opts.DURATION,
+ parsing_opts.IPG,
+ parsing_opts.SPEEDUP,
+ parsing_opts.FORCE,
+ parsing_opts.DUAL)
+
+ opts = parser.parse_args(line.split(), verify_acquired = True)
+ if not opts:
+ return opts
+
+ active_ports = list(set(self.get_active_ports()).intersection(opts.ports))
+
+ if active_ports:
+ if not opts.force:
+ msg = "Port(s) {0} are active - please stop them or add '--force'\n".format(active_ports)
+ self.logger.log(format_text(msg, 'bold'))
+ return RC_ERR(msg)
+ else:
+ self.stop(active_ports)
+
+
+ if opts.remote:
+ self.push_remote(opts.file[0],
+ ports = opts.ports,
+ ipg_usec = opts.ipg_usec,
+ speedup = opts.speedup,
+ count = opts.count,
+ duration = opts.duration,
+ is_dual = opts.dual)
+
+ else:
+ self.push_pcap(opts.file[0],
+ ports = opts.ports,
+ ipg_usec = opts.ipg_usec,
+ speedup = opts.speedup,
+ count = opts.count,
+ duration = opts.duration,
+ force = opts.force,
+ is_dual = opts.dual)
+
+
+
+ return RC_OK()
+
+
+
+ @__console
+ def set_port_attr_line (self, line):
+ '''Sets port attributes '''
+
+ parser = parsing_opts.gen_parser(self,
+ "port_attr",
+ self.set_port_attr_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.PROMISCUOUS,
+ parsing_opts.LINK_STATUS,
+ parsing_opts.LED_STATUS,
+ parsing_opts.FLOW_CTRL,
+ parsing_opts.SUPPORTED,
+ )
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True)
+ if not opts:
+ return opts
+
+ opts.prom = parsing_opts.ON_OFF_DICT.get(opts.prom)
+ opts.link = parsing_opts.UP_DOWN_DICT.get(opts.link)
+ opts.led = parsing_opts.ON_OFF_DICT.get(opts.led)
+ opts.flow_ctrl = parsing_opts.FLOW_CTRL_DICT.get(opts.flow_ctrl)
+
+ # if no attributes - fall back to printing the status
+ if all(opt is None for opt in [opts.prom, opts.link, opts.led, opts.flow_ctrl, opts.supp]): # works on Python 3, where filter() returns an always-truthy iterator
+ self.show_stats_line("--ps --port {0}".format(' '.join(str(port) for port in opts.ports)))
+ return
+
+ if opts.supp:
+ info = self.ports[0].get_info() # assume for now all ports are same
+ print('')
+ print('Supported attributes for current NICs:')
+ print(' Promiscuous: yes')
+ print(' Link status: %s' % info['link_change_supported'])
+ print(' LED status: %s' % info['led_change_supported'])
+ print(' Flow control: %s' % info['fc_supported'])
+ print('')
+ else:
+ return self.set_port_attr(opts.ports, opts.prom, opts.link, opts.led, opts.flow_ctrl)
+
+
+ @__console
+ def show_profile_line (self, line):
+ '''Shows profile information'''
+
+ parser = parsing_opts.gen_parser(self,
+ "port",
+ self.show_profile_line.__doc__,
+ parsing_opts.FILE_PATH)
+
+ opts = parser.parse_args(line.split())
+ if not opts:
+ return opts
+
+ info = STLProfile.get_info(opts.file[0])
+
+ self.logger.log(format_text('\nProfile Information:\n', 'bold'))
+
+ # general info
+ self.logger.log(format_text('\nGeneral Information:', 'underline'))
+ self.logger.log('Filename: {:^12}'.format(opts.file[0]))
+ self.logger.log('Stream count: {:^12}'.format(info['stream_count']))
+
+ # specific info
+ profile_type = info['type']
+ self.logger.log(format_text('\nSpecific Information:', 'underline'))
+
+ if profile_type == 'python':
+ self.logger.log('Type: {:^12}'.format('Python Module'))
+ self.logger.log('Tunables: {:^12}'.format(str(['{0} = {1}'.format(k ,v) for k, v in info['tunables'].items()])))
+
+ elif profile_type == 'yaml':
+ self.logger.log('Type: {:^12}'.format('YAML'))
+
+ elif profile_type == 'pcap':
+ self.logger.log('Type: {:^12}'.format('PCAP file'))
+
+ self.logger.log("")
+
+
+ @__console
+ def get_events_line (self, line):
+ '''Shows events received from the server\n'''
+
+ x = [parsing_opts.ArgumentPack(['-c','--clear'],
+ {'action' : "store_true",
+ 'default': False,
+ 'help': "clear the events log"}),
+
+ parsing_opts.ArgumentPack(['-i','--info'],
+ {'action' : "store_true",
+ 'default': False,
+ 'help': "show info events"}),
+
+ parsing_opts.ArgumentPack(['-w','--warn'],
+ {'action' : "store_true",
+ 'default': False,
+ 'help': "show warning events"}),
+
+ ]
+
+
+ parser = parsing_opts.gen_parser(self,
+ "events",
+ self.get_events_line.__doc__,
+ *x)
+
+ opts = parser.parse_args(line.split())
+ if not opts:
+ return opts
+
+
+ ev_type_filter = []
+
+ if opts.info:
+ ev_type_filter.append('info')
+
+ if opts.warn:
+ ev_type_filter.append('warning')
+
+ if not ev_type_filter:
+ ev_type_filter = None
+
+ events = self.get_events(ev_type_filter)
+ for ev in events:
+ self.logger.log(ev)
+
+ if opts.clear:
+ self.clear_events()
+
+ def generate_prompt (self, prefix = 'trex'):
+ if not self.is_connected():
+ return "{0}(offline)>".format(prefix)
+
+ elif not self.get_acquired_ports():
+ return "{0}(read-only)>".format(prefix)
+
+ elif self.is_all_ports_acquired():
+ return "{0}>".format(prefix)
+
+ else:
+ return "{0} {1}>".format(prefix, self.get_acquired_ports())
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_exceptions.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_exceptions.py
new file mode 100644
index 00000000..2ca92cb8
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_exceptions.py
@@ -0,0 +1,71 @@
+import os
+import sys
+import traceback
+
+from .utils.text_opts import *
+
+try:
+ basestring
+except NameError:
+ basestring = str
+
+# basic error for API
+class STLError(Exception):
+ def __init__ (self, msg):
+ self.msg = str(msg)
+ self.tb = traceback.extract_stack()
+
+ def __str__ (self):
+
+ fname = os.path.split(self.tb[-2][0])[1]
+ lineno = self.tb[-2][1]
+ func = self.tb[-2][2]
+ src = self.tb[-2][3]
+
+ s = "\n******\n"
+ s += "Error at {0}:{1} - '{2}'\n\n".format(format_text(fname, 'bold'), format_text(lineno, 'bold'), format_text(src.strip(), 'bold'))
+ s += "specific error:\n\n{0}\n".format(format_text(self.msg, 'bold'))
+
+ return s
+
+ def brief (self):
+ return self.msg
+
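+# A minimal handling sketch (illustrative): callers typically catch
+# STLError, using brief() for one-line logs and str(e) for the full
+# formatted block:
+#
+# try:
+# c.start(ports = [0])
+# except STLError as e:
+# print(e.brief())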
+
+# raised when the client state is invalid for operation
+class STLStateError(STLError):
+ def __init__ (self, op, state):
+ self.msg = "Operation '{0}' is not valid while '{1}'".format(op, state)
+ self.tb = traceback.extract_stack()
+
+# port state error
+class STLPortStateError(STLError):
+ def __init__ (self, port, op, state):
+ self.msg = "Operation '{0}' on port(s) '{1}' is not valid while port(s) '{2}'".format(op, port, state)
+ self.tb = traceback.extract_stack()
+
+# raised when argument value is not valid for operation
+class STLArgumentError(STLError):
+ def __init__ (self, name, got, valid_values = None, extended = None):
+ self.tb = traceback.extract_stack()
+ self.msg = "Argument: '{0}' invalid value: '{1}'".format(name, got)
+ if valid_values:
+ self.msg += " - valid values are '{0}'".format(valid_values)
+
+ if extended:
+ self.msg += "\n{0}".format(extended)
+
+# raised when argument type is not valid for operation
+class STLTypeError(STLError):
+ def __init__ (self, arg_name, arg_type, valid_types):
+ self.tb = traceback.extract_stack()
+ self.msg = "Argument: '%s' invalid type: '%s', expecting type(s): %s." % (arg_name, arg_type.__name__,
+ [t.__name__ for t in valid_types] if isinstance(valid_types, tuple) else valid_types.__name__)
+
+# raised when timeout occurs
+class STLTimeoutError(STLError):
+ def __init__ (self, timeout):
+ self.tb = traceback.extract_stack()
+ self.msg = "Timeout: operation took more than '{0}' seconds".format(timeout)
+
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py
new file mode 100644
index 00000000..306302dc
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py
@@ -0,0 +1,65 @@
+import sys
+import os
+import warnings
+import platform
+
+# if not set - set it to default
+TREX_STL_EXT_PATH = os.environ.get('TREX_STL_EXT_PATH')
+
+# take default
+if not TREX_STL_EXT_PATH or not os.path.exists(TREX_STL_EXT_PATH):
+ CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+ TREX_STL_EXT_PATH = os.path.normpath(os.path.join(CURRENT_PATH, os.pardir, os.pardir, 'external_libs'))
+if not os.path.exists(TREX_STL_EXT_PATH):
+ # ../../../../external_libs
+ TREX_STL_EXT_PATH = os.path.normpath(os.path.join(CURRENT_PATH, os.pardir, os.pardir, os.pardir, os.pardir, 'external_libs'))
+if not os.path.exists(TREX_STL_EXT_PATH):
+ raise Exception('Could not determine path of external_libs, try setting TREX_STL_EXT_PATH variable')
+
+# the modules required
+# py-dep requires python2/python3 directories
+# arch-dep requires cel59/fedora and 32bit/64bit directories
+CLIENT_UTILS_MODULES = [ {'name': 'texttable-0.8.4'},
+ {'name': 'pyyaml-3.11', 'py-dep': True},
+ {'name': 'scapy-2.3.1', 'py-dep': True},
+ {'name': 'pyzmq-14.5.0', 'py-dep': True, 'arch-dep': True}
+ ]
+
+
+def generate_module_path (module, is_python3, is_64bit, is_cel):
+ platform_path = [module['name']]
+
+ if module.get('py-dep'):
+ platform_path.append('python3' if is_python3 else 'python2')
+
+ if module.get('arch-dep'):
+ platform_path.append('cel59' if is_cel else 'fedora18')
+ platform_path.append('64bit' if is_64bit else '32bit')
+
+ return os.path.normcase(os.path.join(TREX_STL_EXT_PATH, *platform_path))
+
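+# For example, pyzmq on a 64-bit Fedora machine running Python 3 resolves
+# to <TREX_STL_EXT_PATH>/pyzmq-14.5.0/python3/fedora18/64bit, matching the
+# directory layout described in the comment above.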
+
+def import_module_list(modules_list):
+
+ # platform data
+ is_64bit = platform.architecture()[0] == '64bit'
+ is_python3 = (sys.version_info >= (3, 0))
+ is_cel = os.path.exists('/etc/system-profile')
+
+ # regular modules
+ for p in modules_list:
+ full_path = generate_module_path(p, is_python3, is_64bit, is_cel)
+
+ if not os.path.exists(full_path):
+ print("Unable to find required module library: '{0}'".format(p['name']))
+ print("Please provide the correct path using TREX_STL_EXT_PATH variable")
+ print("current path used: '{0}'".format(full_path))
+ exit(1)
+
+ sys.path.insert(1, full_path)
+
+
+
+
+
+import_module_list(CLIENT_UTILS_MODULES)
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_hltapi.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_hltapi.py
new file mode 100755
index 00000000..464869aa
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_hltapi.py
@@ -0,0 +1,1595 @@
+#!/router/bin/python
+
+'''
+Supported functions/arguments/defaults:
+'''
+
+connect_kwargs = {
+ 'device': 'localhost', # ip or hostname of TRex
+ 'port_list': None, # list of ports
+ 'username': 'TRexUser',
+ 'reset': True,
+ 'break_locks': False,
+}
+
+cleanup_session_kwargs = {
+ 'maintain_lock': False, # release ports at the end or not
+ 'port_list': None,
+ 'port_handle': None,
+}
+
+traffic_config_kwargs = {
+ 'mode': None, # ( create | modify | remove | reset )
+ 'split_by_cores': 'split', # ( split | duplicate | single ) TRex extension: split = split traffic by cores, duplicate = duplicate traffic for all cores, single = run only with a single core (not implemented yet)
+ 'load_profile': None, # TRex extension: path to file with a stream profile (stream builder parameters will be ignored; limitation: cannot be modified later)
+ 'consistent_random': False, # TRex extension: False (default) = random sequence differs every run, True = random sequence is the same every run
+ 'ignore_macs': False, # TRex extension: True = use MACs from server configuration, no MAC VM (workaround for lack of ARP)
+ 'disable_flow_stats': False, # TRex extension: True = don't use flow stats for this stream (workaround for the limitation on packet types supported by flow_stats)
+ 'flow_stats_id': None, # TRex extension: uint, for use with STLHltStream, specifies the id for flow stats (see the stateless manual for flow_stats details)
+ 'port_handle': None,
+ 'port_handle2': None,
+ 'bidirectional': False,
+ # stream builder parameters
+ 'transmit_mode': 'continuous', # ( continuous | multi_burst | single_burst )
+ 'rate_pps': None,
+ 'rate_bps': None,
+ 'rate_percent': 10,
+ 'stream_id': None,
+ 'name': None,
+ 'direction': 0, # TRex extension: 1 = exchange sources and destinations, 0 = do nothing
+ 'pkts_per_burst': 1,
+ 'burst_loop_count': 1,
+ 'inter_burst_gap': 12,
+ 'length_mode': 'fixed', # ( auto | fixed | increment | decrement | random | imix )
+ 'l3_imix1_size': 64,
+ 'l3_imix1_ratio': 7,
+ 'l3_imix2_size': 570,
+ 'l3_imix2_ratio': 4,
+ 'l3_imix3_size': 1518,
+ 'l3_imix3_ratio': 1,
+ 'l3_imix4_size': 9230,
+ 'l3_imix4_ratio': 0,
+ #L2
+ 'frame_size': 64,
+ 'frame_size_min': 64,
+ 'frame_size_max': 64,
+ 'frame_size_step': 1,
+ 'l2_encap': 'ethernet_ii', # ( ethernet_ii | ethernet_ii_vlan )
+ 'mac_src': '00:00:01:00:00:01',
+ 'mac_dst': '00:00:00:00:00:00',
+ 'mac_src2': '00:00:01:00:00:01',
+ 'mac_dst2': '00:00:00:00:00:00',
+ 'mac_src_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'mac_src_step': 1,
+ 'mac_src_count': 1,
+ 'mac_dst_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'mac_dst_step': 1,
+ 'mac_dst_count': 1,
+ 'mac_src2_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'mac_src2_step': 1,
+ 'mac_src2_count': 1,
+ 'mac_dst2_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'mac_dst2_step': 1,
+ 'mac_dst2_count': 1,
+ # vlan options below can have multiple values for nested Dot1Q headers
+ 'vlan_user_priority': 1,
+ 'vlan_priority_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'vlan_priority_count': 1,
+ 'vlan_priority_step': 1,
+ 'vlan_id': 0,
+ 'vlan_id_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'vlan_id_count': 1,
+ 'vlan_id_step': 1,
+ 'vlan_cfi': 1,
+ 'vlan_protocol_tag_id': None,
+ #L3, general
+ 'l3_protocol': None, # ( ipv4 | ipv6 )
+ 'l3_length_min': 110,
+ 'l3_length_max': 238,
+ 'l3_length_step': 1,
+ #L3, IPv4
+ 'ip_precedence': 0,
+ 'ip_tos_field': 0,
+ 'ip_mbz': 0,
+ 'ip_delay': 0,
+ 'ip_throughput': 0,
+ 'ip_reliability': 0,
+ 'ip_cost': 0,
+ 'ip_reserved': 0,
+ 'ip_dscp': 0,
+ 'ip_cu': 0,
+ 'l3_length': None,
+ 'ip_id': 0,
+ 'ip_fragment_offset': 0,
+ 'ip_ttl': 64,
+ 'ip_checksum': None,
+ 'ip_src_addr': '0.0.0.0',
+ 'ip_dst_addr': '192.0.0.1',
+ 'ip_src_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'ip_src_step': 1, # ip or number
+ 'ip_src_count': 1,
+ 'ip_dst_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'ip_dst_step': 1, # ip or number
+ 'ip_dst_count': 1,
+ #L3, IPv6
+ 'ipv6_traffic_class': 0,
+ 'ipv6_flow_label': 0,
+ 'ipv6_length': None,
+ 'ipv6_next_header': None,
+ 'ipv6_hop_limit': 64,
+ 'ipv6_src_addr': 'fe80:0:0:0:0:0:0:12',
+ 'ipv6_dst_addr': 'fe80:0:0:0:0:0:0:22',
+ 'ipv6_src_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'ipv6_src_step': 1, # we are changing only 32 lowest bits; can be ipv6 or number
+ 'ipv6_src_count': 1,
+ 'ipv6_dst_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'ipv6_dst_step': 1, # we are changing only 32 lowest bits; can be ipv6 or number
+ 'ipv6_dst_count': 1,
+ #L4, TCP
+ 'l4_protocol': None, # ( tcp | udp )
+ 'tcp_src_port': 1024,
+ 'tcp_dst_port': 80,
+ 'tcp_seq_num': 1,
+ 'tcp_ack_num': 1,
+ 'tcp_data_offset': 5,
+ 'tcp_fin_flag': 0,
+ 'tcp_syn_flag': 0,
+ 'tcp_rst_flag': 0,
+ 'tcp_psh_flag': 0,
+ 'tcp_ack_flag': 0,
+ 'tcp_urg_flag': 0,
+ 'tcp_window': 4069,
+ 'tcp_checksum': None,
+ 'tcp_urgent_ptr': 0,
+ 'tcp_src_port_mode': 'increment', # ( increment | decrement | random )
+ 'tcp_src_port_step': 1,
+ 'tcp_src_port_count': 1,
+ 'tcp_dst_port_mode': 'increment', # ( increment | decrement | random )
+ 'tcp_dst_port_step': 1,
+ 'tcp_dst_port_count': 1,
+ # L4, UDP
+ 'udp_src_port': 1024,
+ 'udp_dst_port': 80,
+ 'udp_length': None,
+ 'udp_dst_port_mode': 'increment', # ( increment | decrement | random )
+ 'udp_src_port_step': 1,
+ 'udp_src_port_count': 1,
+ 'udp_src_port_mode': 'increment', # ( increment | decrement | random )
+ 'udp_dst_port_step': 1,
+ 'udp_dst_port_count': 1,
+}
+
+traffic_control_kwargs = {
+ 'action': None, # ( clear_stats | run | stop | sync_run | poll | reset )
+ 'port_handle': None,
+}
+
+traffic_stats_kwargs = {
+ 'mode': 'aggregate', # ( all | aggregate | streams )
+ 'port_handle': None,
+}
+
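+# A minimal session sketch (illustrative values only), tying the kwarg
+# dictionaries above to the CTRexHltApi calls defined below; every call
+# returns a dict with 'status' (1 = OK, 0 = error) and 'log':
+#
+# hlt = CTRexHltApi()
+# res = hlt.connect(device = '192.168.0.1', port_list = [0, 1], reset = True)
+# res = hlt.traffic_config(mode = 'create', port_handle = 0, rate_pps = 1000)
+# res = hlt.traffic_control(action = 'run', port_handle = [0, 1])
+# res = hlt.traffic_stats(mode = 'aggregate', port_handle = [0, 1])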
+
+import sys
+import os
+import socket
+import copy
+from collections import defaultdict
+
+from .api import *
+from .trex_stl_types import *
+from .utils.common import get_number
+
+class HLT_ERR(dict):
+ def __init__(self, log = 'Unknown error', **kwargs):
+ dict.__init__(self, {'status': 0})
+ if type(log) is dict:
+ dict.update(self, log)
+ elif type(log) is str and not log.startswith('[ERR]'):
+ self['log'] = '[ERR] ' + log
+ else:
+ self['log'] = log
+ dict.update(self, kwargs)
+
+class HLT_OK(dict):
+ def __init__(self, init_dict = {}, **kwargs):
+ dict.__init__(self, {'status': 1, 'log': None})
+ dict.update(self, init_dict)
+ dict.update(self, kwargs)
+
+def merge_kwargs(default_kwargs, user_kwargs):
+ kwargs = copy.deepcopy(default_kwargs)
+ for key, value in user_kwargs.items():
+ if key in kwargs:
+ kwargs[key] = value
+ elif key in ('save_to_yaml', 'save_to_pcap', 'pg_id'): # internal arguments
+ kwargs[key] = value
+ else:
+ print("Warning: provided parameter '%s' is not supported" % key)
+ return kwargs
+
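+# For example (illustrative), merge_kwargs(traffic_config_kwargs,
+# {'rate_pps': 100, 'bogus': 1}) returns the full defaults with rate_pps
+# overridden and prints a warning for the unsupported 'bogus' key.
+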
+# change MACs from formats 01-23-45-67-89-10 or 0123.4567.8910 or {01 23 45 67 89 10} to Scapy format 01:23:45:67:89:10
+def correct_macs(kwargs):
+ list_of_mac_args = ['mac_src', 'mac_dst', 'mac_src2', 'mac_dst2']
+ list_of_mac_steps = ['mac_src_step', 'mac_dst_step', 'mac_src2_step', 'mac_dst2_step']
+ for mac_arg in list_of_mac_args + list_of_mac_steps:
+ if mac_arg in kwargs:
+ mac_value = kwargs[mac_arg]
+ if is_integer(mac_value) and mac_arg in list_of_mac_steps: # step can be number
+ continue
+ if type(mac_value) is not str: raise STLError('Argument %s should be str' % mac_arg)
+ mac_value = mac_value.replace('{', '').replace('}', '').strip().replace('-', ' ').replace(':', ' ').replace('.', ' ')
+ if mac_value[4] == ' ' and mac_value[9] == ' ':
+ mac_value = ' '.join([mac_value[0:2], mac_value[2:7], mac_value[7:12], mac_value[12:14]])
+ mac_value = ':'.join(mac_value.split())
+ try:
+ mac2str(mac_value) # verify we are ok
+ kwargs[mac_arg] = mac_value
+ except:
+ raise STLError('Incorrect MAC %s=%s, please use 01:23:45:67:89:10 or 01-23-45-67-89-10 or 0123.4567.8910 or {01 23 45 67 89 10}' % (mac_arg, kwargs[mac_arg]))
+
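+# For example, correct_macs({'mac_src': '0123.4567.8910'}) rewrites the
+# value in place to '01:23:45:67:89:10'.
+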
+def is_true(input):
+ if input in (True, 'True', 'true', 1, '1', 'enable', 'Enable', 'Yes', 'yes', 'y', 'Y', 'enabled', 'Enabled'):
+ return True
+ return False
+
+def error(err = None):
+ if not err:
+ raise Exception('Unknown exception, see traceback')
+ if type(err) is str and not err.startswith('[ERR]'):
+ err = '[ERR] ' + err
+ print(err)
+ sys.exit(1)
+
+def check_res(res):
+ if res['status'] == 0:
+ error('Encountered error:\n%s' % res['log'])
+ return res
+
+def print_brief_stats(res):
+ title_str = ' '*3
+ tx_str = 'TX:'
+ rx_str = 'RX:'
+ for port_id, stat in res.items():
+ if type(port_id) is not int:
+ continue
+ title_str += ' '*10 + 'Port%s' % port_id
+ tx_str += '%15s' % res[port_id]['aggregate']['tx']['total_pkts']
+ rx_str += '%15s' % res[port_id]['aggregate']['rx']['total_pkts']
+ print(title_str)
+ print(tx_str)
+ print(rx_str)
+
+def wait_with_progress(seconds):
+ for i in range(0, seconds):
+ time.sleep(1)
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ print('')
+
+# dict of streams per port
+# hlt_history = False: holds a list of stream_id values per port
+# hlt_history = True: acts as a dictionary (per port) of stream_id -> hlt arguments used for the build
+class CStreamsPerPort(defaultdict):
+ def __init__(self, hlt_history = False):
+ self.hlt_history = hlt_history
+ if self.hlt_history:
+ defaultdict.__init__(self, dict)
+ else:
+ defaultdict.__init__(self, list)
+
+ def get_stream_list(self, ports_list = None):
+ if self.hlt_history:
+ if ports_list is None:
+ ports_list = self.keys()
+ elif not isinstance(ports_list, list):
+ ports_list = [ports_list]
+ ret = {}
+ for port in ports_list:
+ ret[port] = self[port].keys()
+ return ret
+ else:
+ return self
+
+ # add to stream_id list per port, no HLT args, res = HLT result
+ def add_streams_from_res(self, res):
+ if self.hlt_history: raise STLError('CStreamsPerPort: this object is not meant for HLT history, try init with hlt_history = False')
+ if not isinstance(res, dict): raise STLError('CStreamsPerPort: res should be dict')
+ if res.get('status') != 1: raise STLError('CStreamsPerPort: res has status %s' % res.get('status'))
+ res_streams = res.get('stream_id')
+ if not isinstance(res_streams, dict):
+ raise STLError('CStreamsPerPort: stream_id in res should be dict')
+ for port, port_stream_ids in res_streams.items():
+ if type(port_stream_ids) is not list:
+ port_stream_ids = [port_stream_ids]
+ self[port].extend(port_stream_ids)
+
+ # save HLT args to modify streams later
+ def save_stream_args(self, ports_list, stream_id, stream_hlt_args):
+ if stream_id is None: raise STLError('CStreamsPerPort: no stream_id in stream')
+ if stream_hlt_args.get('load_profile'): return # can't modify profiles, don't save
+ if not self.hlt_history: raise STLError('CStreamsPerPort: this object works only with HLT history, try init with hlt_history = True')
+ if not is_integer(stream_id): raise STLError('CStreamsPerPort: stream_id should be number')
+ if not isinstance(stream_hlt_args, dict): raise STLError('CStreamsPerPort: stream_hlt_args should be dict')
+ if not isinstance(ports_list, list):
+ ports_list = [ports_list]
+ for port in ports_list:
+ if stream_id not in self[port]:
+ self[port][stream_id] = {}
+ self[port][stream_id].update(stream_hlt_args)
+
+ def remove_stream(self, ports_list, stream_id):
+ if not isinstance(ports_list, list):
+ ports_list = [ports_list]
+ if not is_integer(stream_id):
+ raise STLError('CStreamsPerPort: stream_id should be number')
+ for port in ports_list:
+ if port not in self:
+ raise STLError('CStreamsPerPort: port %s not defined' % port)
+ if stream_id not in self[port]:
+ raise STLError('CStreamsPerPort: stream_id %s not found at port %s' % (stream_id, port))
+ if self.hlt_history:
+ del self[port][stream_id]
+ else:
+ self[port].remove(stream_id) # list.pop() would remove by index, not by value
+
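+# A usage sketch (illustrative): with hlt_history = True the object maps
+# port -> {pg_id: hlt kwargs}, which is what lets traffic_config's
+# 'modify' mode replay and patch the original arguments:
+#
+# h = CStreamsPerPort(hlt_history = True)
+# h.save_stream_args([0], 5, {'rate_pps': 100})
+# h.get_stream_list([0]) # -> {0: <keys of port 0, i.e. [5]>}
+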
+class CTRexHltApi(object):
+
+ def __init__(self, verbose = 0):
+ self.trex_client = None
+ self.verbose = verbose
+ self._last_pg_id = 0 # pg_id acts as stream_handle
+ self._streams_history = {} # streams in format of HLT arguments for modify later
+ self._native_handle_by_pg_id = {} # pg_id -> native handle + port
+ self._pg_id_by_id = {} # stream_id -> pg_id
+ self._pg_id_by_name = {} # name -> pg_id
+
+###########################
+# Session functions #
+###########################
+
+ def connect(self, **user_kwargs):
+ kwargs = merge_kwargs(connect_kwargs, user_kwargs)
+ device = kwargs['device']
+ try:
+ device = socket.gethostbyname(device) # work with ip
+ except Exception: # give it another try
+ try:
+ device = socket.gethostbyname(device)
+ except Exception as e:
+ return HLT_ERR('Could not translate hostname "%s" to IP: %s' % (device, e))
+
+ try:
+ self.trex_client = STLClient(kwargs['username'], device, verbose_level = self.verbose)
+ except Exception as e:
+ return HLT_ERR('Could not init stateless client %s: %s' % (device, e if isinstance(e, STLError) else traceback.format_exc()))
+
+ try:
+ self.trex_client.connect()
+ except Exception as e:
+ self.trex_client = None
+ return HLT_ERR('Could not connect to device %s: %s' % (device, e if isinstance(e, STLError) else traceback.format_exc()))
+
+ # connection successfully created with server, try acquiring ports of TRex
+ try:
+ port_list = self._parse_port_list(kwargs['port_list'])
+ self.trex_client.acquire(ports = port_list, force = kwargs['break_locks'])
+ for port in port_list:
+ self._native_handle_by_pg_id[port] = {}
+ except Exception as e:
+ self.trex_client = None
+ return HLT_ERR('Could not acquire ports %s: %s' % (port_list, e if isinstance(e, STLError) else traceback.format_exc()))
+
+ # arrived here, all desired ports were successfully acquired
+ if kwargs['reset']:
+ # remove all port traffic configuration from TRex
+ try:
+ self.trex_client.stop(ports = port_list)
+ self.trex_client.reset(ports = port_list)
+ except Exception as e:
+ self.trex_client = None
+ return HLT_ERR('Error in reset traffic: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ self._streams_history = CStreamsPerPort(hlt_history = True)
+ return HLT_OK(port_handle = dict([(port_id, port_id) for port_id in port_list]))
+
+ def cleanup_session(self, **user_kwargs):
+ kwargs = merge_kwargs(cleanup_session_kwargs, user_kwargs)
+ if not kwargs['maintain_lock']:
+ # release taken ports
+ port_list = kwargs['port_list'] or kwargs['port_handle'] or 'all'
+ try:
+ if port_list == 'all':
+ port_list = self.trex_client.get_acquired_ports()
+ else:
+ port_list = self._parse_port_list(port_list)
+ except Exception as e:
+ return HLT_ERR('Unable to determine which ports to release: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+ try:
+ self.trex_client.stop(port_list)
+ except Exception as e:
+ return HLT_ERR('Unable to stop traffic %s: %s' % (port_list, e if isinstance(e, STLError) else traceback.format_exc()))
+ try:
+ self.trex_client.remove_all_streams(port_list)
+ except Exception as e:
+ return HLT_ERR('Unable to remove all streams %s: %s' % (port_list, e if isinstance(e, STLError) else traceback.format_exc()))
+ try:
+ self.trex_client.release(port_list)
+ except Exception as e:
+ return HLT_ERR('Unable to release ports %s: %s' % (port_list, e if isinstance(e, STLError) else traceback.format_exc()))
+ try:
+ self.trex_client.disconnect(stop_traffic = False, release_ports = False)
+ except Exception as e:
+ return HLT_ERR('Error disconnecting: %s' % e)
+ self.trex_client = None
+ return HLT_OK()
+
+ def interface_config(self, port_handle, mode='config'):
+ if not self.trex_client:
+ return HLT_ERR('Connect first')
+ ALLOWED_MODES = ['config', 'modify', 'destroy']
+ if mode not in ALLOWED_MODES:
+ return HLT_ERR('Mode must be one of the following values: %s' % ALLOWED_MODES)
+ # pass this function for now...
+ return HLT_ERR('interface_config not implemented yet')
+
+
+###########################
+# Traffic functions #
+###########################
+
+ def traffic_config(self, **user_kwargs):
+ if not self.trex_client:
+ return HLT_ERR('Connect first')
+ try:
+ correct_macs(user_kwargs)
+ except Exception as e:
+ return HLT_ERR(e if isinstance(e, STLError) else traceback.format_exc())
+ kwargs = merge_kwargs(traffic_config_kwargs, user_kwargs)
+ stream_id = kwargs['stream_id']
+ mode = kwargs['mode']
+ pg_id = kwargs['flow_stats_id']
+ port_handle = port_list = self._parse_port_list(kwargs['port_handle'])
+
+ ALLOWED_MODES = ['create', 'modify', 'remove', 'enable', 'disable', 'reset']
+ if mode not in ALLOWED_MODES:
+ return HLT_ERR('Mode must be one of the following values: %s' % ALLOWED_MODES)
+
+ if mode == 'reset':
+ try:
+ self.trex_client.remove_all_streams(port_handle)
+ for port in port_handle:
+ if port in self._streams_history:
+ del self._streams_history[port]
+ return HLT_OK()
+ except Exception as e:
+ return HLT_ERR('Could not reset streams at ports %s: %s' % (port_handle, e if isinstance(e, STLError) else traceback.format_exc()))
+
+ if mode == 'remove':
+ if stream_id is None:
+ return HLT_ERR('Please specify stream_id to remove.')
+ if stream_id == 'all':
+ try:
+ self.trex_client.remove_all_streams(port_handle)
+ for port in port_handle:
+ if port in self._streams_history:
+ del self._streams_history[port]
+ except Exception as e:
+ return HLT_ERR('Could not remove all streams at ports %s: %s' % (port_handle, e if isinstance(e, STLError) else traceback.format_exc()))
+ else:
+ try:
+ self._remove_stream(stream_id, port_handle)
+ except Exception as e:
+ return HLT_ERR('Could not remove streams specified by %s: %s' % (stream_id, e if isinstance(e, STLError) else traceback.format_exc()))
+ return HLT_OK()
+
+ #if mode == 'enable':
+ # stream_id = kwargs.get('stream_id')
+ # if stream_id is None:
+ # return HLT_ERR('Please specify stream_id to enable.')
+ # if stream_id not in self._streams_history:
+ # return HLT_ERR('This stream_id (%s) was not used before, please create new.' % stream_id)
+ # self._streams_history[stream_id].update(kwargs) # <- the modification
+
+ if mode == 'modify': # we remove stream and create new one with same stream_id
+ pg_id = kwargs.get('stream_id')
+ if pg_id is None:
+ return HLT_ERR('Please specify stream_id to modify.')
+
+ if len(port_handle) > 1:
+ for port in port_handle:
+ try:
+ user_kwargs['port_handle'] = port
+ res = self.traffic_config(**user_kwargs)
+ if res['status'] == 0:
+ return HLT_ERR('Error during stream modification: %s' % res['log'])
+ except Exception as e:
+ return HLT_ERR('Could not remove stream(s) %s from port(s) %s: %s' % (stream_id, port_handle, e if isinstance(e, STLError) else traceback.format_exc()))
+ return HLT_OK()
+ else:
+ if type(port_handle) is list:
+ port = port_handle[0]
+ else:
+ port = port_handle
+ if port not in self._streams_history:
+ return HLT_ERR('Port %s was not used/acquired' % port)
+ if pg_id not in self._streams_history[port]:
+ return HLT_ERR('This stream_id (%s) was not used before at port %s, please create a new one.' % (stream_id, port))
+ new_kwargs = {}
+ new_kwargs.update(self._streams_history[port][pg_id])
+ new_kwargs.update(user_kwargs)
+ user_kwargs = new_kwargs
+ try:
+ self._remove_stream(pg_id, [port])
+ except Exception as e:
+ return HLT_ERR('Could not remove stream(s) %s from port(s) %s: %s' % (stream_id, port_handle, e if isinstance(e, STLError) else traceback.format_exc()))
+
+ if mode == 'create' or mode == 'modify':
+ # create a new stream with desired attributes, starting by creating packet
+ if is_true(kwargs['bidirectional']): # two streams with opposite directions
+ del user_kwargs['bidirectional']
+ stream_per_port = {}
+ save_to_yaml = user_kwargs.get('save_to_yaml')
+ bidirect_err = 'When using bidirectional flag, '
+ if len(port_handle) != 1:
+ return HLT_ERR(bidirect_err + 'port_handle should be single port handle.')
+ port_handle = port_handle[0]
+ port_handle2 = kwargs['port_handle2']
+ if (type(port_handle2) is list and len(port_handle2) > 1) or port_handle2 is None:
+ return HLT_ERR(bidirect_err + 'port_handle2 should be single port handle.')
+ try:
+ if save_to_yaml and type(save_to_yaml) is str:
+ user_kwargs['save_to_yaml'] = save_to_yaml.replace('.yaml', '_bi1.yaml')
+ res1 = self.traffic_config(**user_kwargs)
+ if res1['status'] == 0:
+ raise STLError('Could not create bidirectional stream 1: %s' % res1['log'])
+ stream_per_port[port_handle] = res1['stream_id']
+ kwargs['direction'] = 1 - kwargs['direction'] # flip the direction for the reverse stream
+ correct_direction(user_kwargs, kwargs)
+ if save_to_yaml and type(save_to_yaml) is str:
+ user_kwargs['save_to_yaml'] = save_to_yaml.replace('.yaml', '_bi2.yaml')
+ user_kwargs['port_handle'] = port_handle2
+ res2 = self.traffic_config(**user_kwargs)
+ if res2['status'] == 0:
+ raise STLError('Could not create bidirectional stream 2: %s' % res2['log'])
+ stream_per_port[port_handle2] = res2['stream_id']
+ except Exception as e:
+ return HLT_ERR('Could not generate bidirectional traffic: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+ if mode == 'create':
+ return HLT_OK(stream_id = stream_per_port)
+ else:
+ return HLT_OK()
+
+ try:
+ if not pg_id:
+ pg_id = self._get_available_pg_id()
+ if kwargs['load_profile']:
+ stream_obj = STLProfile.load_py(kwargs['load_profile'], direction = kwargs['direction'])
+ else:
+ user_kwargs['pg_id'] = pg_id
+ stream_obj = STLHltStream(**user_kwargs)
+ except Exception as e:
+ return HLT_ERR('Could not create stream: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ # try adding the stream per ports
+ try:
+ for port in port_handle:
+ stream_id_arr = self.trex_client.add_streams(streams = stream_obj,
+ ports = port)
+ self._streams_history.save_stream_args(port, pg_id, user_kwargs)
+ if type(stream_id_arr) is not list:
+ stream_id_arr = [stream_id_arr]
+ self._native_handle_by_pg_id[port][pg_id] = stream_id_arr
+ except Exception as e:
+ return HLT_ERR('Could not add stream to ports: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+ if mode == 'create':
+ return HLT_OK(stream_id = pg_id)
+ else:
+ return HLT_OK()
+
+ return HLT_ERR('Got to the end of traffic_config, mode not implemented or forgot "return" somewhere.')
+
+ def traffic_control(self, **user_kwargs):
+ if not self.trex_client:
+ return HLT_ERR('Connect first')
+ kwargs = merge_kwargs(traffic_control_kwargs, user_kwargs)
+ action = kwargs['action']
+ port_handle = kwargs['port_handle']
+ ALLOWED_ACTIONS = ['clear_stats', 'run', 'stop', 'sync_run', 'poll', 'reset']
+ if action not in ALLOWED_ACTIONS:
+ return HLT_ERR('Action must be one of the following values: {actions}'.format(actions=ALLOWED_ACTIONS))
+
+ if action == 'run':
+ try:
+ self.trex_client.start(ports = port_handle)
+ except Exception as e:
+ return HLT_ERR('Could not start traffic: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ elif action == 'sync_run': # (clear_stats + run)
+ try:
+ self.trex_client.clear_stats(ports = port_handle)
+ self.trex_client.start(ports = port_handle)
+ except Exception as e:
+ return HLT_ERR('Unable to do sync_run: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ elif action == 'stop':
+ try:
+ self.trex_client.stop(ports = port_handle)
+ except Exception as e:
+ return HLT_ERR('Could not stop traffic: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ elif action == 'reset':
+ try:
+ self.trex_client.reset(ports = port_handle)
+ for port in port_handle:
+ if port in self._streams_history:
+ del self._streams_history[port]
+ except Exception as e:
+ return HLT_ERR('Could not reset traffic: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ elif action == 'clear_stats':
+ try:
+ self.trex_client.clear_stats(ports = port_handle)
+ except Exception as e:
+ return HLT_ERR('Could not clear stats: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ elif action != 'poll': # at poll just return 'stopped' status
+ return HLT_ERR("Action '%s' is not supported yet on TRex" % action)
+
+ try:
+ is_traffic_active = self.trex_client.is_traffic_active(ports = port_handle)
+ except Exception as e:
+ return HLT_ERR('Unable to determine ports status: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ return HLT_OK(stopped = not is_traffic_active)
+
+ def traffic_stats(self, **user_kwargs):
+ if not self.trex_client:
+ return HLT_ERR('Connect first')
+ kwargs = merge_kwargs(traffic_stats_kwargs, user_kwargs)
+ mode = kwargs['mode']
+ port_handle = kwargs['port_handle']
+ if type(port_handle) is not list:
+ port_handle = [port_handle]
+ ALLOWED_MODES = ['aggregate', 'streams', 'all']
+ if mode not in ALLOWED_MODES:
+ return HLT_ERR("'mode' must be one of the following values: %s" % ALLOWED_MODES)
+ hlt_stats_dict = dict([(port, {}) for port in port_handle])
+ try:
+ stats = self.trex_client.get_stats(port_handle)
+ if mode in ('all', 'aggregate'):
+ for port_id in port_handle:
+ port_stats = stats[port_id]
+ if is_integer(port_id):
+ hlt_stats_dict[port_id]['aggregate'] = {
+ 'tx': {
+ 'pkt_bit_rate': port_stats.get('tx_bps', 0),
+ 'pkt_byte_count': port_stats.get('obytes', 0),
+ 'pkt_count': port_stats.get('opackets', 0),
+ 'pkt_rate': port_stats.get('tx_pps', 0),
+ 'total_pkt_bytes': port_stats.get('obytes', 0),
+ 'total_pkt_rate': port_stats.get('tx_pps', 0),
+ 'total_pkts': port_stats.get('opackets', 0),
+ },
+ 'rx': {
+ 'pkt_bit_rate': port_stats.get('rx_bps', 0),
+ 'pkt_byte_count': port_stats.get('ibytes', 0),
+ 'pkt_count': port_stats.get('ipackets', 0),
+ 'pkt_rate': port_stats.get('rx_pps', 0),
+ 'total_pkt_bytes': port_stats.get('ibytes', 0),
+ 'total_pkt_rate': port_stats.get('rx_pps', 0),
+ 'total_pkts': port_stats.get('ipackets', 0),
+ }
+ }
+ if mode in ('all', 'streams'):
+ for pg_id, pg_stats in stats['flow_stats'].items():
+ for port_id in port_handle:
+ if 'stream' not in hlt_stats_dict[port_id]:
+ hlt_stats_dict[port_id]['stream'] = {}
+ hlt_stats_dict[port_id]['stream'][pg_id] = {
+ 'tx': {
+ 'total_pkts': pg_stats['tx_pkts'].get(port_id, 0),
+ 'total_pkt_bytes': pg_stats['tx_bytes'].get(port_id, 0),
+ 'total_pkts_bytes': pg_stats['tx_bytes'].get(port_id, 0),
+ 'total_pkt_bit_rate': pg_stats['tx_bps'].get(port_id, 0),
+ 'total_pkt_rate': pg_stats['tx_pps'].get(port_id, 0),
+ 'line_rate_percentage': pg_stats['tx_line_util'].get(port_id, 0),
+ },
+ 'rx': {
+ 'total_pkts': pg_stats['rx_pkts'].get(port_id, 0),
+ 'total_pkt_bytes': pg_stats['rx_bytes'].get(port_id, 0),
+ 'total_pkts_bytes': pg_stats['rx_bytes'].get(port_id, 0),
+ 'total_pkt_bit_rate': pg_stats['rx_bps'].get(port_id, 0),
+ 'total_pkt_rate': pg_stats['rx_pps'].get(port_id, 0),
+ 'line_rate_percentage': pg_stats['rx_line_util'].get(port_id, 0),
+ },
+ }
+ except Exception as e:
+ return HLT_ERR('Could not retrieve stats: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+ return HLT_OK(hlt_stats_dict)
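+
+ # A reading sketch (illustrative): aggregate counters live under
+ # res[port]['aggregate'], per-stream counters under
+ # res[port]['stream'][pg_id]:
+ #
+ # res = hlt.traffic_stats(mode = 'all', port_handle = [0])
+ # print(res[0]['aggregate']['tx']['total_pkts'])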
+
+ # timeout = maximum time to wait, in seconds
+ def wait_on_traffic(self, port_handle = None, timeout = 60):
+ try:
+ self.trex_client.wait_on_traffic(port_handle, timeout)
+ except Exception as e:
+ return HLT_ERR('Unable to run wait_on_traffic: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+###########################
+# Private functions #
+###########################
+
+ def _get_available_pg_id(self):
+ pg_id = self._last_pg_id
+ used_pg_ids = self.trex_client.get_stats()['flow_stats'].keys()
+ for i in range(65535):
+ pg_id += 1
+ if pg_id not in used_pg_ids:
+ self._last_pg_id = pg_id
+ return pg_id
+ if pg_id == 65535:
+ pg_id = 0
+ raise STLError('Could not find free pg_id in range [1, 65535].')
+
+ # remove streams from given port(s).
+ # stream_id can be:
+ # * int - exact stream_id value
+ # * list - list of stream_id values or strings (see below)
+ # * string - exact stream_id value, mix of ranges/list separated by comma: 2, 4-13
+ def _remove_stream(self, stream_id, port_handle):
+ stream_num = get_number(stream_id)
+ if stream_num is not None: # exact value of int or str
+ for port in port_handle:
+ native_handles = self._native_handle_by_pg_id[port][stream_num]
+ self.trex_client.remove_streams(native_handles, port) # actual remove
+ del self._native_handle_by_pg_id[port][stream_num]
+ del self._streams_history[port][stream_num]
+ return
+ if type(stream_id) is list: # list of values/strings
+ for each_stream_id in stream_id:
+ self._remove_stream(each_stream_id, port_handle) # recurse
+ return
+ if type(stream_id) is str: # range or list in string
+ if ',' in stream_id:
+ for each_stream_id_element in stream_id.split(','):
+ self._remove_stream(each_stream_id_element, port_handle) # recurse
+ return
+ if '-' in stream_id:
+ stream_id_min, stream_id_max = stream_id.split('-', 1)
+ stream_id_min = get_number(stream_id_min)
+ stream_id_max = get_number(stream_id_max)
+ if stream_id_min is None:
+ raise STLError('_remove_stream: wrong range param %s' % stream_id_min)
+ if stream_id_max is None:
+ raise STLError('_remove_stream: wrong range param %s' % stream_id_max)
+ if stream_id_max < stream_id_min:
+ raise STLError('_remove_stream: right range param is smaller than left one: %s-%s' % (stream_id_min, stream_id_max))
+ for each_stream_id in range(stream_id_min, stream_id_max + 1): # range, not xrange - keeps Python 3 compatibility
+ self._remove_stream(each_stream_id, port_handle) # recurse
+ return
+ raise STLError('_remove_stream: wrong stream_id param %s' % stream_id)
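+
+ # For example (illustrative), _remove_stream('2, 4-6', [0]) recurses per
+ # the formats above and removes pg_ids 2, 4, 5 and 6 from port 0.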
+
+ @staticmethod
+ def _parse_port_list(port_list):
+ if type(port_list) is str:
+ return [int(port) for port in port_list.strip().split()]
+ elif type(port_list) is list:
+ return [int(port) for port in port_list]
+ elif is_integer(port_list):
+ return [int(port_list)]
+ raise STLError('port_list should be string with ports, list, or single number')
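+
+ # For example (illustrative): '0 1 2' and [0, 1, 2] both normalize to
+ # [0, 1, 2], and a bare 0 becomes [0].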
+
+def STLHltStream(**user_kwargs):
+ kwargs = merge_kwargs(traffic_config_kwargs, user_kwargs)
+ # verify rate is given by at most one arg
+ rate_args = set(['rate_pps', 'rate_bps', 'rate_percent'])
+ intersect_rate_args = list(rate_args & set(user_kwargs.keys()))
+ if len(intersect_rate_args) > 1:
+ raise STLError('More than one rate argument specified: %s' % intersect_rate_args)
+ try:
+ rate_key = intersect_rate_args[0]
+ except IndexError:
+ rate_key = 'rate_percent'
+ if rate_key == 'rate_percent' and float(kwargs['rate_percent']) > 100: # '==', not 'is' - identity of str literals is not guaranteed
+ raise STLError('rate_percent should not exceed 100%')
+
+ if kwargs['length_mode'] == 'imix': # several streams with given length
+ streams_arr = []
+ user_kwargs['length_mode'] = 'fixed'
+ if kwargs['l3_imix1_size'] < 32 or kwargs['l3_imix2_size'] < 32 or kwargs['l3_imix3_size'] < 32 or kwargs['l3_imix4_size'] < 32:
+ raise STLError('l3_imix*_size should be at least 32')
+ save_to_yaml = kwargs.get('save_to_yaml')
+ total_rate = float(kwargs[rate_key])
+ if rate_key == 'rate_pps': # ratio in packets as is
+ imix1_weight = kwargs['l3_imix1_ratio']
+ imix2_weight = kwargs['l3_imix2_ratio']
+ imix3_weight = kwargs['l3_imix3_ratio']
+ imix4_weight = kwargs['l3_imix4_ratio']
+ elif rate_key == 'rate_bps': # ratio dependent on L2 size too
+ imix1_weight = kwargs['l3_imix1_ratio'] * kwargs['l3_imix1_size']
+ imix2_weight = kwargs['l3_imix2_ratio'] * kwargs['l3_imix2_size']
+ imix3_weight = kwargs['l3_imix3_ratio'] * kwargs['l3_imix3_size']
+ imix4_weight = kwargs['l3_imix4_ratio'] * kwargs['l3_imix4_size']
+ elif rate_key == 'rate_percent': # ratio dependent on L1 size too
+ imix1_weight = kwargs['l3_imix1_ratio'] * (kwargs['l3_imix1_size'] + 20)
+ imix2_weight = kwargs['l3_imix2_ratio'] * (kwargs['l3_imix2_size'] + 20)
+ imix3_weight = kwargs['l3_imix3_ratio'] * (kwargs['l3_imix3_size'] + 20)
+ imix4_weight = kwargs['l3_imix4_ratio'] * (kwargs['l3_imix4_size'] + 20)
+ total_weight = float(imix1_weight + imix2_weight + imix3_weight + imix4_weight)
+ if total_weight == 0:
+ raise STLError('Used length_mode imix, but all the ratios are 0')
+ if kwargs['l3_imix1_ratio'] > 0:
+ if save_to_yaml and type(save_to_yaml) is str:
+ user_kwargs['save_to_yaml'] = save_to_yaml.replace('.yaml', '_imix1.yaml')
+ user_kwargs['frame_size'] = kwargs['l3_imix1_size']
+ user_kwargs[rate_key] = total_rate * imix1_weight / total_weight
+ streams_arr.append(STLHltStream(**user_kwargs))
+ if kwargs['l3_imix2_ratio'] > 0:
+ if save_to_yaml and type(save_to_yaml) is str:
+ user_kwargs['save_to_yaml'] = save_to_yaml.replace('.yaml', '_imix2.yaml')
+ user_kwargs['frame_size'] = kwargs['l3_imix2_size']
+ user_kwargs[rate_key] = total_rate * imix2_weight / total_weight
+ streams_arr.append(STLHltStream(**user_kwargs))
+ if kwargs['l3_imix3_ratio'] > 0:
+ if save_to_yaml and type(save_to_yaml) is str:
+ user_kwargs['save_to_yaml'] = save_to_yaml.replace('.yaml', '_imix3.yaml')
+ user_kwargs['frame_size'] = kwargs['l3_imix3_size']
+ user_kwargs[rate_key] = total_rate * imix3_weight / total_weight
+ streams_arr.append(STLHltStream(**user_kwargs))
+ if kwargs['l3_imix4_ratio'] > 0:
+ if save_to_yaml and type(save_to_yaml) is str:
+ user_kwargs['save_to_yaml'] = save_to_yaml.replace('.yaml', '_imix4.yaml')
+ user_kwargs['frame_size'] = kwargs['l3_imix4_size']
+ user_kwargs[rate_key] = total_rate * imix4_weight / total_weight
+ streams_arr.append(STLHltStream(**user_kwargs))
+ return streams_arr
+
+ # packet generation
+ packet = generate_packet(**user_kwargs)
+
+ # stream generation
+ try:
+ rate_types_dict = {'rate_pps': 'pps', 'rate_bps': 'bps_L2', 'rate_percent': 'percentage'}
+ rate_stateless = {rate_types_dict[rate_key]: float(kwargs[rate_key])}
+ transmit_mode = kwargs['transmit_mode']
+ pkts_per_burst = kwargs['pkts_per_burst']
+ if transmit_mode == 'continuous':
+ transmit_mode_obj = STLTXCont(**rate_stateless)
+ elif transmit_mode == 'single_burst':
+ transmit_mode_obj = STLTXSingleBurst(total_pkts = pkts_per_burst, **rate_stateless)
+ elif transmit_mode == 'multi_burst':
+ transmit_mode_obj = STLTXMultiBurst(total_pkts = pkts_per_burst, count = int(kwargs['burst_loop_count']),
+ ibg = kwargs['inter_burst_gap'], **rate_stateless)
+ else:
+ raise STLError('transmit_mode %s not supported/implemented' % transmit_mode)
+ except Exception as e:
+ raise STLError('Could not create transmit_mode object %s: %s' % (transmit_mode, e if isinstance(e, STLError) else traceback.format_exc()))
+
+ try:
+ if kwargs['l3_protocol'] == 'ipv4' and not kwargs['disable_flow_stats']:
+ pg_id = kwargs.get('pg_id', kwargs.get('flow_stats_id'))
+ else:
+ pg_id = None
+ stream = STLStream(packet = packet,
+ random_seed = 1 if is_true(kwargs['consistent_random']) else 0,
+ #enabled = True,
+ #self_start = True,
+ flow_stats = STLFlowStats(pg_id) if pg_id else None,
+ mode = transmit_mode_obj,
+ )
+ except Exception as e:
+ raise STLError('Could not create stream: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ debug_filename = kwargs.get('save_to_yaml')
+ if type(debug_filename) is str:
+ print('saving to %s' % debug_filename)
+ stream.dump_to_yaml(debug_filename)
+ return stream
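+
+# For example (illustrative), STLHltStream(l3_protocol = 'ipv4', rate_pps = 1000,
+# frame_size = 128) yields one continuous stream, while length_mode = 'imix'
+# returns a list of fixed-size streams weighted as computed above.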
+
+packet_cache = LRU_cache(maxlen = 20)
+
+def generate_packet(**user_kwargs):
+ correct_macs(user_kwargs)
+ if repr(user_kwargs) in packet_cache:
+ return packet_cache[repr(user_kwargs)]
+ kwargs = merge_kwargs(traffic_config_kwargs, user_kwargs)
+ correct_sizes(kwargs) # the packet we produce excludes the 4-byte FCS
+ correct_direction(kwargs, kwargs)
+
+ vm_cmds = []
+ vm_variables_cache = {} # reuse identical variables (inc/dec, var size in bytes, number of steps, step) across fields
+ fix_ipv4_checksum = False
+
+ ### L2 ###
+ if kwargs['l2_encap'] in ('ethernet_ii', 'ethernet_ii_vlan'):
+ #fields_desc = [ MACField("dst","00:00:00:01:00:00"),
+ # MACField("src","00:00:00:02:00:00"),
+ # XShortEnumField("type", 0x9000, ETHER_TYPES) ]
+ if kwargs['ignore_macs']: # workaround for lack of ARP
+ kwargs['mac_src'] = None
+ kwargs['mac_dst'] = None
+ kwargs['mac_src_mode'] = 'fixed'
+ kwargs['mac_dst_mode'] = 'fixed'
+ ethernet_kwargs = {}
+ if kwargs['mac_src']:
+ ethernet_kwargs['src'] = kwargs['mac_src']
+ if kwargs['mac_dst']:
+ ethernet_kwargs['dst'] = kwargs['mac_dst']
+ l2_layer = Ether(**ethernet_kwargs)
+
+ # Eth VM, change only 32 lsb
+ if kwargs['mac_src_mode'] != 'fixed':
+ count = int(kwargs['mac_src_count']) - 1
+ if count < 0:
+ raise STLError('mac_src_count has to be at least 1')
+ if count > 0 or kwargs['mac_src_mode'] == 'random':
+ mac_src = ipv4_str_to_num(mac2str(kwargs['mac_src'])[2:]) # take only 32 lsb
+
+ step = kwargs['mac_src_step']
+
+ if type(step) is str:
+ step = ipv4_str_to_num(mac2str(step)[2:]) # take only 32 lsb
+
+ if step < 1:
+ raise STLError('mac_src_step has to be at least 1')
+
+ if kwargs['mac_src_mode'] == 'increment':
+ add_val = mac_src - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
+ min_value = 0x7fffffff,
+ max_value = 0x7fffffff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['mac_src_mode'] == 'decrement':
+ add_val = mac_src - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
+ min_value = 0x7fffffff - count * step,
+ max_value = 0x7fffffff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['mac_src_mode'] == 'random':
+ add_val = 0
+ var_name = 'mac_src_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
+ else:
+ raise STLError('mac_src_mode %s is not supported' % kwargs['mac_src_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'Ethernet.src', offset_fixup = 2, add_val = add_val))
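+            # Editor's note with hypothetical numbers: the variable runs around
+            # the midpoint 0x7fffffff and add_val shifts it back into range,
+            # e.g. for a 32-lsb value of 0x10 with count 3 and step 1 the
+            # variable spans [0x7fffffff, 0x80000002] and the written value is
+            # var + (0x10 - 0x7fffffff), i.e. 0x10..0x13.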
+
+ if kwargs['mac_dst_mode'] != 'fixed':
+ count = int(kwargs['mac_dst_count']) - 1
+ if count < 0:
+ raise STLError('mac_dst_count has to be at least 1')
+ if count > 0 or kwargs['mac_dst_mode'] == 'random':
+ mac_dst = ipv4_str_to_num(mac2str(kwargs['mac_dst'])[2:]) # take only 32 lsb
+ step = kwargs['mac_dst_step']
+
+ if type(step) is str:
+ step = ipv4_str_to_num(mac2str(step)[2:]) # take only 32 lsb
+
+ if step < 1:
+ raise STLError('mac_dst_step has to be at least 1')
+
+ if kwargs['mac_dst_mode'] == 'increment':
+ add_val = mac_dst - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
+ min_value = 0x7fffffff,
+ max_value = 0x7fffffff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['mac_dst_mode'] == 'decrement':
+ add_val = mac_dst - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
+ min_value = 0x7fffffff - count * step,
+ max_value = 0x7fffffff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['mac_dst_mode'] == 'random':
+ add_val = 0
+ var_name = 'mac_dst_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
+ else:
+ raise STLError('mac_dst_mode %s is not supported' % kwargs['mac_dst_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'Ethernet.dst', offset_fixup = 2, add_val = add_val))
+
+ if kwargs['l2_encap'] == 'ethernet_ii_vlan' or (kwargs['l2_encap'] == 'ethernet_ii' and vlan_in_args(user_kwargs)):
+ #fields_desc = [ BitField("prio", 0, 3),
+ # BitField("id", 0, 1),
+ # BitField("vlan", 1, 12),
+ # XShortEnumField("type", 0x0000, ETHER_TYPES) ]
+ for i, vlan_kwargs in enumerate(split_vlan_args(kwargs)):
+ vlan_id = int(vlan_kwargs['vlan_id'])
+ dot1q_kwargs = {'prio': int(vlan_kwargs['vlan_user_priority']),
+ 'vlan': vlan_id,
+ 'id': int(vlan_kwargs['vlan_cfi'])}
+ vlan_protocol_tag_id = vlan_kwargs['vlan_protocol_tag_id']
+ if vlan_protocol_tag_id is not None:
+ if type(vlan_protocol_tag_id) is str:
+ vlan_protocol_tag_id = int(vlan_protocol_tag_id, 16)
+ dot1q_kwargs['type'] = vlan_protocol_tag_id
+ l2_layer /= Dot1Q(**dot1q_kwargs)
+
+ # vlan VM
+ vlan_id_mode = vlan_kwargs['vlan_id_mode']
+ if vlan_id_mode != 'fixed':
+ count = int(vlan_kwargs['vlan_id_count']) - 1
+ if count < 0:
+ raise STLError('vlan_id_count has to be at least 1')
+ if count > 0 or vlan_id_mode == 'random':
+ var_name = 'vlan_id%s' % i
+ step = int(vlan_kwargs['vlan_id_step'])
+ if step < 1:
+ raise STLError('vlan_id_step has to be at least 1')
+ if vlan_id_mode == 'increment':
+ add_val = vlan_id - 0x7fff
+                    var_name = '%s_%s_%s_%s' % ('inc', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'inc', step = step,
+ min_value = 0x7fff,
+ max_value = 0x7fff + count * step))
+ vm_variables_cache[var_name] = True
+ elif vlan_id_mode == 'decrement':
+ add_val = vlan_id - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('dec', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'dec', step = step,
+ min_value = 0x7fff - count * step,
+ max_value = 0x7fff))
+ vm_variables_cache[var_name] = True
+ elif vlan_id_mode == 'random':
+ add_val = 0
+ var_name = 'vlan_id_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'random', max_value = 0xffff))
+ else:
+ raise STLError('vlan_id_mode %s is not supported' % vlan_id_mode)
+ vm_cmds.append(STLVmWrMaskFlowVar(fv_name = var_name, pkt_offset = '802|1Q:%s.vlan' % i,
+ pkt_cast_size = 2, mask = 0xfff, add_value = add_val))
+ else:
+ raise NotImplementedError("l2_encap does not support the desired encapsulation '%s'" % kwargs['l2_encap'])
+ base_pkt = l2_layer
+
+ ### L3 ###
+ if kwargs['l3_protocol'] is None:
+ l3_layer = None
+ elif kwargs['l3_protocol'] == 'ipv4':
+ #fields_desc = [ BitField("version" , 4 , 4),
+ # BitField("ihl", None, 4),
+ # XByteField("tos", 0),
+ # ShortField("len", None),
+ # ShortField("id", 1),
+ # FlagsField("flags", 0, 3, ["MF","DF","evil"]),
+ # BitField("frag", 0, 13),
+ # ByteField("ttl", 64),
+ # ByteEnumField("proto", 0, IP_PROTOS),
+ # XShortField("chksum", None),
+ # Emph(IPField("src", "16.0.0.1")),
+ # Emph(IPField("dst", "48.0.0.1")),
+ # PacketListField("options", [], IPOption, length_from=lambda p:p.ihl*4-20) ]
+ ip_tos = get_TOS(user_kwargs, kwargs)
+ if ip_tos < 0 or ip_tos > 255:
+ raise STLError('TOS %s is not in range 0-255' % ip_tos)
+ l3_layer = IP(tos = ip_tos,
+ #len = kwargs['l3_length'], don't let user create corrupt packets
+ id = kwargs['ip_id'],
+ frag = kwargs['ip_fragment_offset'],
+ ttl = kwargs['ip_ttl'],
+ chksum = kwargs['ip_checksum'],
+ src = kwargs['ip_src_addr'],
+ dst = kwargs['ip_dst_addr'],
+ )
+ # IPv4 VM
+ if kwargs['ip_src_mode'] != 'fixed':
+ count = int(kwargs['ip_src_count']) - 1
+ if count < 0:
+ raise STLError('ip_src_count has to be at least 1')
+ if count > 0 or kwargs['ip_src_mode'] == 'random':
+ fix_ipv4_checksum = True
+ ip_src_addr = kwargs['ip_src_addr']
+ if type(ip_src_addr) is str:
+ ip_src_addr = ipv4_str_to_num(is_valid_ipv4(ip_src_addr))
+ step = kwargs['ip_src_step']
+ if type(step) is str:
+ step = ipv4_str_to_num(is_valid_ipv4(step))
+
+ if step < 1:
+ raise STLError('ip_src_step has to be at least 1')
+
+ if kwargs['ip_src_mode'] == 'increment':
+ add_val = ip_src_addr - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
+ min_value = 0x7fffffff,
+ max_value = 0x7fffffff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['ip_src_mode'] == 'decrement':
+ add_val = ip_src_addr - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
+ min_value = 0x7fffffff - count * step,
+ max_value = 0x7fffffff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['ip_src_mode'] == 'random':
+ add_val = 0
+ var_name = 'ip_src_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
+ else:
+ raise STLError('ip_src_mode %s is not supported' % kwargs['ip_src_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'IP.src', add_val = add_val))
+
+ if kwargs['ip_dst_mode'] != 'fixed':
+ count = int(kwargs['ip_dst_count']) - 1
+ if count < 0:
+ raise STLError('ip_dst_count has to be at least 1')
+ if count > 0 or kwargs['ip_dst_mode'] == 'random':
+ fix_ipv4_checksum = True
+ ip_dst_addr = kwargs['ip_dst_addr']
+ if type(ip_dst_addr) is str:
+ ip_dst_addr = ipv4_str_to_num(is_valid_ipv4(ip_dst_addr))
+ step = kwargs['ip_dst_step']
+
+ if type(step) is str:
+ step = ipv4_str_to_num(is_valid_ipv4(step))
+
+ if step < 1:
+ raise STLError('ip_dst_step has to be at least 1')
+
+ if kwargs['ip_dst_mode'] == 'increment':
+ add_val = ip_dst_addr - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
+ min_value = 0x7fffffff,
+ max_value = 0x7fffffff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['ip_dst_mode'] == 'decrement':
+ add_val = ip_dst_addr - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
+ min_value = 0x7fffffff - count * step,
+ max_value = 0x7fffffff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['ip_dst_mode'] == 'random':
+ add_val = 0
+ var_name = 'ip_dst_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
+ else:
+ raise STLError('ip_dst_mode %s is not supported' % kwargs['ip_dst_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'IP.dst', add_val = add_val))
+
+ elif kwargs['l3_protocol'] == 'ipv6':
+ #fields_desc = [ BitField("version" , 6 , 4),
+ # BitField("tc", 0, 8), #TODO: IPv6, ByteField ?
+ # BitField("fl", 0, 20),
+ # ShortField("plen", None),
+ # ByteEnumField("nh", 59, ipv6nh),
+ # ByteField("hlim", 64),
+ # IP6Field("dst", "::2"),
+ # #SourceIP6Field("src", "dst"), # dst is for src @ selection
+ # IP6Field("src", "::1") ]
+ ipv6_kwargs = {'tc': kwargs['ipv6_traffic_class'],
+ 'fl': kwargs['ipv6_flow_label'],
+ 'plen': kwargs['ipv6_length'],
+ 'hlim': kwargs['ipv6_hop_limit'],
+ 'src': kwargs['ipv6_src_addr'],
+ 'dst': kwargs['ipv6_dst_addr']}
+ if kwargs['ipv6_next_header'] is not None:
+ ipv6_kwargs['nh'] = kwargs['ipv6_next_header']
+ l3_layer = IPv6(**ipv6_kwargs)
+
+ # IPv6 VM, change only 32 lsb
+ if kwargs['ipv6_src_mode'] != 'fixed':
+ count = int(kwargs['ipv6_src_count']) - 1
+ if count < 0:
+ raise STLError('ipv6_src_count has to be at least 1')
+ if count > 0 or kwargs['ipv6_src_mode'] == 'random':
+ ipv6_src_addr_num = ipv4_str_to_num(is_valid_ipv6(kwargs['ipv6_src_addr'])[-4:])
+ step = kwargs['ipv6_src_step']
+
+ if type(step) is str: # convert ipv6 step to number
+ step = ipv4_str_to_num(is_valid_ipv6(step)[-4:])
+
+ if step < 1:
+ raise STLError('ipv6_src_step has to be at least 1')
+
+ if kwargs['ipv6_src_mode'] == 'increment':
+ add_val = ipv6_src_addr_num - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
+ min_value = 0x7fffffff,
+ max_value = 0x7fffffff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['ipv6_src_mode'] == 'decrement':
+ add_val = ipv6_src_addr_num - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
+ min_value = 0x7fffffff - count * step,
+ max_value = 0x7fffffff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['ipv6_src_mode'] == 'random':
+ add_val = 0
+ var_name = 'ipv6_src_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
+ else:
+ raise STLError('ipv6_src_mode %s is not supported' % kwargs['ipv6_src_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'IPv6.src', offset_fixup = 12, add_val = add_val))
+
+ if kwargs['ipv6_dst_mode'] != 'fixed':
+ count = int(kwargs['ipv6_dst_count']) - 1
+ if count < 0:
+ raise STLError('ipv6_dst_count has to be at least 1')
+ if count > 0 or kwargs['ipv6_dst_mode'] == 'random':
+ ipv6_dst_addr_num = ipv4_str_to_num(is_valid_ipv6(kwargs['ipv6_dst_addr'])[-4:])
+ step = kwargs['ipv6_dst_step']
+
+ if type(step) is str: # convert ipv6 step to number
+ step = ipv4_str_to_num(is_valid_ipv6(step)[-4:])
+
+ if step < 1:
+ raise STLError('ipv6_dst_step has to be at least 1')
+
+ if kwargs['ipv6_dst_mode'] == 'increment':
+ add_val = ipv6_dst_addr_num - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
+ min_value = 0x7fffffff,
+ max_value = 0x7fffffff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['ipv6_dst_mode'] == 'decrement':
+ add_val = ipv6_dst_addr_num - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
+ min_value = 0x7fffffff - count * step,
+ max_value = 0x7fffffff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['ipv6_dst_mode'] == 'random':
+ add_val = 0
+ var_name = 'ipv6_dst_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
+ else:
+ raise STLError('ipv6_dst_mode %s is not supported' % kwargs['ipv6_dst_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'IPv6.dst', offset_fixup = 12, add_val = add_val))
+
+ elif kwargs['l3_protocol'] is not None:
+ raise NotImplementedError("l3_protocol '%s' is not supported by TRex yet." % kwargs['l3_protocol'])
+ if l3_layer is not None:
+ base_pkt /= l3_layer
+
+ ### L4 ###
+ l4_layer = None
+ if kwargs['l4_protocol'] == 'tcp':
+ assert kwargs['l3_protocol'] in ('ipv4', 'ipv6'), 'TCP must be over ipv4/ipv6'
+ #fields_desc = [ ShortEnumField("sport", 20, TCP_SERVICES),
+ # ShortEnumField("dport", 80, TCP_SERVICES),
+ # IntField("seq", 0),
+ # IntField("ack", 0),
+ # BitField("dataofs", None, 4),
+ # BitField("reserved", 0, 4),
+ # FlagsField("flags", 0x2, 8, "FSRPAUEC"),
+ # ShortField("window", 8192),
+ # XShortField("chksum", None),
+ # ShortField("urgptr", 0),
+ # TCPOptionsField("options", {}) ]
+
+        tcp_flags = (('F' if kwargs['tcp_fin_flag'] else '') +
+                     ('S' if kwargs['tcp_syn_flag'] else '') +
+                     ('R' if kwargs['tcp_rst_flag'] else '') +
+                     ('P' if kwargs['tcp_psh_flag'] else '') +
+                     ('A' if kwargs['tcp_ack_flag'] else '') +
+                     ('U' if kwargs['tcp_urg_flag'] else ''))
+
+ l4_layer = TCP(sport = kwargs['tcp_src_port'],
+ dport = kwargs['tcp_dst_port'],
+ seq = kwargs['tcp_seq_num'],
+ ack = kwargs['tcp_ack_num'],
+ dataofs = kwargs['tcp_data_offset'],
+ flags = tcp_flags,
+ window = kwargs['tcp_window'],
+ chksum = kwargs['tcp_checksum'],
+ urgptr = kwargs['tcp_urgent_ptr'],
+ )
+ # TCP VM
+ if kwargs['tcp_src_port_mode'] != 'fixed':
+ count = int(kwargs['tcp_src_port_count']) - 1
+ if count < 0:
+ raise STLError('tcp_src_port_count has to be at least 1')
+ if count > 0 or kwargs['tcp_src_port_mode'] == 'random':
+ fix_ipv4_checksum = True
+ step = kwargs['tcp_src_port_step']
+ if step < 1:
+ raise STLError('tcp_src_port_step has to be at least 1')
+ if kwargs['tcp_src_port_mode'] == 'increment':
+ add_val = kwargs['tcp_src_port'] - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('inc', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'inc', step = step,
+ min_value = 0x7fff,
+ max_value = 0x7fff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['tcp_src_port_mode'] == 'decrement':
+ add_val = kwargs['tcp_src_port'] - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('dec', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'dec', step = step,
+ min_value = 0x7fff - count * step,
+ max_value = 0x7fff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['tcp_src_port_mode'] == 'random':
+ add_val = 0
+ var_name = 'tcp_src_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'random', max_value = 0xffff))
+ else:
+ raise STLError('tcp_src_port_mode %s is not supported' % kwargs['tcp_src_port_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'TCP.sport', add_val = add_val))
+
+ if kwargs['tcp_dst_port_mode'] != 'fixed':
+ count = int(kwargs['tcp_dst_port_count']) - 1
+ if count < 0:
+ raise STLError('tcp_dst_port_count has to be at least 1')
+ if count > 0 or kwargs['tcp_dst_port_mode'] == 'random':
+ fix_ipv4_checksum = True
+ step = kwargs['tcp_dst_port_step']
+ if step < 1:
+ raise STLError('tcp_dst_port_step has to be at least 1')
+ if kwargs['tcp_dst_port_mode'] == 'increment':
+ add_val = kwargs['tcp_dst_port'] - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('inc', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'inc', step = step,
+ min_value = 0x7fff,
+ max_value = 0x7fff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['tcp_dst_port_mode'] == 'decrement':
+ add_val = kwargs['tcp_dst_port'] - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('dec', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'dec', step = step,
+ min_value = 0x7fff - count * step,
+ max_value = 0x7fff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['tcp_dst_port_mode'] == 'random':
+ add_val = 0
+ var_name = 'tcp_dst_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'random', max_value = 0xffff))
+ else:
+ raise STLError('tcp_dst_port_mode %s is not supported' % kwargs['tcp_dst_port_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'TCP.dport', add_val = add_val))
+
+ elif kwargs['l4_protocol'] == 'udp':
+ assert kwargs['l3_protocol'] in ('ipv4', 'ipv6'), 'UDP must be over ipv4/ipv6'
+ #fields_desc = [ ShortEnumField("sport", 53, UDP_SERVICES),
+ # ShortEnumField("dport", 53, UDP_SERVICES),
+ # ShortField("len", None),
+ # XShortField("chksum", None), ]
+ l4_layer = UDP(sport = kwargs['udp_src_port'],
+ dport = kwargs['udp_dst_port'],
+ len = kwargs['udp_length'], chksum = None)
+ # UDP VM
+ if kwargs['udp_src_port_mode'] != 'fixed':
+ count = int(kwargs['udp_src_port_count']) - 1
+ if count < 0:
+ raise STLError('udp_src_port_count has to be at least 1')
+ if count > 0 or kwargs['udp_src_port_mode'] == 'random':
+ fix_ipv4_checksum = True
+ step = kwargs['udp_src_port_step']
+ if step < 1:
+ raise STLError('udp_src_port_step has to be at least 1')
+ if kwargs['udp_src_port_mode'] == 'increment':
+ add_val = kwargs['udp_src_port'] - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('inc', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'inc', step = step,
+ min_value = 0x7fff,
+ max_value = 0x7fff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['udp_src_port_mode'] == 'decrement':
+ add_val = kwargs['udp_src_port'] - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('dec', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'dec', step = step,
+ min_value = 0x7fff - count * step,
+ max_value = 0x7fff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['udp_src_port_mode'] == 'random':
+ add_val = 0
+ var_name = 'udp_src_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'random', max_value = 0xffff))
+ else:
+ raise STLError('udp_src_port_mode %s is not supported' % kwargs['udp_src_port_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'UDP.sport', add_val = add_val))
+
+ if kwargs['udp_dst_port_mode'] != 'fixed':
+ count = int(kwargs['udp_dst_port_count']) - 1
+ if count < 0:
+ raise STLError('udp_dst_port_count has to be at least 1')
+ if count > 0 or kwargs['udp_dst_port_mode'] == 'random':
+ fix_ipv4_checksum = True
+ step = kwargs['udp_dst_port_step']
+ if step < 1:
+ raise STLError('udp_dst_port_step has to be at least 1')
+ if kwargs['udp_dst_port_mode'] == 'increment':
+ add_val = kwargs['udp_dst_port'] - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('inc', 2, count, step)
+                if var_name not in vm_variables_cache:
+                    vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'inc', step = step,
+                                                min_value = 0x7fff,
+                                                max_value = 0x7fff + count * step))
+                    vm_variables_cache[var_name] = True
+ elif kwargs['udp_dst_port_mode'] == 'decrement':
+ add_val = kwargs['udp_dst_port'] - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('dec', 2, count, step)
+                if var_name not in vm_variables_cache:
+                    vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'dec', step = step,
+                                                min_value = 0x7fff - count * step,
+                                                max_value = 0x7fff))
+                    vm_variables_cache[var_name] = True
+ elif kwargs['udp_dst_port_mode'] == 'random':
+ add_val = 0
+ var_name = 'udp_dst_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'random', max_value = 0xffff))
+ else:
+ raise STLError('udp_dst_port_mode %s is not supported' % kwargs['udp_dst_port_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'UDP.dport', add_val = add_val))
+ elif kwargs['l4_protocol'] is not None:
+ raise NotImplementedError("l4_protocol '%s' is not supported by TRex yet." % kwargs['l4_protocol'])
+ if l4_layer is not None:
+ base_pkt /= l4_layer
+
+ trim_dict = {'increment': 'inc', 'decrement': 'dec', 'random': 'random'}
+ length_mode = kwargs['length_mode']
+ if length_mode == 'auto':
+ payload_len = 0
+ elif length_mode == 'fixed':
+ if 'frame_size' in user_kwargs: # L2 has higher priority over L3
+ payload_len = kwargs['frame_size'] - len(base_pkt)
+ elif 'l3_length' in user_kwargs:
+ payload_len = kwargs['l3_length'] - (len(base_pkt) - len(l2_layer))
+ else: # default
+ payload_len = kwargs['frame_size'] - len(base_pkt)
+ elif length_mode == 'imix':
+ raise STLError("length_mode 'imix' should be treated at stream creating level.")
+ elif length_mode in trim_dict:
+ if 'frame_size_min' in user_kwargs or 'frame_size_max' in user_kwargs: # size is determined by L2, higher priority over L3 size
+ if kwargs['frame_size_min'] < 44 or kwargs['frame_size_max'] < 44:
+ raise STLError('frame_size_min and frame_size_max should be at least 44')
+ if kwargs['frame_size_min'] > kwargs['frame_size_max']:
+ raise STLError('frame_size_min is bigger than frame_size_max')
+ if kwargs['frame_size_min'] != kwargs['frame_size_max']:
+ fix_ipv4_checksum = True
+ vm_cmds.append(STLVmFlowVar(name = 'pkt_len', size = 2, op = trim_dict[length_mode], step = kwargs['frame_size_step'],
+ min_value = kwargs['frame_size_min'],
+ max_value = kwargs['frame_size_max']))
+ vm_cmds.append(STLVmTrimPktSize('pkt_len'))
+ payload_len = kwargs['frame_size_max'] - len(base_pkt)
+ else: # size is determined by L3
+ if kwargs['l3_length_min'] < 40 or kwargs['l3_length_max'] < 40:
+ raise STLError('l3_length_min and l3_length_max should be at least 40')
+ if kwargs['l3_length_min'] > kwargs['l3_length_max']:
+ raise STLError('l3_length_min is bigger than l3_length_max')
+ if kwargs['l3_length_min'] != kwargs['l3_length_max']:
+ fix_ipv4_checksum = True
+ vm_cmds.append(STLVmFlowVar(name = 'pkt_len', size = 2, op = trim_dict[length_mode], step = kwargs['l3_length_step'],
+ min_value = kwargs['l3_length_min'] + len(l2_layer),
+ max_value = kwargs['l3_length_max'] + len(l2_layer)))
+ payload_len = kwargs['l3_length_max'] + len(l2_layer) - len(base_pkt)
+ vm_cmds.append(STLVmTrimPktSize('pkt_len'))
+
+ if (l3_layer and l3_layer.name == 'IP'):
+ vm_cmds.append(STLVmWrFlowVar(fv_name = 'pkt_len', pkt_offset = 'IP.len', add_val = -len(l2_layer)))
+ if (l4_layer and l4_layer.name == 'UDP'):
+ vm_cmds.append(STLVmWrFlowVar(fv_name = 'pkt_len', pkt_offset = 'UDP.len', add_val = -len(l2_layer) - len(l3_layer)))
+ else:
+        raise STLError('length_mode should be one of the following: %s' % (['auto', 'fixed'] + list(trim_dict.keys())))
+
+ if payload_len < 0:
+        raise STLError('Packet is longer than the size defined by frame_size* or l3_length* arguments (resulting payload size is %s)' % payload_len)
+ base_pkt /= '!' * payload_len
+
+ pkt = STLPktBuilder()
+ pkt.set_packet(base_pkt)
+    if fix_ipv4_checksum and l3_layer is not None and l3_layer.name == 'IP' and kwargs['ip_checksum'] is None:
+ vm_cmds.append(STLVmFixIpv4(offset = 'IP'))
+ if vm_cmds:
+ split_by_field = None
+ if kwargs['split_by_cores'] == 'split':
+ max_length = 0
+ for cmd in vm_cmds:
+ if isinstance(cmd, STLVmFlowVar):
+ if cmd.op not in ('inc', 'dec'):
+ continue
+ length = float(cmd.max_value - cmd.min_value) / cmd.step
+ if cmd.name == 'ip_src' and length > 7: # priority is to split by ip_src
+ split_by_field = 'ip_src'
+ break
+ if length > max_length:
+ max_length = length
+ split_by_field = cmd.name
+ elif kwargs['split_by_cores'] == 'single':
+ raise STLError("split_by_cores 'single' not implemented yet")
+ elif kwargs['split_by_cores'] != 'duplicate':
+ raise STLError("split_by_cores '%s' is not supported" % kwargs['split_by_cores'])
+ pkt.add_command(STLScVmRaw(vm_cmds, split_by_field))
+
+ # debug (only the base packet, without VM)
+ debug_filename = kwargs.get('save_to_pcap')
+ if type(debug_filename) is str:
+ pkt.dump_pkt_to_pcap(debug_filename)
+ packet_cache[repr(user_kwargs)] = pkt
+ return pkt
+
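+# Editor's note: generate_packet() above memoizes on repr(user_kwargs), so
+# repeated HLT calls with identical arguments reuse the compiled packet
+# builder; the LRU keeps only the 20 most recent argument combinations.
+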
+def get_TOS(user_kwargs, kwargs):
+ TOS0 = set(['ip_precedence', 'ip_tos_field', 'ip_mbz'])
+ TOS1 = set(['ip_precedence', 'ip_delay', 'ip_throughput', 'ip_reliability', 'ip_cost', 'ip_reserved'])
+ TOS2 = set(['ip_dscp', 'ip_cu'])
+ user_args = set(user_kwargs.keys())
+ if user_args & (TOS1 - TOS0) and user_args & (TOS0 - TOS1):
+ raise STLError('You have mixed %s and %s TOS parameters' % (TOS0, TOS1))
+ if user_args & (TOS2 - TOS0) and user_args & (TOS0 - TOS2):
+ raise STLError('You have mixed %s and %s TOS parameters' % (TOS0, TOS2))
+ if user_args & (TOS2 - TOS1) and user_args & (TOS1 - TOS2):
+ raise STLError('You have mixed %s and %s TOS parameters' % (TOS1, TOS2))
+ if user_args & (TOS0 - TOS1 - TOS2):
+ return (kwargs['ip_precedence'] << 5) + (kwargs['ip_tos_field'] << 2) + kwargs['ip_mbz']
+ if user_args & (TOS1 - TOS2):
+ return (kwargs['ip_precedence'] << 5) + (kwargs['ip_delay'] << 4) + (kwargs['ip_throughput'] << 3) + (kwargs['ip_reliability'] << 2) + (kwargs['ip_cost'] << 1) + kwargs['ip_reserved']
+ return (kwargs['ip_dscp'] << 2) + kwargs['ip_cu']
+
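+# Worked examples (editor's sketch) of the TOS arithmetic above, using
+# hypothetical field values:
+def _example_tos_values():
+    # DSCP form: (ip_dscp << 2) + ip_cu; DSCP EF (46) -> TOS 184
+    assert (46 << 2) + 0 == 184
+    # precedence form: precedence 5 with the delay bit set -> TOS 176
+    assert (5 << 5) + (1 << 4) + (0 << 3) + (0 << 2) + (0 << 1) + 0 == 176
+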
+def vlan_in_args(user_kwargs):
+ for arg in user_kwargs:
+ if arg.startswith('vlan_'):
+ return True
+ return False
+
+def split_vlan_arg(vlan_arg):
+ if type(vlan_arg) is list:
+ return vlan_arg
+ if is_integer(vlan_arg) or vlan_arg is None:
+ return [vlan_arg]
+ if type(vlan_arg) is str:
+ return vlan_arg.replace('{', '').replace('}', '').strip().split()
+ raise STLError('vlan argument invalid (expecting list, int, long, str, None): %s' % vlan_arg)
+
+def split_vlan_args(kwargs):
+ vlan_args_dict = {}
+ for arg, value in kwargs.items():
+ if arg.startswith('vlan_'):
+ vlan_args_dict[arg] = split_vlan_arg(value)
+ dot1q_headers_count = max([len(x) for x in vlan_args_dict.values()])
+ vlan_args_per_header = [{} for _ in range(dot1q_headers_count)]
+ for arg, value in vlan_args_dict.items():
+ for i in range(dot1q_headers_count):
+ if len(value) > i:
+ vlan_args_per_header[i][arg] = value[i]
+ else:
+ vlan_args_per_header[i][arg] = traffic_config_kwargs[arg]
+ return vlan_args_per_header
+
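+# A minimal sketch (editor's illustration): how split_vlan_arg() above expands
+# HLT-style brace values into one entry per stacked Dot1Q header.
+def _example_split_vlan_args():
+    assert split_vlan_arg('{10 20}') == ['10', '20']   # QinQ: two VLAN tags
+    assert split_vlan_arg(5) == [5]                    # single tag
+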
+def correct_direction(user_kwargs, kwargs):
+ if kwargs['direction'] == 0:
+ return
+ user_kwargs['mac_src'] = kwargs['mac_src2']
+ user_kwargs['mac_dst'] = kwargs['mac_dst2']
+ if kwargs['l3_protocol'] == 'ipv4':
+ for arg in kwargs.keys():
+ if 'ip_src_' in arg:
+ dst_arg = 'ip_dst_' + arg[7:]
+ user_kwargs[arg], user_kwargs[dst_arg] = kwargs[dst_arg], kwargs[arg]
+ elif kwargs['l3_protocol'] == 'ipv6':
+ for arg in kwargs.keys():
+ if 'ipv6_src_' in arg:
+ dst_arg = 'ipv6_dst_' + arg[9:]
+ user_kwargs[arg], user_kwargs[dst_arg] = kwargs[dst_arg], kwargs[arg]
+
+# we produce packets without FCS, so the requested sizes must be reduced by 4
+def correct_sizes(kwargs):
+ for arg, value in kwargs.items():
+ if is_integer(value):
+ if arg.endswith(('_length', '_size', '_size_min', '_size_max', '_length_min', '_length_max')):
+ kwargs[arg] -= 4
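+
+# A minimal sketch (editor's illustration): HLT sizes include the 4-byte
+# Ethernet FCS while the builder produces packets without it, so a requested
+# frame_size of 64 becomes 60 bytes of generated packet data.
+def _example_correct_sizes():
+    kwargs = {'frame_size': 64}   # hypothetical HLT argument
+    correct_sizes(kwargs)
+    assert kwargs['frame_size'] == 60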
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
new file mode 100644
index 00000000..1461fcec
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
@@ -0,0 +1,284 @@
+#!/router/bin/python
+
+import zmq
+import json
+import re
+from collections import namedtuple
+import zlib
+import struct
+
+from .trex_stl_types import *
+from .utils.common import random_id_gen
+from .utils.zipmsg import ZippedMsg
+
+class bcolors:
+ BLUE = '\033[94m'
+ GREEN = '\033[32m'
+ YELLOW = '\033[93m'
+ RED = '\033[31m'
+ MAGENTA = '\033[35m'
+ ENDC = '\033[0m'
+ BOLD = '\033[1m'
+ UNDERLINE = '\033[4m'
+
+# helper class describing a batch of RPC messages
+class BatchMessage(object):
+ def __init__ (self, rpc_client):
+ self.rpc_client = rpc_client
+ self.batch_list = []
+
+ def add (self, method_name, params = None, api_class = 'core'):
+
+ id, msg = self.rpc_client.create_jsonrpc_v2(method_name, params, api_class, encode = False)
+ self.batch_list.append(msg)
+
+ def invoke(self, block = False):
+ if not self.rpc_client.connected:
+ return RC_ERR("Not connected to server")
+
+ msg = json.dumps(self.batch_list)
+
+ return self.rpc_client.send_msg(msg)
+
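+# A usage sketch (editor's illustration; the queued method names are
+# illustrative, assuming an already-connected JsonRpcClient):
+def _example_batch_usage(rpc_client):
+    # queue several calls and send them to the server as one JSON array
+    batch = rpc_client.create_batch()
+    batch.add('ping', api_class = None)
+    batch.add('get_version', api_class = None)
+    return batch.invoke()
+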
+
+# JSON RPC v2.0 client
+class JsonRpcClient(object):
+
+ def __init__ (self, default_server, default_port, client):
+ self.client_api = client.api_h
+ self.logger = client.logger
+ self.connected = False
+
+ # default values
+ self.port = default_port
+ self.server = default_server
+
+ self.id_gen = random_id_gen()
+ self.zipper = ZippedMsg()
+
+ def get_connection_details (self):
+ rc = {}
+ rc['server'] = self.server
+ rc['port'] = self.port
+
+ return rc
+
+ # pretty print for JSON
+ def pretty_json (self, json_str, use_colors = True):
+ pretty_str = json.dumps(json.loads(json_str), indent = 4, separators=(',', ': '), sort_keys = True)
+
+ if not use_colors:
+ return pretty_str
+
+ try:
+ # int numbers
+ pretty_str = re.sub(r'([ ]*:[ ]+)(\-?[1-9][0-9]*[^.])',r'\1{0}\2{1}'.format(bcolors.BLUE, bcolors.ENDC), pretty_str)
+ # float
+ pretty_str = re.sub(r'([ ]*:[ ]+)(\-?[1-9][0-9]*\.[0-9]+)',r'\1{0}\2{1}'.format(bcolors.MAGENTA, bcolors.ENDC), pretty_str)
+ # strings
+
+ pretty_str = re.sub(r'([ ]*:[ ]+)("[^"]*")',r'\1{0}\2{1}'.format(bcolors.RED, bcolors.ENDC), pretty_str)
+ pretty_str = re.sub(r"('[^']*')", r'{0}\1{1}'.format(bcolors.MAGENTA, bcolors.RED), pretty_str)
+ except :
+ pass
+
+ return pretty_str
+
+ def verbose_msg (self, msg):
+ self.logger.log("\n\n[verbose] " + msg, level = self.logger.VERBOSE_HIGH)
+
+
+ # batch messages
+ def create_batch (self):
+ return BatchMessage(self)
+
+ def create_jsonrpc_v2 (self, method_name, params = None, api_class = 'core', encode = True):
+ msg = {}
+ msg["jsonrpc"] = "2.0"
+ msg["method"] = method_name
+ msg["id"] = next(self.id_gen)
+
+ msg["params"] = params if params is not None else {}
+
+        # if this RPC has an API class - add its handler
+ if api_class:
+ msg["params"]["api_h"] = self.client_api[api_class]
+
+
+        if encode:
+            return msg["id"], json.dumps(msg)
+        else:
+            return msg["id"], msg
+
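+    # Shape of the message built above (editor's note, illustrative values):
+    #   {"jsonrpc": "2.0", "method": "ping", "id": "xK3b",
+    #    "params": {"api_h": "<api handler>"}}
+    # A batch request is simply a JSON array of such objects.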
+
+ def invoke_rpc_method (self, method_name, params = None, api_class = 'core'):
+ if not self.connected:
+ return RC_ERR("Not connected to server")
+
+ id, msg = self.create_jsonrpc_v2(method_name, params, api_class)
+
+ return self.send_msg(msg)
+
+
+ def send_msg (self, msg):
+ # print before
+ if self.logger.check_verbose(self.logger.VERBOSE_HIGH):
+ self.verbose_msg("Sending Request To Server:\n\n" + self.pretty_json(msg) + "\n")
+
+ # encode string to buffer
+ buffer = msg.encode()
+
+ if self.zipper.check_threshold(buffer):
+ response = self.send_raw_msg(self.zipper.compress(buffer))
+ if response:
+ response = self.zipper.decompress(response)
+ else:
+ response = self.send_raw_msg(buffer)
+
+ if not response:
+ return response
+
+ # return to string
+ response = response.decode()
+
+ # print after
+ if self.logger.check_verbose(self.logger.VERBOSE_HIGH):
+ self.verbose_msg("Server Response:\n\n" + self.pretty_json(response) + "\n")
+
+ # process response (batch and regular)
+ try:
+ response_json = json.loads(response)
+ except (TypeError, ValueError):
+ return RC_ERR("*** [RPC] - Failed to decode response from server")
+
+ if isinstance(response_json, list):
+ return self.process_batch_response(response_json)
+ else:
+ return self.process_single_response(response_json)
+
+
+
+ # low level send of string message
+ def send_raw_msg (self, msg):
+
+ tries = 0
+ while True:
+ try:
+ self.socket.send(msg)
+ break
+ except zmq.Again:
+ tries += 1
+ if tries > 5:
+ self.disconnect()
+ return RC_ERR("*** [RPC] - Failed to send message to server")
+
+
+ tries = 0
+ while True:
+ try:
+ response = self.socket.recv()
+ break
+ except zmq.Again:
+ tries += 1
+ if tries > 5:
+ self.disconnect()
+ return RC_ERR("*** [RPC] - Failed to get server response from {0}".format(self.transport))
+
+
+ return response
+
+
+
+    # process a single response from the server
+ def process_single_response (self, response_json):
+
+ if (response_json.get("jsonrpc") != "2.0"):
+ return RC_ERR("Malformed Response ({0})".format(str(response_json)))
+
+ # error reported by server
+ if ("error" in response_json):
+ if "specific_err" in response_json["error"]:
+ return RC_ERR(response_json["error"]["specific_err"])
+ else:
+ return RC_ERR(response_json["error"]["message"])
+
+
+ # if no error there should be a result
+ if ("result" not in response_json):
+ return RC_ERR("Malformed Response ({0})".format(str(response_json)))
+
+ return RC_OK(response_json["result"])
+
+
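+    # Illustrative payloads (editor's note, hypothetical values):
+    #   success: {"jsonrpc": "2.0", "id": 1, "result": {...}}   -> RC_OK(result)
+    #   error:   {"jsonrpc": "2.0", "id": 1,
+    #             "error": {"message": "...", "specific_err": "..."}} -> RC_ERR(...)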
+
+ # process a batch response
+ def process_batch_response (self, response_json):
+ rc_batch = RC()
+
+ for single_response in response_json:
+ rc = self.process_single_response(single_response)
+ rc_batch.add(rc)
+
+ return rc_batch
+
+
+ def disconnect (self):
+ if self.connected:
+ self.socket.close(linger = 0)
+ self.context.destroy(linger = 0)
+ self.connected = False
+ return RC_OK()
+ else:
+ return RC_ERR("Not connected to server")
+
+
+ def connect(self, server = None, port = None):
+ if self.connected:
+ self.disconnect()
+
+ self.context = zmq.Context()
+
+ self.server = (server if server else self.server)
+ self.port = (port if port else self.port)
+
+ # Socket to talk to server
+ self.transport = "tcp://{0}:{1}".format(self.server, self.port)
+
+ self.socket = self.context.socket(zmq.REQ)
+ try:
+ self.socket.connect(self.transport)
+ except zmq.error.ZMQError as e:
+ return RC_ERR("ZMQ Error: Bad server or port name: " + str(e))
+
+ self.socket.setsockopt(zmq.SNDTIMEO, 10000)
+ self.socket.setsockopt(zmq.RCVTIMEO, 10000)
+
+ self.connected = True
+
+ rc = self.invoke_rpc_method('ping', api_class = None)
+ if not rc:
+ self.connected = False
+ return rc
+
+ return RC_OK()
+
+
+    def reconnect(self):
+        # reconnect using the current server and port values
+        return self.connect()
+
+
+ def is_connected(self):
+ return self.connected
+
+ def __del__(self):
+ self.logger.log("Shutting down RPC client\n")
+ if hasattr(self, "context"):
+ self.context.destroy(linger=0)
+
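+# A minimal connection sketch (editor's illustration; 'client' stands for the
+# owning client object that provides api_h and a logger, and the port number
+# is hypothetical):
+#
+#   rpc = JsonRpcClient('localhost', 4501, client)
+#   if rpc.connect():
+#       print(rpc.invoke_rpc_method('ping', api_class = None))
+#   rpc.disconnect()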
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_interface.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_interface.py
new file mode 100644
index 00000000..b6e7c026
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_interface.py
@@ -0,0 +1,43 @@
+
+# base object class for a packet builder
+class CTrexPktBuilderInterface(object):
+
+ def compile (self):
+ """
+ Compiles the packet and VM
+ """
+ raise Exception("implement me")
+
+
+ def dump_pkt(self):
+ """
+        Dumps the packet as a decimal array of bytes (each item is a value in the range 0-255)
+
+ :parameters:
+ None
+
+ :return:
+ + packet representation as array of bytes
+
+ :raises:
+ + :exc:`CTRexPktBuilder.EmptyPacketError`, in case packet is empty.
+
+ """
+
+ raise Exception("implement me")
+
+
+ def get_vm_data(self):
+ """
+ Dumps the instructions
+
+ :parameters:
+ None
+
+ :return:
+ + json object of instructions
+
+ """
+
+ raise Exception("implement me")
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py
new file mode 100755
index 00000000..dc06f9fb
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py
@@ -0,0 +1,1698 @@
+import random
+import string
+import struct
+import socket
+import json
+import yaml
+import binascii
+import base64
+import inspect
+import copy
+
+from .trex_stl_packet_builder_interface import CTrexPktBuilderInterface
+from .trex_stl_types import *
+from scapy.all import *
+
+class CTRexPacketBuildException(Exception):
+ """
+ This is the general Packet Building error exception class.
+ """
+ def __init__(self, code, message):
+ self.code = code
+ self.message = message
+
+ def __str__(self):
+ return self.__repr__()
+
+ def __repr__(self):
+ return u"[errcode:%r] %r" % (self.code, self.message)
+
+################################################################################################
+
+def safe_ord (c):
+ if type(c) is str:
+ return ord(c)
+ elif type(c) is int:
+ return c
+ else:
+ raise TypeError("Cannot convert: {0} of type: {1}".format(c, type(c)))
+
+def _buffer_to_num(str_buffer):
+ validate_type('str_buffer', str_buffer, bytes)
+ res=0
+ for i in str_buffer:
+ res = res << 8
+ res += safe_ord(i)
+ return res
+
+
+def ipv4_str_to_num (ipv4_buffer):
+ validate_type('ipv4_buffer', ipv4_buffer, bytes)
+ assert len(ipv4_buffer)==4, 'Size of ipv4_buffer is not 4'
+ return _buffer_to_num(ipv4_buffer)
+
+def mac_str_to_num (mac_buffer):
+ validate_type('mac_buffer', mac_buffer, bytes)
+ assert len(mac_buffer)==6, 'Size of mac_buffer is not 6'
+ return _buffer_to_num(mac_buffer)
+
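+# Worked examples (editor's sketch): the helpers above fold big-endian byte
+# buffers into integers.
+def _example_buffer_conversions():
+    assert ipv4_str_to_num(b'\x10\x00\x00\x01') == 0x10000001          # 16.0.0.1
+    assert mac_str_to_num(b'\x00\x00\x00\x02\x00\x00') == 0x00020000
+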
+
+def is_valid_ipv4(ip_addr):
+ """
+ Return buffer in network order
+ """
+ if type(ip_addr) == bytes and len(ip_addr) == 4:
+ return ip_addr
+
+ if type(ip_addr)== int:
+ ip_addr = socket.inet_ntoa(struct.pack("!I", ip_addr))
+
+ try:
+ return socket.inet_pton(socket.AF_INET, ip_addr)
+ except AttributeError: # no inet_pton here, sorry
+ return socket.inet_aton(ip_addr)
+ except socket.error: # not a valid address
+ raise CTRexPacketBuildException(-10,"Not valid ipv4 format");
+
+
+def is_valid_ipv6(ipv6_addr):
+ """
+ Return buffer in network order
+ """
+ if type(ipv6_addr) == bytes and len(ipv6_addr) == 16:
+ return ipv6_addr
+ try:
+ return socket.inet_pton(socket.AF_INET6, ipv6_addr)
+ except AttributeError: # no inet_pton here, sorry
+ raise CTRexPacketBuildException(-10, 'No inet_pton function available')
+ except:
+ raise CTRexPacketBuildException(-10, 'Not valid ipv6 format')
+
+class CTRexScriptsBase(object):
+ """
+ VM Script base class
+ """
+ def clone (self):
+ return copy.deepcopy(self)
+
+
+class CTRexScFieldRangeBase(CTRexScriptsBase):
+
+ FILED_TYPES = ['inc', 'dec', 'rand']
+
+ def __init__(self, field_name,
+ field_type
+ ):
+ super(CTRexScFieldRangeBase, self).__init__()
+ self.field_name =field_name
+ self.field_type =field_type
+ if not self.field_type in CTRexScFieldRangeBase.FILED_TYPES :
+            raise CTRexPacketBuildException(-12, 'Field type should be in %s' % self.FILED_TYPES);
+
+
+class CTRexScFieldRangeValue(CTRexScFieldRangeBase):
+ """
+ Range of field values
+ """
+ def __init__(self, field_name,
+ field_type,
+ min_value,
+ max_value
+ ):
+ super(CTRexScFieldRangeValue, self).__init__(field_name,field_type)
+ self.min_value =min_value;
+ self.max_value =max_value;
+ if min_value > max_value:
+ raise CTRexPacketBuildException(-12, 'Invalid range: min is greater than max.');
+ if min_value == max_value:
+ raise CTRexPacketBuildException(-13, "Invalid range: min value is equal to max value.");
+
+
+class CTRexScIpv4SimpleRange(CTRexScFieldRangeBase):
+ """
+ Range of ipv4 ip
+ """
+ def __init__(self, field_name, field_type, min_ip, max_ip):
+ super(CTRexScIpv4SimpleRange, self).__init__(field_name,field_type)
+ self.min_ip = min_ip
+ self.max_ip = max_ip
+ mmin=ipv4_str_to_num (is_valid_ipv4(min_ip))
+ mmax=ipv4_str_to_num (is_valid_ipv4(max_ip))
+ if mmin > mmax :
+ raise CTRexPacketBuildException(-11, 'CTRexScIpv4SimpleRange m_min ip is bigger than max');
+
+
+class CTRexScIpv4TupleGen(CTRexScriptsBase):
+ """
+ Range tuple
+ """
+ FLAGS_ULIMIT_FLOWS =1
+
+ def __init__(self, min_ipv4, max_ipv4, num_flows=100000, min_port=1025, max_port=65535, flags=0):
+ super(CTRexScIpv4TupleGen, self).__init__()
+ self.min_ip = min_ipv4
+ self.max_ip = max_ipv4
+ mmin=ipv4_str_to_num (is_valid_ipv4(min_ipv4))
+ mmax=ipv4_str_to_num (is_valid_ipv4(max_ipv4))
+ if mmin > mmax :
+ raise CTRexPacketBuildException(-11, 'CTRexScIpv4SimpleRange m_min ip is bigger than max');
+
+ self.num_flows=num_flows;
+
+ self.min_port =min_port
+ self.max_port =max_port
+ self.flags = flags
+
+
+class CTRexScTrimPacketSize(CTRexScriptsBase):
+ """
+ Trim packet size. Field type is CTRexScFieldRangeBase.FILED_TYPES = ["inc","dec","rand"]
+ """
+ def __init__(self,field_type="rand",min_pkt_size=None, max_pkt_size=None):
+ super(CTRexScTrimPacketSize, self).__init__()
+ self.field_type = field_type
+ self.min_pkt_size = min_pkt_size
+ self.max_pkt_size = max_pkt_size
+        if max_pkt_size is not None and min_pkt_size is not None:
+            if min_pkt_size == max_pkt_size:
+                raise CTRexPacketBuildException(-11, 'CTRexScTrimPacketSize min_pkt_size is the same as max_pkt_size');
+
+            if min_pkt_size > max_pkt_size:
+                raise CTRexPacketBuildException(-11, 'CTRexScTrimPacketSize min_pkt_size is bigger than max_pkt_size');
+
+
+class STLScVmRaw(CTRexScriptsBase):
+ """
+ Raw instructions
+ """
+ def __init__(self,list_of_commands=None,split_by_field=None,cache_size=None):
+ """
+ Include a list of a basic instructions objects.
+
+ :parameters:
+ list_of_commands : list
+ list of instructions
+
+ split_by_field : string
+ by which field to split to threads
+
+ cache_size : uint16_t
+                If bigger than zero, field-engine results will be cached; this speeds up the program at the cost of limiting the number of distinct packets to the cache size. The cache size is limited by the pool size.
+
+ The following example splits the generated traffic by "ip_src" variable.
+
+ .. code-block:: python
+
+ # Split by
+
+ # TCP SYN
+ base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S")
+
+
+ # vm
+ vm = STLScVmRaw( [ STLVmFlowVar(name="ip_src",
+ min_value="16.0.0.0",
+ max_value="16.0.0.254",
+ size=4, op="inc"),
+
+
+ STLVmWrFlowVar(fv_name="ip_src", pkt_offset= "IP.src" ),
+
+ STLVmFixIpv4(offset = "IP"), # fix checksum
+ ]
+ ,split_by_field = "ip_src",
+ cache_size = 1000
+ )
+
+ """
+
+ super(STLScVmRaw, self).__init__()
+ self.split_by_field = split_by_field
+ self.cache_size = cache_size
+
+        if list_of_commands is None:
+ self.commands =[]
+ else:
+ self.commands = list_of_commands
+
+ def add_cmd (self,cmd):
+ self.commands.append(cmd)
+
+
+
+################################################################################################
+# VM raw instructions
+################################################################################################
+
+class CTRexVmInsBase(object):
+ """
+ Instruction base
+ """
+ def __init__(self, ins_type):
+ self.type = ins_type
+ validate_type('ins_type', ins_type, str)
+
+class CTRexVmInsFixIpv4(CTRexVmInsBase):
+ def __init__(self, offset):
+ super(CTRexVmInsFixIpv4, self).__init__("fix_checksum_ipv4")
+ self.pkt_offset = offset
+ validate_type('offset', offset, int)
+
+class CTRexVmInsFixHwCs(CTRexVmInsBase):
+ L4_TYPE_UDP = 11
+ L4_TYPE_TCP = 13
+
+ def __init__(self, l2_len,l3_len,l4_type):
+ super(CTRexVmInsFixHwCs, self).__init__("fix_checksum_hw")
+ self.l2_len = l2_len
+ validate_type('l2_len', l2_len, int)
+ self.l3_len = l3_len
+ validate_type('l3_len', l3_len, int)
+ self.l4_type = l4_type
+ validate_type('l4_type', l4_type, int)
+
+
+
+class CTRexVmInsFlowVar(CTRexVmInsBase):
+ #TBD add more validation tests
+
+ OPERATIONS =['inc', 'dec', 'random']
+ VALID_SIZES =[1, 2, 4, 8]
+
+ def __init__(self, fv_name, size, op, init_value, min_value, max_value,step):
+ super(CTRexVmInsFlowVar, self).__init__("flow_var")
+ self.name = fv_name;
+ validate_type('fv_name', fv_name, str)
+ self.size = size
+ self.op = op
+ self.init_value = init_value
+ validate_type('init_value', init_value, int)
+ assert init_value >= 0, 'init_value (%s) is negative' % init_value
+ self.min_value=min_value
+ validate_type('min_value', min_value, int)
+ assert min_value >= 0, 'min_value (%s) is negative' % min_value
+ self.max_value=max_value
+ validate_type('max_value', max_value, int)
+ assert max_value >= 0, 'max_value (%s) is negative' % max_value
+ self.step=step
+ validate_type('step', step, int)
+ assert step >= 0, 'step (%s) is negative' % step
+
+class CTRexVmInsFlowVarRandLimit(CTRexVmInsBase):
+ #TBD add more validation tests
+
+ VALID_SIZES =[1, 2, 4, 8]
+
+ def __init__(self, fv_name, size, limit, seed, min_value, max_value):
+ super(CTRexVmInsFlowVarRandLimit, self).__init__("flow_var_rand_limit")
+ self.name = fv_name;
+ validate_type('fv_name', fv_name, str)
+ self.size = size
+ self.limit=limit
+ validate_type('limit', limit, int)
+ assert limit >= 0, 'limit (%s) is negative' % limit
+ self.seed=seed
+ validate_type('seed', seed, int)
+ self.min_value=min_value
+ validate_type('min_value', min_value, int)
+ assert min_value >= 0, 'min_value (%s) is negative' % min_value
+ self.max_value=max_value
+ validate_type('max_value', max_value, int)
+ assert max_value >= 0, 'max_value (%s) is negative' % max_value
+
+
+class CTRexVmInsWrFlowVar(CTRexVmInsBase):
+ def __init__(self, fv_name, pkt_offset, add_value=0, is_big_endian=True):
+ super(CTRexVmInsWrFlowVar, self).__init__("write_flow_var")
+ self.name = fv_name
+ validate_type('fv_name', fv_name, str)
+ self.pkt_offset = pkt_offset
+ validate_type('pkt_offset', pkt_offset, int)
+ self.add_value = add_value
+ validate_type('add_value', add_value, int)
+ self.is_big_endian = is_big_endian
+ validate_type('is_big_endian', is_big_endian, bool)
+
+class CTRexVmInsWrMaskFlowVar(CTRexVmInsBase):
+ def __init__(self, fv_name, pkt_offset,pkt_cast_size,mask,shift,add_value, is_big_endian=True):
+ super(CTRexVmInsWrMaskFlowVar, self).__init__("write_mask_flow_var")
+ self.name = fv_name
+ validate_type('fv_name', fv_name, str)
+ self.pkt_offset = pkt_offset
+ validate_type('pkt_offset', pkt_offset, int)
+ self.pkt_cast_size = pkt_cast_size
+ validate_type('pkt_cast_size', pkt_cast_size, int)
+ self.mask = mask
+ validate_type('mask', mask, int)
+ self.shift = shift
+ validate_type('shift', shift, int)
+ self.add_value =add_value
+ validate_type('add_value', add_value, int)
+ self.is_big_endian = is_big_endian
+ validate_type('is_big_endian', is_big_endian, bool)
+
+class CTRexVmInsTrimPktSize(CTRexVmInsBase):
+ def __init__(self,fv_name):
+ super(CTRexVmInsTrimPktSize, self).__init__("trim_pkt_size")
+ self.name = fv_name
+ validate_type('fv_name', fv_name, str)
+
+class CTRexVmInsTupleGen(CTRexVmInsBase):
+ def __init__(self, fv_name, ip_min, ip_max, port_min, port_max, limit_flows, flags=0):
+ super(CTRexVmInsTupleGen, self).__init__("tuple_flow_var")
+ self.name =fv_name
+ validate_type('fv_name', fv_name, str)
+ self.ip_min = ip_min;
+ self.ip_max = ip_max;
+ self.port_min = port_min;
+ self.port_max = port_max;
+ self.limit_flows = limit_flows;
+ self.flags =flags;
+
+
+################################################################################################
+#
+class CTRexVmEngine(object):
+
+ def __init__(self):
+ """
+        Include a list of instructions.
+ """
+ super(CTRexVmEngine, self).__init__()
+ self.ins=[]
+ self.split_by_var = ''
+ self.cache_size = 0
+
+
+ # return as json
+ def get_json (self):
+ inst_array = [];
+ # dump it as dict
+ for obj in self.ins:
+ inst_array.append(obj.__dict__);
+
+ d={'instructions': inst_array, 'split_by_var': self.split_by_var};
+ if self.cache_size >0 :
+ d['cache']=self.cache_size
+ return d
+
+ def add_ins (self,ins):
+ #assert issubclass(ins, CTRexVmInsBase)
+ self.ins.append(ins);
+
+ def dump (self):
+ cnt=0;
+ for obj in self.ins:
+ print("ins",cnt)
+ cnt = cnt +1
+ print(obj.__dict__)
+
+ def dump_bjson (self):
+ print(json.dumps(self.get_json(), sort_keys=True, indent=4))
+
+ def dump_as_yaml (self):
+ print(yaml.dump(self.get_json(), default_flow_style=False))
+
+
+
+################################################################################################
+
+class CTRexScapyPktUtl(object):
+
+ def __init__(self, scapy_pkt):
+ self.pkt = scapy_pkt
+
+ def pkt_iter (self):
+ p=self.pkt;
+ while True:
+ yield p
+ p=p.payload
+            if p is None or isinstance(p, NoPayload):
+ break;
+
+ def get_list_iter(self):
+ l=list(self.pkt_iter())
+ return l
+
+
+ def get_pkt_layers(self):
+ """
+ Return string 'IP:UDP:TCP'
+ """
+ l=self.get_list_iter ();
+ l1=map(lambda p: p.name,l );
+ return ":".join(l1);
+
+ def _layer_offset(self, name, cnt = 0):
+ """
+        Return (layer, offset) of the cnt-th layer with the given name. Example: ('IP', 1) returns the second IP layer and its offset.
+ """
+ save_cnt=cnt
+ for pkt in self.pkt_iter ():
+ if pkt.name == name:
+ if cnt==0:
+ return (pkt, pkt.offset)
+ else:
+ cnt=cnt -1
+
+ raise CTRexPacketBuildException(-11,("no layer %s-%d" % (name, save_cnt)));
+
+
+ def layer_offset(self, name, cnt = 0):
+ """
+        Return the offset of the cnt-th layer with the given name. Example: ('IP', 1) returns the offset of the second IP layer.
+ """
+ save_cnt=cnt
+ for pkt in self.pkt_iter ():
+ if pkt.name == name:
+ if cnt==0:
+ return pkt.offset
+ else:
+ cnt=cnt -1
+
+ raise CTRexPacketBuildException(-11,("no layer %s-%d" % (name, save_cnt)));
+
+ def get_field_offet(self, layer, layer_cnt, field_name):
+ """
+        Return (offset, size) of a field. Example: ('IP', 1, 'src') returns the offset and size of the src field of the second IP layer.
+ """
+ t=self._layer_offset(layer,layer_cnt);
+ l_offset=t[1];
+ layer_pkt=t[0]
+
+ #layer_pkt.dump_fields_offsets ()
+
+ for f in layer_pkt.fields_desc:
+ if f.name == field_name:
+ return (l_offset+f.offset,f.get_size_bytes ());
+
+ raise CTRexPacketBuildException(-11, "No layer %s-%d." % (name, save_cnt, field_name));
+
+ def get_layer_offet_by_str(self, layer_des):
+ """
+ Return layer offset by string.
+
+ :parameters:
+
+ IP:0
+ IP:1
+ return offset
+
+
+ """
+ l1=layer_des.split(":")
+ layer=""
+ layer_cnt=0;
+
+ if len(l1)==1:
+ layer=l1[0];
+ else:
+ layer=l1[0];
+ layer_cnt=int(l1[1]);
+
+ return self.layer_offset(layer, layer_cnt)
+
+
+
+ def get_field_offet_by_str(self, field_des):
+ """
+ Return field_des (offset,size) layer:cnt.field
+ Example:
+ 802|1Q.vlan get 802.1Q->valn replace | with .
+ IP.src
+ IP:0.src (first IP.src like IP.src)
+ Example: IP:1.src for internal IP
+
+ Return (offset, size) as tuple.
+
+
+ """
+
+ s=field_des.split(".");
+ if len(s)!=2:
+ raise CTRexPacketBuildException(-11, ("Field desription should be layer:cnt.field Example: IP.src or IP:1.src"));
+
+
+ layer_ex = s[0].replace("|",".")
+ field = s[1]
+
+ l1=layer_ex.split(":")
+ layer=""
+ layer_cnt=0;
+
+ if len(l1)==1:
+ layer=l1[0];
+ else:
+ layer=l1[0];
+ layer_cnt=int(l1[1]);
+
+ return self.get_field_offet(layer,layer_cnt,field)
+
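+    # Illustrative descriptors (editor's note): 'IP.src' is the src field of
+    # the first IP layer (same as 'IP:0.src'), 'IP:1.src' addresses the second
+    # IP layer, and '802|1Q.vlan' the vlan field of the first 802.1Q header.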
+    def has_IPv4 (self):
+        return self.pkt.haslayer("IP");
+
+    def has_IPv6 (self):
+        return self.pkt.haslayer("IPv6");
+
+    def has_UDP (self):
+        return self.pkt.haslayer("UDP");
+
+################################################################################################
+
+class CTRexVmDescBase(object):
+ """
+ Instruction base
+ """
+ def __init__(self):
+ pass;
+
+ def get_obj(self):
+ return self;
+
+ def get_json(self):
+ return self.get_obj().__dict__
+
+ def dump_bjson(self):
+ print(json.dumps(self.get_json(), sort_keys=True, indent=4))
+
+ def dump_as_yaml(self):
+ print(yaml.dump(self.get_json(), default_flow_style=False))
+
+
+    def get_var_ref (self):
+        '''
+        Virtual function; returns a referenced variable name, if any.
+        '''
+        return None
+
+    def get_var_name(self):
+        '''
+        Virtual function; returns the variable name if it exists.
+        '''
+        return None
+
+    def compile(self,parent):
+        '''
+        Virtual function; receives a parent object that provides name_to_offset.
+        '''
+        pass;
+
+
+def valid_fv_size (size):
+ if not (size in CTRexVmInsFlowVar.VALID_SIZES):
+ raise CTRexPacketBuildException(-11,("Flow var has invalid size %d ") % size );
+
+def valid_fv_ops (op):
+ if not (op in CTRexVmInsFlowVar.OPERATIONS):
+ raise CTRexPacketBuildException(-11,("Flow var has invalid op %s ") % op );
+
+def get_max_by_size (size):
+ d={
+ 1:((1<<8) -1),
+ 2:((1<<16)-1),
+ 4:((1<<32)-1),
+ 8:0xffffffffffffffff
+ };
+ return d[size]
+
+def convert_val (val):
+ if is_integer(val):
+ return val
+ if type(val) == str:
+ return ipv4_str_to_num (is_valid_ipv4(val))
+ raise CTRexPacketBuildException(-11,("init val invalid %s ") % val );
+
+def check_for_int (val):
+ validate_type('val', val, int)
+
+
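+# Worked example (editor's sketch): convert_val() accepts plain integers or
+# dotted-quad strings, so flow-variable bounds may be given as IPv4 addresses.
+def _example_convert_val():
+    assert convert_val(5) == 5
+    assert convert_val('16.0.0.1') == 0x10000001
+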
+class STLVmFlowVar(CTRexVmDescBase):
+
+ def __init__(self, name, init_value=None, min_value=0, max_value=255, size=4, step=1,op="inc"):
+ """
+ Flow variable instruction. Allocates a variable on a stream context. The size argument determines the variable size.
+ The operation can be inc, dec, and random.
+        For increment and decrement operations, the "step" size can be set.
+        For all operations, an initialization value and minimum and maximum values can be set.
+
+ :parameters:
+ name : string
+ Name of the stream variable
+
+ init_value : int
+ Init value of the variable. If not specified, it will be min_value
+
+ min_value : int
+ Min value
+
+ max_value : int
+ Max value
+
+ size : int
+ Number of bytes of the variable. Possible values: 1,2,4,8 for uint8_t, uint16_t, uint32_t, uint64_t
+
+ step : int
+ Step in case of "inc" or "dec" operations
+
+ op : string
+ Possible values: "inc", "dec", "random"
+
+ .. code-block:: python
+
+ # Example1
+
+ # input
+ STLVmFlowVar(min_value=0, max_value=3, size=1,op="inc")
+
+ # output 0,1,2,3,0,1,2,3 ..
+
+ # input
+ STLVmFlowVar(min_value=0, max_value=3, size=1,op="dec")
+
+ # output 0,3,2,1,0,3,2,1 ..
+
+
+ # input
+ STLVmFlowVar(min_value=0, max_value=3, size=1,op="random")
+
+ # output 1,1,2,3,1,2,1,0 ..
+
+ # input
+ STLVmFlowVar(min_value=0, max_value=10, size=1,op="inc",step=3)
+
+ # output 0,3,6,9,0,3,6,9,0..
+
+
+ """
+ super(STLVmFlowVar, self).__init__()
+ self.name = name;
+ validate_type('name', name, str)
+ self.size =size
+ valid_fv_size(size)
+ self.op =op
+ valid_fv_ops (op)
+
+ # choose default value for init val
+ if init_value == None:
+ init_value = max_value if op == "dec" else min_value
+
+ self.init_value = convert_val (init_value)
+ self.min_value = convert_val (min_value);
+ self.max_value = convert_val (max_value)
+ self.step = convert_val (step)
+
+ if self.min_value > self.max_value :
+ raise CTRexPacketBuildException(-11,("max %d is lower than min %d ") % (self.max_value,self.min_value) );
+
+ def get_obj (self):
+ return CTRexVmInsFlowVar(self.name,self.size,self.op,self.init_value,self.min_value,self.max_value,self.step);
+
+ def get_var_name(self):
+ return [self.name]
+
+class STLVmFlowVarRepetableRandom(CTRexVmDescBase):
+
+ def __init__(self, name, size=4, limit=100, seed=None, min_value=0, max_value=None):
+ """
+        Flow variable instruction for repeatable random values with a limit on the number of generated numbers. Allocates memory on a stream context.
+        The size argument determines the variable size and can be 1, 2, 4 or 8.
+
+ :parameters:
+ name : string
+ Name of the stream variable
+
+ size : int
+ Number of bytes of the variable. Possible values: 1,2,4,8 for uint8_t, uint16_t, uint32_t, uint64_t
+
+ limit : int
+                The number of distinct repeatable random numbers
+
+ seed : int
+ For deterministic result, you can set this to a uint16_t number
+
+ min_value : int
+ Min value
+
+ max_value : int
+ Max value
+
+
+ .. code-block:: python
+
+ # Example1
+
+            # input: 1-byte random values with a limit of 5
+ STLVmFlowVarRepetableRandom("var1",size=1,limit=5)
+
+ # output 255,1,7,129,8, ==> repeat 255,1,7,129,8
+
+ STLVmFlowVarRepetableRandom("var1",size=4,limit=100,min_value=0x12345678, max_value=0x32345678)
+
+
+ """
+ super(STLVmFlowVarRepetableRandom, self).__init__()
+ self.name = name;
+ validate_type('name', name, str)
+ self.size =size
+ valid_fv_size(size)
+ self.limit =limit
+
+ if seed == None:
+ self.seed = random.randint(1, 32000)
+ else:
+ self.seed = seed
+
+ self.min_value = convert_val (min_value);
+
+ if max_value == None :
+ self.max_value = get_max_by_size (self.size)
+ else:
+ self.max_value = convert_val (max_value)
+
+ if self.min_value > self.max_value :
+ raise CTRexPacketBuildException(-11,("max %d is lower than min %d ") % (self.max_value,self.min_value) );
+
+ def get_obj (self):
+ return CTRexVmInsFlowVarRandLimit(self.name, self.size, self.limit, self.seed, self.min_value, self.max_value);
+
+ def get_var_name(self):
+ return [self.name]
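+
+    # A minimal pairing sketch (illustrative, not part of the original source):
+    # write the repeatable-random variable into the IPv4 source address and fix
+    # the checksum afterwards. The 4-byte variable matches the 4-byte field.
+    #
+    #   vm = STLScVmRaw([ STLVmFlowVarRepetableRandom("var1", size = 4, limit = 5),
+    #                     STLVmWrFlowVar(fv_name = "var1", pkt_offset = "IP.src"),
+    #                     STLVmFixIpv4(offset = "IP") ])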
+
+class STLVmFixChecksumHw(CTRexVmDescBase):
+ def __init__(self, l3_offset,l4_offset,l4_type):
+ """
+        Fix IPv4 header checksum and TCP/UDP checksum using hardware assist.
+        Use this if the packet header or data payload has changed, as it is then necessary to fix the checksums.
+        This instruction works on NICs that support this hardware offload.
+
+ For fixing only IPv4 header checksum use STLVmFixIpv4. This instruction should be used if both L4 and L3 need to be fixed.
+
+        Examples of supported packets::
+
+            Ether()/(IPv4|IPv6)/(UDP|TCP)
+            SomeTunnel()/(IPv4|IPv6)/(UDP|TCP)
+
+
+ :parameters:
+ l3_offset : offset in bytes
+ **IPv4/IPv6 header** offset from packet start. It is **not** the offset of the checksum field itself.
+                Can be a string in case of a Scapy packet. Format: IP[:[id]]
+
+ l4_offset : offset in bytes to UDP/TCP header
+
+ l4_type : CTRexVmInsFixHwCs.L4_TYPE_UDP or CTRexVmInsFixHwCs.L4_TYPE_TCP
+
+        See the full example in stl/syn_attack_fix_cs_hw.py
+
+ .. code-block:: python
+
+ # Example2
+
+ pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+
+ # by offset
+ STLVmFixChecksumHw(l3_offset=14,l4_offset=14+20,l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP)
+
+ # in case of scapy packet can be defined by header name
+ STLVmFixChecksumHw(l3_offset="IP",l4_offset="UDP",l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP)
+
+            # string for the second "IP" header in the packet is IP:1
+            STLVmFixChecksumHw(l3_offset="IP:1",l4_offset="UDP",l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP)
+
+ """
+
+ super(STLVmFixChecksumHw, self).__init__()
+ self.l3_offset = l3_offset; # could be a name of offset
+ self.l4_offset = l4_offset; # could be a name of offset
+ self.l4_type = l4_type
+
+
+ def get_obj (self):
+ return CTRexVmInsFixHwCs(self.l2_len,self.l3_len,self.l4_type);
+
+    def compile(self,parent):
+        # resolve layer names (e.g. "IP", "UDP") into byte offsets
+        if type(self.l3_offset) == str:
+            self.l2_len = parent._pkt_layer_offset(self.l3_offset)
+        else:
+            self.l2_len = self.l3_offset
+        if type(self.l4_offset) == str:
+            self.l4_offset = parent._pkt_layer_offset(self.l4_offset)
+
+        assert self.l4_offset >= self.l2_len + 8, 'l4_offset must be at least 8 bytes after l3_offset'
+        self.l3_len = self.l4_offset - self.l2_len
+
+
+class STLVmFixIpv4(CTRexVmDescBase):
+ def __init__(self, offset):
+ """
+ Fix IPv4 header checksum. Use this if the packet header has changed and it is necessary to change the checksum.
+
+ :parameters:
+ offset : uint16_t or string
+ **IPv4 header** offset from packet start. It is **not** the offset of the checksum field itself.
+                Can be a string in case of a Scapy packet. Format: IP[:[id]]
+
+ .. code-block:: python
+
+ # Example2
+
+ pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+
+ # by offset
+ STLVmFixIpv4(offset=14)
+
+ # in case of scapy packet can be defined by header name
+ STLVmFixIpv4(offset="IP")
+
+ # string for second "IP" header in the packet is IP:1
+ STLVmFixIpv4(offset="IP:1")
+
+ """
+
+ super(STLVmFixIpv4, self).__init__()
+ self.offset = offset; # could be a name of offset
+
+ def get_obj (self):
+ return CTRexVmInsFixIpv4(self.offset);
+
+ def compile(self,parent):
+ if type(self.offset)==str:
+ self.offset = parent._pkt_layer_offset(self.offset);
+
+class STLVmWrFlowVar(CTRexVmDescBase):
+ def __init__(self, fv_name, pkt_offset, offset_fixup=0, add_val=0, is_big=True):
+ """
+ Write a stream variable into a packet field.
+ The write position is determined by the packet offset + offset fixup. The size of the write is determined by the stream variable.
+ Example: Offset 10, fixup 0, variable size 4. This function writes at 10, 11, 12, and 13.
+
+        For information about changing the write size, offset, or fixup, see the `STLVmWrMaskFlowVar` command.
+ The Field name/offset can be given by name in the following format: ``header[:id].field``.
+
+
+ :parameters:
+ fv_name : string
+ Stream variable to write to a packet offset.
+
+            pkt_offset : string or int
+ Name of the field or offset in bytes from packet start.
+
+ offset_fixup : int
+ Number of bytes to move forward. If negative, move backward.
+
+ add_val : int
+ Value to add to the stream variable before writing it to the packet field. Can be used as a constant offset.
+
+ is_big : bool
+                How to write the variable to the packet. True=big-endian, False=little-endian
+
+ .. code-block:: python
+
+ # Example3
+
+ pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+
+
+ # write to ip.src offset
+ STLVmWrFlowVar (fv_name="tuple", pkt_offset= "IP.src" )
+
+            # packet offset given explicitly as a number
+ STLVmWrFlowVar (fv_name="tuple", pkt_offset= 26 )
+
+ # add l3_len_fix before writing fv_rand into IP.len field
+ STLVmWrFlowVar(fv_name="fv_rand", pkt_offset= "IP.len", add_val=l3_len_fix)
+
+ """
+
+ super(STLVmWrFlowVar, self).__init__()
+ self.name =fv_name
+ validate_type('fv_name', fv_name, str)
+ self.offset_fixup =offset_fixup
+ validate_type('offset_fixup', offset_fixup, int)
+ self.pkt_offset =pkt_offset
+ self.add_val =add_val
+ validate_type('add_val', add_val, int)
+ self.is_big =is_big;
+ validate_type('is_big', is_big, bool)
+
+ def get_var_ref (self):
+ return self.name
+
+ def get_obj (self):
+ return CTRexVmInsWrFlowVar(self.name,self.pkt_offset+self.offset_fixup,self.add_val,self.is_big)
+
+ def compile(self,parent):
+ if type(self.pkt_offset)==str:
+ t=parent._name_to_offset(self.pkt_offset)
+ self.pkt_offset = t[0]
+
+class STLVmWrMaskFlowVar(CTRexVmDescBase):
+ def __init__(self, fv_name, pkt_offset, pkt_cast_size=1, mask=0xff, shift=0, add_value=0, offset_fixup=0, is_big=True):
+
+ """
+ Write a stream variable into a packet field with some operations.
+ Using this instruction, the variable size and the field can have different sizes.
+
+ Pseudocode of this code::
+
+ uint32_t val=(cast_to_size)rd_from_variable("name") # read flow-var
+ val+=m_add_value # add value
+
+ if (m_shift>0) { # shift
+ val=val<<m_shift
+ }else{
+ if (m_shift<0) {
+ val=val>>(-m_shift)
+ }
+ }
+
+ pkt_val=rd_from_pkt(pkt_offset) # RMW to the packet
+ pkt_val = (pkt_val & ~m_mask) | (val & m_mask)
+ wr_to_pkt(pkt_offset,pkt_val)
+
+
+ :parameters:
+ fv_name : string
+ The stream variable name to write to a packet field
+
+ pkt_cast_size : uint8_t
+ The size in bytes of the packet field
+
+
+            mask : uint32_t
+                The mask of the field. 1 means write the bit, 0 means don't care
+
+            shift : uint8_t
+                How many bits to shift
+
+            pkt_offset : string or int
+                Name of the field, or offset in bytes from packet start.
+
+            offset_fixup : int
+                Number of bytes to move forward. If negative, move backward.
+
+            add_value : int
+                Value to add to the stream variable before writing it to the packet field. Can be used as a constant offset.
+
+            is_big : bool
+                How to write the variable to the packet. True=big-endian, False=little-endian
+
+ Example 1 - Cast from uint16_t (var) to uint8_t (pkt)::
+
+
+ base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+
+ vm = STLScVmRaw( [ STLVmFlowVar(name="mac_src",
+ min_value=1,
+ max_value=30,
+ size=2,
+ op="dec",step=1),
+ STLVmWrMaskFlowVar(fv_name="mac_src",
+ pkt_offset= 11,
+ pkt_cast_size=1,
+ mask=0xff) # mask command ->write it as one byte
+ ]
+ )
+
+ pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+
+ Example 2 - Change MSB of uint16_t variable::
+
+
+ vm = STLScVmRaw( [ STLVmFlowVar(name="mac_src",
+ min_value=1,
+ max_value=30,
+ size=2, op="dec",step=1),
+ STLVmWrMaskFlowVar(fv_name="mac_src",
+ pkt_offset= 10,
+ pkt_cast_size=2,
+ mask=0xff00,
+                                                shift=8) # shift the var left by 8 (x256) and write only the MSB
+ ]
+ )
+
+
+
+ Example 3 - Every 2 packets, change the MAC (shift right)::
+
+ vm = STLScVmRaw( [ STLVmFlowVar(name="mac_src",
+ min_value=1,
+ max_value=30,
+ size=2, op="dec",step=1),
+ STLVmWrMaskFlowVar(fv_name="mac_src",
+ pkt_offset= 10,
+ pkt_cast_size=1,
+ mask=0x1,
+                                                shift=-1) # take var mac_src>>1 and write only the LSB; it changes once every two packets
+ ]
+ )
+
+
+ """
+
+ super(STLVmWrMaskFlowVar, self).__init__()
+ self.name =fv_name
+ validate_type('fv_name', fv_name, str)
+ self.offset_fixup =offset_fixup
+ validate_type('offset_fixup', offset_fixup, int)
+ self.pkt_offset =pkt_offset
+ self.pkt_cast_size =pkt_cast_size
+ validate_type('pkt_cast_size', pkt_cast_size, int)
+ if not (pkt_cast_size in [1,2,4]):
+ raise CTRexPacketBuildException(-10,"not valid cast size");
+
+ self.mask = mask
+ validate_type('mask', mask, int)
+ self.shift = shift
+ validate_type('shift', shift, int)
+ self.add_value = add_value
+ validate_type('add_value', add_value, int)
+
+ self.is_big =is_big;
+ validate_type('is_big', is_big, bool)
+
+ def get_var_ref (self):
+ return self.name
+
+ def get_obj (self):
+ return CTRexVmInsWrMaskFlowVar(self.name,self.pkt_offset+self.offset_fixup,self.pkt_cast_size,self.mask,self.shift,self.add_value,self.is_big)
+
+ def compile(self,parent):
+ if type(self.pkt_offset)==str:
+ t=parent._name_to_offset(self.pkt_offset)
+ self.pkt_offset = t[0]
+
+
+class STLVmTrimPktSize(CTRexVmDescBase):
+ """
+ Trim the packet size by the stream variable size. This instruction only changes the total packet size, and does not repair the fields to match the new size.
+
+
+ :parameters:
+ fv_name : string
+ Stream variable name. The value of this variable is the new total packet size.
+
+
+ For Example::
+
+ def create_stream (self):
+ # pkt
+ p_l2 = Ether();
+ p_l3 = IP(src="16.0.0.1",dst="48.0.0.1")
+ p_l4 = UDP(dport=12,sport=1025)
+ pyld_size = max(0, self.max_pkt_size_l3 - len(p_l3/p_l4));
+ base_pkt = p_l2/p_l3/p_l4/('\x55'*(pyld_size))
+
+ l3_len_fix =-(len(p_l2));
+ l4_len_fix =-(len(p_l2/p_l3));
+
+
+ # vm
+ vm = STLScVmRaw( [ STLVmFlowVar(name="fv_rand", min_value=64,
+ max_value=len(base_pkt),
+ size=2, op="inc"),
+
+ STLVmTrimPktSize("fv_rand"), # change total packet size <<<
+
+ STLVmWrFlowVar(fv_name="fv_rand",
+ pkt_offset= "IP.len",
+ add_val=l3_len_fix), # fix ip len
+
+ STLVmFixIpv4(offset = "IP"), # fix checksum
+
+ STLVmWrFlowVar(fv_name="fv_rand",
+ pkt_offset= "UDP.len",
+ add_val=l4_len_fix) # fix udp len
+ ]
+ )
+
+ pkt = STLPktBuilder(pkt = base_pkt,
+ vm = vm)
+
+ return STLStream(packet = pkt,
+ mode = STLTXCont())
+
+
+ """
+
+ def __init__(self,fv_name):
+ super(STLVmTrimPktSize, self).__init__()
+ self.name = fv_name
+ validate_type('fv_name', fv_name, str)
+
+ def get_var_ref (self):
+ return self.name
+
+ def get_obj (self):
+ return CTRexVmInsTrimPktSize(self.name)
+
+
+
+class STLVmTupleGen(CTRexVmDescBase):
+ def __init__(self,name, ip_min="0.0.0.1", ip_max="0.0.0.10", port_min=1025, port_max=65535, limit_flows=100000, flags=0):
+ """
+        Generate a struct with two variables: ``var_name.ip`` as uint32_t and ``var_name.port`` as uint16_t.
+        The variables are dependent: when the ip variable reaches its maximum, it wraps around and the port is incremented.
+
+ For:
+
+ * ip_min = 10.0.0.1
+ * ip_max = 10.0.0.5
+ * port_min = 1025
+ * port_max = 1028
+ * limit_flows = 10
+
+ The result:
+
+ +------------+------------+-----------+
+ | ip | port | flow_id |
+ +============+============+===========+
+ | 10.0.0.1 | 1025 | 1 |
+ +------------+------------+-----------+
+ | 10.0.0.2 | 1025 | 2 |
+ +------------+------------+-----------+
+ | 10.0.0.3 | 1025 | 3 |
+ +------------+------------+-----------+
+ | 10.0.0.4 | 1025 | 4 |
+ +------------+------------+-----------+
+ | 10.0.0.5 | 1025 | 5 |
+ +------------+------------+-----------+
+ | 10.0.0.1 | 1026 | 6 |
+ +------------+------------+-----------+
+ | 10.0.0.2 | 1026 | 7 |
+ +------------+------------+-----------+
+ | 10.0.0.3 | 1026 | 8 |
+ +------------+------------+-----------+
+ | 10.0.0.4 | 1026 | 9 |
+ +------------+------------+-----------+
+ | 10.0.0.5 | 1026 | 10 |
+ +------------+------------+-----------+
+ | 10.0.0.1 | 1025 | 1 |
+ +------------+------------+-----------+
+
+
+ :parameters:
+ name : string
+ Name of the stream struct.
+
+ ip_min : string or int
+ Min value of the ip value. Number or IPv4 format.
+
+ ip_max : string or int
+ Max value of the ip value. Number or IPv4 format.
+
+ port_min : int
+ Min value of port variable.
+
+ port_max : int
+ Max value of port variable.
+
+ limit_flows : int
+ Limit of number of flows.
+
+            flags : int
+                Flags for the tuple generator. Default is 0.
+
+ .. code-block:: python
+
+ # Example5
+
+            base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+
+            vm = STLScVmRaw( [ STLVmTupleGen ( ip_min="16.0.0.1", ip_max="16.0.0.2",
+                                               port_min=1025, port_max=65535,
+                                               name="tuple"), # define tuple gen
+
+                               STLVmWrFlowVar (fv_name="tuple.ip", pkt_offset= "IP.src" ), # write ip to packet IP.src
+                               STLVmFixIpv4(offset = "IP"),                               # fix checksum
+                               STLVmWrFlowVar (fv_name="tuple.port", pkt_offset= "UDP.sport" ) # write udp.port
+                             ]
+                           )
+
+            pkt = STLPktBuilder(pkt = base_pkt, vm = vm)
+
+
+ """
+
+ super(STLVmTupleGen, self).__init__()
+ self.name = name
+ validate_type('name', name, str)
+ self.ip_min = convert_val(ip_min);
+ self.ip_max = convert_val(ip_max);
+ self.port_min = port_min;
+ check_for_int (port_min)
+ self.port_max = port_max;
+ check_for_int(port_max)
+ self.limit_flows = limit_flows;
+ check_for_int(limit_flows)
+ self.flags =flags;
+ check_for_int(flags)
+
+ def get_var_name(self):
+ return [self.name+".ip",self.name+".port"]
+
+ def get_obj (self):
+ return CTRexVmInsTupleGen(self.name, self.ip_min, self.ip_max, self.port_min, self.port_max, self.limit_flows, self.flags);
+
+
+################################################################################################
+
+class STLPktBuilder(CTrexPktBuilderInterface):
+
+ def __init__(self, pkt = None, pkt_buffer = None, vm = None, path_relative_to_profile = False, build_raw = False, remove_fcs = True):
+ """
+
+ This class defines a method for building a template packet and Field Engine using the Scapy package.
+ Using this class the user can also define how TRex will handle the packet by specifying the Field engine settings.
+ The pkt can be a Scapy pkt or pcap file name.
+ If using a pcap file, and path_relative_to_profile is True, then the function loads the pcap file from a path relative to the profile.
+
+
+ .. code-block:: python
+
+ # Example6
+
+ # packet is scapy
+ STLPktBuilder( pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/(10*'x') )
+
+
+            # packet is taken from a pcap file, path relative to the current working directory
+ STLPktBuilder( pkt ="stl/yaml/udp_64B_no_crc.pcap")
+
+ # packet is taken from pcap file relative to profile file
+ STLPktBuilder( pkt ="stl/yaml/udp_64B_no_crc.pcap",
+ path_relative_to_profile = True )
+
+
+ vm = STLScVmRaw( [ STLVmTupleGen ( ip_min="16.0.0.1", ip_max="16.0.0.2",
+ port_min=1025, port_max=65535,
+ name="tuple"), # define tuple gen
+
+ STLVmWrFlowVar (fv_name="tuple.ip", pkt_offset= "IP.src" ), # write ip to packet IP.src
+ STLVmFixIpv4(offset = "IP"), # fix checksum
+ STLVmWrFlowVar (fv_name="tuple.port", pkt_offset= "UDP.sport" ) #write udp.port
+ ]
+ )
+
+ base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+ pad = max(0, size - len(base_pkt)) * 'x'
+
+ STLPktBuilder(pkt = base_pkt/pad, vm= vm)
+
+
+ :parameters:
+
+            pkt : string or Scapy object
+                Scapy packet object or pcap file name.
+
+ pkt_buffer : bytes
+ Packet as buffer.
+
+            vm : list or instance of :class:`trex_stl_lib.trex_stl_packet_builder_scapy.STLScVmRaw`
+                List of instructions to manipulate packet fields.
+
+ path_relative_to_profile : bool
+ If pkt is a pcap file, determines whether to load it relative to profile file.
+
+ build_raw : bool
+                If a buffer is specified (by pkt_buffer), determines whether to also build a Scapy packet from it. Useful when field offsets must be taken from Scapy by name.
+
+ remove_fcs : bool
+ If a buffer is specified (by pkt_buffer), determines whether to remove FCS.
+
+
+
+ """
+ super(STLPktBuilder, self).__init__()
+
+ validate_type('pkt', pkt, (type(None), str, Packet))
+ validate_type('pkt_buffer', pkt_buffer, (type(None), bytes))
+
+ self.pkt = None # as input
+ self.pkt_raw = None # from raw pcap file
+ self.vm_scripts = [] # list of high level instructions
+ self.vm_low_level = None
+ self.is_pkt_built = False
+ self.metadata=""
+ self.path_relative_to_profile = path_relative_to_profile
+ self.remove_fcs = remove_fcs
+ self.is_binary_source = pkt_buffer != None
+
+
+ if pkt != None and pkt_buffer != None:
+ raise CTRexPacketBuildException(-15, "Packet builder cannot be provided with both pkt and pkt_buffer.")
+
+ # process packet
+ if pkt != None:
+ self.set_packet(pkt)
+
+ elif pkt_buffer != None:
+ self.set_pkt_as_str(pkt_buffer)
+
+ # process VM
+ if vm != None:
+ if not isinstance(vm, (STLScVmRaw, list)):
+ raise CTRexPacketBuildException(-14, "Bad value for variable vm.")
+
+ self.add_command(vm if isinstance(vm, STLScVmRaw) else STLScVmRaw(vm))
+
+ # raw source build to see MAC presence/ fields offset by name in VM
+ if build_raw and self.pkt_raw and not self.pkt:
+ self.__lazy_build_packet()
+
+ # if we have packet and VM - compile now
+ if (self.pkt or self.pkt_raw) and (self.vm_scripts):
+ self.compile()
+
+
+ def dump_vm_data_as_yaml(self):
+ print(yaml.dump(self.get_vm_data(), default_flow_style=False))
+
+ def get_vm_data(self):
+ """
+ Dumps the instructions
+
+ :parameters:
+ None
+
+ :return:
+ + json object of instructions
+
+ :raises:
+ + :exc:`AssertionError`, in case VM is not compiled (is None).
+ """
+
+ assert self.vm_low_level is not None, 'vm_low_level is None, please use compile()'
+
+ return self.vm_low_level.get_json()
+
+ def dump_pkt(self, encode = True):
+ """
+        Dumps the packet as an array of bytes (each item is a value in the range 0-255)
+
+ :parameters:
+ encode : bool
+ Encode using base64. (disable for debug)
+
+ Default: **True**
+
+ :return:
+ + packet representation as array of bytes
+
+ :raises:
+ + :exc:`AssertionError`, in case packet is empty.
+
+ """
+ pkt_buf = self._get_pkt_as_str()
+ return {'binary': base64.b64encode(pkt_buf).decode() if encode else pkt_buf,
+ 'meta': self.metadata}
+
+
+ def dump_pkt_to_pcap(self, file_path):
+ wrpcap(file_path, self._get_pkt_as_str())
+
+ def add_command (self, script):
+ self.vm_scripts.append(script.clone());
+
+ def dump_scripts (self):
+ self.vm_low_level.dump_as_yaml()
+
+ def dump_as_hex (self):
+ pkt_buf = self._get_pkt_as_str()
+ print(hexdump(pkt_buf))
+
+ def pkt_layers_desc (self):
+ """
+        Return layer description in this format: IP:TCP:Payload
+
+ """
+ pkt_buf = self._get_pkt_as_str()
+ return self.pkt_layers_desc_from_buffer(pkt_buf)
+
+ @staticmethod
+ def pkt_layers_desc_from_buffer (pkt_buf):
+ scapy_pkt = Ether(pkt_buf);
+ pkt_utl = CTRexScapyPktUtl(scapy_pkt);
+ return pkt_utl.get_pkt_layers()
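+
+    # Illustratively (not from the original source): for bytes(Ether()/IP()/TCP())
+    # this returns a ':'-separated layer description, using layer names as reported
+    # by Scapy.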
+
+
+ def set_pkt_as_str (self, pkt_buffer):
+ validate_type('pkt_buffer', pkt_buffer, bytes)
+ self.pkt_raw = pkt_buffer
+
+
+ def set_pcap_file (self, pcap_file):
+ """
+ Load raw pcap file into a buffer. Loads only the first packet.
+
+ :parameters:
+ pcap_file : file_name
+
+ :raises:
+ + :exc:`AssertionError`, if packet is empty.
+
+ """
+ f_path = self._get_pcap_file_path (pcap_file)
+
+ p=RawPcapReader(f_path)
+ was_set = False
+
+ for pkt in p:
+ was_set=True;
+ self.pkt_raw = pkt[0]
+ break
+ if not was_set :
+ raise CTRexPacketBuildException(-14, "No buffer inside the pcap file {0}".format(f_path))
+
+ def to_pkt_dump(self):
+ p = self.pkt
+ if p and isinstance(p, Packet):
+ p.show2();
+ hexdump(p);
+ return;
+ p = self.pkt_raw;
+ if p:
+ scapy_pkt = Ether(p);
+ scapy_pkt.show2();
+ hexdump(p);
+
+
+ def set_packet (self, pkt):
+ """
+        Set the packet from a Scapy packet object, or load it from a pcap file name.
+
+ Example::
+
+ pkt =Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()/('x'*10)
+
+ """
+ if isinstance(pkt, Packet):
+ self.pkt = pkt;
+ else:
+ if isinstance(pkt, str):
+ self.set_pcap_file(pkt)
+ else:
+ raise CTRexPacketBuildException(-14, "bad packet" )
+
+ def is_default_src_mac (self):
+ if self.is_binary_source:
+ return True
+ p = self.pkt
+ if isinstance(p, Packet):
+ if isinstance(p,Ether):
+ if 'src' in p.fields :
+ return False
+ return True
+
+ def is_default_dst_mac (self):
+ if self.is_binary_source:
+ return True
+ p = self.pkt
+ if isinstance(p, Packet):
+ if isinstance(p,Ether):
+ if 'dst' in p.fields :
+ return False
+ return True
+
+ def compile (self):
+ if self.pkt == None and self.pkt_raw == None:
+ raise CTRexPacketBuildException(-14, "Packet is empty")
+
+
+ self.vm_low_level = CTRexVmEngine()
+
+ # compile the VM
+ for sc in self.vm_scripts:
+ if isinstance(sc, STLScVmRaw):
+ self._compile_raw(sc)
+
+ def get_pkt_len (self):
+ if self.pkt:
+ return len(self.pkt)
+ elif self.pkt_raw:
+ return len(self.pkt_raw)
+ else:
+ raise CTRexPacketBuildException(-14, "Packet is empty")
+
+ ####################################################
+ # private
+
+
+ def _get_pcap_file_path (self,pcap_file_name):
+ f_path = pcap_file_name
+ if os.path.isabs(pcap_file_name):
+ f_path = pcap_file_name
+ else:
+ if self.path_relative_to_profile:
+ p = self._get_path_relative_to_profile () # loader
+ if p :
+ f_path=os.path.abspath(os.path.join(os.path.dirname(p),pcap_file_name))
+
+ return f_path
+
+
+ def _get_path_relative_to_profile (self):
+ p = inspect.stack()
+ for obj in p:
+ if obj[3]=='get_streams':
+ return obj[1]
+ return None
+
+ def _compile_raw (self,obj):
+
+        # make sure each variable is defined only once
+        vars={};
+
+        # add each var to the dict
+ for desc in obj.commands:
+ var_names = desc.get_var_name()
+
+ if var_names :
+ for var_name in var_names:
+ if var_name in vars:
+ raise CTRexPacketBuildException(-11,("Variable %s defined twice ") % (var_name) );
+ else:
+ vars[var_name]=1
+
+        # check that all write targets exist
+ for desc in obj.commands:
+ var_name = desc.get_var_ref()
+ if var_name :
+ if not var_name in vars:
+ raise CTRexPacketBuildException(-11,("Variable %s does not exist ") % (var_name) );
+ desc.compile(self);
+
+ for desc in obj.commands:
+ self.vm_low_level.add_ins(desc.get_obj());
+
+ # set split_by_var
+ if obj.split_by_field :
+ validate_type('obj.split_by_field', obj.split_by_field, str)
+ self.vm_low_level.split_by_var = obj.split_by_field
+
+ #set cache size
+ if obj.cache_size :
+ validate_type('obj.cache_size', obj.cache_size, int)
+ self.vm_low_level.cache_size = obj.cache_size
+
+
+
+ # lazy packet build only on demand
+ def __lazy_build_packet (self):
+        # already built? bail out
+ if self.is_pkt_built:
+ return
+
+ # for buffer, promote to a scapy packet
+ if self.pkt_raw:
+ self.pkt = Ether(self.pkt_raw)
+ self.pkt_raw = None
+
+ # regular scapy packet
+ elif not self.pkt:
+ # should not reach here
+ raise CTRexPacketBuildException(-11, 'Empty packet')
+
+ if self.remove_fcs and self.pkt.lastlayer().name == 'Padding':
+ self.pkt.lastlayer().underlayer.remove_payload()
+
+ self.pkt.build()
+ self.is_pkt_built = True
+
+ def _pkt_layer_offset (self,layer_name):
+
+ self.__lazy_build_packet()
+
+ p_utl=CTRexScapyPktUtl(self.pkt);
+ return p_utl.get_layer_offet_by_str(layer_name)
+
+ def _name_to_offset(self,field_name):
+
+ self.__lazy_build_packet()
+
+ p_utl=CTRexScapyPktUtl(self.pkt);
+ return p_utl.get_field_offet_by_str(field_name)
+
+ def _get_pkt_as_str(self):
+
+ if self.pkt:
+ return bytes(self.pkt)
+
+ if self.pkt_raw:
+ return self.pkt_raw
+
+ raise CTRexPacketBuildException(-11, 'Empty packet');
+
+ def _add_tuple_gen(self,tuple_gen):
+
+ pass;
+
+
+def STLIPRange (src = None,
+ dst = None,
+ fix_chksum = True):
+
+ vm = []
+
+ if src:
+ vm += [
+ STLVmFlowVar(name="src", min_value = src['start'], max_value = src['end'], size = 4, op = "inc", step = src['step']),
+ STLVmWrFlowVar(fv_name="src",pkt_offset= "IP.src")
+ ]
+
+ if dst:
+ vm += [
+ STLVmFlowVar(name="dst", min_value = dst['start'], max_value = dst['end'], size = 4, op = "inc", step = dst['step']),
+ STLVmWrFlowVar(fv_name="dst",pkt_offset= "IP.dst")
+ ]
+
+ if fix_chksum:
+ vm.append( STLVmFixIpv4(offset = "IP"))
+
+
+ return vm
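+
+# A minimal usage sketch (illustrative, not part of the original source): the
+# returned instruction list can be passed directly as the 'vm' argument of
+# STLPktBuilder. The dict keys ('start', 'end', 'step') follow the access
+# pattern above.
+#
+#   base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+#   vm = STLIPRange(src = {'start': '16.0.0.1', 'end': '16.0.0.254', 'step': 1})
+#   pkt = STLPktBuilder(pkt = base_pkt, vm = vm)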
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py
new file mode 100644
index 00000000..cec3761f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py
@@ -0,0 +1,794 @@
+
+from collections import namedtuple, OrderedDict
+
+from .trex_stl_packet_builder_scapy import STLPktBuilder
+from .trex_stl_streams import STLStream
+from .trex_stl_types import *
+from . import trex_stl_stats
+from .utils.constants import FLOW_CTRL_DICT_REVERSED
+
+import base64
+import copy
+from datetime import datetime, timedelta
+
+StreamOnPort = namedtuple('StreamOnPort', ['compiled_stream', 'metadata'])
+
+########## utility ############
+def mult_to_factor (mult, max_bps_l2, max_pps, line_util):
+ if mult['type'] == 'raw':
+ return mult['value']
+
+ if mult['type'] == 'bps':
+ return mult['value'] / max_bps_l2
+
+ if mult['type'] == 'pps':
+ return mult['value'] / max_pps
+
+ if mult['type'] == 'percentage':
+ return mult['value'] / line_util
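+
+# Worked example (illustrative): for mult = {'type': 'pps', 'value': 1000.0} on a
+# profile whose max_pps is 4000.0, mult_to_factor returns 1000.0/4000.0 = 0.25.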
+
+
+# describes a single port
+class Port(object):
+ STATE_DOWN = 0
+ STATE_IDLE = 1
+ STATE_STREAMS = 2
+ STATE_TX = 3
+ STATE_PAUSE = 4
+ STATE_PCAP_TX = 5
+
+ MASK_ALL = ((1 << 64) - 1)
+
+ PortState = namedtuple('PortState', ['state_id', 'state_name'])
+ STATES_MAP = {STATE_DOWN: "DOWN",
+ STATE_IDLE: "IDLE",
+ STATE_STREAMS: "IDLE",
+ STATE_TX: "TRANSMITTING",
+ STATE_PAUSE: "PAUSE",
+ STATE_PCAP_TX : "TRANSMITTING"}
+
+
+ def __init__ (self, port_id, user, comm_link, session_id, info):
+ self.port_id = port_id
+ self.state = self.STATE_IDLE
+ self.handler = None
+ self.comm_link = comm_link
+ self.transmit = comm_link.transmit
+ self.transmit_batch = comm_link.transmit_batch
+ self.user = user
+
+ self.info = dict(info)
+
+ self.streams = {}
+ self.profile = None
+ self.session_id = session_id
+ self.attr = {}
+
+ self.port_stats = trex_stl_stats.CPortStats(self)
+
+ self.next_available_id = 1
+ self.tx_stopped_ts = None
+ self.has_rx_streams = False
+
+ self.owner = ''
+ self.last_factor_type = None
+
+ # decorator to verify port is up
+ def up(func):
+ def func_wrapper(*args):
+ port = args[0]
+
+ if not port.is_up():
+ return port.err("{0} - port is down".format(func.__name__))
+
+ return func(*args)
+
+ return func_wrapper
+
+ # owned
+ def owned(func):
+ def func_wrapper(*args):
+ port = args[0]
+
+ if not port.is_up():
+ return port.err("{0} - port is down".format(func.__name__))
+
+ if not port.is_acquired():
+ return port.err("{0} - port is not owned".format(func.__name__))
+
+ return func(*args)
+
+ return func_wrapper
+
+
+    # decorator to check the port is writeable (not down, acquired, etc.)
+ def writeable(func):
+ def func_wrapper(*args, **kwargs):
+ port = args[0]
+
+ if not port.is_up():
+ return port.err("{0} - port is down".format(func.__name__))
+
+ if not port.is_acquired():
+ return port.err("{0} - port is not owned".format(func.__name__))
+
+ if not port.is_writeable():
+ return port.err("{0} - port is not in a writeable state".format(func.__name__))
+
+ return func(*args, **kwargs)
+
+ return func_wrapper
+
+
+
+ def err(self, msg):
+ return RC_ERR("port {0} : {1}\n".format(self.port_id, msg))
+
+ def ok(self, data = ""):
+ return RC_OK(data)
+
+ def get_speed_bps (self):
+ return (self.info['speed'] * 1000 * 1000 * 1000)
+
+ def get_formatted_speed (self):
+ return "{0} Gbps".format(self.info['speed'])
+
+ def is_acquired(self):
+ return (self.handler != None)
+
+ def is_up (self):
+ return (self.state != self.STATE_DOWN)
+
+ def is_active(self):
+ return (self.state == self.STATE_TX ) or (self.state == self.STATE_PAUSE) or (self.state == self.STATE_PCAP_TX)
+
+ def is_transmitting (self):
+ return (self.state == self.STATE_TX) or (self.state == self.STATE_PCAP_TX)
+
+ def is_paused (self):
+ return (self.state == self.STATE_PAUSE)
+
+ def is_writeable (self):
+ # operations on port can be done on state idle or state streams
+ return ((self.state == self.STATE_IDLE) or (self.state == self.STATE_STREAMS))
+
+ def get_owner (self):
+ if self.is_acquired():
+ return self.user
+ else:
+ return self.owner
+
+ def __allocate_stream_id (self):
+ id = self.next_available_id
+ self.next_available_id += 1
+ return id
+
+
+ # take the port
+ @up
+ def acquire(self, force = False, sync_streams = True):
+ params = {"port_id": self.port_id,
+ "user": self.user,
+ "session_id": self.session_id,
+ "force": force}
+
+ rc = self.transmit("acquire", params)
+ if not rc:
+ return self.err(rc.err())
+
+ self.handler = rc.data()
+
+ if sync_streams:
+ return self.sync_streams()
+ else:
+ return self.ok()
+
+
+ # sync all the streams with the server
+ @up
+ def sync_streams (self):
+ params = {"port_id": self.port_id}
+
+ rc = self.transmit("get_all_streams", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ for k, v in rc.data()['streams'].items():
+ self.streams[k] = {'next_id': v['next_stream_id'],
+ 'pkt' : base64.b64decode(v['packet']['binary']),
+ 'mode' : v['mode']['type'],
+ 'rate' : STLStream.get_rate_from_field(v['mode']['rate'])}
+ return self.ok()
+
+ # release the port
+ @up
+ def release(self):
+ params = {"port_id": self.port_id,
+ "handler": self.handler}
+
+ rc = self.transmit("release", params)
+
+ if rc.good():
+
+ self.handler = None
+ self.owner = ''
+
+ return self.ok()
+ else:
+ return self.err(rc.err())
+
+
+
+ @up
+ def sync(self):
+
+ params = {"port_id": self.port_id}
+
+ rc = self.transmit("get_port_status", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ # sync the port
+ port_state = rc.data()['state']
+
+ if port_state == "DOWN":
+ self.state = self.STATE_DOWN
+ elif port_state == "IDLE":
+ self.state = self.STATE_IDLE
+ elif port_state == "STREAMS":
+ self.state = self.STATE_STREAMS
+ elif port_state == "TX":
+ self.state = self.STATE_TX
+ elif port_state == "PAUSE":
+ self.state = self.STATE_PAUSE
+ elif port_state == "PCAP_TX":
+ self.state = self.STATE_PCAP_TX
+ else:
+ raise Exception("port {0}: bad state received from server '{1}'".format(self.port_id, port_state))
+
+ self.owner = rc.data()['owner']
+
+ self.next_available_id = int(rc.data()['max_stream_id']) + 1
+
+ # attributes
+ self.attr = rc.data()['attr']
+ if 'speed' in rc.data():
+ self.info['speed'] = rc.data()['speed'] // 1000
+
+ return self.ok()
+
+
+
+ # add streams
+ @writeable
+ def add_streams (self, streams_list):
+
+ # listify
+ streams_list = streams_list if isinstance(streams_list, list) else [streams_list]
+
+ lookup = {}
+
+ # allocate IDs
+ for stream in streams_list:
+
+ # allocate stream id
+ stream_id = stream.get_id() if stream.get_id() is not None else self.__allocate_stream_id()
+ if stream_id in self.streams:
+ return self.err('Stream ID: {0} already exists'.format(stream_id))
+
+ # name
+ name = stream.get_name() if stream.get_name() is not None else id(stream)
+ if name in lookup:
+ return self.err("multiple streams with duplicate name: '{0}'".format(name))
+ lookup[name] = stream_id
+
+ batch = []
+ for stream in streams_list:
+
+ name = stream.get_name() if stream.get_name() is not None else id(stream)
+ stream_id = lookup[name]
+ next_id = -1
+
+ next = stream.get_next()
+ if next:
+ if not next in lookup:
+ return self.err("stream dependency error - unable to find '{0}'".format(next))
+ next_id = lookup[next]
+
+ stream_json = stream.to_json()
+ stream_json['next_stream_id'] = next_id
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id,
+ "stream_id": stream_id,
+ "stream": stream_json}
+
+ cmd = RpcCmdData('add_stream', params, 'core')
+ batch.append(cmd)
+
+
+ rc = self.transmit_batch(batch)
+
+ ret = RC()
+ for i, single_rc in enumerate(rc):
+ if single_rc.rc:
+ stream_id = batch[i].params['stream_id']
+ next_id = batch[i].params['stream']['next_stream_id']
+ self.streams[stream_id] = {'next_id' : next_id,
+ 'pkt' : streams_list[i].get_pkt(),
+ 'mode' : streams_list[i].get_mode(),
+ 'rate' : streams_list[i].get_rate(),
+ 'has_flow_stats' : streams_list[i].has_flow_stats()}
+
+ ret.add(RC_OK(data = stream_id))
+
+ self.has_rx_streams = self.has_rx_streams or streams_list[i].has_flow_stats()
+
+ else:
+ ret.add(RC(*single_rc))
+
+ self.state = self.STATE_STREAMS if (len(self.streams) > 0) else self.STATE_IDLE
+
+ return ret if ret else self.err(str(ret))
+
+
+
+ # remove stream from port
+ @writeable
+ def remove_streams (self, stream_id_list):
+
+ # single element to list
+ stream_id_list = stream_id_list if isinstance(stream_id_list, list) else [stream_id_list]
+
+        # verify existence
+        for stream_id in stream_id_list:
+            if stream_id not in self.streams:
+                return self.err("stream {0} does not exist".format(stream_id))
+
+ batch = []
+
+ for stream_id in stream_id_list:
+ params = {"handler": self.handler,
+ "port_id": self.port_id,
+ "stream_id": stream_id}
+
+ cmd = RpcCmdData('remove_stream', params, 'core')
+ batch.append(cmd)
+
+
+ rc = self.transmit_batch(batch)
+ for i, single_rc in enumerate(rc):
+ if single_rc:
+ id = batch[i].params['stream_id']
+ del self.streams[id]
+
+ self.state = self.STATE_STREAMS if (len(self.streams) > 0) else self.STATE_IDLE
+
+ # recheck if any RX stats streams present on the port
+ self.has_rx_streams = any([stream['has_flow_stats'] for stream in self.streams.values()])
+
+ return self.ok() if rc else self.err(rc.err())
+
+
+ # remove all the streams
+ @writeable
+ def remove_all_streams (self):
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id}
+
+ rc = self.transmit("remove_all_streams", params)
+ if not rc:
+ return self.err(rc.err())
+
+ self.streams = {}
+
+ self.state = self.STATE_IDLE
+ self.has_rx_streams = False
+
+ return self.ok()
+
+
+ # get a specific stream
+ def get_stream (self, stream_id):
+ if stream_id in self.streams:
+ return self.streams[stream_id]
+ else:
+ return None
+
+ def get_all_streams (self):
+ return self.streams
+
+
+ @writeable
+ def start (self, mul, duration, force, mask):
+
+ if self.state == self.STATE_IDLE:
+ return self.err("unable to start traffic - no streams attached to port")
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id,
+ "mul": mul,
+ "duration": duration,
+ "force": force,
+ "core_mask": mask if mask is not None else self.MASK_ALL}
+
+ # must set this before to avoid race with the async response
+ last_state = self.state
+ self.state = self.STATE_TX
+
+ rc = self.transmit("start_traffic", params)
+
+ if rc.bad():
+ self.state = last_state
+ return self.err(rc.err())
+
+ # save this for TUI
+ self.last_factor_type = mul['type']
+
+ return self.ok()
+
+
+ # stop traffic
+ # with force ignores the cached state and sends the command
+ @owned
+ def stop (self, force = False):
+
+        # if not active and not force - nothing to do
+ if not self.is_active() and not force:
+ return self.ok()
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id}
+
+ rc = self.transmit("stop_traffic", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ self.state = self.STATE_STREAMS
+ self.last_factor_type = None
+
+ # timestamp for last tx
+ self.tx_stopped_ts = datetime.now()
+
+ return self.ok()
+
+
+ # return True if port has any stream configured with RX stats
+ def has_rx_enabled (self):
+ return self.has_rx_streams
+
+
+ # return true if rx_delay_ms has passed since the last port stop
+ def has_rx_delay_expired (self, rx_delay_ms):
+ assert(self.has_rx_enabled())
+
+ # if active - it's not safe to remove RX filters
+ if self.is_active():
+ return False
+
+ # either no timestamp present or time has already passed
+ return not self.tx_stopped_ts or (datetime.now() - self.tx_stopped_ts) > timedelta(milliseconds = rx_delay_ms)
+
+
+ @writeable
+ def remove_rx_filters (self):
+ assert(self.has_rx_enabled())
+
+ if self.state == self.STATE_IDLE:
+ return self.ok()
+
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id}
+
+ rc = self.transmit("remove_rx_filters", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ return self.ok()
+
+ @owned
+ def pause (self):
+
+ if (self.state == self.STATE_PCAP_TX) :
+ return self.err("pause is not supported during PCAP TX")
+
+ if (self.state != self.STATE_TX) :
+ return self.err("port is not transmitting")
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id}
+
+ rc = self.transmit("pause_traffic", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ self.state = self.STATE_PAUSE
+
+ return self.ok()
+
+ @owned
+ def resume (self):
+
+ if (self.state != self.STATE_PAUSE) :
+ return self.err("port is not in pause mode")
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id}
+
+ # only valid state after stop
+
+ rc = self.transmit("resume_traffic", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ self.state = self.STATE_TX
+
+ return self.ok()
+
+ @owned
+ def update (self, mul, force):
+
+ if (self.state == self.STATE_PCAP_TX) :
+ return self.err("update is not supported during PCAP TX")
+
+ if (self.state != self.STATE_TX) :
+ return self.err("port is not transmitting")
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id,
+ "mul": mul,
+ "force": force}
+
+ rc = self.transmit("update_traffic", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ # save this for TUI
+ self.last_factor_type = mul['type']
+
+ return self.ok()
+
+ @owned
+ def validate (self):
+
+ if (self.state == self.STATE_IDLE):
+ return self.err("no streams attached to port")
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id}
+
+ rc = self.transmit("validate", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ self.profile = rc.data()
+
+ return self.ok()
+
+
+ @owned
+ def set_attr (self, attr_dict):
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id,
+ "attr": attr_dict}
+
+ rc = self.transmit("set_port_attr", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+
+ #self.attr.update(attr_dict)
+
+ return self.ok()
+
+ @writeable
+ def push_remote (self, pcap_filename, ipg_usec, speedup, count, duration, is_dual, slave_handler):
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id,
+ "pcap_filename": pcap_filename,
+ "ipg_usec": ipg_usec if ipg_usec is not None else -1,
+ "speedup": speedup,
+ "count": count,
+ "duration": duration,
+ "is_dual": is_dual,
+ "slave_handler": slave_handler}
+
+ rc = self.transmit("push_remote", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ self.state = self.STATE_PCAP_TX
+ return self.ok()
+
+
+ def get_profile (self):
+ return self.profile
+
+
+ def print_profile (self, mult, duration):
+ if not self.get_profile():
+ return
+
+ rate = self.get_profile()['rate']
+ graph = self.get_profile()['graph']
+
+ print(format_text("Profile Map Per Port\n", 'underline', 'bold'))
+
+ factor = mult_to_factor(mult, rate['max_bps_l2'], rate['max_pps'], rate['max_line_util'])
+
+ print("Profile max BPS L2 (base / req): {:^12} / {:^12}".format(format_num(rate['max_bps_l2'], suffix = "bps"),
+ format_num(rate['max_bps_l2'] * factor, suffix = "bps")))
+
+ print("Profile max BPS L1 (base / req): {:^12} / {:^12}".format(format_num(rate['max_bps_l1'], suffix = "bps"),
+ format_num(rate['max_bps_l1'] * factor, suffix = "bps")))
+
+ print("Profile max PPS (base / req): {:^12} / {:^12}".format(format_num(rate['max_pps'], suffix = "pps"),
+ format_num(rate['max_pps'] * factor, suffix = "pps"),))
+
+ print("Profile line util. (base / req): {:^12} / {:^12}".format(format_percentage(rate['max_line_util']),
+ format_percentage(rate['max_line_util'] * factor)))
+
+
+ # duration
+ exp_time_base_sec = graph['expected_duration'] / (1000 * 1000)
+ exp_time_factor_sec = exp_time_base_sec / factor
+
+ # user configured a duration
+ if duration > 0:
+ if exp_time_factor_sec > 0:
+ exp_time_factor_sec = min(exp_time_factor_sec, duration)
+ else:
+ exp_time_factor_sec = duration
+
+
+ print("Duration (base / req): {:^12} / {:^12}".format(format_time(exp_time_base_sec),
+ format_time(exp_time_factor_sec)))
+ print("\n")
+
+ # generate port info
+ def get_info (self):
+ info = dict(self.info)
+
+ info['status'] = self.get_port_state_name()
+
+ if 'link' in self.attr:
+ info['link'] = 'UP' if self.attr['link']['up'] else 'DOWN'
+ else:
+ info['link'] = 'N/A'
+
+ if 'fc' in self.attr:
+ info['fc'] = FLOW_CTRL_DICT_REVERSED.get(self.attr['fc']['mode'], 'N/A')
+ else:
+ info['fc'] = 'N/A'
+
+ if 'promiscuous' in self.attr:
+ info['prom'] = "on" if self.attr['promiscuous']['enabled'] else "off"
+ else:
+ info['prom'] = "N/A"
+
+ if 'description' not in info:
+ info['description'] = "N/A"
+
+ if 'is_fc_supported' in info:
+ info['fc_supported'] = 'yes' if info['is_fc_supported'] else 'no'
+ else:
+ info['fc_supported'] = 'N/A'
+
+ if 'is_led_supported' in info:
+ info['led_change_supported'] = 'yes' if info['is_led_supported'] else 'no'
+ else:
+ info['led_change_supported'] = 'N/A'
+
+ if 'is_link_supported' in info:
+ info['link_change_supported'] = 'yes' if info['is_link_supported'] else 'no'
+ else:
+ info['link_change_supported'] = 'N/A'
+
+ if 'is_virtual' in info:
+ info['is_virtual'] = 'yes' if info['is_virtual'] else 'no'
+ else:
+ info['is_virtual'] = 'N/A'
+
+ return info
+
+
+ def get_port_state_name(self):
+ return self.STATES_MAP.get(self.state, "Unknown")
+
+ ################# stats handler ######################
+ def generate_port_stats(self):
+ return self.port_stats.generate_stats()
+
+ def generate_port_status(self):
+
+ info = self.get_info()
+
+ return {"driver": info['driver'],
+ "description": info.get('description', 'N/A')[:18],
+ "HW src mac": info['hw_macaddr'],
+ "SW src mac": info['src_macaddr'],
+ "SW dst mac": info['dst_macaddr'],
+ "PCI Address": info['pci_addr'],
+ "NUMA Node": info['numa'],
+ "--": "",
+ "---": "",
+ "link speed": "{speed} Gb/s".format(speed=info['speed']),
+ "port status": info['status'],
+ "link status": info['link'],
+ "promiscuous" : info['prom'],
+ "flow ctrl" : info['fc'],
+ }
+
+ def clear_stats(self):
+ return self.port_stats.clear_stats()
+
+
+ def get_stats (self):
+ return self.port_stats.get_stats()
+
+
+ def invalidate_stats(self):
+ return self.port_stats.invalidate()
+
+ ################# stream printout ######################
+ def generate_loaded_streams_sum(self):
+ if self.state == self.STATE_DOWN:
+ return {}
+
+ data = {}
+ for id, obj in self.streams.items():
+
+ # lazy build scapy repr.
+ if not 'pkt_type' in obj:
+ obj['pkt_type'] = STLPktBuilder.pkt_layers_desc_from_buffer(obj['pkt'])
+
+ data[id] = OrderedDict([ ('id', id),
+ ('packet_type', obj['pkt_type']),
+ ('L2 len', len(obj['pkt']) + 4),
+ ('mode', obj['mode']),
+ ('rate', obj['rate']),
+                                     ('next_stream', obj['next_id'] if obj['next_id'] != -1 else 'None')
+ ])
+
+ return {"streams" : OrderedDict(sorted(data.items())) }
+
+
+
+ ################# events handler ######################
+ def async_event_port_job_done (self):
+ # until thread is locked - order is important
+ self.tx_stopped_ts = datetime.now()
+ self.state = self.STATE_STREAMS
+ self.last_factor_type = None
+
+ def async_event_port_attr_changed (self, attr):
+ self.info['speed'] = attr['speed'] // 1000
+ self.attr = attr
+
+ # rest of the events are used for TUI / read only sessions
+ def async_event_port_stopped (self):
+ if not self.is_acquired():
+ self.state = self.STATE_STREAMS
+
+ def async_event_port_paused (self):
+ if not self.is_acquired():
+ self.state = self.STATE_PAUSE
+
+ def async_event_port_started (self):
+ if not self.is_acquired():
+ self.state = self.STATE_TX
+
+ def async_event_port_resumed (self):
+ if not self.is_acquired():
+ self.state = self.STATE_TX
+
+ def async_event_acquired (self, who):
+ self.handler = None
+ self.owner = who
+
+ def async_event_released (self):
+ self.owner = ''
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py
new file mode 100644
index 00000000..540bba68
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py
@@ -0,0 +1,620 @@
+# -*- coding: utf-8 -*-
+
+"""
+Itay Marom
+Cisco Systems, Inc.
+
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+# simulator can be run as a standalone
+from . import trex_stl_ext
+from .trex_stl_exceptions import *
+from .trex_stl_streams import *
+from .utils import parsing_opts
+from .trex_stl_client import STLClient
+from .utils import pcap
+from trex_stl_lib.trex_stl_packet_builder_scapy import RawPcapReader, RawPcapWriter, hexdump
+
+from random import randint
+from random import choice as rand_choice
+
+from yaml import YAMLError
+
+import re
+import json
+import argparse
+import tempfile
+import subprocess
+import os
+import sys
+from operator import itemgetter
+
+class BpSimException(Exception):
+ pass
+
+
+# stateless simulation
+class STLSim(object):
+ MASK_ALL = ((1 << 64) - 1)
+
+ def __init__ (self, bp_sim_path, handler = 0, port_id = 0, api_h = "dummy"):
+
+ self.bp_sim_path = os.path.abspath(bp_sim_path)
+ if not os.path.exists(self.bp_sim_path):
+ raise STLError('BP sim path %s does not exist' % self.bp_sim_path)
+
+ # dummies
+ self.handler = handler
+ self.api_h = api_h
+ self.port_id = port_id
+
+
+ def generate_start_cmd (self, mult = "1", force = True, duration = -1):
+ return {"id":1,
+ "jsonrpc": "2.0",
+ "method": "start_traffic",
+ "params": {"handler": self.handler,
+ "api_h" : self.api_h,
+ "force": force,
+ "port_id": self.port_id,
+ "mul": parsing_opts.decode_multiplier(mult),
+ "duration": duration,
+ "core_mask": self.MASK_ALL}
+ }
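+
+    # The command above is a plain JSON-RPC v2 request; illustratively, with the
+    # constructor defaults it serializes to something like:
+    #
+    #   {"id": 1, "jsonrpc": "2.0", "method": "start_traffic",
+    #    "params": {"handler": 0, "api_h": "dummy", "force": true,
+    #               "port_id": 0, "mul": {...}, "duration": -1,
+    #               "core_mask": 18446744073709551615}}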
+
+
+
+    # run command
+    # input_list - a list of streams or YAML files
+    # outfile - pcap file to save the output; if None it's a dry run
+    # dp_core_count - how many DP cores to use
+    # dp_core_index - simulate only a specific DP core, without merging
+    # is_debug - debug or release image
+    # pkt_limit - how many packets to simulate
+    # mult - multiplier
+    # mode - can be 'valgrind', 'gdb', 'json', 'yaml', 'pkt', 'native' or 'none'
+ def run (self,
+ input_list,
+ outfile = None,
+ dp_core_count = 1,
+ dp_core_index = None,
+ is_debug = True,
+ pkt_limit = 5000,
+ mult = "1",
+ duration = -1,
+ mode = 'none',
+ silent = False,
+ tunables = None):
+
+ if not mode in ['none', 'gdb', 'valgrind', 'json', 'yaml','pkt','native']:
+ raise STLArgumentError('mode', mode)
+
+ # listify
+ input_list = input_list if isinstance(input_list, list) else [input_list]
+
+ # check streams arguments
+ if not all([isinstance(i, (STLStream, str)) for i in input_list]):
+ raise STLArgumentError('input_list', input_list)
+
+        # split into two types
+ input_files = [x for x in input_list if isinstance(x, str)]
+ stream_list = [x for x in input_list if isinstance(x, STLStream)]
+
+ # handle YAMLs
+ if tunables == None:
+ tunables = {}
+
+ for input_file in input_files:
+ try:
+ if not 'direction' in tunables:
+ tunables['direction'] = self.port_id % 2
+
+ profile = STLProfile.load(input_file, **tunables)
+
+ except STLError as e:
+ s = format_text("\nError while loading profile '{0}'\n".format(input_file), 'bold')
+ s += "\n" + e.brief()
+ raise STLError(s)
+
+ stream_list += profile.get_streams()
+
+
+ # load streams
+ cmds_json = []
+
+ id_counter = 1
+
+ lookup = {}
+
+ # allocate IDs
+ for stream in stream_list:
+ if stream.get_id() is not None:
+ stream_id = stream.get_id()
+ else:
+ stream_id = id_counter
+ id_counter += 1
+
+ name = stream.get_name() if stream.get_name() is not None else id(stream)
+ if name in lookup:
+ raise STLError("multiple streams with name: '{0}'".format(name))
+ lookup[name] = stream_id
+
+ # resolve names
+ for stream in stream_list:
+
+ name = stream.get_name() if stream.get_name() is not None else id(stream)
+ stream_id = lookup[name]
+
+ next_id = -1
+ next = stream.get_next()
+ if next:
+ if not next in lookup:
+ raise STLError("stream dependency error - unable to find '{0}'".format(next))
+ next_id = lookup[next]
+
+
+ stream_json = stream.to_json()
+ stream_json['next_stream_id'] = next_id
+
+ cmd = {"id":1,
+ "jsonrpc": "2.0",
+ "method": "add_stream",
+ "params": {"handler": self.handler,
+ "api_h": self.api_h,
+ "port_id": self.port_id,
+ "stream_id": stream_id,
+ "stream": stream_json}
+ }
+
+ cmds_json.append(cmd)
+
+ # generate start command
+ cmds_json.append(self.generate_start_cmd(mult = mult,
+ force = True,
+ duration = duration))
+
+ if mode == 'json':
+ print(json.dumps(cmds_json, indent = 4, separators=(',', ': '), sort_keys = True))
+ return
+ elif mode == 'yaml':
+ print(STLProfile(stream_list).dump_to_yaml())
+ return
+ elif mode == 'pkt':
+ print(STLProfile(stream_list).dump_as_pkt())
+ return
+ elif mode == 'native':
+ print(STLProfile(stream_list).dump_to_code())
+ return
+
+
+ # start simulation
+ self.outfile = outfile
+ self.dp_core_count = dp_core_count
+ self.dp_core_index = dp_core_index
+ self.is_debug = is_debug
+ self.pkt_limit = pkt_limit
+ self.mult = mult
+        self.duration = duration
+ self.mode = mode
+ self.silent = silent
+
+ self.__run(cmds_json)
+
+
+ # internal run
+ def __run (self, cmds_json):
+
+ # write to temp file
+ f = tempfile.NamedTemporaryFile(delete = False)
+
+ msg = json.dumps(cmds_json).encode()
+
+ f.write(msg)
+ f.close()
+
+ # launch bp-sim
+ try:
+ self.execute_bp_sim(f.name)
+ finally:
+ os.unlink(f.name)
+
+
+
+ def execute_bp_sim (self, json_filename):
+ if self.is_debug:
+ exe = os.path.join(self.bp_sim_path, 'bp-sim-64-debug')
+ else:
+ exe = os.path.join(self.bp_sim_path, 'bp-sim-64')
+
+ if not os.path.exists(exe):
+            raise STLError("'{0}' does not exist, please build it before calling the simulation".format(exe))
+
+
+ cmd = [exe,
+ '--pcap',
+ '--sl',
+ '--cores',
+ str(self.dp_core_count),
+ '--limit',
+ str(self.pkt_limit),
+ '-f',
+ json_filename]
+
+ # out or dry
+ if not self.outfile:
+ cmd += ['--dry']
+ cmd += ['-o', '/dev/null']
+ else:
+ cmd += ['-o', self.outfile]
+
+ if self.dp_core_index != None:
+ cmd += ['--core_index', str(self.dp_core_index)]
+
+ if self.mode == 'valgrind':
+ cmd = ['valgrind', '--leak-check=full', '--error-exitcode=1'] + cmd
+
+ elif self.mode == 'gdb':
+ cmd = ['/usr/bin/gdb', '--args'] + cmd
+
+ print("executing command: '{0}'".format(" ".join(cmd)))
+
+ if self.silent:
+ FNULL = open(os.devnull, 'wb')
+ rc = subprocess.call(cmd, stdout=FNULL)
+ else:
+ rc = subprocess.call(cmd)
+
+ if rc != 0:
+ raise STLError('simulation has failed with error code {0}'.format(rc))
+
+ self.merge_results()
+
+
+ def merge_results (self):
+ if not self.outfile:
+ return
+
+ if self.dp_core_count == 1:
+ return
+
+ if self.dp_core_index != None:
+ return
+
+
+ if not self.silent:
+            print("Merging cores output to a single pcap file...\n")
+ inputs = ["{0}-{1}".format(self.outfile, index) for index in range(0, self.dp_core_count)]
+ pcap.merge_cap_files(inputs, self.outfile, delete_src = True)
+
+
+
+def is_valid_file(filename):
+ if not os.path.isfile(filename):
+ raise argparse.ArgumentTypeError("The file '%s' does not exist" % filename)
+
+ return filename
+
+
+def unsigned_int (x):
+ x = int(x)
+ if x < 0:
+ raise argparse.ArgumentTypeError("argument must be >= 0")
+
+ return x
+
+def setParserOptions():
+ parser = argparse.ArgumentParser(prog="stl_sim.py")
+
+ parser.add_argument("-f",
+ dest ="input_file",
+ help = "input file in YAML or Python format",
+ type = is_valid_file,
+ required=True)
+
+ parser.add_argument("-o",
+ dest = "output_file",
+ default = None,
+ help = "output file in ERF format")
+
+
+ parser.add_argument("-c", "--cores",
+ help = "DP core count [default is 1]",
+ dest = "dp_core_count",
+ default = 1,
+ type = int,
+ choices = list(range(1, 9)))
+
+ parser.add_argument("-n", "--core_index",
+ help = "Record only a specific core",
+ dest = "dp_core_index",
+ default = None,
+ type = int)
+
+ parser.add_argument("-i", "--port",
+ help = "Simulate a specific port ID [default is 0]",
+ dest = "port_id",
+ default = 0,
+ type = int)
+
+
+ parser.add_argument("-r", "--release",
+ help = "runs on release image instead of debug [default is False]",
+ action = "store_true",
+ default = False)
+
+
+ parser.add_argument("-s", "--silent",
+ help = "runs on silent mode (no stdout) [default is False]",
+ action = "store_true",
+ default = False)
+
+ parser.add_argument("-l", "--limit",
+ help = "limit test total packet count [default is 5000]",
+ default = 5000,
+ type = unsigned_int)
+
+ parser.add_argument('-m', '--multiplier',
+ help = parsing_opts.match_multiplier_help,
+ dest = 'mult',
+ default = "1",
+ type = parsing_opts.match_multiplier_strict)
+
+ parser.add_argument('-d', '--duration',
+ help = "run duration",
+ dest = 'duration',
+ default = -1,
+ type = float)
+
+
+ parser.add_argument('-t',
+ help = 'sets tunable for a profile',
+ dest = 'tunables',
+ default = None,
+ type = parsing_opts.decode_tunables)
+
+ parser.add_argument('-p', '--path',
+ help = "BP sim path",
+ dest = 'bp_sim_path',
+ default = None,
+ type = str)
+
+
+ group = parser.add_mutually_exclusive_group()
+
+ group.add_argument("-x", "--valgrind",
+ help = "run under valgrind [default is False]",
+ action = "store_true",
+ default = False)
+
+ group.add_argument("-g", "--gdb",
+ help = "run under GDB [default is False]",
+ action = "store_true",
+ default = False)
+
+ group.add_argument("--json",
+ help = "generate JSON output only to stdout [default is False]",
+ action = "store_true",
+ default = False)
+
+ group.add_argument("--pkt",
+ help = "Parse the packet and show it as hex",
+ action = "store_true",
+ default = False)
+
+ group.add_argument("--yaml",
+ help = "generate YAML from input file [default is False]",
+ action = "store_true",
+ default = False)
+
+ group.add_argument("--native",
+ help = "generate Python code with stateless profile from input file [default is False]",
+ action = "store_true",
+ default = False)
+
+ group.add_argument("--test_multi_core",
+ help = "runs the profile with c=1-8",
+ action = "store_true",
+ default = False)
+
+ return parser
+
+
+def validate_args (parser, options):
+
+    if options.dp_core_index is not None:
+ if not options.dp_core_index in range(0, options.dp_core_count):
+ parser.error("DP core index valid range is 0 to {0}".format(options.dp_core_count - 1))
+
+ # zero is ok - no limit, but other values must be at least as the number of cores
+ if (options.limit != 0) and options.limit < options.dp_core_count:
+ parser.error("limit cannot be lower than number of DP cores")
+
+
+# a more flexible check
+def compare_caps (cap1, cap2, max_diff_sec = (5 * 1e-6)):
+ pkts1 = list(RawPcapReader(cap1))
+ pkts2 = list(RawPcapReader(cap2))
+
+ if len(pkts1) != len(pkts2):
+ print('{0} contains {1} packets vs. {2} contains {3} packets'.format(cap1, len(pkts1), cap2, len(pkts2)))
+ return False
+
+    # to be less strict, we define equality as: every packet from cap1 exists in cap2
+    # and vice versa
+    # 'exists' means the same packet with abs(TS1-TS2) < max_diff_sec (5 usec by default)
+    # it's O(n^2), but who cares, right ?
+ for i, pkt1 in enumerate(pkts1):
+ ts1 = float(pkt1[1][0]) + (float(pkt1[1][1]) / 1e6)
+ found = None
+ for j, pkt2 in enumerate(pkts2):
+ ts2 = float(pkt2[1][0]) + (float(pkt2[1][1]) / 1e6)
+
+ if abs(ts1-ts2) > max_diff_sec:
+ break
+
+ if pkt1[0] == pkt2[0]:
+ found = j
+ break
+
+
+ if found is None:
+ print(format_text("cannot find packet #{0} from {1} in {2}\n".format(i, cap1, cap2), 'bold'))
+ return False
+ else:
+ del pkts2[found]
+
+ return True
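+
+# Illustrative use (hypothetical file names):
+#
+#   if not compare_caps('golden.pcap', 'generated.pcap'):
+#       print('pcap files differ')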
+
+
+def hexdiff (d1, d2):
+ rc = []
+
+ if len(d1) != len(d2):
+ return rc
+
+ for i in range(len(d1)):
+ if d1[i] != d2[i]:
+ rc.append(i)
+ return rc
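+
+# e.g. hexdiff(b'\x00\x01\x02', b'\x00\xff\x02') -> [1]: the list of byte
+# positions at which two equal-length buffers differ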
+
+def prettyhex (h, diff_list):
+ if type(h[0]) == str:
+ h = [ord(x) for x in h]
+
+ for i in range(len(h)):
+
+ if i in diff_list:
+ sys.stdout.write("->'0x%02x'<-" % h[i])
+ else:
+ sys.stdout.write(" '0x%02x' " % h[i])
+ if ((i % 9) == 8):
+ print("")
+
+ print("")
+
+# a stricter comparison - 1 <--> 1
+def compare_caps_strict (cap1, cap2, max_diff_sec = (5 * 1e-6)):
+ pkts1 = list(RawPcapReader(cap1))
+ pkts2 = list(RawPcapReader(cap2))
+
+ if len(pkts1) != len(pkts2):
+        print('{0} contains {1} packets vs. {2} contains {3} packets'.format(cap1, len(pkts1), cap2, len(pkts2)))
+ return False
+
+    # a strict check - packet i of cap1 must match packet i of cap2
+    for i, (pkt1, pkt2) in enumerate(zip(pkts1, pkts2), 1):
+        ts1 = float(pkt1[1][0]) + (float(pkt1[1][1]) / 1e6)
+        ts2 = float(pkt2[1][0]) + (float(pkt2[1][1]) / 1e6)
+
+        if abs(ts1-ts2) > max_diff_sec: # 5 usec by default
+ print(format_text("TS error: cap files '{0}', '{1}' differ in cap #{2} - '{3}' vs. '{4}'\n".format(cap1, cap2, i, ts1, ts2), 'bold'))
+ return False
+
+ if pkt1[0] != pkt2[0]:
+ print(format_text("RAW error: cap files '{0}', '{1}' differ in cap #{2}\n".format(cap1, cap2, i), 'bold'))
+
+ diff_list = hexdiff(pkt1[0], pkt2[0])
+
+ print("{0} - packet #{1}:\n".format(cap1, i))
+ prettyhex(pkt1[0], diff_list)
+
+ print("\n{0} - packet #{1}:\n".format(cap2, i))
+ prettyhex(pkt2[0], diff_list)
+
+ print("")
+ return False
+
+ return True
+
+
+def test_multi_core (r, options):
+
+ for core_count in range(1, 9):
+ r.run(input_list = options.input_file,
+ outfile = '{0}.cap'.format(core_count),
+ dp_core_count = core_count,
+ is_debug = (not options.release),
+ pkt_limit = options.limit,
+ mult = options.mult,
+ duration = options.duration,
+ mode = 'none',
+ silent = True,
+ tunables = options.tunables)
+
+ print("")
+
+ for core_count in range(1, 9):
+ print(format_text("comparing {0} cores to 1 core:\n".format(core_count), 'underline'))
+ rc = compare_caps_strict('1.cap', '{0}.cap'.format(core_count))
+ if rc:
+ print("[Passed]\n")
+
+ return
+
+
+def main (args = None):
+ parser = setParserOptions()
+ options = parser.parse_args(args = args)
+
+ validate_args(parser, options)
+
+
+
+ if options.valgrind:
+ mode = 'valgrind'
+ elif options.gdb:
+ mode = 'gdb'
+ elif options.json:
+ mode = 'json'
+ elif options.yaml:
+ mode = 'yaml'
+ elif options.native:
+ mode = 'native'
+ elif options.pkt:
+ mode = 'pkt'
+ elif options.test_multi_core:
+ mode = 'test_multi_core'
+ else:
+ mode = 'none'
+
+ try:
+ r = STLSim(bp_sim_path = options.bp_sim_path, port_id = options.port_id)
+
+ if mode == 'test_multi_core':
+ test_multi_core(r, options)
+ else:
+ r.run(input_list = options.input_file,
+ outfile = options.output_file,
+ dp_core_count = options.dp_core_count,
+ dp_core_index = options.dp_core_index,
+ is_debug = (not options.release),
+ pkt_limit = options.limit,
+ mult = options.mult,
+ duration = options.duration,
+ mode = mode,
+ silent = options.silent,
+ tunables = options.tunables)
+
+ except KeyboardInterrupt as e:
+ print("\n\n*** Caught Ctrl + C... Exiting...\n\n")
+ return (-1)
+
+ except STLError as e:
+ print(e)
+ return (-1)
+
+ return (0)
+
+
+if __name__ == '__main__':
+ main()
+
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
new file mode 100644
index 00000000..9f601484
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
@@ -0,0 +1,1549 @@
+#!/router/bin/python
+
+from .utils import text_tables
+from .utils.text_opts import format_text, format_threshold, format_num
+from .trex_stl_types import StatNotAvailable, is_integer
+from .trex_stl_exceptions import STLError
+
+from collections import namedtuple, OrderedDict, deque
+import sys
+import copy
+import datetime
+import time
+import re
+import math
+import threading
+import pprint
+
+GLOBAL_STATS = 'g'
+PORT_STATS = 'p'
+PORT_GRAPH = 'pg'
+PORT_STATUS = 'ps'
+STREAMS_STATS = 's'
+LATENCY_STATS = 'ls'
+LATENCY_HISTOGRAM = 'lh'
+CPU_STATS = 'c'
+MBUF_STATS = 'm'
+EXTENDED_STATS = 'x'
+EXTENDED_INC_ZERO_STATS = 'xz'
+
+ALL_STATS_OPTS = [GLOBAL_STATS, PORT_STATS, PORT_STATUS, STREAMS_STATS, LATENCY_STATS, PORT_GRAPH, LATENCY_HISTOGRAM, CPU_STATS, MBUF_STATS, EXTENDED_STATS, EXTENDED_INC_ZERO_STATS]
+COMPACT = [GLOBAL_STATS, PORT_STATS]
+GRAPH_PORT_COMPACT = [GLOBAL_STATS, PORT_GRAPH]
+SS_COMPAT = [GLOBAL_STATS, STREAMS_STATS] # stream stats
+LS_COMPAT = [GLOBAL_STATS, LATENCY_STATS] # latency stats
+LH_COMPAT = [GLOBAL_STATS, LATENCY_HISTOGRAM] # latency histogram
+UT_COMPAT = [GLOBAL_STATS, CPU_STATS, MBUF_STATS] # utilization
+
+ExportableStats = namedtuple('ExportableStats', ['raw_data', 'text_table'])
+
+def round_float (f):
+ return float("%.2f" % f) if type(f) is float else f
+
+def try_int(i):
+ try:
+ return int(i)
+ except:
+ return i
+
+# deep merge of dicts: dst = src + dst (existing keys in dst win)
+def deep_merge_dicts (dst, src):
+    for k, v in src.items():
+        # if the key does not exist - deep copy it
+        if k not in dst:
+ dst[k] = copy.deepcopy(v)
+ else:
+ if isinstance(v, dict):
+ deep_merge_dicts(dst[k], v)
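+# For example (sketch):
+#   dst = {'a': 1, 'b': {'x': 1}}
+#   deep_merge_dicts(dst, {'b': {'y': 2}, 'c': 3})
+#   # dst -> {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}; existing scalars in dst win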
+
+# L1 bps from L2 bps and pps (adds 20 bytes of preamble + IPG per packet)
+def calc_bps_L1 (bps, pps):
+    if (pps == 0) or (bps == 0):
+        return 0
+
+    factor = bps / (pps * 8.0)
+    return bps * ( 1 + (20 / factor) )
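+# Worked example (sketch): 64-byte packets at 1 Mpps mean bps = 512 Mb/s L2;
+# factor = 512e6 / (1e6 * 8.0) = 64 bytes/pkt, so
+# L1 = 512e6 * (1 + 20/64) = 672 Mb/s, i.e. 160 extra bits per packet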
+
+def is_intable (value):
+ try:
+ int(value)
+ return True
+ except ValueError:
+ return False
+
+# used to calculate a weighted trend of diffs relative to previous values
+# (e.g. for bandwidth) - recent samples carry more weight
+def calculate_diff (samples):
+ total = 0.0
+
+ weight_step = 1.0 / sum(range(0, len(samples)))
+ weight = weight_step
+
+    for i in range(0, len(samples) - 1):
+        current  = samples[i] if samples[i] > 0 else 1
+        next_val = samples[i + 1] if samples[i + 1] > 0 else 1
+
+        s = 100 * ((float(next_val) / current) - 1.0)
+
+        # cap a single step's change at 100%
+        total += (min(s, 100) * weight)
+ weight += weight_step
+
+ return total
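+# For example (sketch): samples [100, 110, 121] has two +10% steps; with
+# weights 1/3 and 2/3 the weighted trend is 10 * (1/3) + 10 * (2/3) = 10.0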
+
+
+# calculate by absolute values rather than relative ones (useful e.g. for CPU usage in %)
+def calculate_diff_raw (samples):
+ total = 0.0
+
+ weight_step = 1.0 / sum(range(0, len(samples)))
+ weight = weight_step
+
+    for i in range(0, len(samples) - 1):
+        current  = samples[i]
+        next_val = samples[i + 1]
+
+        total += ( (next_val - current) * weight )
+ weight += weight_step
+
+ return total
+
+get_number_of_bytes_cache = {}
+# get number of bytes: '64b'->64, '9kb'->9000 etc.
+def get_number_of_bytes(val):
+ if val not in get_number_of_bytes_cache:
+ get_number_of_bytes_cache[val] = int(val[:-1].replace('k', '000'))
+ return get_number_of_bytes_cache[val]
+
+# a simple object to keep a watch over a field
+class WatchedField(object):
+
+ def __init__ (self, name, suffix, high_th, low_th, events_handler):
+ self.name = name
+ self.suffix = suffix
+ self.high_th = high_th
+ self.low_th = low_th
+ self.events_handler = events_handler
+
+ self.hot = False
+ self.current = None
+
+ def update (self, value):
+ if value is None:
+ return
+
+ if value > self.high_th and not self.hot:
+ self.events_handler.log_warning("{0} is high: {1}{2}".format(self.name, value, self.suffix))
+ self.hot = True
+
+ if value < self.low_th and self.hot:
+ self.hot = False
+
+ self.current = value
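+    # For example (sketch): with high_th = 85 and low_th = 60, updates of
+    # 80 -> 90 -> 70 -> 90 warn once at the first 90 (crossing high_th) and
+    # stay silent afterwards, since 'hot' is only cleared by a value below 60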
+
+
+
+class CTRexInfoGenerator(object):
+    """
+    This object is responsible for generating stats and information from
+    objects maintained by the STLClient and the ports.
+    """
+
+ def __init__(self, global_stats_ref, ports_dict_ref, rx_stats_ref, latency_stats_ref, util_stats_ref, xstats_ref, async_monitor):
+ self._global_stats = global_stats_ref
+ self._ports_dict = ports_dict_ref
+ self._rx_stats_ref = rx_stats_ref
+ self._latency_stats_ref = latency_stats_ref
+ self._util_stats_ref = util_stats_ref
+ self._xstats_ref = xstats_ref
+ self._async_monitor = async_monitor
+
+ def generate_single_statistic(self, port_id_list, statistic_type):
+ if statistic_type == GLOBAL_STATS:
+ return self._generate_global_stats()
+
+ elif statistic_type == PORT_STATS:
+ return self._generate_port_stats(port_id_list)
+
+ elif statistic_type == PORT_GRAPH:
+ return self._generate_port_graph(port_id_list)
+
+ elif statistic_type == PORT_STATUS:
+ return self._generate_port_status(port_id_list)
+
+ elif statistic_type == STREAMS_STATS:
+ return self._generate_streams_stats()
+
+ elif statistic_type == LATENCY_STATS:
+ return self._generate_latency_stats()
+
+ elif statistic_type == LATENCY_HISTOGRAM:
+ return self._generate_latency_histogram()
+
+ elif statistic_type == CPU_STATS:
+ return self._generate_cpu_util_stats()
+
+ elif statistic_type == MBUF_STATS:
+ return self._generate_mbuf_util_stats()
+
+ elif statistic_type == EXTENDED_STATS:
+ return self._generate_xstats(port_id_list, include_zero_lines = False)
+
+ elif statistic_type == EXTENDED_INC_ZERO_STATS:
+ return self._generate_xstats(port_id_list, include_zero_lines = True)
+
+ else:
+ # ignore by returning empty object
+ return {}
+
+ def generate_streams_info(self, port_id_list, stream_id_list):
+ relevant_ports = self.__get_relevant_ports(port_id_list)
+ return_data = OrderedDict()
+
+ for port_obj in relevant_ports:
+ streams_data = self._generate_single_port_streams_info(port_obj, stream_id_list)
+ if not streams_data:
+ continue
+ hdr_key = "Port {port}:".format(port= port_obj.port_id)
+
+ # TODO: test for other ports with same stream structure, and join them
+ return_data[hdr_key] = streams_data
+
+ return return_data
+
+ def _generate_global_stats(self):
+ global_stats = self._global_stats
+
+ stats_data_left = OrderedDict([("connection", "{host}, Port {port}".format(host=global_stats.connection_info.get("server"),
+ port=global_stats.connection_info.get("sync_port"))),
+ ("version", "{ver}".format(ver=global_stats.server_version.get("version", "N/A"))),
+
+ ("cpu_util.", "{0}% @ {2} cores ({3} per port) {1}".format( format_threshold(round_float(global_stats.get("m_cpu_util")), [85, 100], [0, 85]),
+ global_stats.get_trend_gui("m_cpu_util", use_raw = True),
+ global_stats.system_info.get('dp_core_count'),
+ global_stats.system_info.get('dp_core_count_per_port'),
+ )),
+
+ ("rx_cpu_util.", "{0}% {1}".format( format_threshold(round_float(global_stats.get("m_rx_cpu_util")), [85, 100], [0, 85]),
+ global_stats.get_trend_gui("m_rx_cpu_util", use_raw = True))),
+
+ ("async_util.", "{0}% / {1}".format( format_threshold(round_float(self._async_monitor.get_cpu_util()), [85, 100], [0, 85]),
+ format_num(self._async_monitor.get_bps() / 8.0, suffix = "B/sec"))),
+ ])
+
+ stats_data_right = OrderedDict([
+ ("total_tx_L2", "{0} {1}".format( global_stats.get("m_tx_bps", format=True, suffix="b/sec"),
+ global_stats.get_trend_gui("m_tx_bps"))),
+
+ ("total_tx_L1", "{0} {1}".format( global_stats.get("m_tx_bps_L1", format=True, suffix="b/sec"),
+ global_stats.get_trend_gui("m_tx_bps_L1"))),
+
+ ("total_rx", "{0} {1}".format( global_stats.get("m_rx_bps", format=True, suffix="b/sec"),
+ global_stats.get_trend_gui("m_rx_bps"))),
+
+ ("total_pps", "{0} {1}".format( global_stats.get("m_tx_pps", format=True, suffix="pkt/sec"),
+ global_stats.get_trend_gui("m_tx_pps"))),
+
+ ("drop_rate", "{0}".format( format_num(global_stats.get("m_rx_drop_bps"),
+ suffix = 'b/sec',
+ opts = 'green' if (global_stats.get("m_rx_drop_bps")== 0) else 'red'),
+ )),
+
+ ("queue_full", "{0}".format( format_num(global_stats.get_rel("m_total_queue_full"),
+ suffix = 'pkts',
+ compact = False,
+ opts = 'green' if (global_stats.get_rel("m_total_queue_full")== 0) else 'red'))),
+ ])
+
+ # build table representation
+ stats_table = text_tables.TRexTextInfo()
+ stats_table.set_cols_align(["l", "l"])
+ stats_table.set_deco(0)
+ stats_table.set_cols_width([50, 45])
+ max_lines = max(len(stats_data_left), len(stats_data_right))
+ for line_num in range(max_lines):
+ row = []
+ if line_num < len(stats_data_left):
+ key = list(stats_data_left.keys())[line_num]
+ row.append('{:<12} : {}'.format(key, stats_data_left[key]))
+ else:
+ row.append('')
+ if line_num < len(stats_data_right):
+ key = list(stats_data_right.keys())[line_num]
+ row.append('{:<12} : {}'.format(key, stats_data_right[key]))
+ else:
+ row.append('')
+ stats_table.add_row(row)
+
+ return {"global_statistics": ExportableStats(None, stats_table)}
+
+ def _generate_streams_stats (self):
+ flow_stats = self._rx_stats_ref
+ # for TUI - maximum 4
+ pg_ids = list(filter(is_intable, flow_stats.latest_stats.keys()))[:4]
+ stream_count = len(pg_ids)
+
+ sstats_data = OrderedDict([ ('Tx pps', []),
+ ('Tx bps L2', []),
+ ('Tx bps L1', []),
+ ('---', [''] * stream_count),
+ ('Rx pps', []),
+ ('Rx bps', []),
+ ('----', [''] * stream_count),
+ ('opackets', []),
+ ('ipackets', []),
+ ('obytes', []),
+ ('ibytes', []),
+ ('-----', [''] * stream_count),
+ ('tx_pkts', []),
+ ('rx_pkts', []),
+ ('tx_bytes', []),
+ ('rx_bytes', [])
+ ])
+
+
+
+ # maximum 4
+ for pg_id in pg_ids:
+
+ sstats_data['Tx pps'].append(flow_stats.get([pg_id, 'tx_pps_lpf', 'total'], format = True, suffix = "pps"))
+ sstats_data['Tx bps L2'].append(flow_stats.get([pg_id, 'tx_bps_lpf', 'total'], format = True, suffix = "bps"))
+
+ sstats_data['Tx bps L1'].append(flow_stats.get([pg_id, 'tx_bps_L1_lpf', 'total'], format = True, suffix = "bps"))
+
+ sstats_data['Rx pps'].append(flow_stats.get([pg_id, 'rx_pps_lpf', 'total'], format = True, suffix = "pps"))
+ sstats_data['Rx bps'].append(flow_stats.get([pg_id, 'rx_bps_lpf', 'total'], format = True, suffix = "bps"))
+
+ sstats_data['opackets'].append(flow_stats.get_rel([pg_id, 'tx_pkts', 'total']))
+ sstats_data['ipackets'].append(flow_stats.get_rel([pg_id, 'rx_pkts', 'total']))
+ sstats_data['obytes'].append(flow_stats.get_rel([pg_id, 'tx_bytes', 'total']))
+ sstats_data['ibytes'].append(flow_stats.get_rel([pg_id, 'rx_bytes', 'total']))
+ sstats_data['tx_bytes'].append(flow_stats.get_rel([pg_id, 'tx_bytes', 'total'], format = True, suffix = "B"))
+ sstats_data['rx_bytes'].append(flow_stats.get_rel([pg_id, 'rx_bytes', 'total'], format = True, suffix = "B"))
+ sstats_data['tx_pkts'].append(flow_stats.get_rel([pg_id, 'tx_pkts', 'total'], format = True, suffix = "pkts"))
+ sstats_data['rx_pkts'].append(flow_stats.get_rel([pg_id, 'rx_pkts', 'total'], format = True, suffix = "pkts"))
+
+
+ stats_table = text_tables.TRexTextTable()
+ stats_table.set_cols_align(["l"] + ["r"] * stream_count)
+ stats_table.set_cols_width([10] + [17] * stream_count)
+ stats_table.set_cols_dtype(['t'] + ['t'] * stream_count)
+
+ stats_table.add_rows([[k] + v
+ for k, v in sstats_data.items()],
+ header=False)
+
+ header = ["PG ID"] + [key for key in pg_ids]
+ stats_table.header(header)
+
+ return {"streams_statistics": ExportableStats(sstats_data, stats_table)}
+
+ def _generate_latency_stats(self):
+ lat_stats = self._latency_stats_ref
+ latency_window_size = 14
+
+ # for TUI - maximum 5
+ pg_ids = list(filter(is_intable, lat_stats.latest_stats.keys()))[:5]
+ stream_count = len(pg_ids)
+ lstats_data = OrderedDict([('TX pkts', []),
+ ('RX pkts', []),
+ ('Max latency', []),
+ ('Avg latency', []),
+ ('-- Window --', [''] * stream_count),
+ ('Last (max)', []),
+ ] + [('Last-%s' % i, []) for i in range(1, latency_window_size)] + [
+ ('---', [''] * stream_count),
+ ('Jitter', []),
+ ('----', [''] * stream_count),
+ ('Errors', []),
+ ])
+
+ with lat_stats.lock:
+ history = [x for x in lat_stats.history]
+ flow_stats = self._rx_stats_ref.get_stats()
+ for pg_id in pg_ids:
+ lstats_data['TX pkts'].append(flow_stats[pg_id]['tx_pkts']['total'] if pg_id in flow_stats else '')
+ lstats_data['RX pkts'].append(flow_stats[pg_id]['rx_pkts']['total'] if pg_id in flow_stats else '')
+ lstats_data['Avg latency'].append(try_int(lat_stats.get([pg_id, 'latency', 'average'])))
+ lstats_data['Max latency'].append(try_int(lat_stats.get([pg_id, 'latency', 'total_max'])))
+ lstats_data['Last (max)'].append(try_int(lat_stats.get([pg_id, 'latency', 'last_max'])))
+ for i in range(1, latency_window_size):
+ val = history[-i - 1].get(pg_id, {}).get('latency', {}).get('last_max', '') if len(history) > i else ''
+ lstats_data['Last-%s' % i].append(try_int(val))
+ lstats_data['Jitter'].append(try_int(lat_stats.get([pg_id, 'latency', 'jitter'])))
+ errors = 0
+ seq_too_low = lat_stats.get([pg_id, 'err_cntrs', 'seq_too_low'])
+ if is_integer(seq_too_low):
+ errors += seq_too_low
+ seq_too_high = lat_stats.get([pg_id, 'err_cntrs', 'seq_too_high'])
+ if is_integer(seq_too_high):
+ errors += seq_too_high
+ lstats_data['Errors'].append(format_num(errors,
+ opts = 'green' if errors == 0 else 'red'))
+
+
+ stats_table = text_tables.TRexTextTable()
+ stats_table.set_cols_align(["l"] + ["r"] * stream_count)
+ stats_table.set_cols_width([12] + [14] * stream_count)
+ stats_table.set_cols_dtype(['t'] + ['t'] * stream_count)
+ stats_table.add_rows([[k] + v
+ for k, v in lstats_data.items()],
+ header=False)
+
+ header = ["PG ID"] + [key for key in pg_ids]
+ stats_table.header(header)
+
+ return {"latency_statistics": ExportableStats(lstats_data, stats_table)}
+
+ def _generate_latency_histogram(self):
+ lat_stats = self._latency_stats_ref.latest_stats
+ max_histogram_size = 17
+
+ # for TUI - maximum 5
+ pg_ids = list(filter(is_intable, lat_stats.keys()))[:5]
+
+ merged_histogram = {}
+ for pg_id in pg_ids:
+ merged_histogram.update(lat_stats[pg_id]['latency']['histogram'])
+ histogram_size = min(max_histogram_size, len(merged_histogram))
+
+ stream_count = len(pg_ids)
+ stats_table = text_tables.TRexTextTable()
+ stats_table.set_cols_align(["l"] + ["r"] * stream_count)
+ stats_table.set_cols_width([12] + [14] * stream_count)
+ stats_table.set_cols_dtype(['t'] + ['t'] * stream_count)
+
+ for i in range(max_histogram_size - histogram_size):
+ if i == 0 and not merged_histogram:
+ stats_table.add_row([' No Data'] + [' '] * stream_count)
+ else:
+ stats_table.add_row([' '] * (stream_count + 1))
+ for key in list(reversed(sorted(merged_histogram.keys())))[:histogram_size]:
+ hist_vals = []
+ for pg_id in pg_ids:
+ hist_vals.append(lat_stats[pg_id]['latency']['histogram'].get(key, ' '))
+ stats_table.add_row([key] + hist_vals)
+
+ stats_table.add_row(['- Counters -'] + [' '] * stream_count)
+ err_cntrs_dict = OrderedDict()
+ for pg_id in pg_ids:
+ for err_cntr in sorted(lat_stats[pg_id]['err_cntrs'].keys()):
+ if err_cntr not in err_cntrs_dict:
+ err_cntrs_dict[err_cntr] = [lat_stats[pg_id]['err_cntrs'][err_cntr]]
+ else:
+ err_cntrs_dict[err_cntr].append(lat_stats[pg_id]['err_cntrs'][err_cntr])
+ for err_cntr, val_list in err_cntrs_dict.items():
+ stats_table.add_row([err_cntr] + val_list)
+ header = ["PG ID"] + [key for key in pg_ids]
+ stats_table.header(header)
+ return {"latency_histogram": ExportableStats(None, stats_table)}
+
+ def _generate_cpu_util_stats(self):
+ util_stats = self._util_stats_ref.get_stats(use_1sec_cache = True)
+
+ stats_table = text_tables.TRexTextTable()
+ if util_stats:
+ if 'cpu' not in util_stats:
+                raise Exception("Expecting 'cpu' section in stats %s" % util_stats)
+ cpu_stats = util_stats['cpu']
+ hist_len = len(cpu_stats[0]["history"])
+ avg_len = min(5, hist_len)
+ show_len = min(15, hist_len)
+ stats_table.header(['Thread', 'Avg', 'Latest'] + list(range(-1, 0 - show_len, -1)))
+ stats_table.set_cols_align(['l'] + ['r'] * (show_len + 1))
+ stats_table.set_cols_width([10, 3, 6] + [3] * (show_len - 1))
+ stats_table.set_cols_dtype(['t'] * (show_len + 2))
+
+ for i in range(min(18, len(cpu_stats))):
+ history = cpu_stats[i]["history"]
+ ports = cpu_stats[i]["ports"]
+ avg = int(round(sum(history[:avg_len]) / avg_len))
+
+ # decode active ports for core
+ if ports == [-1, -1]:
+ interfaces = "(IDLE)"
+ elif not -1 in ports:
+ interfaces = "({:},{:})".format(ports[0], ports[1])
+ else:
+ interfaces = "({:})".format(ports[0] if ports[0] != -1 else ports[1])
+
+ thread = "{:2} {:^7}".format(i, interfaces)
+ stats_table.add_row([thread, avg] + history[:show_len])
+ else:
+ stats_table.add_row(['No Data.'])
+ return {'cpu_util(%)': ExportableStats(None, stats_table)}
+
+ def _generate_mbuf_util_stats(self):
+ util_stats = self._util_stats_ref.get_stats(use_1sec_cache = True)
+ stats_table = text_tables.TRexTextTable()
+ if util_stats:
+ if 'mbuf_stats' not in util_stats:
+                raise Exception("Expecting 'mbuf_stats' section in stats %s" % util_stats)
+ mbuf_stats = util_stats['mbuf_stats']
+ for mbufs_per_socket in mbuf_stats.values():
+ first_socket_mbufs = mbufs_per_socket
+ break
+ if not self._util_stats_ref.mbuf_types_list:
+ mbuf_keys = list(first_socket_mbufs.keys())
+ mbuf_keys.sort(key = get_number_of_bytes)
+ self._util_stats_ref.mbuf_types_list = mbuf_keys
+ types_len = len(self._util_stats_ref.mbuf_types_list)
+ stats_table.set_cols_align(['l'] + ['r'] * (types_len + 1))
+ stats_table.set_cols_width([10] + [7] * (types_len + 1))
+ stats_table.set_cols_dtype(['t'] * (types_len + 2))
+ stats_table.header([''] + self._util_stats_ref.mbuf_types_list + ['RAM(MB)'])
+ total_list = []
+ sum_totals = 0
+ for mbuf_type in self._util_stats_ref.mbuf_types_list:
+ sum_totals += first_socket_mbufs[mbuf_type][1] * get_number_of_bytes(mbuf_type) + 64
+ total_list.append(first_socket_mbufs[mbuf_type][1])
+ sum_totals *= len(list(mbuf_stats.values()))
+ total_list.append(int(sum_totals/1e6))
+ stats_table.add_row(['Total:'] + total_list)
+ stats_table.add_row(['Used:'] + [''] * (types_len + 1))
+ for socket_name in sorted(list(mbuf_stats.keys())):
+ mbufs = mbuf_stats[socket_name]
+ socket_show_name = socket_name.replace('cpu-', '').replace('-', ' ').capitalize() + ':'
+ sum_used = 0
+ used_list = []
+ percentage_list = []
+ for mbuf_type in self._util_stats_ref.mbuf_types_list:
+ used = mbufs[mbuf_type][1] - mbufs[mbuf_type][0]
+ sum_used += used * get_number_of_bytes(mbuf_type) + 64
+ used_list.append(used)
+ percentage_list.append('%s%%' % int(100 * used / mbufs[mbuf_type][1]))
+ used_list.append(int(sum_used/1e6))
+ stats_table.add_row([socket_show_name] + used_list)
+ stats_table.add_row(['Percent:'] + percentage_list + [''])
+ else:
+ stats_table.add_row(['No Data.'])
+ return {'mbuf_util': ExportableStats(None, stats_table)}
+
+ def _generate_xstats(self, port_id_list, include_zero_lines = False):
+ relevant_ports = [port.port_id for port in self.__get_relevant_ports(port_id_list)]
+ # get the data on relevant ports
+ xstats_data = OrderedDict()
+ for port_id in relevant_ports:
+ for key, val in self._xstats_ref.get_stats(port_id).items():
+ if key not in xstats_data:
+ xstats_data[key] = []
+ xstats_data[key].append(val)
+
+ # put into table
+ stats_table = text_tables.TRexTextTable()
+ stats_table.header(['Name:'] + ['Port %s:' % port_id for port_id in relevant_ports])
+ stats_table.set_cols_align(['l'] + ['r'] * len(relevant_ports))
+ stats_table.set_cols_width([30] + [15] * len(relevant_ports))
+ stats_table.set_cols_dtype(['t'] * (len(relevant_ports) + 1))
+ for key, arr in xstats_data.items():
+ if include_zero_lines or list(filter(None, arr)):
+ key = key[:28]
+ stats_table.add_row([key] + arr)
+ return {'xstats:': ExportableStats(None, stats_table)}
+
+ @staticmethod
+ def _get_rational_block_char(value, range_start, interval):
+ # in Konsole, utf-8 is sometimes printed with artifacts, return ascii for now
+ #return 'X' if value >= range_start + float(interval) / 2 else ' '
+
+ if sys.__stdout__.encoding != 'UTF-8':
+ return 'X' if value >= range_start + float(interval) / 2 else ' '
+
+ value -= range_start
+ ratio = float(value) / interval
+ if ratio <= 0.0625:
+ return u' ' # empty block
+ if ratio <= 0.1875:
+ return u'\u2581' # 1/8
+ if ratio <= 0.3125:
+ return u'\u2582' # 2/8
+ if ratio <= 0.4375:
+ return u'\u2583' # 3/8
+ if ratio <= 0.5625:
+ return u'\u2584' # 4/8
+ if ratio <= 0.6875:
+ return u'\u2585' # 5/8
+ if ratio <= 0.8125:
+ return u'\u2586' # 6/8
+ if ratio <= 0.9375:
+ return u'\u2587' # 7/8
+ return u'\u2588' # full block
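+    # For example (sketch): value = 97, range_start = 95, interval = 5 gives
+    # ratio = 0.4, which falls into the (0.3125, 0.4375] bucket and renders
+    # as the 3/8 block u'\u2583'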
+
+ def _generate_port_graph(self, port_id_list):
+ relevant_port = self.__get_relevant_ports(port_id_list)[0]
+ hist_len = len(relevant_port.port_stats.history)
+ hist_maxlen = relevant_port.port_stats.history.maxlen
+ util_tx_hist = [0] * (hist_maxlen - hist_len) + [round(relevant_port.port_stats.history[i]['tx_percentage']) for i in range(hist_len)]
+ util_rx_hist = [0] * (hist_maxlen - hist_len) + [round(relevant_port.port_stats.history[i]['rx_percentage']) for i in range(hist_len)]
+
+
+ stats_table = text_tables.TRexTextTable()
+ stats_table.header([' Util(%)', 'TX', 'RX'])
+ stats_table.set_cols_align(['c', 'c', 'c'])
+ stats_table.set_cols_width([8, hist_maxlen, hist_maxlen])
+ stats_table.set_cols_dtype(['t', 't', 't'])
+
+ for y in range(95, -1, -5):
+ stats_table.add_row([y, ''.join([self._get_rational_block_char(util_tx, y, 5) for util_tx in util_tx_hist]),
+ ''.join([self._get_rational_block_char(util_rx, y, 5) for util_rx in util_rx_hist])])
+
+ return {"port_graph": ExportableStats({}, stats_table)}
+
+ def _generate_port_stats(self, port_id_list):
+ relevant_ports = self.__get_relevant_ports(port_id_list)
+
+ return_stats_data = {}
+ per_field_stats = OrderedDict([("owner", []),
+ ('link', []),
+ ("state", []),
+ ("speed", []),
+ ("CPU util.", []),
+ ("--", []),
+ ("Tx bps L2", []),
+ ("Tx bps L1", []),
+ ("Tx pps", []),
+ ("Line Util.", []),
+
+ ("---", []),
+ ("Rx bps", []),
+ ("Rx pps", []),
+
+ ("----", []),
+ ("opackets", []),
+ ("ipackets", []),
+ ("obytes", []),
+ ("ibytes", []),
+ ("tx-bytes", []),
+ ("rx-bytes", []),
+ ("tx-pkts", []),
+ ("rx-pkts", []),
+
+ ("-----", []),
+ ("oerrors", []),
+ ("ierrors", []),
+
+ ])
+
+ total_stats = CPortStats(None)
+
+ for port_obj in relevant_ports:
+ # fetch port data
+ port_stats = port_obj.generate_port_stats()
+
+ total_stats += port_obj.port_stats
+
+ # populate to data structures
+ return_stats_data[port_obj.port_id] = port_stats
+ self.__update_per_field_dict(port_stats, per_field_stats)
+
+ total_cols = len(relevant_ports)
+ header = ["port"] + [port.port_id for port in relevant_ports]
+
+ if (total_cols > 1):
+ self.__update_per_field_dict(total_stats.generate_stats(), per_field_stats)
+ header += ['total']
+ total_cols += 1
+
+ stats_table = text_tables.TRexTextTable()
+ stats_table.set_cols_align(["l"] + ["r"] * total_cols)
+ stats_table.set_cols_width([10] + [17] * total_cols)
+ stats_table.set_cols_dtype(['t'] + ['t'] * total_cols)
+
+ stats_table.add_rows([[k] + v
+ for k, v in per_field_stats.items()],
+ header=False)
+
+ stats_table.header(header)
+
+ return {"port_statistics": ExportableStats(return_stats_data, stats_table)}
+
+ def _generate_port_status(self, port_id_list):
+ relevant_ports = self.__get_relevant_ports(port_id_list)
+
+ return_stats_data = {}
+ per_field_status = OrderedDict([("driver", []),
+ ("description", []),
+ ("link status", []),
+ ("link speed", []),
+ ("port status", []),
+ ("promiscuous", []),
+ ("flow ctrl", []),
+ ("--", []),
+ ("HW src mac", []),
+ ("SW src mac", []),
+ ("SW dst mac", []),
+ ("---", []),
+ ("PCI Address", []),
+ ("NUMA Node", []),
+ ]
+ )
+
+ for port_obj in relevant_ports:
+ # fetch port data
+ # port_stats = self._async_stats.get_port_stats(port_obj.port_id)
+ port_status = port_obj.generate_port_status()
+
+ # populate to data structures
+ return_stats_data[port_obj.port_id] = port_status
+
+ self.__update_per_field_dict(port_status, per_field_status)
+
+ stats_table = text_tables.TRexTextTable()
+ stats_table.set_cols_align(["l"] + ["c"]*len(relevant_ports))
+ stats_table.set_cols_width([15] + [20] * len(relevant_ports))
+
+ stats_table.add_rows([[k] + v
+ for k, v in per_field_status.items()],
+ header=False)
+ stats_table.header(["port"] + [port.port_id
+ for port in relevant_ports])
+
+ return {"port_status": ExportableStats(return_stats_data, stats_table)}
+
+ def _generate_single_port_streams_info(self, port_obj, stream_id_list):
+
+ return_streams_data = port_obj.generate_loaded_streams_sum()
+
+ if not return_streams_data.get("streams"):
+ # we got no streams available
+ return None
+
+ # FORMAT VALUES ON DEMAND
+
+ # because we mutate this - deep copy before
+ return_streams_data = copy.deepcopy(return_streams_data)
+
+ p_type_field_len = 0
+
+ for stream_id, stream_id_sum in return_streams_data['streams'].items():
+ stream_id_sum['packet_type'] = self._trim_packet_headers(stream_id_sum['packet_type'], 30)
+ p_type_field_len = max(p_type_field_len, len(stream_id_sum['packet_type']))
+
+ info_table = text_tables.TRexTextTable()
+ info_table.set_cols_align(["c"] + ["l"] + ["r"] + ["c"] + ["r"] + ["c"])
+ info_table.set_cols_width([10] + [p_type_field_len] + [8] + [16] + [15] + [12])
+ info_table.set_cols_dtype(["t"] + ["t"] + ["t"] + ["t"] + ["t"] + ["t"])
+
+ info_table.add_rows([v.values()
+ for k, v in return_streams_data['streams'].items()],
+ header=False)
+ info_table.header(["ID", "packet type", "length", "mode", "rate", "next stream"])
+
+ return ExportableStats(return_streams_data, info_table)
+
+
+ def __get_relevant_ports(self, port_id_list):
+ # fetch owned ports
+ ports = [port_obj
+ for _, port_obj in self._ports_dict.items()
+ if port_obj.port_id in port_id_list]
+
+        # display only the first FOUR ports, by design
+ if len(ports) > 4:
+ #self.logger is not defined
+ #self.logger.log(format_text("[WARNING]: ", 'magenta', 'bold'), format_text("displaying up to 4 ports", 'magenta'))
+ ports = ports[:4]
+ return ports
+
+ def __update_per_field_dict(self, dict_src_data, dict_dest_ref):
+ for key, val in dict_src_data.items():
+ if key in dict_dest_ref:
+ dict_dest_ref[key].append(val)
+
+ @staticmethod
+ def _trim_packet_headers(headers_str, trim_limit):
+ if len(headers_str) < trim_limit:
+ # do nothing
+ return headers_str
+ else:
+ return (headers_str[:trim_limit-3] + "...")
+
+
+
+class CTRexStats(object):
+ """ This is an abstract class to represent a stats object """
+
+ def __init__(self):
+ self.reference_stats = {}
+ self.latest_stats = {}
+ self.last_update_ts = time.time()
+ self.history = deque(maxlen = 47)
+ self.lock = threading.Lock()
+ self.has_baseline = False
+
+ ######## abstract methods ##########
+
+ # get stats for user / API
+ def get_stats (self):
+ raise NotImplementedError()
+
+    # generate formatted stats (for TUI)
+ def generate_stats(self):
+ raise NotImplementedError()
+
+ # called when a snapshot arrives - add more fields
+ def _update (self, snapshot, baseline):
+ raise NotImplementedError()
+
+
+ ######## END abstract methods ##########
+
+ def update(self, snapshot, baseline):
+
+ # no update is valid before baseline
+ if not self.has_baseline and not baseline:
+ return
+
+ # call the underlying method
+ rc = self._update(snapshot)
+ if not rc:
+ return
+
+ # sync one time
+ if not self.has_baseline and baseline:
+ self.reference_stats = copy.deepcopy(self.latest_stats)
+ self.has_baseline = True
+
+ # save history
+ with self.lock:
+ self.history.append(self.latest_stats)
+
+
+ def clear_stats(self):
+ self.reference_stats = copy.deepcopy(self.latest_stats)
+ self.history.clear()
+
+
+ def invalidate (self):
+ self.latest_stats = {}
+
+
+    def _get (self, src, field, default = None):
+        if isinstance(field, list):
+            # deep
+            value = src
+            for level in field:
+                if level not in value:
+                    return default
+                value = value[level]
+        else:
+            # flat
+            if field not in src:
+                return default
+            value = src[field]
+
+ return value
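+    # For example (sketch): _get(stats, ['global', 'rx_err', 0], default = 0)
+    # walks stats['global']['rx_err'][0], returning 0 if any level is missing;
+    # a plain (non-list) field does a flat single-level lookup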
+
+ def get(self, field, format=False, suffix="", opts = None):
+ value = self._get(self.latest_stats, field)
+        if value is None:
+ return 'N/A'
+
+ return value if not format else format_num(value, suffix = suffix, opts = opts)
+
+
+ def get_rel(self, field, format=False, suffix=""):
+ ref_value = self._get(self.reference_stats, field)
+ latest_value = self._get(self.latest_stats, field)
+
+ # latest value is an aggregation - must contain the value
+        if latest_value is None:
+ return 'N/A'
+
+        if ref_value is None:
+ ref_value = 0
+
+ value = latest_value - ref_value
+
+ return value if not format else format_num(value, suffix)
+
+
+    # get the trend for a field
+    def get_trend (self, field, use_raw = False, precision = 10.0):
+        if field not in self.latest_stats:
+            return 0
+
+        # not enough history - no trend
+        if len(self.history) < 5:
+            return 0
+
+        # an absolute value below the threshold is considered noise
+        if self.latest_stats[field] < precision:
+            return 0
+
+ # must lock, deque is not thread-safe for iteration
+ with self.lock:
+ field_samples = [sample[field] for sample in list(self.history)[-5:]]
+
+ if use_raw:
+ return calculate_diff_raw(field_samples)
+ else:
+ return calculate_diff(field_samples)
+
+
+ def get_trend_gui (self, field, show_value = False, use_raw = False, up_color = 'red', down_color = 'green'):
+ v = self.get_trend(field, use_raw)
+
+ value = abs(v)
+
+ # use arrows if utf-8 is supported
+ if sys.__stdout__.encoding == 'UTF-8':
+ arrow = u'\u25b2' if v > 0 else u'\u25bc'
+ else:
+ arrow = ''
+
+ if sys.version_info < (3,0):
+ arrow = arrow.encode('utf-8')
+
+ color = up_color if v > 0 else down_color
+
+        # a change below 1% is not meaningful
+ if value < 1:
+ return ""
+
+ elif value > 5:
+
+ if show_value:
+ return format_text("{0}{0}{0} {1:.2f}%".format(arrow,v), color)
+ else:
+ return format_text("{0}{0}{0}".format(arrow), color)
+
+ elif value > 2:
+
+ if show_value:
+ return format_text("{0}{0} {1:.2f}%".format(arrow,v), color)
+ else:
+ return format_text("{0}{0}".format(arrow), color)
+
+ else:
+ if show_value:
+ return format_text("{0} {1:.2f}%".format(arrow,v), color)
+ else:
+ return format_text("{0}".format(arrow), color)
+
+
+
+class CGlobalStats(CTRexStats):
+
+ def __init__(self, connection_info, server_version, ports_dict_ref, events_handler):
+ super(CGlobalStats, self).__init__()
+
+ self.connection_info = connection_info
+ self.server_version = server_version
+ self._ports_dict = ports_dict_ref
+ self.events_handler = events_handler
+
+ self.watched_cpu_util = WatchedField('CPU util.', '%', 85, 60, events_handler)
+ self.watched_rx_cpu_util = WatchedField('RX core util.', '%', 85, 60, events_handler)
+
+ def get_stats (self):
+ stats = {}
+
+ # absolute
+ stats['cpu_util'] = self.get("m_cpu_util")
+ stats['rx_cpu_util'] = self.get("m_rx_cpu_util")
+ stats['bw_per_core'] = self.get("m_bw_per_core")
+
+ stats['tx_bps'] = self.get("m_tx_bps")
+ stats['tx_pps'] = self.get("m_tx_pps")
+
+ stats['rx_bps'] = self.get("m_rx_bps")
+ stats['rx_pps'] = self.get("m_rx_pps")
+ stats['rx_drop_bps'] = self.get("m_rx_drop_bps")
+
+ # relatives
+ stats['queue_full'] = self.get_rel("m_total_queue_full")
+
+ return stats
+
+
+
+ def _update(self, snapshot):
+ # L1 bps
+ bps = snapshot.get("m_tx_bps")
+ pps = snapshot.get("m_tx_pps")
+
+ snapshot['m_tx_bps_L1'] = calc_bps_L1(bps, pps)
+
+
+ # simple...
+ self.latest_stats = snapshot
+
+ self.watched_cpu_util.update(snapshot.get('m_cpu_util'))
+ self.watched_rx_cpu_util.update(snapshot.get('m_rx_cpu_util'))
+
+ return True
+
+
+class CPortStats(CTRexStats):
+
+ def __init__(self, port_obj):
+ super(CPortStats, self).__init__()
+ self._port_obj = port_obj
+
+ @staticmethod
+ def __merge_dicts (target, src):
+ for k, v in src.items():
+ if k in target:
+ target[k] += v
+ else:
+ target[k] = v
+
+
+ def __add__ (self, x):
+        if not isinstance(x, CPortStats):
+            raise TypeError("cannot add a non-stats object to stats")
+
+ # main stats
+ if not self.latest_stats:
+ self.latest_stats = {}
+
+ self.__merge_dicts(self.latest_stats, x.latest_stats)
+
+ # reference stats
+ if x.reference_stats:
+ if not self.reference_stats:
+ self.reference_stats = x.reference_stats.copy()
+ else:
+ self.__merge_dicts(self.reference_stats, x.reference_stats)
+
+        # history - should be traversed with a lock
+ with self.lock, x.lock:
+ if not self.history:
+ self.history = copy.deepcopy(x.history)
+ else:
+ for h1, h2 in zip(self.history, x.history):
+ self.__merge_dicts(h1, h2)
+
+ return self
+
+ # for port we need to do something smarter
+ def get_stats (self):
+ stats = {}
+
+ stats['opackets'] = self.get_rel("opackets")
+ stats['ipackets'] = self.get_rel("ipackets")
+ stats['obytes'] = self.get_rel("obytes")
+ stats['ibytes'] = self.get_rel("ibytes")
+ stats['oerrors'] = self.get_rel("oerrors")
+ stats['ierrors'] = self.get_rel("ierrors")
+
+ stats['tx_bps'] = self.get("m_total_tx_bps")
+ stats['tx_pps'] = self.get("m_total_tx_pps")
+ stats['tx_bps_L1'] = self.get("m_total_tx_bps_L1")
+ stats['tx_util'] = self.get("m_tx_util")
+
+ stats['rx_bps'] = self.get("m_total_rx_bps")
+ stats['rx_pps'] = self.get("m_total_rx_pps")
+ stats['rx_bps_L1'] = self.get("m_total_rx_bps_L1")
+ stats['rx_util'] = self.get("m_rx_util")
+
+ return stats
+
+
+
+ def _update(self, snapshot):
+ speed = self._port_obj.get_speed_bps()
+
+ # L1 bps
+ tx_bps = snapshot.get("m_total_tx_bps")
+ tx_pps = snapshot.get("m_total_tx_pps")
+ rx_bps = snapshot.get("m_total_rx_bps")
+ rx_pps = snapshot.get("m_total_rx_pps")
+ ts_diff = 0.5 # TODO: change this to real ts diff from server
+
+ bps_tx_L1 = calc_bps_L1(tx_bps, tx_pps)
+ bps_rx_L1 = calc_bps_L1(rx_bps, rx_pps)
+
+ snapshot['m_total_tx_bps_L1'] = bps_tx_L1
+ if speed:
+ snapshot['m_tx_util'] = (bps_tx_L1 / speed) * 100.0
+ else:
+ snapshot['m_tx_util'] = 0
+
+ snapshot['m_total_rx_bps_L1'] = bps_rx_L1
+ if speed:
+ snapshot['m_rx_util'] = (bps_rx_L1 / speed) * 100.0
+ else:
+ snapshot['m_rx_util'] = 0
+
+ # TX line util not smoothed
+ diff_tx_pkts = snapshot.get('opackets', 0) - self.latest_stats.get('opackets', 0)
+ diff_tx_bytes = snapshot.get('obytes', 0) - self.latest_stats.get('obytes', 0)
+ tx_bps_L1 = calc_bps_L1(8.0 * diff_tx_bytes / ts_diff, float(diff_tx_pkts) / ts_diff)
+ if speed:
+ snapshot['tx_percentage'] = 100.0 * tx_bps_L1 / speed
+ else:
+ snapshot['tx_percentage'] = 0
+
+ # RX line util not smoothed
+ diff_rx_pkts = snapshot.get('ipackets', 0) - self.latest_stats.get('ipackets', 0)
+ diff_rx_bytes = snapshot.get('ibytes', 0) - self.latest_stats.get('ibytes', 0)
+ rx_bps_L1 = calc_bps_L1(8.0 * diff_rx_bytes / ts_diff, float(diff_rx_pkts) / ts_diff)
+ if speed:
+ snapshot['rx_percentage'] = 100.0 * rx_bps_L1 / speed
+ else:
+ snapshot['rx_percentage'] = 0
+
+ # simple...
+ self.latest_stats = snapshot
+
+ return True
+
+
+ def generate_stats(self):
+
+ port_state = self._port_obj.get_port_state_name() if self._port_obj else ""
+ if port_state == "TRANSMITTING":
+ port_state = format_text(port_state, 'green', 'bold')
+ elif port_state == "PAUSE":
+ port_state = format_text(port_state, 'magenta', 'bold')
+ else:
+ port_state = format_text(port_state, 'bold')
+
+ if self._port_obj:
+ if 'link' in self._port_obj.attr:
+ if self._port_obj.attr.get('link', {}).get('up') == False:
+ link_state = format_text('DOWN', 'red', 'bold')
+ else:
+ link_state = 'UP'
+ else:
+ link_state = 'N/A'
+ else:
+ link_state = ''
+
+ # default rate format modifiers
+ rate_format = {'bpsl1': None, 'bps': None, 'pps': None, 'percentage': 'bold'}
+
+ # mark owned ports by color
+ if self._port_obj:
+ owner = self._port_obj.get_owner()
+ rate_format[self._port_obj.last_factor_type] = ('blue', 'bold')
+ if self._port_obj.is_acquired():
+ owner = format_text(owner, 'green')
+
+ else:
+ owner = ''
+
+
+ return {"owner": owner,
+ "state": "{0}".format(port_state),
+ 'link': link_state,
+ "speed": self._port_obj.get_formatted_speed() if self._port_obj else '',
+ "CPU util.": "{0} {1}%".format(self.get_trend_gui("m_cpu_util", use_raw = True),
+ format_threshold(round_float(self.get("m_cpu_util")), [85, 100], [0, 85])) if self._port_obj else '' ,
+ "--": " ",
+ "---": " ",
+ "----": " ",
+ "-----": " ",
+
+ "Tx bps L1": "{0} {1}".format(self.get_trend_gui("m_total_tx_bps_L1", show_value = False),
+ self.get("m_total_tx_bps_L1", format = True, suffix = "bps", opts = rate_format['bpsl1'])),
+
+ "Tx bps L2": "{0} {1}".format(self.get_trend_gui("m_total_tx_bps", show_value = False),
+ self.get("m_total_tx_bps", format = True, suffix = "bps", opts = rate_format['bps'])),
+
+ "Line Util.": "{0} {1}".format(self.get_trend_gui("m_tx_util", show_value = False) if self._port_obj else "",
+ self.get("m_tx_util", format = True, suffix = "%", opts = rate_format['percentage']) if self._port_obj else ""),
+
+ "Rx bps": "{0} {1}".format(self.get_trend_gui("m_total_rx_bps", show_value = False),
+ self.get("m_total_rx_bps", format = True, suffix = "bps")),
+
+ "Tx pps": "{0} {1}".format(self.get_trend_gui("m_total_tx_pps", show_value = False),
+ self.get("m_total_tx_pps", format = True, suffix = "pps", opts = rate_format['pps'])),
+
+ "Rx pps": "{0} {1}".format(self.get_trend_gui("m_total_rx_pps", show_value = False),
+ self.get("m_total_rx_pps", format = True, suffix = "pps")),
+
+ "opackets" : self.get_rel("opackets"),
+ "ipackets" : self.get_rel("ipackets"),
+ "obytes" : self.get_rel("obytes"),
+ "ibytes" : self.get_rel("ibytes"),
+
+ "tx-bytes": self.get_rel("obytes", format = True, suffix = "B"),
+ "rx-bytes": self.get_rel("ibytes", format = True, suffix = "B"),
+ "tx-pkts": self.get_rel("opackets", format = True, suffix = "pkts"),
+ "rx-pkts": self.get_rel("ipackets", format = True, suffix = "pkts"),
+
+ "oerrors" : format_num(self.get_rel("oerrors"),
+ compact = False,
+ opts = 'green' if (self.get_rel("oerrors")== 0) else 'red'),
+
+ "ierrors" : format_num(self.get_rel("ierrors"),
+ compact = False,
+ opts = 'green' if (self.get_rel("ierrors")== 0) else 'red'),
+
+ }
+
+
+class CLatencyStats(CTRexStats):
+ def __init__(self, ports):
+ super(CLatencyStats, self).__init__()
+
+
+ # for API
+ def get_stats (self):
+ return copy.deepcopy(self.latest_stats)
+
+
+ def _update(self, snapshot):
+ if snapshot is None:
+ snapshot = {}
+ output = {}
+
+ output['global'] = {}
+ for field in ['bad_hdr', 'old_flow']:
+ if 'global' in snapshot and field in snapshot['global']:
+ output['global'][field] = snapshot['global'][field]
+ else:
+ output['global'][field] = 0
+
+ # we care only about the current active keys
+ pg_ids = list(filter(is_intable, snapshot.keys()))
+
+ for pg_id in pg_ids:
+ current_pg = snapshot.get(pg_id)
+ int_pg_id = int(pg_id)
+ output[int_pg_id] = {}
+ output[int_pg_id]['err_cntrs'] = current_pg['err_cntrs']
+ output[int_pg_id]['latency'] = {}
+
+ if 'latency' in current_pg:
+ for field in ['jitter', 'average', 'total_max', 'last_max']:
+ if field in current_pg['latency']:
+ output[int_pg_id]['latency'][field] = current_pg['latency'][field]
+ else:
+ output[int_pg_id]['latency'][field] = StatNotAvailable(field)
+
+ if 'histogram' in current_pg['latency']:
+ output[int_pg_id]['latency']['histogram'] = {int(elem): current_pg['latency']['histogram'][elem]
+ for elem in current_pg['latency']['histogram']}
+ min_val = min(output[int_pg_id]['latency']['histogram'].keys())
+ if min_val == 0:
+ min_val = 2
+ output[int_pg_id]['latency']['total_min'] = min_val
+ else:
+ output[int_pg_id]['latency']['total_min'] = StatNotAvailable('total_min')
+ output[int_pg_id]['latency']['histogram'] = {}
+
+ self.latest_stats = output
+ return True
+
+
+# RX stats objects - COMPLEX :-(
+class CRxStats(CTRexStats):
+ def __init__(self, ports):
+ super(CRxStats, self).__init__()
+ self.ports = ports
+
+
+    # calculates the time difference in seconds between the previous
+    # snapshot and the current one
+    def calculate_diff_sec (self, current, prev):
+        if 'ts' not in current:
+            raise ValueError("INTERNAL ERROR: RX stats snapshot MUST contain 'ts' field")
+
+ if prev:
+ prev_ts = prev['ts']
+ now_ts = current['ts']
+ diff_sec = (now_ts['value'] - prev_ts['value']) / float(now_ts['freq'])
+ else:
+ diff_sec = 0.0
+
+ return diff_sec
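+    # For example (sketch): prev ts = {'value': 1000, 'freq': 1000} and
+    # current ts = {'value': 1500, 'freq': 1000} give
+    # diff_sec = (1500 - 1000) / 1000.0 = 0.5 seconds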
+
+
+    # this is the heart of the complexity - merge a single PG's current
+    # snapshot with the previous one
+    def process_single_pg (self, current_pg, prev_pg):
+
+ # start with the previous PG
+ output = copy.deepcopy(prev_pg)
+
+        for field in ['tx_pkts', 'tx_bytes', 'rx_pkts', 'rx_bytes']:
+            # first time? (nothing in prev)
+            if field not in output:
+                output[field] = {}
+
+            # does the current snapshot have this field?
+            if field in current_pg:
+ for port, pv in current_pg[field].items():
+ if not is_intable(port):
+ continue
+
+ output[field][port] = pv
+
+ # sum up
+ total = None
+ for port, pv in output[field].items():
+ if not is_intable(port):
+ continue
+ if total is None:
+ total = 0
+ total += pv
+
+ output[field]['total'] = total
+
+
+ return output
+
+
+ def process_snapshot (self, current, prev):
+
+ # final output
+ output = {}
+
+ # copy timestamp field
+ output['ts'] = current['ts']
+
+ # global (not per pg_id) error counters
+ output['global'] = {}
+ for field in ['rx_err', 'tx_err']:
+ output['global'][field] = {}
+ if 'global' in current and field in current['global']:
+ for port in current['global'][field]:
+ output['global'][field][int(port)] = current['global'][field][port]
+
+ # we care only about the current active keys
+ pg_ids = list(filter(is_intable, current.keys()))
+
+ for pg_id in pg_ids:
+
+ current_pg = current.get(pg_id, {})
+
+ # first time - we do not care
+ if current_pg.get('first_time'):
+ # new value - ignore history
+ output[pg_id] = self.process_single_pg(current_pg, {})
+ self.reference_stats[pg_id] = {}
+
+ # 'dry' B/W
+ self.calculate_bw_for_pg(output[pg_id])
+
+ else:
+ # aggregate the two values
+ prev_pg = prev.get(pg_id, {})
+ output[pg_id] = self.process_single_pg(current_pg, prev_pg)
+
+ # calculate B/W
+ diff_sec = self.calculate_diff_sec(current, prev)
+ self.calculate_bw_for_pg(output[pg_id], prev_pg, diff_sec)
+
+
+        # clean up old reference values - they are dead
+ ref_pg_ids = list(filter(is_intable, self.reference_stats.keys()))
+
+ deleted_pg_ids = set(ref_pg_ids).difference(pg_ids)
+ for d_pg_id in deleted_pg_ids:
+ del self.reference_stats[d_pg_id]
+
+ return output
+
+
+
+ def calculate_bw_for_pg (self, pg_current, pg_prev = None, diff_sec = 0.0):
+ # no previous values
+ if (not pg_prev) or not (diff_sec > 0):
+ pg_current['tx_pps'] = {}
+ pg_current['tx_bps'] = {}
+ pg_current['tx_bps_L1'] = {}
+ pg_current['tx_line_util'] = {}
+ pg_current['rx_pps'] = {}
+ pg_current['rx_bps'] = {}
+ pg_current['rx_bps_L1'] = {}
+ pg_current['rx_line_util'] = {}
+
+ pg_current['tx_pps_lpf'] = {}
+ pg_current['tx_bps_lpf'] = {}
+ pg_current['tx_bps_L1_lpf'] = {}
+ pg_current['rx_pps_lpf'] = {}
+ pg_current['rx_bps_lpf'] = {}
+ pg_current['rx_bps_L1_lpf'] = {}
+ return
+
+ # TX
+ for port in pg_current['tx_pkts'].keys():
+
+ prev_tx_pps = pg_prev['tx_pps'].get(port)
+ now_tx_pkts = pg_current['tx_pkts'].get(port)
+ prev_tx_pkts = pg_prev['tx_pkts'].get(port)
+ pg_current['tx_pps'][port], pg_current['tx_pps_lpf'][port] = self.calc_pps(prev_tx_pps, now_tx_pkts, prev_tx_pkts, diff_sec)
+
+ prev_tx_bps = pg_prev['tx_bps'].get(port)
+ now_tx_bytes = pg_current['tx_bytes'].get(port)
+ prev_tx_bytes = pg_prev['tx_bytes'].get(port)
+
+ pg_current['tx_bps'][port], pg_current['tx_bps_lpf'][port] = self.calc_bps(prev_tx_bps, now_tx_bytes, prev_tx_bytes, diff_sec)
+
+ if pg_current['tx_bps'].get(port) != None and pg_current['tx_pps'].get(port) != None:
+ pg_current['tx_bps_L1'][port] = calc_bps_L1(pg_current['tx_bps'][port], pg_current['tx_pps'][port])
+ pg_current['tx_bps_L1_lpf'][port] = calc_bps_L1(pg_current['tx_bps_lpf'][port], pg_current['tx_pps_lpf'][port])
+ else:
+ pg_current['tx_bps_L1'][port] = None
+ pg_current['tx_bps_L1_lpf'][port] = None
+
+
+ # RX
+ for port in pg_current['rx_pkts'].keys():
+
+ prev_rx_pps = pg_prev['rx_pps'].get(port)
+ now_rx_pkts = pg_current['rx_pkts'].get(port)
+ prev_rx_pkts = pg_prev['rx_pkts'].get(port)
+ pg_current['rx_pps'][port], pg_current['rx_pps_lpf'][port] = self.calc_pps(prev_rx_pps, now_rx_pkts, prev_rx_pkts, diff_sec)
+
+ prev_rx_bps = pg_prev['rx_bps'].get(port)
+ now_rx_bytes = pg_current['rx_bytes'].get(port)
+ prev_rx_bytes = pg_prev['rx_bytes'].get(port)
+ pg_current['rx_bps'][port], pg_current['rx_bps_lpf'][port] = self.calc_bps(prev_rx_bps, now_rx_bytes, prev_rx_bytes, diff_sec)
+ if pg_current['rx_bps'].get(port) != None and pg_current['rx_pps'].get(port) != None:
+ pg_current['rx_bps_L1'][port] = calc_bps_L1(pg_current['rx_bps'][port], pg_current['rx_pps'][port])
+ pg_current['rx_bps_L1_lpf'][port] = calc_bps_L1(pg_current['rx_bps_lpf'][port], pg_current['rx_pps_lpf'][port])
+ else:
+ pg_current['rx_bps_L1'][port] = None
+ pg_current['rx_bps_L1_lpf'][port] = None
+
+
+ def calc_pps (self, prev_bw, now, prev, diff_sec):
+ return self.calc_bw(prev_bw, now, prev, diff_sec, False)
+
+
+ def calc_bps (self, prev_bw, now, prev, diff_sec):
+ return self.calc_bw(prev_bw, now, prev, diff_sec, True)
+
+    # returns a tuple - the first value is real, the second is low-pass filtered
+ def calc_bw (self, prev_bw, now, prev, diff_sec, is_bps):
+ # B/W is not valid when the values are None
+ if (now is None) or (prev is None):
+ return (None, None)
+
+ # calculate the B/W for current snapshot
+ current_bw = (now - prev) / diff_sec
+ if is_bps:
+ current_bw *= 8
+
+ # previous B/W is None ? ignore it
+ if prev_bw is None:
+ prev_bw = 0
+
+ return (current_bw, 0.5 * prev_bw + 0.5 * current_bw)
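+    # For example (sketch): diff_sec = 1.0, now = 2000 bytes, prev = 1000 bytes
+    # and is_bps = True give an instantaneous rate of 8000 bps; with a previous
+    # smoothed rate of 4000 bps the LPF value is 0.5 * 4000 + 0.5 * 8000 = 6000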
+
+
+
+
+    def _update (self, snapshot):
+        # generate a new snapshot
+        new_snapshot = self.process_snapshot(snapshot, self.latest_stats)
+
+        # advance
+        self.latest_stats = new_snapshot
+
+        return True
+
+
+
+ # for API
+ def get_stats (self):
+ stats = {}
+
+ for pg_id, value in self.latest_stats.items():
+            # skip non-int keys
+            if not is_intable(pg_id):
+                # 'global' stats live at the same level as the pg_ids; we do want them passed on to the user
+ if pg_id == 'global':
+ stats[pg_id] = value
+ continue
+ # bare counters
+ stats[int(pg_id)] = {}
+ for field in ['tx_pkts', 'tx_bytes', 'rx_pkts', 'rx_bytes']:
+ val = self.get_rel([pg_id, field, 'total'])
+ stats[int(pg_id)][field] = {'total': val if val != 'N/A' else StatNotAvailable(field)}
+ for port in value[field].keys():
+ if is_intable(port):
+ val = self.get_rel([pg_id, field, port])
+ stats[int(pg_id)][field][int(port)] = val if val != 'N/A' else StatNotAvailable(field)
+
+ # BW values
+ for field in ['tx_pps', 'tx_bps', 'tx_bps_L1', 'rx_pps', 'rx_bps', 'rx_bps_L1']:
+ val = self.get([pg_id, field, 'total'])
+ stats[int(pg_id)][field] = {'total': val if val != 'N/A' else StatNotAvailable(field)}
+ for port in value[field].keys():
+ if is_intable(port):
+ val = self.get([pg_id, field, port])
+ stats[int(pg_id)][field][int(port)] = val if val != 'N/A' else StatNotAvailable(field)
+
+ return stats
+
+class CUtilStats(CTRexStats):
+
+ def __init__(self, client):
+ super(CUtilStats, self).__init__()
+ self.client = client
+ self.history = deque(maxlen = 1)
+ self.mbuf_types_list = None
+ self.last_update_ts = -999
+
+ def get_stats(self, use_1sec_cache = False):
+ time_now = time.time()
+ if self.last_update_ts + 1 < time_now or not self.history or not use_1sec_cache:
+ if self.client.is_connected():
+ rc = self.client._transmit('get_utilization')
+ if not rc:
+ raise STLError(rc)
+ self.last_update_ts = time_now
+ self.history.append(rc.data())
+ else:
+ self.history.append({})
+
+ return self.history[-1]
+
+class CXStats(CTRexStats):
+
+ def __init__(self, client):
+ super(CXStats, self).__init__()
+ self.client = client
+ self.names = []
+ self.last_update_ts = -999
+
+ def clear_stats(self, port_id = None):
+        if port_id is None:
+ ports = self.client.get_all_ports()
+ elif type(port_id) is list:
+ ports = port_id
+ else:
+ ports = [port_id]
+
+ for port_id in ports:
+ self.reference_stats[port_id] = self.get_stats(port_id, relative = False)
+
+ def get_stats(self, port_id, use_1sec_cache = False, relative = True):
+ time_now = time.time()
+ if self.last_update_ts + 1 < time_now or not self.latest_stats or not use_1sec_cache:
+ if self.client.is_connected():
+ rc = self.client._transmit('get_port_xstats_values', params = {'port_id': port_id})
+ if not rc:
+ raise STLError(rc)
+ self.last_update_ts = time_now
+ values = rc.data().get('xstats_values', [])
+ if len(values) != len(self.names): # need to update names ("keys")
+ rc = self.client._transmit('get_port_xstats_names', params = {'port_id': port_id})
+ if not rc:
+ raise STLError(rc)
+ self.names = rc.data().get('xstats_names', [])
+ if len(values) != len(self.names):
+                        raise STLError('Length mismatch: got %s xstats names but %s xstats values' % (len(self.names), len(values)))
+ self.latest_stats[port_id] = OrderedDict([(key, val) for key, val in zip(self.names, values)])
+
+ stats = OrderedDict()
+ for key, val in self.latest_stats[port_id].items():
+ if relative:
+ stats[key] = self.get_rel([port_id, key])
+ else:
+ stats[key] = self.get([port_id, key])
+ return stats
+
+if __name__ == "__main__":
+ pass
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py
new file mode 100644
index 00000000..30fdb2dd
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py
@@ -0,0 +1,78 @@
+from .trex_stl_streams import *
+from .trex_stl_packet_builder_scapy import *
+
+# discover how the ports are wired by sending distinctive bursts
+# note: this will destroy all streams/data on the ports
+def stl_map_ports (client, ports = None):
+ # by default use all ports
+ if ports is None:
+ ports = client.get_all_ports()
+
+ stl_send_3_pkts(client, ports)
+
+ tx_pkts = {}
+ pkts = 1
+ base_pkt = STLPktBuilder(pkt = Ether()/IP())
+
+ for port in ports:
+ tx_pkts[pkts] = port
+ stream = STLStream(packet = base_pkt,
+ mode = STLTXSingleBurst(pps = 100000, total_pkts = pkts * 3))
+
+ client.add_streams(stream, [port])
+
+ pkts *= 2
+
+ # inject
+ client.clear_stats()
+ client.start(ports, mult = "50%")
+ client.wait_on_traffic(ports)
+
+ stats = client.get_stats()
+
+ # cleanup
+ client.reset(ports = ports)
+
+ table = {'map': {}, 'bi' : [], 'unknown': []}
+
+ # actual mapping
+ for port in ports:
+
+        ipackets = int(round(stats[port]["ipackets"] / 3.0)) # majority of the 3 sent copies, to filter out random noise
+ table['map'][port] = None
+
+ for pkts in tx_pkts.keys():
+ if ( (pkts & ipackets) == pkts ):
+ tx_port = tx_pkts[pkts]
+ table['map'][port] = tx_port
+
+ unmapped = list(ports)
+ while len(unmapped) > 0:
+ port_a = unmapped.pop(0)
+ port_b = table['map'][port_a]
+
+ # if unknown - add to the unknown list
+        if port_b is None:
+ table['unknown'].append(port_a)
+ # self-loop, due to bug?
+ elif port_a == port_b:
+ continue
+ # bi-directional ports
+ elif (table['map'][port_b] == port_a):
+ unmapped.remove(port_b)
+ table['bi'].append( (port_a, port_b) )
+
+ return table
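+
+# Illustrative usage (sketch, assuming a connected STLClient instance 'c'):
+#
+#   table = stl_map_ports(c)
+#   print(table['bi'])        # e.g. [(0, 1), (2, 3)] for two cabled pairs
+#
+# each port i bursts a distinct power-of-two packet count (times 3), so a
+# peer's rx counter, divided by 3, encodes which sender bits it received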
+
+# reset ports and send 3 packets from each acquired port
+def stl_send_3_pkts(client, ports = None):
+
+ base_pkt = STLPktBuilder(pkt = Ether()/IP())
+ stream = STLStream(packet = base_pkt,
+ mode = STLTXSingleBurst(pps = 100000, total_pkts = 3))
+
+ client.reset(ports)
+ client.add_streams(stream, ports)
+ client.start(ports, mult = "50%")
+ client.wait_on_traffic(ports)
+ client.reset(ports)
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
new file mode 100755
index 00000000..e63f9125
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
@@ -0,0 +1,1346 @@
+#!/router/bin/python
+
+from .trex_stl_exceptions import *
+from .trex_stl_types import verify_exclusive_arg, validate_type
+from .trex_stl_packet_builder_interface import CTrexPktBuilderInterface
+from .trex_stl_packet_builder_scapy import *
+from collections import OrderedDict, namedtuple
+
+from scapy.utils import ltoa
+from scapy.error import Scapy_Exception
+import random
+import yaml
+import base64
+import string
+import traceback
+import copy
+import imp
+
+
+# base class for TX mode
+class STLTXMode(object):
+    """ Base TX mode - defines the rate and its unit """
+
+ def __init__ (self, pps = None, bps_L1 = None, bps_L2 = None, percentage = None):
+ """
+        Speed can be given in packets per second (pps), L1/L2 bps, or percent of the port rate.
+        Use exactly one unit, for example pps = 10000 or bps_L1 = 10.
+
+ :parameters:
+ pps : float
+ Packets per second
+
+ bps_L1 : float
+ Bits per second L1 (with IPG)
+
+ bps_L2 : float
+ Bits per second L2 (Ethernet-FCS)
+
+            percentage : float
+                Percent of the port link speed (0-100]. Example: 10 means 10% of the link rate
+
+ .. code-block:: python
+
+ # STLTXMode Example
+
+ mode = STLTXCont(pps = 10)
+
+ mode = STLTXCont(bps_L1 = 10000000) #10mbps L1
+
+ mode = STLTXCont(bps_L2 = 10000000) #10mbps L2
+
+ mode = STLTXCont(percentage = 10) #10%
+
+ """
+
+ args = [pps, bps_L1, bps_L2, percentage]
+
+ # default
+ if all([x is None for x in args]):
+ pps = 1.0
+ else:
+ verify_exclusive_arg(args)
+
+ self.fields = {'rate': {}}
+
+ if pps is not None:
+ validate_type('pps', pps, [float, int])
+
+ self.fields['rate']['type'] = 'pps'
+ self.fields['rate']['value'] = pps
+
+ elif bps_L1 is not None:
+ validate_type('bps_L1', bps_L1, [float, int])
+
+ self.fields['rate']['type'] = 'bps_L1'
+ self.fields['rate']['value'] = bps_L1
+
+ elif bps_L2 is not None:
+ validate_type('bps_L2', bps_L2, [float, int])
+
+ self.fields['rate']['type'] = 'bps_L2'
+ self.fields['rate']['value'] = bps_L2
+
+ elif percentage is not None:
+ validate_type('percentage', percentage, [float, int])
+ if not (percentage > 0 and percentage <= 100):
+ raise STLArgumentError('percentage', percentage)
+
+ self.fields['rate']['type'] = 'percentage'
+ self.fields['rate']['value'] = percentage
+
+
+
+ def to_json (self):
+ return self.fields
+
+
+# continuous mode
+class STLTXCont(STLTXMode):
+ """ Continuous mode """
+
+ def __init__ (self, **kwargs):
+ """
+ Continuous mode
+
+ see :class:`trex_stl_lib.trex_stl_streams.STLTXMode` for rate
+
+ .. code-block:: python
+
+ # STLTXCont Example
+
+ mode = STLTXCont(pps = 10)
+
+ """
+ super(STLTXCont, self).__init__(**kwargs)
+
+
+ self.fields['type'] = 'continuous'
+
+ @staticmethod
+ def __str__ ():
+ return "Continuous"
+
+# single burst mode
+class STLTXSingleBurst(STLTXMode):
+ """ Single burst mode """
+
+ def __init__ (self, total_pkts = 1, **kwargs):
+ """
+ Single burst mode
+
+ :parameters:
+ total_pkts : int
+ Number of packets for this burst
+
+ see :class:`trex_stl_lib.trex_stl_streams.STLTXMode` for rate
+
+ .. code-block:: python
+
+ # STLTXSingleBurst Example
+
+ mode = STLTXSingleBurst( pps = 10, total_pkts = 1)
+
+ """
+
+
+ if not isinstance(total_pkts, int):
+ raise STLArgumentError('total_pkts', total_pkts)
+
+ super(STLTXSingleBurst, self).__init__(**kwargs)
+
+ self.fields['type'] = 'single_burst'
+ self.fields['total_pkts'] = total_pkts
+
+ @staticmethod
+ def __str__ ():
+ return "Single Burst"
+
+# multi burst mode
+class STLTXMultiBurst(STLTXMode):
+ """ Multi-burst mode """
+
+ def __init__ (self,
+ pkts_per_burst = 1,
+ ibg = 0.0, # usec not SEC
+ count = 1,
+ **kwargs):
+ """
+ Multi-burst mode
+
+ :parameters:
+
+ pkts_per_burst: int
+ Number of packets per burst
+
+ ibg : float
+ Inter-burst gap in usec (1,000,000.0 usec is 1 sec)
+
+ count : int
+ Number of bursts
+
+ see :class:`trex_stl_lib.trex_stl_streams.STLTXMode` for rate
+
+ .. code-block:: python
+
+ # STLTXMultiBurst Example
+
+ mode = STLTXMultiBurst(pps = 10, pkts_per_burst = 1, count = 10, ibg = 10.0)
+
+ """
+
+
+ if not isinstance(pkts_per_burst, int):
+ raise STLArgumentError('pkts_per_burst', pkts_per_burst)
+
+ if not isinstance(ibg, (int, float)):
+ raise STLArgumentError('ibg', ibg)
+
+ if not isinstance(count, int):
+ raise STLArgumentError('count', count)
+
+ super(STLTXMultiBurst, self).__init__(**kwargs)
+
+ self.fields['type'] = 'multi_burst'
+ self.fields['pkts_per_burst'] = pkts_per_burst
+ self.fields['ibg'] = ibg
+ self.fields['count'] = count
+
+ @staticmethod
+ def __str__ ():
+ return "Multi Burst"
+
+STLStreamDstMAC_CFG_FILE=0
+STLStreamDstMAC_PKT =1
+STLStreamDstMAC_ARP =2
+
+class STLFlowStatsInterface(object):
+ def __init__ (self, pg_id):
+ self.fields = {}
+ self.fields['enabled'] = True
+ self.fields['stream_id'] = pg_id
+
+ def to_json (self):
+ """ Dump as json"""
+ return dict(self.fields)
+
+ @staticmethod
+ def defaults ():
+ return {'enabled' : False}
+
+
+class STLFlowStats(STLFlowStatsInterface):
+ """ Define per stream basic stats
+
+ .. code-block:: python
+
+ # STLFlowStats Example
+
+ flow_stats = STLFlowStats(pg_id = 7)
+
+ """
+
+ def __init__(self, pg_id):
+ super(STLFlowStats, self).__init__(pg_id)
+ self.fields['rule_type'] = 'stats'
+
+
+class STLFlowLatencyStats(STLFlowStatsInterface):
+ """ Define per stream basic stats + latency, jitter, packet reorder/loss
+
+ .. code-block:: python
+
+ # STLFlowLatencyStats Example
+
+ flow_stats = STLFlowLatencyStats(pg_id = 7)
+
+ """
+
+ def __init__(self, pg_id):
+ super(STLFlowLatencyStats, self).__init__(pg_id)
+ self.fields['rule_type'] = 'latency'
+
+
+class STLStream(object):
+ """ One stream object. Includes mode, Field Engine mode packet template and Rx stats
+
+ .. code-block:: python
+
+ # STLStream Example
+
+
+ base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+ pad = max(0, size - len(base_pkt)) * 'x'
+
+ STLStream( isg = 10.0, # start delay (inter-stream gap)
+ name = 'S0',
+ packet = STLPktBuilder(pkt = base_pkt/pad),
+ mode = STLTXSingleBurst( pps = 10, total_pkts = 1),
+ next = 'S1') # point to the next stream
+
+
+ """
+
+ def __init__ (self,
+ name = None,
+ packet = None,
+ mode = STLTXCont(pps = 1),
+ enabled = True,
+ self_start = True,
+ isg = 0.0,
+ flow_stats = None,
+ next = None,
+ stream_id = None,
+ action_count = 0,
+ random_seed =0,
+ mac_src_override_by_pkt=None,
+ mac_dst_override_mode=None #see STLStreamDstMAC_xx
+ ):
+ """
+ Stream object
+
+ :parameters:
+
+ name : string
+ Name of the stream. Required if another stream refers to this stream via its 'next' field.
+
+ packet : STLPktBuilder see :class:`trex_stl_lib.trex_stl_packet_builder_scapy.STLPktBuilder`
+ Template packet and field engine program. Example: packet = STLPktBuilder(pkt = base_pkt/pad)
+
+ mode : :class:`trex_stl_lib.trex_stl_streams.STLTXCont` or :class:`trex_stl_lib.trex_stl_streams.STLTXSingleBurst` or :class:`trex_stl_lib.trex_stl_streams.STLTXMultiBurst`
+
+ enabled : bool
+ Indicates whether the stream is enabled.
+
+ self_start : bool
+ If False, another stream activates it.
+
+ isg : float
+ Inter-stream gap in usec. Time to wait until the stream sends the first packet.
+
+ flow_stats : :class:`trex_stl_lib.trex_stl_streams.STLFlowStats`
+ Per stream statistic object. See: STLFlowStats
+
+ next : string
+ Name of the stream to activate.
+
+ stream_id :
+ For use by HLTAPI.
+
+ action_count : uint16_t
+ If there is a next stream, number of loops before stopping. Default: 0 (unlimited).
+
+ random_seed: uint16_t
+ If given, the seed for this stream will be this value. Useful if you need a deterministic random value.
+
+ mac_src_override_by_pkt : bool
+ If True, take the src MAC from the template packet instead of the port configuration.
+
+ mac_dst_override_mode : STLStreamDstMAC_xx
+ Source of the dst MAC: config file, template packet, or ARP (see STLStreamDstMAC_xx).
+ """
+
+
+ # type checking
+ validate_type('mode', mode, STLTXMode)
+ validate_type('packet', packet, (type(None), CTrexPktBuilderInterface))
+ validate_type('flow_stats', flow_stats, (type(None), STLFlowStatsInterface))
+ validate_type('enabled', enabled, bool)
+ validate_type('self_start', self_start, bool)
+ validate_type('isg', isg, (int, float))
+ validate_type('stream_id', stream_id, (type(None), int))
+ validate_type('random_seed', random_seed, int)
+
+ if (type(mode) == STLTXCont) and (next != None):
+ raise STLError("Continuous stream cannot have a next stream ID")
+
+ # tag for the stream and next - can be anything
+ self.name = name
+ self.next = next
+
+ self.mac_src_override_by_pkt = mac_src_override_by_pkt # save for easy construct code from stream object
+ self.mac_dst_override_mode = mac_dst_override_mode
+ self.id = stream_id
+
+
+ self.fields = {}
+
+ int_mac_src_override_by_pkt = 0
+ int_mac_dst_override_mode = 0
+
+
+ if mac_src_override_by_pkt is None:
+ int_mac_src_override_by_pkt = 0
+ if packet:
+ if not packet.is_default_src_mac():
+ int_mac_src_override_by_pkt = 1
+
+ else:
+ int_mac_src_override_by_pkt = int(mac_src_override_by_pkt)
+
+ if mac_dst_override_mode is None:
+ int_mac_dst_override_mode = 0
+ if packet:
+ if not packet.is_default_dst_mac():
+ int_mac_dst_override_mode = STLStreamDstMAC_PKT
+ else:
+ int_mac_dst_override_mode = int(mac_dst_override_mode)
+
+
+ self.is_default_mac = not (int_mac_src_override_by_pkt or int_mac_dst_override_mode)
+
+ self.fields['flags'] = (int_mac_src_override_by_pkt&1) + ((int_mac_dst_override_mode&3)<<1)
+
+ self.fields['action_count'] = action_count
+
+ # basic fields
+ self.fields['enabled'] = enabled
+ self.fields['self_start'] = self_start
+ self.fields['isg'] = isg
+
+ if random_seed !=0 :
+ self.fields['random_seed'] = random_seed # optional
+
+ # mode
+ self.fields['mode'] = mode.to_json()
+ self.mode_desc = str(mode)
+
+
+ # packet
+ self.fields['packet'] = {}
+ self.fields['vm'] = {}
+
+ if not packet:
+ packet = STLPktBuilder(pkt = Ether()/IP())
+
+ self.scapy_pkt_builder = packet
+ # packet builder
+ packet.compile()
+
+ # packet and VM
+ self.fields['packet'] = packet.dump_pkt()
+ self.fields['vm'] = packet.get_vm_data()
+
+ self.pkt = base64.b64decode(self.fields['packet']['binary'])
+
+ # this is heavy, calculate lazy
+ self.packet_desc = None
+
+ if not flow_stats:
+ self.fields['flow_stats'] = STLFlowStats.defaults()
+ else:
+ self.fields['flow_stats'] = flow_stats.to_json()
+
+
+ def __str__ (self):
+ s = "Stream Name: {0}\n".format(self.name)
+ s += "Stream Next: {0}\n".format(self.next)
+ s += "Stream JSON:\n{0}\n".format(json.dumps(self.fields, indent = 4, separators=(',', ': '), sort_keys = True))
+ return s
+
+ def to_json (self):
+ """
+ Return json format
+ """
+ return dict(self.fields)
+
+ def get_id (self):
+ """ Get the stream id after resolution """
+ return self.id
+
+
+ def has_custom_mac_addr (self):
+ """ Return True if src or dst MAC were set as custom """
+ return not self.is_default_mac
+
+ def get_name (self):
+ """ Get the stream name """
+ return self.name
+
+ def get_next (self):
+ """ Get next stream object """
+ return self.next
+
+
+ def has_flow_stats (self):
+ """ Return True if stream was configured with flow stats """
+ return self.fields['flow_stats']['enabled']
+
+ def get_pkt (self):
+ """ Get packet as string """
+ return self.pkt
+
+ def get_pkt_len (self, count_crc = True):
+ """ Get packet number of bytes """
+ pkt_len = len(self.get_pkt())
+ if count_crc:
+ pkt_len += 4
+
+ return pkt_len
+
+
+ def get_pkt_type (self):
+ """ Get packet description. Example: IP:UDP """
+ if self.packet_desc is None:
+ self.packet_desc = STLPktBuilder.pkt_layers_desc_from_buffer(self.get_pkt())
+
+ return self.packet_desc
+
+ def get_mode (self):
+ return self.mode_desc
+
+ @staticmethod
+ def get_rate_from_field (rate_json):
+ """ Get rate from json """
+ t = rate_json['type']
+ v = rate_json['value']
+
+ if t == "pps":
+ return format_num(v, suffix = "pps")
+ elif t == "bps_L1":
+ return format_num(v, suffix = "bps (L1)")
+ elif t == "bps_L2":
+ return format_num(v, suffix = "bps (L2)")
+ elif t == "percentage":
+ return format_num(v, suffix = "%")
+
+ def get_rate (self):
+ return self.get_rate_from_field(self.fields['mode']['rate'])
+
+ def to_pkt_dump (self):
+ """ Print packet description from Scapy """
+ if self.name:
+ print("Stream Name: ",self.name)
+ scapy_b = self.scapy_pkt_builder
+ if scapy_b and isinstance(scapy_b, STLPktBuilder):
+ scapy_b.to_pkt_dump()
+ else:
+ print("Nothing to dump")
+
+
+
+ def to_yaml (self):
+ """ Convert to YAML """
+ y = {}
+
+ if self.name:
+ y['name'] = self.name
+
+ if self.next:
+ y['next'] = self.next
+
+ y['stream'] = copy.deepcopy(self.fields)
+
+ # some shortcuts for YAML
+ rate_type = self.fields['mode']['rate']['type']
+ rate_value = self.fields['mode']['rate']['value']
+
+ y['stream']['mode'][rate_type] = rate_value
+ del y['stream']['mode']['rate']
+
+ return y
+
+ # returns the Python code (text) to build this stream; inside the generated code the stream is held in the variable "stream"
+ def to_code (self):
+ """ Convert to Python code as profile """
+ packet = Ether(self.pkt)
+ layer = packet
+ imports_arr = []
+ # remove checksums, add imports if needed
+ while layer:
+ layer_class = layer.__class__.__name__
+ try: # check if class can be instantiated
+ eval('%s()' % layer_class)
+ except NameError: # no such layer
+ found_import = False
+ for module_path, module in sys.modules.items():
+ import_string = 'from %s import %s' % (module_path, layer_class)
+ if import_string in imports_arr:
+ found_import = True
+ break
+ if not module_path.startswith(('scapy.layers', 'scapy.contrib')):
+ continue
+ check_layer = getattr(module, layer_class, None)
+ if not check_layer:
+ continue
+ try:
+ check_layer()
+ imports_arr.append(import_string)
+ found_import = True
+ break
+ except: # can't be instantiated
+ continue
+ if not found_import:
+ raise STLError('Could not determine import of layer %s' % layer.name)
+ for chksum_name in ('cksum', 'chksum'):
+ if chksum_name in layer.fields:
+ del layer.fields[chksum_name]
+ layer = layer.payload
+ packet.hide_defaults() # remove fields with default values
+ payload = packet.getlayer('Raw')
+ packet_command = packet.command()
+
+ imports = '\n'.join(imports_arr)
+ if payload:
+ payload.remove_payload() # fcs etc.
+ data = payload.fields.get('load', '')
+
+ good_printable = [c for c in string.printable if ord(c) not in range(32)]
+ good_printable.remove("'")
+
+ if type(data) is str:
+ new_data = ''.join([c if c in good_printable else r'\x{0:02x}'.format(ord(c)) for c in data])
+ else:
+ new_data = ''.join([chr(c) if chr(c) in good_printable else r'\x{0:02x}'.format(c) for c in data])
+
+ payload_start = packet_command.find("Raw(load=")
+ if payload_start != -1:
+ packet_command = packet_command[:payload_start-1]
+ layers = packet_command.split('/')
+
+ if payload:
+ if len(new_data) and new_data == new_data[0] * len(new_data):
+ layers.append("Raw(load='%s' * %s)" % (new_data[0], len(new_data)))
+ else:
+ layers.append("Raw(load='%s')" % new_data)
+
+ packet_code = 'packet = (' + (' / \n ').join(layers) + ')'
+ vm_list = []
+ for inst in self.fields['vm']['instructions']:
+ if inst['type'] == 'flow_var':
+ vm_list.append("STLVmFlowVar(name='{name}', size={size}, op='{op}', init_value={init_value}, min_value={min_value}, max_value={max_value}, step={step})".format(**inst))
+ elif inst['type'] == 'write_flow_var':
+ vm_list.append("STLVmWrFlowVar(fv_name='{name}', pkt_offset={pkt_offset}, add_val={add_value}, is_big={is_big_endian})".format(**inst))
+ elif inst['type'] == 'write_mask_flow_var':
+ inst = copy.copy(inst)
+ inst['mask'] = hex(inst['mask'])
+ vm_list.append("STLVmWrMaskFlowVar(fv_name='{name}', pkt_offset={pkt_offset}, pkt_cast_size={pkt_cast_size}, mask={mask}, shift={shift}, add_value={add_value}, is_big={is_big_endian})".format(**inst))
+ elif inst['type'] == 'fix_checksum_ipv4':
+ vm_list.append("STLVmFixIpv4(offset={pkt_offset})".format(**inst))
+ elif inst['type'] == 'trim_pkt_size':
+ vm_list.append("STLVmTrimPktSize(fv_name='{name}')".format(**inst))
+ elif inst['type'] == 'tuple_flow_var':
+ inst = copy.copy(inst)
+ inst['ip_min'] = ltoa(inst['ip_min'])
+ inst['ip_max'] = ltoa(inst['ip_max'])
+ vm_list.append("STLVmTupleGen(name='{name}', ip_min='{ip_min}', ip_max='{ip_max}', port_min={port_min}, port_max={port_max}, limit_flows={limit_flows}, flags={flags})".format(**inst))
+ elif inst['type'] == 'flow_var_rand_limit':
+ vm_list.append("STLVmFlowVarRepetableRandom(name='{name}', size={size}, limit={limit}, seed={seed}, min_value={min_value}, max_value={max_value})".format(**inst))
+
+ vm_code = 'vm = STLScVmRaw([' + ',\n '.join(vm_list) + '], split_by_field = %s)' % STLStream.__add_quotes(self.fields['vm'].get('split_by_var'))
+ stream_params_list = []
+ stream_params_list.append('packet = STLPktBuilder(pkt = packet, vm = vm)')
+ if default_STLStream.name != self.name:
+ stream_params_list.append('name = %s' % STLStream.__add_quotes(self.name))
+ if default_STLStream.fields['enabled'] != self.fields['enabled']:
+ stream_params_list.append('enabled = %s' % self.fields['enabled'])
+ if default_STLStream.fields['self_start'] != self.fields['self_start']:
+ stream_params_list.append('self_start = %s' % self.fields['self_start'])
+ if default_STLStream.fields['isg'] != self.fields['isg']:
+ stream_params_list.append('isg = %s' % self.fields['isg'])
+ if default_STLStream.fields['flow_stats'] != self.fields['flow_stats']:
+ stream_params_list.append('flow_stats = STLFlowStats(%s)' % self.fields['flow_stats']['stream_id'])
+ if default_STLStream.next != self.next:
+ stream_params_list.append('next = %s' % STLStream.__add_quotes(self.next))
+ if default_STLStream.id != self.id:
+ stream_params_list.append('stream_id = %s' % self.id)
+ if default_STLStream.fields['action_count'] != self.fields['action_count']:
+ stream_params_list.append('action_count = %s' % self.fields['action_count'])
+ if 'random_seed' in self.fields:
+ stream_params_list.append('random_seed = %s' % self.fields.get('random_seed', 0))
+ if default_STLStream.mac_src_override_by_pkt != self.mac_src_override_by_pkt:
+ stream_params_list.append('mac_src_override_by_pkt = %s' % self.mac_src_override_by_pkt)
+ if default_STLStream.mac_dst_override_mode != self.mac_dst_override_mode:
+ stream_params_list.append('mac_dst_override_mode = %s' % self.mac_dst_override_mode)
+
+ mode_args = ''
+ for key, value in self.fields['mode'].items():
+ if key not in ('rate', 'type'):
+ mode_args += '%s = %s, ' % (key, value)
+ mode_args += '%s = %s' % (self.fields['mode']['rate']['type'], self.fields['mode']['rate']['value'])
+ if self.mode_desc == STLTXCont.__str__():
+ stream_params_list.append('mode = STLTXCont(%s)' % mode_args)
+ elif self.mode_desc == STLTXSingleBurst().__str__():
+ stream_params_list.append('mode = STLTXSingleBurst(%s)' % mode_args)
+ elif self.mode_desc == STLTXMultiBurst().__str__():
+ stream_params_list.append('mode = STLTXMultiBurst(%s)' % mode_args)
+ else:
+ raise STLError('Could not determine mode: %s' % self.mode_desc)
+
+ stream = "stream = STLStream(" + ',\n '.join(stream_params_list) + ')'
+ return '\n'.join([imports, packet_code, vm_code, stream])
+
+ # add quotes for strings; leave other types as-is
+ @staticmethod
+ def __add_quotes(arg):
+ if type(arg) is str:
+ return "'%s'" % arg
+ return arg
+
+ # used to replace non-printable characters with hex
+ @staticmethod
+ def __replchars_to_hex(match):
+ return r'\x{0:02x}'.format(ord(match.group()))
+
+ def dump_to_yaml (self, yaml_file = None):
+ """ Print as yaml """
+ yaml_dump = yaml.dump([self.to_yaml()], default_flow_style = False)
+
+ # write to file if provided
+ if yaml_file:
+ with open(yaml_file, 'w') as f:
+ f.write(yaml_dump)
+
+ return yaml_dump
+
+class YAMLLoader(object):
+
+ def __init__ (self, yaml_file):
+ self.yaml_path = os.path.dirname(yaml_file)
+ self.yaml_file = yaml_file
+
+
+ def __parse_packet (self, packet_dict):
+
+ packet_type = set(packet_dict).intersection(['binary', 'pcap'])
+ if len(packet_type) != 1:
+ raise STLError("Packet section must contain either 'binary' or 'pcap'")
+
+ if 'binary' in packet_type:
+ try:
+ pkt_str = base64.b64decode(packet_dict['binary'])
+ except TypeError:
+ raise STLError("'binary' field is not a valid packet format")
+
+ builder = STLPktBuilder(pkt_buffer = pkt_str)
+
+ elif 'pcap' in packet_type:
+ pcap = os.path.join(self.yaml_path, packet_dict['pcap'])
+
+ if not os.path.exists(pcap):
+ raise STLError("'pcap' - cannot find '{0}'".format(pcap))
+
+ builder = STLPktBuilder(pkt = pcap)
+
+ return builder
+
+
+ def __parse_mode (self, mode_obj):
+ if not mode_obj:
+ return None
+
+ rate_parser = set(mode_obj).intersection(['pps', 'bps_L1', 'bps_L2', 'percentage'])
+ if len(rate_parser) != 1:
+ raise STLError("'rate' must contain exactly one from 'pps', 'bps_L1', 'bps_L2', 'percentage'")
+
+ rate_type = rate_parser.pop()
+ rate = {rate_type : mode_obj[rate_type]}
+
+ mode_type = mode_obj.get('type')
+
+ if mode_type == 'continuous':
+ mode = STLTXCont(**rate)
+
+ elif mode_type == 'single_burst':
+ defaults = STLTXSingleBurst()
+ mode = STLTXSingleBurst(total_pkts = mode_obj.get('total_pkts', defaults.fields['total_pkts']),
+ **rate)
+
+ elif mode_type == 'multi_burst':
+ defaults = STLTXMultiBurst()
+ mode = STLTXMultiBurst(pkts_per_burst = mode_obj.get('pkts_per_burst', defaults.fields['pkts_per_burst']),
+ ibg = mode_obj.get('ibg', defaults.fields['ibg']),
+ count = mode_obj.get('count', defaults.fields['count']),
+ **rate)
+
+ else:
+ raise STLError("mode type can be 'continuous', 'single_burst' or 'multi_burst")
+
+
+ return mode
+
+
+
+ def __parse_flow_stats (self, flow_stats_obj):
+
+ # no such object
+ if not flow_stats_obj or flow_stats_obj.get('enabled') == False:
+ return None
+
+ pg_id = flow_stats_obj.get('stream_id')
+ if pg_id == None:
+ raise STLError("Enabled RX stats section must contain 'stream_id' field")
+
+ return STLFlowStats(pg_id = pg_id)
+
+
+ def __parse_stream (self, yaml_object):
+ s_obj = yaml_object['stream']
+
+ # parse packet
+ packet = s_obj.get('packet')
+ if not packet:
+ raise STLError("YAML file must contain 'packet' field")
+
+ builder = self.__parse_packet(packet)
+
+
+ # mode
+ mode = self.__parse_mode(s_obj.get('mode'))
+
+ # rx stats
+ flow_stats = self.__parse_flow_stats(s_obj.get('flow_stats'))
+
+
+ defaults = default_STLStream
+ # create the stream
+ stream = STLStream(name = yaml_object.get('name'),
+ packet = builder,
+ mode = mode,
+ flow_stats = flow_stats,
+ enabled = s_obj.get('enabled', defaults.fields['enabled']),
+ self_start = s_obj.get('self_start', defaults.fields['self_start']),
+ isg = s_obj.get('isg', defaults.fields['isg']),
+ next = yaml_object.get('next'),
+ action_count = s_obj.get('action_count', defaults.fields['action_count']),
+ mac_src_override_by_pkt = s_obj.get('mac_src_override_by_pkt', 0),
+ mac_dst_override_mode = s_obj.get('mac_dst_override_mode', 0)
+ )
+
+ # hack the VM fields for now
+ if 'vm' in s_obj:
+ stream.fields['vm'].update(s_obj['vm'])
+
+ return stream
+
+
+ def parse (self):
+ with open(self.yaml_file, 'r') as f:
+ # read YAML and pass it down to stream object
+ yaml_str = f.read()
+
+ try:
+ objects = yaml.load(yaml_str)
+ except yaml.parser.ParserError as e:
+ raise STLError(str(e))
+
+ streams = [self.__parse_stream(obj) for obj in objects]
+
+ return streams
+
+
+# profile class
+class STLProfile(object):
+ """ Describe a list of streams
+
+ .. code-block:: python
+
+ # STLProfile Example
+
+ profile = STLProfile( [ STLStream( isg = 10.0, # start delay (inter-stream gap)
+ name ='S0',
+ packet = STLPktBuilder(pkt = base_pkt/pad),
+ mode = STLTXSingleBurst( pps = 10, total_pkts = self.burst_size),
+ next = 'S1'), # point to next stream
+
+ STLStream( self_start = False, # stream is disabled; enabled through S0
+ name ='S1',
+ packet = STLPktBuilder(pkt = base_pkt1/pad),
+ mode = STLTXSingleBurst( pps = 10, total_pkts = self.burst_size),
+ next = 'S2' ),
+
+ STLStream( self_start = False, # stream is disabled; enabled through S1
+ name ='S2',
+ packet = STLPktBuilder(pkt = base_pkt2/pad),
+ mode = STLTXSingleBurst( pps = 10, total_pkts = self.burst_size )
+ )
+ ]).get_streams()
+
+
+
+ """
+
+ def __init__ (self, streams = None):
+ """
+
+ :parameters:
+
+ streams : list of :class:`trex_stl_lib.trex_stl_streams.STLStream`
+ a list of stream objects
+
+ """
+
+
+ if streams is None:
+ streams = []
+
+ if not isinstance(streams, list):
+ streams = [streams]
+
+ if not all([isinstance(stream, STLStream) for stream in streams]):
+ raise STLArgumentError('streams', streams, valid_values = STLStream)
+
+ self.streams = streams
+ self.meta = None
+
+
+ def get_streams (self):
+ """ Get the list of streams"""
+ return self.streams
+
+ def __str__ (self):
+ return '\n'.join([str(stream) for stream in self.streams])
+
+ def is_pauseable (self):
+ return all([x.get_mode() == "Continuous" for x in self.get_streams()])
+
+ def has_custom_mac_addr (self):
+ return any([x.has_custom_mac_addr() for x in self.get_streams()])
+
+ def has_flow_stats (self):
+ return any([x.has_flow_stats() for x in self.get_streams()])
+
+ @staticmethod
+ def load_yaml (yaml_file):
+ """ Load (from YAML file) a profile with a number of streams"""
+
+ # check filename
+ if not os.path.isfile(yaml_file):
+ raise STLError("file '{0}' does not exists".format(yaml_file))
+
+ yaml_loader = YAMLLoader(yaml_file)
+ streams = yaml_loader.parse()
+
+ profile = STLProfile(streams)
+ profile.meta = {'type': 'yaml'}
+
+ return profile
+
+ @staticmethod
+ def get_module_tunables(module):
+ # remove self and variables
+ func = module.register().get_streams
+ argc = func.__code__.co_argcount
+ tunables = func.__code__.co_varnames[1:argc]
+
+ # fetch defaults (None when get_streams() has no default arguments)
+ defaults = func.__defaults__ or ()
+ if len(defaults) != (argc - 1):
+ raise STLError("Module should provide default values for all arguments on get_streams()")
+
+ output = {}
+ for t, d in zip(tunables, defaults):
+ output[t] = d
+
+ return output
+
+
+ @staticmethod
+ def load_py (python_file, direction = 0, port_id = 0, **kwargs):
+ """ Load from Python profile """
+
+ # check filename
+ if not os.path.isfile(python_file):
+ raise STLError("File '{0}' does not exist".format(python_file))
+
+ basedir = os.path.dirname(python_file)
+ sys.path.insert(0, basedir)
+
+ try:
+ file = os.path.basename(python_file).split('.')[0]
+ module = __import__(file, globals(), locals(), [], 0)
+ imp.reload(module) # reload to pick up file changes
+
+ t = STLProfile.get_module_tunables(module)
+ #for arg in kwargs:
+ # if not arg in t:
+ # raise STLError("Profile {0} does not support tunable '{1}' - supported tunables are: '{2}'".format(python_file, arg, t))
+
+ streams = module.register().get_streams(direction = direction,
+ port_id = port_id,
+ **kwargs)
+ profile = STLProfile(streams)
+
+ profile.meta = {'type': 'python',
+ 'tunables': t}
+
+ return profile
+
+ except Exception as e:
+ a, b, tb = sys.exc_info()
+ x =''.join(traceback.format_list(traceback.extract_tb(tb)[1:])) + a.__name__ + ": " + str(b) + "\n"
+
+ summary = "\nPython Traceback follows:\n\n" + x
+ raise STLError(summary)
+
+
+ finally:
+ sys.path.remove(basedir)
+
+
+ # loop_count = 0 means loop forever
+ @staticmethod
+ def load_pcap (pcap_file,
+ ipg_usec = None,
+ speedup = 1.0,
+ loop_count = 1,
+ vm = None,
+ packet_hook = None,
+ split_mode = None):
+ """ Convert a pcap file with a number of packets to a list of connected streams.
+
+ packet1->packet2->packet3 etc
+
+ :parameters:
+
+ pcap_file : string
+ Name of the pcap file
+
+ ipg_usec : float
+ Inter packet gap in usec. If IPG is None, IPG is taken from pcap file
+
+ speedup : float
+ When reading the pcap file, divide the IPG by this "speedup" factor, making the replay faster by the same factor.
+
+ loop_count : uint16_t
+ Number of loops to repeat the pcap file
+
+ vm : list
+ List of Field engine instructions
+
+ packet_hook : callable
+ Applied to every packet read from the pcap before it becomes a stream
+
+ split_mode : str
+ Whether this pcap should be split into two profiles based on IPs / MACs;
+ used for dual mode.
+ Can be 'MAC' or 'IP'
+
+ :return: STLProfile
+
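+ .. code-block:: python
+
+ # usage sketch (file name is illustrative)
+ profile = STLProfile.load_pcap('sample.pcap', ipg_usec = 100, loop_count = 0)
+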
+ """
+
+ # check filename
+ if not os.path.isfile(pcap_file):
+ raise STLError("file '{0}' does not exists".format(pcap_file))
+
+ # make sure IPG is not less than 0.001 usec
+ if ipg_usec is not None and ipg_usec < 0.001:
+ raise STLError("ipg_usec cannot be less than 0.001 usec: '{0}'".format(ipg_usec))
+
+ if loop_count < 0:
+ raise STLError("'loop_count' cannot be negative")
+
+
+ try:
+
+ if split_mode is None:
+ pkts = PCAPReader(pcap_file).read_all()
+ return STLProfile.__pkts_to_streams(pkts,
+ ipg_usec,
+ speedup,
+ loop_count,
+ vm,
+ packet_hook)
+ else:
+ pkts_a, pkts_b = PCAPReader(pcap_file).read_all(split_mode = split_mode)
+
+ profile_a = STLProfile.__pkts_to_streams(pkts_a,
+ ipg_usec,
+ speedup,
+ loop_count,
+ vm,
+ packet_hook,
+ start_delay_usec = 10000)
+
+ profile_b = STLProfile.__pkts_to_streams(pkts_b,
+ ipg_usec,
+ speedup,
+ loop_count,
+ vm,
+ packet_hook,
+ start_delay_usec = 10000)
+
+ return profile_a, profile_b
+
+
+ except Scapy_Exception as e:
+ raise STLError("failed to open PCAP file {0}: '{1}'".format(pcap_file, str(e)))
+
+
+ @staticmethod
+ def __pkts_to_streams (pkts, ipg_usec, speedup, loop_count, vm, packet_hook, start_delay_usec = 0):
+
+ streams = []
+
+ # optional delay before the first packet (e.g. 10 ms when splitting to two profiles)
+ last_ts_usec = -(start_delay_usec)
+
+ if packet_hook:
+ pkts = [(packet_hook(cap), meta) for (cap, meta) in pkts]
+
+
+ for i, (cap, meta) in enumerate(pkts, start = 1):
+ # IPG - if not provided, take from cap
+ if ipg_usec is None:
+ ts_usec = (meta[0] * 1e6 + meta[1]) / float(speedup)
+ else:
+ ts_usec = (ipg_usec * i) / float(speedup)
+
+ # handle last packet
+ if i == len(pkts):
+ next = 1
+ action_count = loop_count
+ else:
+ next = i + 1
+ action_count = 0
+
+ streams.append(STLStream(name = i,
+ packet = STLPktBuilder(pkt_buffer = cap, vm = vm),
+ mode = STLTXSingleBurst(total_pkts = 1, percentage = 100),
+ self_start = True if (i == 1) else False,
+ isg = (ts_usec - last_ts_usec), # already in usec
+ action_count = action_count,
+ next = next))
+
+ last_ts_usec = ts_usec
+
+
+ profile = STLProfile(streams)
+ profile.meta = {'type': 'pcap'}
+
+ return profile
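+
+ # Chaining sketch (illustrative): for a 3-packet pcap, streams are named
+ # 1 -> 2 -> 3; the last stream points next = 1 with action_count = loop_count,
+ # so the sequence replays loop_count times (0 means forever).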
+
+
+
+ @staticmethod
+ def load (filename, direction = 0, port_id = 0, **kwargs):
+ """ Load a profile by its type. Supported types are:
+ * py
+ * yaml
+ * pcap file that is converted to a profile automatically
+
+ :Parameters:
+ filename : string - path of the profile file
+ direction : profile's direction (if supported by the profile)
+ port_id : which port ID this profile is being loaded to
+ kwargs : forward those key-value pairs to the profile
+
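+ .. code-block:: python
+
+ # usage sketch (file name is illustrative)
+ profile = STLProfile.load('udp_1pkt.py', direction = 0, port_id = 0)
+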
+ """
+
+ x = os.path.basename(filename).split('.')
+ suffix = x[-1] if (len(x) > 1) else None
+
+ if suffix == 'py':
+ profile = STLProfile.load_py(filename, direction, port_id, **kwargs)
+
+ elif suffix == 'yaml':
+ profile = STLProfile.load_yaml(filename)
+
+ elif suffix in ['cap', 'pcap']:
+ profile = STLProfile.load_pcap(filename, speedup = 1, ipg_usec = 1e6)
+
+ else:
+ raise STLError("unknown profile file type: '{0}'".format(suffix))
+
+ profile.meta['stream_count'] = len(profile.get_streams()) if isinstance(profile.get_streams(), list) else 1
+ return profile
+
+ @staticmethod
+ def get_info (filename):
+ profile = STLProfile.load(filename)
+ return profile.meta
+
+ def dump_as_pkt (self):
+ """ Dump the profile as Scapy packet. If the packet is raw, convert it to Scapy before dumping it."""
+ cnt = 0
+ for stream in self.streams:
+ print("=======================")
+ print("Stream %d" % cnt)
+ print("=======================")
+ cnt += 1
+ stream.to_pkt_dump()
+
+ def dump_to_yaml (self, yaml_file = None):
+ """ Convert the profile to yaml """
+ yaml_list = [stream.to_yaml() for stream in self.streams]
+ yaml_str = yaml.dump(yaml_list, default_flow_style = False)
+
+ # write to file if provided
+ if yaml_file:
+ with open(yaml_file, 'w') as f:
+ f.write(yaml_str)
+
+ return yaml_str
+
+ def dump_to_code (self, profile_file = None):
+ """ Convert the profile to Python native profile. """
+ profile_dump = '''# !!! Auto-generated code !!!
+from trex_stl_lib.api import *
+
+class STLS1(object):
+ def get_streams(self, direction = 0, **kwargs):
+ streams = []
+'''
+ for stream in self.streams:
+ profile_dump += ' '*8 + stream.to_code().replace('\n', '\n' + ' '*8) + '\n'
+ profile_dump += ' '*8 + 'streams.append(stream)\n'
+ profile_dump += '''
+ return streams
+
+def register():
+ return STLS1()
+'''
+ # write to file if provided
+ if profile_file:
+ with open(profile_file, 'w') as f:
+ f.write(profile_dump)
+
+ return profile_dump
+
+
+
+ def __len__ (self):
+ return len(self.streams)
+
+
+class PCAPReader(object):
+ def __init__ (self, pcap_file):
+ self.pcap_file = pcap_file
+
+ def read_all (self, split_mode = None):
+ if split_mode is None:
+ return RawPcapReader(self.pcap_file).read_all()
+
+ # we need to split
+ self.pcap = rdpcap(self.pcap_file)
+ self.graph = Graph()
+
+ self.pkt_groups = [ [], [] ]
+
+ if split_mode == 'MAC':
+ self.generate_mac_groups()
+ elif split_mode == 'IP':
+ self.generate_ip_groups()
+ else:
+ raise STLError('unknown split mode for PCAP')
+
+ return self.pkt_groups
+
+
+ # generate two groups based on MACs
+ def generate_mac_groups (self):
+ for i, pkt in enumerate(self.pcap):
+ if not isinstance(pkt, (Ether, Dot3) ):
+ raise STLError("Packet #{0} has an unknown L2 format: {1}".format(i, type(pkt)))
+ mac_src = pkt.fields['src']
+ mac_dst = pkt.fields['dst']
+ self.graph.add(mac_src, mac_dst)
+
+ # split the graph to two groups
+ mac_groups = self.graph.split()
+
+ for pkt in self.pcap:
+ mac_src = pkt.fields['src']
+ group = 1 if mac_src in mac_groups[1] else 0
+
+ time, raw = pkt.time, bytes(pkt)
+ self.pkt_groups[group].append((raw, (time, 0)))
+
+
+ # generate two groups based on IPs
+ def generate_ip_groups (self):
+ for i, pkt in enumerate(self.pcap):
+ if not isinstance(pkt, (Ether, Dot3) ):
+ raise STLError("Packet #{0} has an unknown L2 format: {1}".format(i, type(pkt)))
+ # skip non IP packets
+ if not isinstance(pkt.payload, IP):
+ continue
+ ip_src = pkt.payload.fields['src']
+ ip_dst = pkt.payload.fields['dst']
+ self.graph.add(ip_src, ip_dst)
+
+ # split the graph to two groups
+ ip_groups = self.graph.split()
+
+ for pkt in self.pcap:
+ # default group - 0
+ group = 0
+
+ # if the packet is IP and IP SRC is in group 1 - move to group 1
+ if isinstance(pkt.payload, IP) and pkt.payload.fields['src'] in ip_groups[1]:
+ group = 1
+
+ time, raw = pkt.time, bytes(pkt)
+ self.pkt_groups[group].append((raw, (time, 0)))
+
+
+
+# a simple graph object - used to split to two groups
+class Graph(object):
+ def __init__ (self):
+ self.db = OrderedDict()
+ self.debug = False
+
+ def log (self, msg):
+ if self.debug:
+ print(msg)
+
+ # add a connection v1 --> v2
+ def add (self, v1, v2):
+ # init value for v1
+ if v1 not in self.db:
+ self.db[v1] = set()
+
+ # init value for v2
+ if v2 not in self.db:
+ self.db[v2] = set()
+
+ # ignore self to self edges
+ if v1 == v2:
+ return
+
+ # undirected - add two ways
+ self.db[v1].add(v2)
+ self.db[v2].add(v1)
+
+
+ # create a 2-color of the graph if possible
+ def split (self):
+ color_a = set()
+ color_b = set()
+
+ # start with all
+ nodes = list(self.db.keys())
+
+ # process one by one
+ while len(nodes) > 0:
+ node = nodes.pop(0)
+
+ friends = self.db[node]
+
+ # node has never been seen - move to color_a
+ if node not in color_a and node not in color_b:
+ self.log("<NEW> {0} --> A".format(node))
+ color_a.add(node)
+
+ # node color
+ node_color, other_color = (color_a, color_b) if node in color_a else (color_b, color_a)
+
+ # check that the coloring is possible
+ bad_friends = friends.intersection(node_color)
+ if bad_friends:
+ raise STLError("ERROR: failed to split PCAP file - {0} and {1} are in the same group".format(node, bad_friends))
+
+ # add all the friends to the other color
+ for friend in friends:
+ self.log("<FRIEND> {0} --> {1}".format(friend, 'A' if other_color is color_a else 'B'))
+ other_color.add(friend)
+
+
+ return color_a, color_b
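+
+# Illustrative: g = Graph(); g.add('a', 'b'); g.add('c', 'd')
+# g.split() -> ({'a', 'c'}, {'b', 'd'}) - every edge crosses the two groups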
+
+
+default_STLStream = STLStream()
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py
new file mode 100644
index 00000000..aa6c4218
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py
@@ -0,0 +1,167 @@
+
+from collections import namedtuple, OrderedDict
+from .utils.text_opts import *
+from .trex_stl_exceptions import *
+import sys
+import types
+
+RpcCmdData = namedtuple('RpcCmdData', ['method', 'params', 'api_class'])
+TupleRC = namedtuple('RCT', ['rc', 'data', 'is_warn'])
+
+class RpcResponseStatus(namedtuple('RpcResponseStatus', ['success', 'id', 'msg'])):
+ __slots__ = ()
+ def __str__(self):
+ return "{id:^3} - {msg} ({stat})".format(id=self.id,
+ msg=self.msg,
+ stat="success" if self.success else "fail")
+
+# simple class to represent complex return value
+class RC():
+
+ def __init__ (self, rc = None, data = None, is_warn = False):
+ self.rc_list = []
+
+ if (rc != None):
+ self.rc_list.append(TupleRC(rc, data, is_warn))
+
+ def __nonzero__ (self):
+ return self.good()
+
+ def __bool__ (self):
+ return self.good()
+
+ def add (self, rc):
+ self.rc_list += rc.rc_list
+
+ def good (self):
+ return all([x.rc for x in self.rc_list])
+
+ def bad (self):
+ return not self.good()
+
+ def warn (self):
+ return any([x.is_warn for x in self.rc_list])
+
+ def data (self):
+ d = [x.data if x.rc else "" for x in self.rc_list]
+ return (d if len(d) != 1 else d[0])
+
+ def err (self):
+ e = [x.data if not x.rc else "" for x in self.rc_list]
+ return (e if len(e) != 1 else e[0])
+
+ def __str__ (self):
+ s = ""
+ for x in self.rc_list:
+ if x.data:
+ s += format_text("\n{0}".format(x.data), 'bold')
+ return s
+
+ def __iter__(self):
+ return self.rc_list.__iter__()
+
+
+ def prn_func (self, msg, newline = True):
+ if newline:
+ print(msg)
+ else:
+ # write without a newline (works on both Python 2 and 3)
+ sys.stdout.write(msg)
+ sys.stdout.flush()
+
+ def annotate (self, log_func = None, desc = None, show_status = True):
+
+ if not log_func:
+ log_func = self.prn_func
+
+ if desc:
+ log_func(format_text('\n{:<60}'.format(desc), 'bold'), newline = False)
+ else:
+ log_func("")
+
+ if self.bad():
+ # print all the errors
+ print("")
+ for x in self.rc_list:
+ if not x.rc:
+ log_func(format_text("\n{0}".format(x.data), 'bold'))
+
+ print("")
+ if show_status:
+ log_func(format_text("[FAILED]\n", 'red', 'bold'))
+
+
+ else:
+ if show_status:
+ log_func(format_text("[SUCCESS]\n", 'green', 'bold'))
+
+
+def RC_OK(data = ""):
+ return RC(True, data)
+
+def RC_ERR (err):
+ return RC(False, err)
+
+def RC_WARN (warn):
+ return RC(True, warn, is_warn = True)
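+
+# Usage sketch (illustrative): RC aggregates several results into one status
+#   rc = RC_OK("done")
+#   rc.add(RC_ERR("port busy"))
+#   rc.good()  # False - one of the aggregated results failed
+#   rc.err()   # ['', 'port busy']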
+
+try:
+ long
+ long_exists = True
+except NameError:
+ long_exists = False
+
+def is_integer(arg):
+ if type(arg) is int:
+ return True
+ if long_exists and type(arg) is long:
+ return True
+ return False
+
+# validate type of arg
+# example1: validate_type('somearg', somearg, [int, long])
+# example2: validate_type('another_arg', another_arg, str)
+def validate_type(arg_name, arg, valid_types):
+ if long_exists:
+ if valid_types is int:
+ valid_types = (int, long)
+ elif type(valid_types) is list and int in valid_types and long not in valid_types:
+ valid_types.append(long)
+ if type(valid_types) is list:
+ valid_types = tuple(valid_types)
+ if (type(valid_types) is type or # single type, not array of types
+ type(valid_types) is tuple or # several valid types as tuple
+ type(valid_types) is getattr(types, 'ClassType', type)): # old-style class (Python 2 only)
+ if isinstance(arg, valid_types):
+ return
+ raise STLTypeError(arg_name, type(arg), valid_types)
+ else:
+ raise STLError('validate_type: valid_types should be type or list or tuple of types')
+
+# throws STLError if not exactly one argument is present
+def verify_exclusive_arg (args_list):
+ if not (len(list(filter(lambda x: x is not None, args_list))) == 1):
+ raise STLError('exactly one parameter from {0} should be provided'.format(args_list))
+
+def listify (x):
+ if isinstance(x, list):
+ return x
+ else:
+ return [x]
+
+# shows as 'N/A' but forbids comparisons, so automation cannot silently use a missing stat
+class StatNotAvailable(str):
+ def __new__(cls, value, *args, **kwargs):
+ new = super(StatNotAvailable, cls).__new__(cls, 'N/A')
+ new.stat_name = value
+ return new
+
+ def __cmp__(self, *args, **kwargs):
+ raise Exception("Stat '%s' not available at this setup" % self.stat_name)
+
+
+class LRU_cache(OrderedDict):
+ def __init__(self, maxlen = 20, *args, **kwargs):
+ OrderedDict.__init__(self, *args, **kwargs)
+ self.maxlen = maxlen
+
+ def __setitem__(self, *args, **kwargs):
+ OrderedDict.__setitem__(self, *args, **kwargs)
+ if len(self) > self.maxlen:
+ self.popitem(last = False)
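+
+# Usage sketch (illustrative): the oldest entry is evicted once maxlen is exceeded
+#   cache = LRU_cache(maxlen = 2)
+#   cache['a'] = 1; cache['b'] = 2; cache['c'] = 3
+#   list(cache)  # ['b', 'c'] - 'a' was dropped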
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py
new file mode 100755
index 00000000..fe4fc893
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py
@@ -0,0 +1,297 @@
+try: # Python2
+ import Queue
+ from urllib2 import *
+except ImportError: # Python3
+ import queue as Queue
+ from urllib.request import *
+ from urllib.error import *
+import threading
+import sys
+from time import sleep
+from pprint import pprint
+"""
+GAObjClass is a class used to send Google Analytics information.
+
+cid - unique number per user.
+command - the Event Category field as shown on the site. Type: TEXT
+action - the Event Action field as shown on the site. Type: TEXT
+label - the Event Label field. Type: TEXT
+value - the event value metric. Type: INTEGER
+
+QUOTAS:
+a single payload - up to 8192 bytes
+batched:
+A maximum of 20 hits can be specified per request.
+The total size of all hit payloads cannot be greater than 16K bytes.
+No single hit payload can be greater than 8K bytes.
+"""
+url_single = 'https://www.google-analytics.com/collect' #sending single event
+url_batched = 'https://www.google-analytics.com/batch' #sending batched events
+url_debug = 'https://www.google-analytics.com/debug/collect' #verifying hit is valid
+url_conn = 'http://172.217.2.196' # testing internet connection to this address (google-analytics server)
+
+#..................................................................class GA_ObjClass................................................................
+class GA_ObjClass:
+ def __init__(self,cid,trackerID,appName,appVer):
+ self.cid = cid
+ self.trackerID = trackerID
+ self.appName = appName
+ self.appVer = appVer
+ self.payload = ''
+ self.payload = GA_ObjClass.generate_payload(self)
+ self.size = sys.getsizeof(self.payload)
+
+ def generate_payload(self):
+ self.payload+='v=1&t=event&tid='+str(self.trackerID)
+ self.payload+='&cid='+str(self.cid)
+ self.payload+='&an='+str(self.appName)
+ self.payload+='&av='+str(self.appVer)
+ return self.payload
+
+
+#..................................................................class GA_EVENT_ObjClass................................................................
+class GA_EVENT_ObjClass(GA_ObjClass):
+ def __init__(self,cid,trackerID,command,action,label,value,appName,appVer):
+ GA_ObjClass.__init__(self,cid,trackerID,appName,appVer)
+ self.command = command
+ self.action = action
+ self.label = label
+ self.value = value
+ self.payload = self.generate_payload()
+ self.size = sys.getsizeof(self.payload)
+
+ def generate_payload(self):
+ self.payload+='&ec='+str(self.command)
+ self.payload+='&ea='+str(self.action)
+ self.payload+='&el='+str(self.label)
+ self.payload+='&ev='+str(self.value)
+ return self.payload
+
+#..................................................................class GA_EXCEPTION_ObjClass................................................................
+#ExceptionFatal - BOOLEAN
+class GA_EXCEPTION_ObjClass(GA_ObjClass):
+ def __init__(self,cid,trackerID,ExceptionName,ExceptionFatal,appName,appVer):
+ GA_ObjClass.__init__(self,cid,trackerID,appName,appVer)
+ self.ExceptionName = ExceptionName
+ self.ExceptionFatal = ExceptionFatal
+ self.payload = self.generate_payload()
+
+ def generate_payload(self):
+ self.payload+='&exd='+str(self.ExceptionName)
+ self.payload+='&exf='+str(self.ExceptionFatal)
+ return self.payload
+
+
+
+#..................................................................class GA_TESTING_ObjClass................................................................
+class GA_TESTING_ObjClass(GA_ObjClass):
+ def __init__(self,cid,trackerID,TRexMode,TestName,SetupName,appName,ActionNumber,appVer,TestType,Mppspc,GoldenMin,GoldenMax):
+ GA_ObjClass.__init__(self,cid,trackerID,appName,appVer)
+ self.ActionNumber = ActionNumber
+ self.TRexMode = TRexMode
+ self.TestName = TestName
+ self.SetupName = SetupName
+ self.TestType = TestType
+ self.Mppspc = Mppspc
+ self.GoldenMin = GoldenMin
+ self.GoldenMax = GoldenMax
+ self.payload = self.generate_payload()
+ self.size = sys.getsizeof(self.payload)
+
+ def generate_payload(self):
+ self.payload+='&ec=TRexTests'
+ self.payload+='&ea='+str(self.ActionNumber)
+ self.payload+='&cd2='+str(self.TRexMode)
+ self.payload+='&cd1='+str(self.TestName)
+ self.payload+='&cd3='+str(self.SetupName)
+ self.payload+='&cd4='+str(self.TestType)
+ self.payload+='&cm1='+str(self.Mppspc)
+ self.payload+='&cm2='+str(self.GoldenMin)
+ self.payload+='&cm3='+str(self.GoldenMax)
+ return self.payload
+#.....................................................................class ga_Thread.................................................................
+"""
+
+Google Analytics thread manager:
+
+empties and reports the queue of Google Analytics items to the GA server every Timeout seconds (parameter given at initialization)
+performs a connectivity check every Timeout*10 seconds
+
+"""
+
+class ga_Thread (threading.Thread):
+ def __init__(self,threadID,gManager):
+ threading.Thread.__init__(self)
+ self.threadID = threadID
+ self.gManager = gManager
+ def run(self):
+ keepAliveCounter=0
+ #sys.stdout.write('thread started \n')
+ #sys.stdout.flush()
+ while True:
+ if (keepAliveCounter==10):
+ keepAliveCounter=0
+ if (self.gManager.internet_on()==True):
+ self.gManager.connectedToInternet=1
+ else:
+ self.gManager.connectedToInternet=0
+ sleep(self.gManager.Timeout)
+ keepAliveCounter+=1
+ if not self.gManager.GA_q.empty():
+ self.gManager.threadLock.acquire(1)
+# sys.stdout.write('lock acquired: reporting to GA \n')
+# sys.stdout.flush()
+ if (self.gManager.connectedToInternet==1):
+ self.gManager.emptyAndReportQ()
+ self.gManager.threadLock.release()
+# sys.stdout.write('finished \n')
+# sys.stdout.flush()
+#.....................................................................class GAmanager.................................................................
+"""
+
+Google ID - specifies the tracker property, example: UA-75220362-2 (where the suffix '2' selects the analytics property profile)
+
+UserID - unique user ID; this differentiates users on GA
+
+appName - a string describing the app name
+
+appVer - a string describing the app version
+
+QueueSize - the size of the queue that holds reported items. Once the queue is full:
+ in blocking mode:
+ blocks the program until the next submission to the GA server frees space
+ in non-blocking mode:
+ drops new requests
+
+Timeout - the timeout the queue uses between data transmissions. Timeout should be shorter than the time it takes to generate 20 events. MIN VALUE = 11 seconds
+
+UserPermission - the user must accept data transmission; use this as a 1/0 flag, where UserPermission=1 allows data collection
+
+BlockingMode - set to 1 if every Google Analytics object must be submitted and processed, with no drops allowed.
+ This blocks the program until every item is processed.
+
+*** Restriction - Google allows 1 hit per second per session; a session lasts 30 min ***
+"""
+
+class GAmanager:
+ def __init__(self,GoogleID,UserID,appName,appVer,QueueSize,Timeout,UserPermission,BlockingMode):
+ self.UserID = UserID
+ self.GoogleID = GoogleID
+ self.QueueSize = QueueSize
+ self.Timeout = Timeout
+ self.appName = appName
+ self.appVer = appVer
+ self.UserPermission = UserPermission
+ self.GA_q = Queue.Queue(QueueSize)
+ self.thread = ga_Thread(UserID,self)
+ self.threadLock = threading.Lock()
+ self.BlockingMode = BlockingMode
+ self.connectedToInternet =0
+ if (self.internet_on()==True):
+# sys.stdout.write('internet connection active \n')
+# sys.stdout.flush()
+ self.connectedToInternet=1
+ else:
+ self.connectedToInternet=0
+
+ def gaAddAction(self,Event,action,label,value):
+ self.gaAddObject(GA_EVENT_ObjClass(self.UserID,self.GoogleID,Event,action,label,value,self.appName,self.appVer))
+
+ def gaAddException(self,ExceptionName,ExceptionFatal):
+ self.gaAddObject(GA_EXCEPTION_ObjClass(self.UserID,self.GoogleID,ExceptionName,ExceptionFatal,self.appName,self.appVer))
+
+ def gaAddObject(self,Object):
+ if (self.BlockingMode==1):
+ while (self.GA_q.full()):
+ sleep(self.Timeout)
+# sys.stdout.write('blocking mode=1 \n queue full - sleeping for timeout \n') # within Timout, the thread will empty part of the queue
+# sys.stdout.flush()
+ lockState = self.threadLock.acquire(self.BlockingMode)
+ if (lockState==1):
+# sys.stdout.write('got lock, adding item \n')
+# sys.stdout.flush()
+ try:
+ self.GA_q.put_nowait(Object)
+# sys.stdout.write('got lock, item added \n')
+# sys.stdout.flush()
+ except Queue.Full:
+# sys.stdout.write('Queue full \n')
+# sys.stdout.flush()
+ pass
+ self.threadLock.release()
+
+ def emptyQueueToList(self,obj_list):
+ items=0
+ while ((not self.GA_q.empty()) and (items<20)):
+ obj_list.append(self.GA_q.get_nowait().payload)
+ items+=1
+# print items
+ return obj_list
+
+ def reportBatched(self,batched):
+ req = Request(url_batched, data=batched.encode('ascii'))
+ urlopen(req)
+# pprint(r.json())
+
+ def emptyAndReportQ(self):
+ obj_list = []
+ obj_list = self.emptyQueueToList(obj_list)
+ if (len(obj_list)==0):
+ return
+ batched = '\n'.join(obj_list)
+# print sys.getsizeof(batched)
+# print batched # - for debug
+ self.reportBatched(batched)
+
+ def printSelf(self):
+ print('remaining in queue:')
+ while not self.GA_q.empty():
+ obj = self.GA_q.get_nowait()
+ print(obj.payload)
+
+ def internet_on(self):
+ try:
+ urlopen(url_conn,timeout=10)
+ return True
+ except URLError:
+ pass
+ return False
+
+ def activate(self):
+ if (self.UserPermission==1):
+ self.thread.start()
+
+
+#.....................................................................class GAmanager_Regression.................................................................
+"""
+ *-*-*-*-Google Analytics Regression Manager-*-*-*-*
+ attributes:
+GoogleID - the tracker ID Google uses to track the activity of a property. For regression use: 'UA-75220362-4'
+AnalyticsUserID - text value used by Google to distinguish between users sending data (not presented on reports); use only to tell different users apart
+TRexMode - text - presented on analysis; the TRex mode
+appName - text - presented on analysis; a string describing the app name
+appVer - text - presented on analysis; the app version
+QueueSize - integer - the queue size; the queue holds pending requests before submission. RECOMMENDED VALUE: 20
+Timeout - integer (seconds) - the timeout in seconds between automated reports when the reporting thread is active
+UserPermission - boolean (1/0) - required in order to send packets; should be 1
+BlockingMode - boolean (1/0) - required when each tracked event is critical and the program should halt until the event is reported
+SetupName - text - presented on analysis; the setup name as a string
+"""
+class GAmanager_Regression(GAmanager):
+ def __init__(self, GoogleID, AnalyticsUserID, appName, appVer,
+ QueueSize, Timeout, UserPermission, BlockingMode):
+ GAmanager.__init__(self, GoogleID, AnalyticsUserID, appName, appVer,
+ QueueSize, Timeout, UserPermission, BlockingMode)
+ self.GoogleID = GoogleID
+ self.AnalyticsUserID = AnalyticsUserID
+
+ def gaAddTestQuery(self, TestName, TRexMode, SetupName, ActionNumber, TestType, Mppspc, GoldenMin, GoldenMax):
+ self.gaAddObject(GA_TESTING_ObjClass(self.AnalyticsUserID, self.GoogleID, TRexMode, TestName, SetupName, self.appName, ActionNumber, self.appVer, TestType, Mppspc, GoldenMin, GoldenMax))
+
+
+
+
+
+
+
+
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/__init__.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/__init__.py
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py
new file mode 100644
index 00000000..72ee8972
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py
@@ -0,0 +1,88 @@
+import os
+import sys
+import string
+import random
+import time
+
+try:
+ import pwd
+except ImportError:
+ import getpass
+ pwd = None
+
+using_python_3 = (sys.version_info.major == 3)
+
+def get_current_user():
+ if pwd:
+ return pwd.getpwuid(os.geteuid()).pw_name
+ else:
+ return getpass.getuser()
+
+
+def user_input():
+ if using_python_3:
+ return input()
+ else:
+ # using python version 2
+ return raw_input()
+
+
+class random_id_gen:
+ """
+ Emulated generator for creating random-character IDs of a specific length
+
+ :parameters:
+ length : int
+ the desired length of the generated id
+
+ default: 8
+
+ :return:
+ a random id with each next() request.
+ """
+ def __init__(self, length=8):
+ self.id_chars = string.ascii_lowercase + string.digits
+ self.length = length
+
+ def next(self):
+ return ''.join(random.choice(self.id_chars) for _ in range(self.length))
+
+ __next__ = next
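+
+# Usage sketch (illustrative):
+#   gen = random_id_gen(4)
+#   next(gen)  # e.g. 'x7k2'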
+
+
+# try to get a number from the input; return None on failure
+def get_number(input):
+ try:
+ return long(input)
+ except:
+ try:
+ return int(input)
+ except:
+ return None
+
+def list_intersect(l1, l2):
+ return list(filter(lambda x: x in l2, l1))
+
+def list_difference (l1, l2):
+ return list(filter(lambda x: x not in l2, l1))
+
+def is_sub_list (l1, l2):
+ return set(l1) <= set(l2)
+
+# a simple passive timer
+class PassiveTimer(object):
+
+ # timeout_sec = None means forever
+ def __init__ (self, timeout_sec):
+ if timeout_sec is not None:
+ self.expr_sec = time.time() + timeout_sec
+ else:
+ self.expr_sec = None
+
+ def has_expired (self):
+ # if no timeout was set - always return False
+ if self.expr_sec is None:
+ return False
+
+ return (time.time() > self.expr_sec)
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/constants.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/constants.py
new file mode 100755
index 00000000..a4942094
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/constants.py
@@ -0,0 +1,26 @@
+from collections import OrderedDict
+
+ON_OFF_DICT = OrderedDict([
+ ('on', True),
+ ('off', False),
+])
+
+UP_DOWN_DICT = OrderedDict([
+ ('up', True),
+ ('down', False),
+])
+
+FLOW_CTRL_DICT = OrderedDict([
+ ('none', 0), # Disable flow control
+ ('tx', 1), # Enable flowctrl on TX side (RX pause frames)
+ ('rx', 2), # Enable flowctrl on RX side (TX pause frames)
+ ('full', 3), # Enable flow control on both sides
+])
+
+
+
+# generate reverse dicts
+
+for var_name in list(vars().keys()):
+ if var_name.endswith('_DICT'):
+ exec('{0}_REVERSED = OrderedDict([(val, key) for key, val in {0}.items()])'.format(var_name))
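+
+# e.g. FLOW_CTRL_DICT_REVERSED maps 0 -> 'none', 1 -> 'tx', 2 -> 'rx', 3 -> 'full'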
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/filters.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/filters.py
new file mode 100644
index 00000000..714f7807
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/filters.py
@@ -0,0 +1,144 @@
+
+def shallow_copy(x):
+ return type(x)(x)
+
+
+class ToggleFilter(object):
+ """
+ This class provides a "sticky" filter, that works by "toggling" items of the original database on and off.
+ """
+ def __init__(self, db_ref, show_by_default=True):
+ """
+ Instantiate a ToggleFilter object
+
+ :parameters:
+ db_ref : iterable
+ an iterable object (e.g. list, set) that serves as the reference db of the instance.
+ Changes in that object affect the output of the ToggleFilter instance.
+
+ show_by_default: bool
+ decide if by default all the items are "on", i.e. these items will be presented if no other
+ toggling occurred.
+
+ default value : **True**
+
+ """
+ self._data = db_ref
+ self._toggle_db = set()
+ self._filter_method = filter
+ self.__set_initial_state(show_by_default)
+
+ def reset (self):
+ """
+ Toggles off all the items
+ """
+ self._toggle_db = set()
+
+
+ def toggle_item(self, item_key):
+ """
+ Toggle a single item in/out.
+
+ :parameters:
+ item_key :
+ the key of the item to toggle in or out.
+ Example: int, str and so on.
+
+ :return:
+ + **True** if item toggled **into** the filtered items
+ + **False** if item toggled **out from** the filtered items
+
+ :raises:
+ + KeyError, if the item key is not part of the toggled list nor the referenced db.
+
+ """
+ if item_key in self._toggle_db:
+ self._toggle_db.remove(item_key)
+ return False
+ elif item_key in self._data:
+ self._toggle_db.add(item_key)
+ return True
+ else:
+ raise KeyError("Provided item key isn't a key of the referenced data structure.")
+
+ def toggle_items(self, *args):
+ """
+ Toggle multiple items in/out with a single call. Each item is handled by toggle_item.
+
+ :parameters:
+ args : iterable
+ an iterable object containing all item keys to be toggled in/out
+
+ :return:
+ + **True** if all toggled items were toggled **into** the filtered items
+ + **False** if at least one of the items was toggled **out from** the filtered items
+
+ :raises:
+ + KeyError, if one of the item keys is not part of the toggled list nor the referenced db.
+
+ """
+ # in python 3, 'map' returns an iterator, so wrapping with 'list' call creates same effect for both python 2 and 3
+ return all(list(map(self.toggle_item, args)))
+
+ def filter_items(self):
+ """
+ Filters the referenced database, showing only the items present in the toggle_db set.
+
+ :returns:
+ Filtered data of the original object.
+
+ """
+ return self._filter_method(self.__toggle_filter, self._data)
+
+ # private methods
+
+ def __set_initial_state(self, show_by_default):
+ try:
+ _ = (x for x in self._data)
+ if isinstance(self._data, dict):
+ self._filter_method = ToggleFilter.dict_filter
+ if show_by_default:
+ self._toggle_db = set(self._data.keys())
+ return
+ elif isinstance(self._data, list):
+ self._filter_method = ToggleFilter.list_filter
+ elif isinstance(self._data, set):
+ self._filter_method = ToggleFilter.set_filter
+ elif isinstance(self._data, tuple):
+ self._filter_method = ToggleFilter.tuple_filter
+ if show_by_default:
+ self._toggle_db = set(shallow_copy(self._data)) # assuming all relevant items have a unique identifier
+ return
+ except TypeError:
+ raise TypeError("provided data object is not iterable")
+
+ def __toggle_filter(self, x):
+ return (x in self._toggle_db)
+
+ # static utility methods
+
+ @staticmethod
+ def dict_filter(function, iterable):
+ assert isinstance(iterable, dict)
+ return {k: v
+ for k,v in iterable.items()
+ if function(k)}
+
+ @staticmethod
+ def list_filter(function, iterable):
+ # in python 3, filter returns an iterator, so wrapping with list creates same effect for both python 2 and 3
+ return list(filter(function, iterable))
+
+ @staticmethod
+ def set_filter(function, iterable):
+ return {x
+ for x in iterable
+ if function(x)}
+
+ @staticmethod
+ def tuple_filter(function, iterable):
+ return tuple(filter(function, iterable))
+
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py
new file mode 100755
index 00000000..7eda8635
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py
@@ -0,0 +1,596 @@
+import argparse
+from collections import namedtuple, OrderedDict
+from .common import list_intersect, list_difference
+from .text_opts import format_text
+from ..trex_stl_types import *
+from .constants import ON_OFF_DICT, UP_DOWN_DICT, FLOW_CTRL_DICT
+
+import sys
+import re
+import os
+
+ArgumentPack = namedtuple('ArgumentPack', ['name_or_flags', 'options'])
+ArgumentGroup = namedtuple('ArgumentGroup', ['type', 'args', 'options'])
+
+
+# list of available parsing options
+MULTIPLIER = 1
+MULTIPLIER_STRICT = 2
+PORT_LIST = 3
+ALL_PORTS = 4
+PORT_LIST_WITH_ALL = 5
+FILE_PATH = 6
+FILE_FROM_DB = 7
+SERVER_IP = 8
+STREAM_FROM_PATH_OR_FILE = 9
+DURATION = 10
+FORCE = 11
+DRY_RUN = 12
+XTERM = 13
+TOTAL = 14
+FULL_OUTPUT = 15
+IPG = 16
+SPEEDUP = 17
+COUNT = 18
+PROMISCUOUS = 19
+LINK_STATUS = 20
+LED_STATUS = 21
+TUNABLES = 22
+REMOTE_FILE = 23
+LOCKED = 24
+PIN_CORES = 25
+CORE_MASK = 26
+DUAL = 27
+FLOW_CTRL = 28
+SUPPORTED = 29
+
+GLOBAL_STATS = 50
+PORT_STATS = 51
+PORT_STATUS = 52
+STREAMS_STATS = 53
+STATS_MASK = 54
+CPU_STATS = 55
+MBUF_STATS = 56
+EXTENDED_STATS = 57
+EXTENDED_INC_ZERO_STATS = 58
+
+STREAMS_MASK = 60
+CORE_MASK_GROUP = 61
+
+# ALL_STREAMS = 61
+# STREAM_LIST_WITH_ALL = 62
+
+
+
+# list of ArgumentGroup types
+MUTEX = 1
+
+def check_negative(value):
+ ivalue = int(value)
+ if ivalue < 0:
+ raise argparse.ArgumentTypeError("non positive value provided: '{0}'".format(value))
+ return ivalue
+
+def match_time_unit(val):
+ '''match some val against time shortcut inputs '''
+ match = re.match("^(\d+(\.\d+)?)([m|h]?)$", val)
+ if match:
+ digit = float(match.group(1))
+ unit = match.group(3)
+ if not unit:
+ return digit
+ elif unit == 'm':
+ return digit*60
+ else:
+ return digit*60*60
+ else:
+ raise argparse.ArgumentTypeError("Duration should be passed in the following format: \n"
+ "-d 100 : in sec \n"
+ "-d 10m : in min \n"
+ "-d 1h : in hours")
+
+
+match_multiplier_help = """Multiplier should be passed in the following format:
+ [number][<empty> | bps | kbps | mbps | gbps | pps | kpps | mpps | %% ].
+
+ no suffix will provide an absolute factor, and a percentage
+ will provide a percentage of the line rate. Examples:
+
+ '-m 10',
+ '-m 10kbps',
+ '-m 10kbpsl1',
+ '-m 10mpps',
+ '-m 23%% '
+
+ '-m 23%%' : is 23%% L1 bandwidth
+ '-m 23mbps': is 23mbps in L2 bandwidth (including the 4-byte FCS)
+ '-m 23mbpsl1': is 23mbps in L1 bandwidth
+
+ """
+
+
+# decodes a multiplier string
+# if allow_update - a trailing '+'/'-' (update) suffix is allowed
+# divide_count states between how many entities the
+# value should be divided
+def decode_multiplier(val, allow_update = False, divide_count = 1):
+
+ factor_table = {None: 1, 'k': 1e3, 'm': 1e6, 'g': 1e9}
+ pattern = "^(\d+(\.\d+)?)(((k|m|g)?(bpsl1|pps|bps))|%)?"
+
+ # do we allow updates ? +/-
+ if not allow_update:
+ pattern += "$"
+ match = re.match(pattern, val)
+ op = None
+ else:
+ pattern += "([\+\-])?$"
+ match = re.match(pattern, val)
+ if match:
+ op = match.group(7)
+ else:
+ op = None
+
+ result = {}
+
+ if not match:
+ return None
+
+ # value in group 1
+ value = float(match.group(1))
+
+ # decode unit as whole
+ unit = match.group(3)
+
+ # k,m,g
+ factor = match.group(5)
+
+ # type of multiplier
+ m_type = match.group(6)
+
+ # raw type (factor)
+ if not unit:
+ result['type'] = 'raw'
+ result['value'] = value
+
+ # percentage
+ elif unit == '%':
+ result['type'] = 'percentage'
+ result['value'] = value
+
+ elif m_type == 'bps':
+ result['type'] = 'bps'
+ result['value'] = value * factor_table[factor]
+
+ elif m_type == 'pps':
+ result['type'] = 'pps'
+ result['value'] = value * factor_table[factor]
+
+ elif m_type == 'bpsl1':
+ result['type'] = 'bpsl1'
+ result['value'] = value * factor_table[factor]
+
+
+ if op == "+":
+ result['op'] = "add"
+ elif op == "-":
+ result['op'] = "sub"
+ else:
+ result['op'] = "abs"
+
+ if result['type'] != 'percentage': # 'type', not 'op' - percentage values are not divided
+ result['value'] = result['value'] / divide_count
+
+ return result
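+
+# Illustrative outputs (assuming the default divide_count = 1):
+# decode_multiplier("10kbps") -> {'type': 'bps', 'value': 10000.0, 'op': 'abs'}
+# decode_multiplier("2mpps+", allow_update = True) -> {'type': 'pps', 'value': 2000000.0, 'op': 'add'}
+# decode_multiplier("25%") -> {'type': 'percentage', 'value': 25.0, 'op': 'abs'}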
+
+
+
+def match_multiplier(val):
+ '''match some val against multiplier shortcut inputs '''
+ result = decode_multiplier(val, allow_update = True)
+ if not result:
+ raise argparse.ArgumentTypeError(match_multiplier_help)
+
+ return val
+
+
+def match_multiplier_strict(val):
+ '''match some val against multiplier shortcut inputs '''
+ result = decode_multiplier(val, allow_update = False)
+ if not result:
+ raise argparse.ArgumentTypeError(match_multiplier_help)
+
+ return val
+
+def hex_int (val):
+ pattern = r"0x[1-9a-fA-F][0-9a-fA-F]*"
+
+ if not re.match(pattern, val):
+ raise argparse.ArgumentTypeError("{0} is not a valid positive HEX formatted number".format(val))
+
+ return int(val, 16)
+
+
+def is_valid_file(filename):
+ if not os.path.isfile(filename):
+ raise argparse.ArgumentTypeError("The file '%s' does not exist" % filename)
+
+ return filename
+
+
+
+def decode_tunables (tunable_str):
+ tunables = {}
+
+ # split by comma to tokens
+ tokens = tunable_str.split(',')
+
+ # each token is of form X=Y
+ for token in tokens:
+ m = re.search('(\S+)=(.+)', token)
+ if not m:
+ raise argparse.ArgumentTypeError("bad syntax for tunables: {0}".format(token))
+ val = m.group(2) # string
+ if val.startswith(("'", '"')) and val.endswith(("'", '"')) and len(val) > 1: # need to remove the quotes from value
+ val = val[1:-1]
+ elif val.startswith('0x'): # hex
+ val = int(val, 16)
+ else:
+ try:
+ if '.' in val: # float
+ val = float(val)
+ else: # int
+ val = int(val)
+ except:
+ pass
+ tunables[m.group(1)] = val
+
+ return tunables
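+
+# Illustrative output:
+# decode_tunables("fsize=100,name='x',mask=0x3") -> {'fsize': 100, 'name': 'x', 'mask': 3}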
+
+
+
+OPTIONS_DB = {MULTIPLIER: ArgumentPack(['-m', '--multiplier'],
+ {'help': match_multiplier_help,
+ 'dest': "mult",
+ 'default': "1",
+ 'type': match_multiplier}),
+
+ MULTIPLIER_STRICT: ArgumentPack(['-m', '--multiplier'],
+ {'help': match_multiplier_help,
+ 'dest': "mult",
+ 'default': "1",
+ 'type': match_multiplier_strict}),
+
+ TOTAL: ArgumentPack(['-t', '--total'],
+ {'help': "traffic will be divided between all ports specified",
+ 'dest': "total",
+ 'default': False,
+ 'action': "store_true"}),
+
+ IPG: ArgumentPack(['-i', '--ipg'],
+ {'help': "IPG value in usec between packets. default will be from the pcap",
+ 'dest': "ipg_usec",
+ 'default': None,
+ 'type': float}),
+
+
+ SPEEDUP: ArgumentPack(['-s', '--speedup'],
+ {'help': "Factor to accelerate the injection. effectively means IPG = IPG / SPEEDUP",
+ 'dest': "speedup",
+ 'default': 1.0,
+ 'type': float}),
+
+ COUNT: ArgumentPack(['-n', '--count'],
+ {'help': "How many times to perform action [default is 1, 0 means forever]",
+ 'dest': "count",
+ 'default': 1,
+ 'type': int}),
+
+ PROMISCUOUS: ArgumentPack(['--prom'],
+ {'help': "Set port promiscuous on/off",
+ 'choices': ON_OFF_DICT}),
+
+ LINK_STATUS: ArgumentPack(['--link'],
+ {'help': 'Set link status up/down',
+ 'choices': UP_DOWN_DICT}),
+
+ LED_STATUS: ArgumentPack(['--led'],
+ {'help': 'Set LED status on/off',
+ 'choices': ON_OFF_DICT}),
+
+ FLOW_CTRL: ArgumentPack(['--fc'],
+ {'help': 'Set Flow Control type',
+ 'dest': 'flow_ctrl',
+ 'choices': FLOW_CTRL_DICT}),
+
+ SUPPORTED: ArgumentPack(['--supp'],
+ {'help': 'Show which attributes are supported by current NICs',
+ 'default': None,
+ 'action': 'store_true'}),
+
+ TUNABLES: ArgumentPack(['-t'],
+ {'help': "Sets tunables for a profile. Example: '-t fsize=100,pg_id=7'",
+ 'metavar': 'T1=VAL[,T2=VAL ...]',
+ 'dest': "tunables",
+ 'default': None,
+ 'action': 'merge',
+ 'type': decode_tunables}),
+
+ PORT_LIST: ArgumentPack(['--port', '-p'],
+ {"nargs": '+',
+ 'dest':'ports',
+ 'metavar': 'PORTS',
+ 'action': 'merge',
+ 'type': int,
+ 'help': "A list of ports on which to apply the command",
+ 'default': []}),
+
+ ALL_PORTS: ArgumentPack(['-a'],
+ {"action": "store_true",
+ "dest": "all_ports",
+ 'help': "Set this flag to apply the command on all available ports",
+ 'default': False},),
+
+ DURATION: ArgumentPack(['-d'],
+ {'action': "store",
+ 'metavar': 'TIME',
+ 'dest': 'duration',
+ 'type': match_time_unit,
+ 'default': -1.0,
+ 'help': "Set duration time for job."}),
+
+ FORCE: ArgumentPack(['--force'],
+ {"action": "store_true",
+ 'default': False,
+ 'help': "Set if you want to stop active ports before appyling command."}),
+
+ REMOTE_FILE: ArgumentPack(['-r', '--remote'],
+ {"action": "store_true",
+ 'default': False,
+ 'help': "file path should be interpeted by the server (remote file)"}),
+
+ DUAL: ArgumentPack(['--dual'],
+ {"action": "store_true",
+ 'default': False,
+ 'help': "Transmit in a dual mode - requires ownership on the adjacent port"}),
+
+ FILE_PATH: ArgumentPack(['-f'],
+ {'metavar': 'FILE',
+ 'dest': 'file',
+ 'nargs': 1,
+ 'required': True,
+ 'type': is_valid_file,
+ 'help': "File path to load"}),
+
+ FILE_FROM_DB: ArgumentPack(['--db'],
+ {'metavar': 'LOADED_STREAM_PACK',
+ 'help': "A stream pack which already loaded into console cache."}),
+
+ SERVER_IP: ArgumentPack(['--server'],
+ {'metavar': 'SERVER',
+ 'help': "server IP"}),
+
+ DRY_RUN: ArgumentPack(['-n', '--dry'],
+ {'action': 'store_true',
+ 'dest': 'dry',
+ 'default': False,
+ 'help': "Dry run - no traffic will be injected"}),
+
+ XTERM: ArgumentPack(['-x', '--xterm'],
+ {'action': 'store_true',
+ 'dest': 'xterm',
+ 'default': False,
+ 'help': "Starts TUI in xterm window"}),
+
+ LOCKED: ArgumentPack(['-l', '--locked'],
+ {'action': 'store_true',
+ 'dest': 'locked',
+ 'default': False,
+ 'help': "Locks TUI on legend mode"}),
+
+ FULL_OUTPUT: ArgumentPack(['--full'],
+ {'action': 'store_true',
+ 'help': "Prompt full info in a JSON format"}),
+
+ GLOBAL_STATS: ArgumentPack(['-g'],
+ {'action': 'store_true',
+ 'help': "Fetch only global statistics"}),
+
+ PORT_STATS: ArgumentPack(['-p'],
+ {'action': 'store_true',
+ 'help': "Fetch only port statistics"}),
+
+ PORT_STATUS: ArgumentPack(['--ps'],
+ {'action': 'store_true',
+ 'help': "Fetch only port status data"}),
+
+ STREAMS_STATS: ArgumentPack(['-s'],
+ {'action': 'store_true',
+ 'help': "Fetch only streams stats"}),
+
+ CPU_STATS: ArgumentPack(['-c'],
+ {'action': 'store_true',
+ 'help': "Fetch only CPU utilization stats"}),
+
+ MBUF_STATS: ArgumentPack(['-m'],
+ {'action': 'store_true',
+ 'help': "Fetch only MBUF utilization stats"}),
+
+ EXTENDED_STATS: ArgumentPack(['-x'],
+ {'action': 'store_true',
+ 'help': "Fetch xstats of port, excluding lines with zero values"}),
+
+ EXTENDED_INC_ZERO_STATS: ArgumentPack(['--xz'],
+ {'action': 'store_true',
+ 'help': "Fetch xstats of port, including lines with zero values"}),
+
+ STREAMS_MASK: ArgumentPack(['--streams'],
+ {"nargs": '+',
+ 'dest':'streams',
+ 'metavar': 'STREAMS',
+ 'type': int,
+ 'help': "A list of stream IDs to query about. Default: analyze all streams",
+ 'default': []}),
+
+
+ PIN_CORES: ArgumentPack(['--pin'],
+ {'action': 'store_true',
+ 'dest': 'pin_cores',
+ 'default': False,
+ 'help': "Pin cores to interfaces - cores will be divided between interfaces (performance boot for symetric profiles)"}),
+
+ CORE_MASK: ArgumentPack(['--core_mask'],
+ {'action': 'store',
+ 'nargs': '+',
+ 'type': hex_int,
+ 'dest': 'core_mask',
+ 'default': None,
+ 'help': "Core mask - only cores responding to the bit mask will be active"}),
+
+ # advanced options
+ PORT_LIST_WITH_ALL: ArgumentGroup(MUTEX, [PORT_LIST,
+ ALL_PORTS],
+ {'required': False}),
+
+ STREAM_FROM_PATH_OR_FILE: ArgumentGroup(MUTEX, [FILE_PATH,
+ FILE_FROM_DB],
+ {'required': True}),
+ STATS_MASK: ArgumentGroup(MUTEX, [GLOBAL_STATS,
+ PORT_STATS,
+ PORT_STATUS,
+ STREAMS_STATS,
+ CPU_STATS,
+ MBUF_STATS,
+ EXTENDED_STATS,
+ EXTENDED_INC_ZERO_STATS,],
+ {}),
+
+
+ CORE_MASK_GROUP: ArgumentGroup(MUTEX, [PIN_CORES,
+ CORE_MASK],
+ {'required': False}),
+
+ }
+
+class _MergeAction(argparse._AppendAction):
+ def __call__(self, parser, namespace, values, option_string=None):
+ items = getattr(namespace, self.dest)
+ if not items:
+ items = values
+ elif type(items) is list and type(values) is list:
+ items.extend(values)
+ elif type(items) is dict and type(values) is dict: # tunables are dict
+ items.update(values)
+ else:
+ raise Exception("Argparser 'merge' option should be used on dict or list.")
+
+ setattr(namespace, self.dest, items)
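+
+# The 'merge' action accumulates values from repeated options instead of overwriting them.
+# Hypothetical invocations: '-p 1 2 -p 3' yields ports == [1, 2, 3],
+# and '-t a=1 -t b=2' yields tunables == {'a': 1, 'b': 2}.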
+
+class CCmdArgParser(argparse.ArgumentParser):
+
+ def __init__(self, stateless_client, *args, **kwargs):
+ super(CCmdArgParser, self).__init__(*args, **kwargs)
+ self.stateless_client = stateless_client
+ self.cmd_name = kwargs.get('prog')
+ self.register('action', 'merge', _MergeAction)
+
+ # hook this to the logger
+ def _print_message(self, message, file=None):
+ self.stateless_client.logger.log(message)
+
+ def error(self, message):
+ self.print_usage()
+ self._print_message(('%s: error: %s\n') % (self.prog, message))
+ raise ValueError(message)
+
+ def has_ports_cfg (self, opts):
+ return hasattr(opts, "all_ports") or hasattr(opts, "ports")
+
+ def parse_args(self, args=None, namespace=None, default_ports=None, verify_acquired=False):
+ try:
+ opts = super(CCmdArgParser, self).parse_args(args, namespace)
+ if opts is None:
+ return RC_ERR("'{0}' - invalid arguments".format(self.cmd_name))
+
+ if not self.has_ports_cfg(opts):
+ return opts
+
+ # if all ports were requested ('-a') or no ports were explicitly specified
+ if (getattr(opts, "all_ports", None) == True) or (getattr(opts, "ports", None) == []):
+ if default_ports is None:
+ opts.ports = self.stateless_client.get_acquired_ports()
+ else:
+ opts.ports = default_ports
+
+ # validate the configured ports
+ invalid_ports = list_difference(opts.ports, self.stateless_client.get_all_ports())
+ if invalid_ports:
+ msg = "{0}: port(s) {1} are not valid port IDs".format(self.cmd_name, invalid_ports)
+ self.stateless_client.logger.log(format_text(msg, 'bold'))
+ return RC_ERR(msg)
+
+ # verify acquired ports
+ if verify_acquired:
+ acquired_ports = self.stateless_client.get_acquired_ports()
+
+ diff = list_difference(opts.ports, acquired_ports)
+ if diff:
+ msg = "{0} - port(s) {1} are not acquired".format(self.cmd_name, diff)
+ self.stateless_client.logger.log(format_text(msg, 'bold'))
+ return RC_ERR(msg)
+
+ # no acquired ports at all
+ if not acquired_ports:
+ msg = "{0} - no acquired ports".format(self.cmd_name)
+ self.stateless_client.logger.log(format_text(msg, 'bold'))
+ return RC_ERR(msg)
+
+
+ return opts
+
+ except ValueError as e:
+ return RC_ERR("'{0}' - {1}".format(self.cmd_name, str(e)))
+
+ except SystemExit:
+ # recover from system exit scenarios, such as "help", or bad arguments.
+ return RC_ERR("'{0}' - {1}".format(self.cmd_name, "no action"))
+
+
+def get_flags (opt):
+ return OPTIONS_DB[opt].name_or_flags
+
+def gen_parser(stateless_client, op_name, description, *args):
+ parser = CCmdArgParser(stateless_client, prog=op_name, conflict_handler='resolve',
+ description=description)
+ for param in args:
+ try:
+
+ if isinstance(param, int):
+ argument = OPTIONS_DB[param]
+ else:
+ argument = param
+
+ if isinstance(argument, ArgumentGroup):
+ if argument.type == MUTEX:
+ # handle as mutually exclusive group
+ group = parser.add_mutually_exclusive_group(**argument.options)
+ for sub_argument in argument.args:
+ group.add_argument(*OPTIONS_DB[sub_argument].name_or_flags,
+ **OPTIONS_DB[sub_argument].options)
+ else:
+ # ignore invalid objects
+ continue
+ elif isinstance(argument, ArgumentPack):
+ parser.add_argument(*argument.name_or_flags,
+ **argument.options)
+ else:
+ # ignore invalid objects
+ continue
+ except KeyError as e:
+ cause = e.args[0]
+ raise KeyError("The attribute '{0}' is missing as a field of the {1} option.\n".format(cause, param))
+ return parser
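+
+# Typical usage sketch (assumes a connected stateless client object):
+#
+#   parser = gen_parser(client, "start", "start traffic", MULTIPLIER, TOTAL, PORT_LIST_WITH_ALL)
+#   opts = parser.parse_args("-m 2mpps -a".split())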
+
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/pcap.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/pcap.py
new file mode 100644
index 00000000..ab4f98a7
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/pcap.py
@@ -0,0 +1,29 @@
+import os
+from ..trex_stl_packet_builder_scapy import RawPcapReader, RawPcapWriter
+
+
+def __ts_key (a):
+ return float(a[1][0]) + (float(a[1][1]) / 1e6)
+
+def merge_cap_files (pcap_file_list, out_filename, delete_src = False):
+
+ if not all([os.path.exists(f) for f in pcap_file_list]):
+ print("failed to merge cap file list...\nnot all files exist\n")
+ return
+
+ out_pkts = []
+ for src in pcap_file_list:
+ pkts = RawPcapReader(src)
+ out_pkts += pkts
+ if delete_src:
+ os.unlink(src)
+
+ # sort by timestamp
+ out_pkts = sorted(out_pkts, key = __ts_key)
+
+ writer = RawPcapWriter(out_filename, linktype = 1)
+
+ writer._write_header(None)
+ for pkt in out_pkts:
+ writer._write_packet(pkt[0], sec=pkt[1][0], usec=pkt[1][1], caplen=pkt[1][2], wirelen=None)
+
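+# Usage sketch: merge two captures into one, sorted by packet timestamp
+# (assumes the input files exist; linktype 1 = Ethernet):
+#
+#   merge_cap_files(['a.pcap', 'b.pcap'], 'merged.pcap', delete_src = False)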
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py
new file mode 100644
index 00000000..bfb96950
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py
@@ -0,0 +1,195 @@
+import json
+import re
+
+TEXT_CODES = {'bold': {'start': '\x1b[1m',
+ 'end': '\x1b[22m'},
+ 'cyan': {'start': '\x1b[36m',
+ 'end': '\x1b[39m'},
+ 'blue': {'start': '\x1b[34m',
+ 'end': '\x1b[39m'},
+ 'red': {'start': '\x1b[31m',
+ 'end': '\x1b[39m'},
+ 'magenta': {'start': '\x1b[35m',
+ 'end': '\x1b[39m'},
+ 'green': {'start': '\x1b[32m',
+ 'end': '\x1b[39m'},
+ 'yellow': {'start': '\x1b[33m',
+ 'end': '\x1b[39m'},
+ 'underline': {'start': '\x1b[4m',
+ 'end': '\x1b[24m'}}
+
+class TextCodesStripper:
+ keys = [re.escape(v['start']) for k,v in TEXT_CODES.items()]
+ keys += [re.escape(v['end']) for k,v in TEXT_CODES.items()]
+ pattern = re.compile("|".join(keys))
+
+ @staticmethod
+ def strip (s):
+ return re.sub(TextCodesStripper.pattern, '', s)
+
+def format_num (size, suffix = "", compact = True, opts = None):
+ if opts is None:
+ opts = ()
+
+ txt = "NaN"
+
+ if type(size) == str:
+ return "N/A"
+
+ u = ''
+
+ if compact:
+ for unit in ['','K','M','G','T','P']:
+ if abs(size) < 1000.0:
+ u = unit
+ break
+ size /= 1000.0
+
+ if isinstance(size, float):
+ txt = "%3.2f" % (size)
+ else:
+ txt = "{:,}".format(size)
+
+ if u or suffix:
+ txt += " {:}{:}".format(u, suffix)
+
+ if isinstance(opts, tuple):
+ return format_text(txt, *opts)
+ else:
+ return format_text(txt, (opts))
+
+
+
+def format_time (t_sec):
+ if t_sec < 0:
+ return "infinite"
+
+ if t_sec == 0:
+ return "zero"
+
+ if t_sec < 1:
+ # low numbers
+ for unit in ['ms', 'usec', 'ns']:
+ t_sec *= 1000.0
+ if t_sec >= 1.0:
+ return '{:,.2f} [{:}]'.format(t_sec, unit)
+
+ return "NaN"
+
+ else:
+ # seconds
+ if t_sec < 60.0:
+ return '{:,.2f} [{:}]'.format(t_sec, 'sec')
+
+ # minutes
+ t_sec /= 60.0
+ if t_sec < 60.0:
+ return '{:,.2f} [{:}]'.format(t_sec, 'minutes')
+
+ # hours
+ t_sec /= 60.0
+ if t_sec < 24.0:
+ return '{:,.2f} [{:}]'.format(t_sec, 'hours')
+
+ # days
+ t_sec /= 24.0
+ return '{:,.2f} [{:}]'.format(t_sec, 'days')
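+
+# Illustrative outputs:
+# format_time(0.5) -> '500.00 [ms]'
+# format_time(90)  -> '1.50 [minutes]'
+# format_time(-1)  -> 'infinite'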
+
+
+def format_percentage (size):
+ return "%0.2f %%" % (size)
+
+def bold(text):
+ return text_attribute(text, 'bold')
+
+
+def cyan(text):
+ return text_attribute(text, 'cyan')
+
+
+def blue(text):
+ return text_attribute(text, 'blue')
+
+
+def red(text):
+ return text_attribute(text, 'red')
+
+
+def magenta(text):
+ return text_attribute(text, 'magenta')
+
+
+def green(text):
+ return text_attribute(text, 'green')
+
+def yellow(text):
+ return text_attribute(text, 'yellow')
+
+def underline(text):
+ return text_attribute(text, 'underline')
+
+
+def text_attribute(text, attribute):
+ return "{start}{txt}{stop}".format(start=TEXT_CODES[attribute]['start'],
+ txt=text,
+ stop=TEXT_CODES[attribute]['end'])
+
+
+FUNC_DICT = {'blue': blue,
+ 'bold': bold,
+ 'green': green,
+ 'yellow': yellow,
+ 'cyan': cyan,
+ 'magenta': magenta,
+ 'underline': underline,
+ 'red': red}
+
+
+def format_text(text, *args):
+ return_string = text
+ for i in args:
+ func = FUNC_DICT.get(i)
+ if func:
+ return_string = func(return_string)
+
+ return return_string
+
+
+def format_threshold (value, red_zone, green_zone):
+ try:
+ if value >= red_zone[0] and value <= red_zone[1]:
+ return format_text("{0}".format(value), 'red')
+
+ if value >= green_zone[0] and value <= green_zone[1]:
+ return format_text("{0}".format(value), 'green')
+ except TypeError:
+ # if value is not comparable or not a number - skip this
+ pass
+
+ return "{0}".format(value)
+
+# pretty print for JSON
+def pretty_json (json_str, use_colors = True):
+ pretty_str = json.dumps(json.loads(json_str), indent = 4, separators=(',', ': '), sort_keys = True)
+
+ if not use_colors:
+ return pretty_str
+
+ try:
+ # int numbers
+ pretty_str = re.sub(r'([ ]*:[ ]+)(\-?[1-9][0-9]*[^.])',r'\1{0}'.format(blue(r'\2')), pretty_str)
+ # float
+ pretty_str = re.sub(r'([ ]*:[ ]+)(\-?[1-9][0-9]*\.[0-9]+)',r'\1{0}'.format(magenta(r'\2')), pretty_str)
+ # # strings
+ #
+ pretty_str = re.sub(r'([ ]*:[ ]+)("[^"]*")',r'\1{0}'.format(red(r'\2')), pretty_str)
+ pretty_str = re.sub(r"('[^']*')", r'{0}\1{1}'.format(TEXT_CODES['magenta']['start'],
+ TEXT_CODES['red']['start']), pretty_str)
+ except :
+ pass
+
+ return pretty_str
+
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py
new file mode 100644
index 00000000..393ba111
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py
@@ -0,0 +1,35 @@
+from __future__ import print_function
+
+import sys
+from texttable import Texttable
+from .text_opts import format_text
+
+
+class TRexTextTable(Texttable):
+
+ def __init__(self):
+ Texttable.__init__(self)
+ # set class attributes so that it'll be more like TRex standard output
+ self.set_chars(['-', '|', '-', '-'])
+ self.set_deco(Texttable.HEADER | Texttable.VLINES)
+
+class TRexTextInfo(Texttable):
+
+ def __init__(self):
+ Texttable.__init__(self)
+ # set class attributes so that it'll be more like TRex standard output
+ self.set_chars(['-', ':', '-', '-'])
+ self.set_deco(Texttable.VLINES)
+
+def generate_trex_stats_table():
+ pass
+
+def print_table_with_header(texttable_obj, header="", untouched_header="", buffer=sys.stdout):
+ header = header.replace("_", " ").title() + untouched_header
+ print(format_text(header, 'cyan', 'underline') + "\n", file=buffer)
+
+ print((texttable_obj.draw() + "\n"), file=buffer)
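+
+# Usage sketch, using texttable's standard header/add_row API (hypothetical data):
+#
+#   table = TRexTextTable()
+#   table.header(['Port', 'TX [Mbps]'])
+#   table.add_row([0, 10.5])
+#   print_table_with_header(table, header="port stats")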
+
+if __name__ == "__main__":
+ pass
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/zipmsg.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/zipmsg.py
new file mode 100644
index 00000000..397ada16
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/zipmsg.py
@@ -0,0 +1,32 @@
+import zlib
+import struct
+
+class ZippedMsg:
+
+ MSG_COMPRESS_THRESHOLD = 256
+ MSG_COMPRESS_HEADER_MAGIC = 0xABE85CEA
+
+ def check_threshold (self, msg):
+ return len(msg) >= self.MSG_COMPRESS_THRESHOLD
+
+ def compress (self, msg):
+ # compress
+ compressed = zlib.compress(msg)
+ new_msg = struct.pack(">II", self.MSG_COMPRESS_HEADER_MAGIC, len(msg)) + compressed
+ return new_msg
+
+
+ def decompress (self, msg):
+ if len(msg) < 8:
+ return None
+
+ t = struct.unpack(">II", msg[:8])
+ if (t[0] != self.MSG_COMPRESS_HEADER_MAGIC):
+ return None
+
+ x = zlib.decompress(msg[8:])
+ if len(x) != t[1]:
+ return None
+
+ return x
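+
+# Round-trip sketch (msg is assumed to be a raw bytes payload above the threshold):
+#
+#   z = ZippedMsg()
+#   if z.check_threshold(msg):
+#       packed = z.compress(msg)
+#       assert z.decompress(packed) == msg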
+
diff --git a/scripts/automation/trex_control_plane/unit_tests/__init__.py b/scripts/automation/trex_control_plane/unit_tests/__init__.py
new file mode 100755
index 00000000..d3f5a12f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/__init__.py
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/unit_tests/client_launching_test.py b/scripts/automation/trex_control_plane/unit_tests/client_launching_test.py
new file mode 100755
index 00000000..42d79af5
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/client_launching_test.py
@@ -0,0 +1,31 @@
+#!/router/bin/python
+from control_plane_general_test import CControlPlaneGeneral_Test
+from Client.trex_client import CTRexClient
+
+import socket
+from nose.tools import assert_raises
+
+
+class CClientLaunching_Test(CControlPlaneGeneral_Test):
+ def __init__(self):
+ super(CClientLaunching_Test, self).__init__()
+ pass
+
+ def setUp(self):
+ pass
+
+ def test_wrong_hostname(self):
+ # self.tmp_server = CTRexClient('some-invalid-hostname')
+ assert_raises (socket.gaierror, CTRexClient, 'some-invalid-hostname' )
+
+ # perform this test only if server is down, but server machine is up
+ def test_refused_connection(self):
+ assert_raises (socket.error, CTRexClient, 'trex-dan') # Assuming 'trex-dan' server is down! otherwise test fails
+
+
+ def test_verbose_mode(self):
+ tmp_client = CTRexClient(self.trex_server_name, verbose = True)
+ pass
+
+ def tearDown(self):
+ pass
diff --git a/scripts/automation/trex_control_plane/unit_tests/control_plane_general_test.py b/scripts/automation/trex_control_plane/unit_tests/control_plane_general_test.py
new file mode 100755
index 00000000..32ad5243
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/control_plane_general_test.py
@@ -0,0 +1,72 @@
+#!/router/bin/python
+
+__copyright__ = "Copyright 2015"
+
+"""
+Name:
+ control_plane_general_test.py
+
+
+Description:
+
+ This script creates the functionality to test the performance of the TRex traffic generator control plane.
+ The scenarios assume a WORKING server is listening and processing the requests.
+
+::
+
+ Topology:
+
+ -------- --------
+ | | | |
+ | Client | <-----JSON-RPC------> | Server |
+ | | | |
+ -------- --------
+
+"""
+from nose.plugins import Plugin
+# import misc_methods
+import sys
+import os
+# from CPlatformUnderTest import *
+# from CPlatform import *
+import termstyle
+import threading
+from common.trex_exceptions import *
+from Client.trex_client import CTRexClient
+# import Client.outer_packages
+# import Client.trex_client
+
+TREX_SERVER = None
+
+class CTRexCP():
+ trex_server = None
+
+def setUpModule(module):
+ pass
+
+def tearDownModule(module):
+ pass
+
+
+class CControlPlaneGeneral_Test(object):#(unittest.TestCase):
+ """This class defines the general testcase of the control plane service"""
+ def __init__ (self):
+ self.trex_server_name = 'csi-kiwi-02'
+ self.trex = CTRexClient(self.trex_server_name)
+ pass
+
+ def setUp(self):
+ # initialize server connection for single client
+ # self.server = CTRexClient(self.trex_server)
+ pass
+
+ ########################################################################
+ #### DO NOT ADD TESTS TO THIS FILE ####
+ #### Tests added here will run once for EVERY test sub-class ####
+ ########################################################################
+
+ def tearDown(self):
+ pass
+
+ def check_for_trex_crash(self):
+ pass
diff --git a/scripts/automation/trex_control_plane/unit_tests/control_plane_unit_test.py b/scripts/automation/trex_control_plane/unit_tests/control_plane_unit_test.py
new file mode 100755
index 00000000..1120256c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/control_plane_unit_test.py
@@ -0,0 +1,73 @@
+#!/router/bin/python
+
+__copyright__ = "Copyright 2014"
+
+
+
+import os
+import sys
+import nose_outer_packages
+import nose
+from nose.plugins import Plugin
+from rednose import RedNose
+import termstyle
+import control_plane_general_test
+
+class TRexCPConfiguringPlugin(Plugin):
+ def options(self, parser, env = os.environ):
+ super(TRexCPConfiguringPlugin, self).options(parser, env)
+ parser.add_option('-t', '--trex-server', action='store',
+ dest='trex_server', default='trex-dan',
+ help='Specify TRex server hostname. This server will be used to test control-plane functionality.')
+
+ def configure(self, options, conf):
+ if options.trex_server:
+ self.trex_server = options.trex_server
+
+ def begin (self):
+ # initialize the CTRexCP global testing class, to be used by, and accessible to, all tests
+ print "assigned trex_server name"
+ control_plane_general_test.CTRexCP.trex_server = self.trex_server
+
+ def finalize(self, result):
+ pass
+
+
+
+if __name__ == "__main__":
+
+ # setting defaults. By default we run all the test suite
+ specific_tests = False
+ disableLogCapture = False
+ long_test = False
+ report_dir = "reports"
+
+ nose_argv= sys.argv + ['-s', '-v', '--exe', '--rednose', '--detailed-errors']
+
+ try:
+ result = nose.run(argv = nose_argv, addplugins = [RedNose(), TRexCPConfiguringPlugin()])
+ if (result == True):
+ print termstyle.green("""
+ ..::''''::..
+ .;'' ``;.
+ :: :: :: ::
+ :: :: :: ::
+ :: :: :: ::
+ :: .:' :: :: `:. ::
+ :: : : ::
+ :: `:. .:' ::
+ `;..``::::''..;'
+ ``::,,,,::''
+
+ ___ ___ __________
+ / _ \/ _ | / __/ __/ /
+ / ___/ __ |_\ \_\ \/_/
+ /_/ /_/ |_/___/___(_)
+
+ """)
+ sys.exit(0)
+ else:
+ sys.exit(-1)
+
+ finally:
+ pass \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/unit_tests/functional_test.py b/scripts/automation/trex_control_plane/unit_tests/functional_test.py
new file mode 100755
index 00000000..30836985
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/functional_test.py
@@ -0,0 +1,160 @@
+#!/router/bin/python
+from control_plane_general_test import CControlPlaneGeneral_Test
+from Client.trex_client import CTRexClient
+
+import socket
+from nose.tools import assert_raises, assert_equal, assert_not_equal
+from common.trex_status_e import TRexStatus
+from common.trex_exceptions import *
+from enum import Enum
+import time
+
+
+class CTRexStartStop_Test(CControlPlaneGeneral_Test):
+ def __init__(self):
+ super(CTRexStartStop_Test, self).__init__()
+ self.valid_start_params = dict( c = 4,
+ m = 1.1,
+ d = 100,
+ f = 'avl/sfr_delay_10_1g.yaml',
+ nc = True,
+ p = True,
+ l = 1000)
+
+ def setUp(self):
+ pass
+
+ def test_mandatory_param_error(self):
+ start_params = dict( c = 4,
+ m = 1.1,
+ d = 70,
+ # f = 'avl/sfr_delay_10_1g.yaml', <-- f (mandatory) is not provided on purpose
+ nc = True,
+ p = True,
+ l = 1000)
+
+ assert_raises(TypeError, self.trex.start_trex, **start_params)
+
+ def test_parameter_name_error(self):
+ ret = self.trex.start_trex( c = 4,
+ wrong_key = 1.1, # <----- This key does not exist in TRex API
+ d = 70,
+ f = 'avl/sfr_delay_10_1g.yaml',
+ nc = True,
+ p = True,
+ l = 1000)
+
+ time.sleep(5)
+
+ # check for failure status
+ run_status = self.trex.get_running_status()
+ assert isinstance(run_status, dict)
+ assert_equal (run_status['state'], TRexStatus.Idle )
+ assert_equal (run_status['verbose'], "TRex run failed due to wrong input parameters, or due to reachability issues.")
+ assert_raises(TRexError, self.trex.get_running_info)
+
+ def test_too_early_sample(self):
+ ret = self.trex.start_trex(**self.valid_start_params)
+
+ assert ret==True
+ # issue get_running_info() too soon, without any(!) sleep
+ run_status = self.trex.get_running_status()
+ assert isinstance(run_status, dict)
+ assert_equal (run_status['state'], TRexStatus.Starting )
+ assert_raises(TRexWarning, self.trex.get_running_info)
+
+ ret = self.trex.stop_trex()
+ assert ret==True # make sure stop succeeded
+ assert self.trex.is_running() == False
+
+ def test_start_sampling_on_time(self):
+ ret = self.trex.start_trex(**self.valid_start_params)
+ assert ret==True
+ time.sleep(6)
+
+ run_status = self.trex.get_running_status()
+ assert isinstance(run_status, dict)
+ assert_equal (run_status['state'], TRexStatus.Running )
+
+ run_info = self.trex.get_running_info()
+ assert isinstance(run_info, dict)
+ ret = self.trex.stop_trex()
+ assert ret==True # make sure stop succeeded
+ assert self.trex.is_running() == False
+
+ def test_start_more_than_once_same_user(self):
+ assert self.trex.is_running() == False # first, make sure TRex is not running
+ ret = self.trex.start_trex(**self.valid_start_params) # start 1st TRex run
+ assert ret == True # make sure 1st run submitted successfully
+ # time.sleep(1)
+ assert_raises(TRexInUseError, self.trex.start_trex, **self.valid_start_params) # try to start TRex again
+
+ ret = self.trex.stop_trex()
+ assert ret==True # make sure stop succeeded
+ assert self.trex.is_running() == False
+
+ def test_start_more_than_once_different_users(self):
+ assert self.trex.is_running() == False # first, make sure TRex is not running
+ ret = self.trex.start_trex(**self.valid_start_params) # start 1st TRex run
+ assert ret == True # make sure 1st run submitted successfully
+ # time.sleep(1)
+
+ tmp_trex = CTRexClient(self.trex_server_name) # initialize another client connecting same server
+ assert_raises(TRexInUseError, tmp_trex.start_trex, **self.valid_start_params) # try to start TRex again
+
+ ret = self.trex.stop_trex()
+ assert ret==True # make sure stop succeeded
+ assert self.trex.is_running() == False
+
+ def test_simultaneous_sampling(self):
+ assert self.trex.is_running() == False # first, make sure TRex is not running
+ tmp_trex = CTRexClient(self.trex_server_name) # initialize another client connecting same server
+ ret = self.trex.start_trex(**self.valid_start_params) # start TRex run
+ assert ret == True # make sure 1st run submitted successfully
+
+ time.sleep(6)
+ # now, sample server from both clients
+ while (self.trex.is_running()):
+ info_1 = self.trex.get_running_info()
+ info_2 = tmp_trex.get_running_info()
+
+ # make sure samples are consistent
+ if self.trex.get_result_obj().is_valid_hist():
+ assert tmp_trex.get_result_obj().is_valid_hist() == True
+ if self.trex.get_result_obj().is_done_warmup():
+ assert tmp_trex.get_result_obj().is_done_warmup() == True
+ # except TRexError as inst: # TRex might have stopped between is_running result and get_running_info() call
+ # # hence, ignore that case
+ # break
+
+ assert self.trex.is_running() == False
+
+ def test_fast_toggling(self):
+ assert self.trex.is_running() == False
+ for i in range(20):
+ ret = self.trex.start_trex(**self.valid_start_params) # start TRex run
+ assert ret == True
+ assert self.trex.is_running() == False # we expect the status to be 'Starting'
+ ret = self.trex.stop_trex()
+ assert ret == True
+ assert self.trex.is_running() == False
+ pass
+
+
+ def tearDown(self):
+ pass
+
+class CBasicQuery_Test(CControlPlaneGeneral_Test):
+ def __init__(self):
+ super(CBasicQuery_Test, self).__init__()
+ pass
+
+ def setUp(self):
+ pass
+
+ def test_is_running(self):
+ assert self.trex.is_running() == False
+
+
+ def tearDown(self):
+ pass
diff --git a/scripts/automation/trex_control_plane/unit_tests/nose_outer_packages.py b/scripts/automation/trex_control_plane/unit_tests/nose_outer_packages.py
new file mode 100755
index 00000000..b5b78db7
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/nose_outer_packages.py
@@ -0,0 +1,27 @@
+#!/router/bin/python
+
+import sys,site
+import platform,os
+
+CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory
+PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, 'python_lib'))
+
+
+TEST_MODULES = ['nose-1.3.4',
+ 'rednose-0.4.1',
+ 'termstyle'
+ ]
+
+def import_test_modules ():
+ sys.path.append(ROOT_PATH)
+ import_module_list(TEST_MODULES)
+
+def import_module_list (modules_list):
+ assert(isinstance(modules_list, list))
+ for p in modules_list:
+ full_path = os.path.join(PATH_TO_PYTHON_LIB, p)
+ site.addsitedir(full_path)
+
+import_test_modules()
diff --git a/scripts/automation/trex_control_plane/unit_tests/sock.py b/scripts/automation/trex_control_plane/unit_tests/sock.py
new file mode 100755
index 00000000..29248e3e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/sock.py
@@ -0,0 +1,552 @@
+import os
+import dpkt
+import struct
+import socket
+import sys
+import argparse;
+
+
+H_SCRIPT_VER = "0.1"
+
+class sock_driver(object):
+ args=None;
+
+
+def nl (buf):
+ return ( struct.unpack('>I', buf)[0]);
+
+def dump_tuple (t):
+ for obj in t:
+ print hex(obj),",",
+
+class CFlowRec:
+ def __init__ (self):
+ self.is_init_dir=False;
+ self.bytes=0;
+ self.data=None;
+
+ def __str__ (self):
+ if self.is_init_dir :
+ s=" client "
+ else:
+ s=" server "
+ s+= " %d " %(self.bytes)
+ return (s);
+
+
+
+class CPcapFileReader:
+ def __init__ (self,file_name):
+ self.file_name=file_name;
+ self.tuple=None;
+ self.swap=False;
+ self.info=[];
+
+ def dump_info (self):
+ for obj in self.info:
+ print obj
+ #print "'",obj.data,"'"
+
+ def is_client_side (self,swap):
+ if self.swap ==swap:
+ return (True);
+ else:
+ return (False);
+
+ def add_server(self,server,data):
+ r=CFlowRec();
+ r.is_init_dir =False;
+ r.bytes = server
+ r.data=data
+ self.info.append(r);
+
+ def add_client(self,client,data):
+ r=CFlowRec();
+ r.is_init_dir =True;
+ r.bytes = client
+ r.data=data
+ self.info.append(r);
+
+ def check_tcp_flow (self):
+ f = open(self.file_name)
+ pcap = dpkt.pcap.Reader(f)
+ for ts, buf in pcap:
+ eth = dpkt.ethernet.Ethernet(buf)
+ ip = eth.data
+ tcp = ip.data
+ if ip.p != 6 :
+ raise Exception("not a TCP flow ..");
+ if tcp.flags != dpkt.tcp.TH_SYN :
+ raise Exception("first packet should be with SYN");
+ break;
+ f.close();
+
+ def check_one_flow (self):
+ cnt=1
+ client=0;
+ server=0;
+ client_data=''
+ server_data=''
+ is_c=False # the direction
+ is_s=False
+ f = open(self.file_name)
+ pcap = dpkt.pcap.Reader(f)
+ for ts, buf in pcap:
+ eth = dpkt.ethernet.Ethernet(buf)
+ ip = eth.data
+ tcp = ip.data
+ pld = tcp.data;
+
+ pkt_swap=False
+ if nl(ip.src) > nl(ip.dst):
+ pkt_swap=True
+ tuple= (nl(ip.dst),nl(ip.src), tcp.dport ,tcp.sport,ip.p );
+ else:
+ tuple= (nl(ip.src),nl(ip.dst) ,tcp.sport,tcp.dport,ip.p );
+
+ if self.tuple == None:
+ self.swap=pkt_swap
+ self.tuple=tuple
+ else:
+ if self.tuple != tuple:
+ raise Exception("More than one flow - can't process this flow");
+
+
+ print " %5d," % (cnt),
+ if self.is_client_side (pkt_swap):
+ print "client",
+ if len(pld) >0 :
+ if is_c==False:
+ is_c=True
+ if is_s:
+ self.add_server(server,server_data);
+ server=0;
+ server_data=''
+ is_s=False;
+
+ client+=len(pld);
+ client_data=client_data+pld;
+ else:
+ if len(pld) >0 :
+ if is_s==False:
+ is_s=True
+ if is_c:
+ self.add_client(client,client_data);
+ client=0;
+ client_data=''
+ is_c=False;
+
+ server+=len(pld)
+ server_data=server_data+pld;
+
+ print "server",
+ print " %5d" % (len(pld)),
+ dump_tuple (tuple)
+ print
+
+ cnt=cnt+1
+
+ if is_c:
+ self.add_client(client,client_data);
+ if is_s:
+ self.add_server(server,server_data);
+
+ f.close();
+
+
+class CClientServerCommon(object):
+
+ def __init__ (self):
+ pass;
+
+ def send_info (self,data):
+ print "server send %d bytes" % (len(data))
+ self.connection.sendall(data)
+
+ def rcv_info (self,msg_size):
+ print "server wait for %d bytes" % (msg_size)
+
+ bytes_recd = 0
+ while bytes_recd < msg_size:
+ chunk = self.connection.recv(min(msg_size - bytes_recd, 2048))
+ if chunk == '':
+ raise RuntimeError("socket connection broken")
+ bytes_recd = bytes_recd + len(chunk)
+
+
+ def process (self,is_server):
+ pcapinfo=self.pcapr.info
+ for obj in pcapinfo:
+ if is_server:
+ if obj.is_init_dir:
+ self.rcv_info (obj.bytes);
+ else:
+ self.send_info (obj.data);
+ else:
+ if obj.is_init_dir:
+ self.send_info (obj.data);
+ else:
+ self.rcv_info (obj.bytes);
+
+ self.connection.close();
+ self.connection = None
+
+
+class CServer(CClientServerCommon) :
+ def __init__ (self,pcapr,port):
+ super(CServer, self).__init__()
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+
+ server_address = ('', port)
+ print 'starting up on %s port %s' % server_address
+ sock.bind(server_address)
+ sock.listen(1)
+
+ self.pcapr=pcapr; # save the info
+
+ while True:
+ # Wait for a connection
+ print 'waiting for a connection'
+ connection, client_address = sock.accept()
+
+ try:
+ print 'connection from', client_address
+ self.connection = connection;
+
+ self.process(True);
+ finally:
+ if self.connection :
+ self.connection.close()
+ self.connection = None
+
+
+class CClient(CClientServerCommon):
+ def __init__ (self,pcapr,ip,port):
+ super(CClient, self).__init__()
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ #sock.setsockopt(socket.SOL_SOCKET,socket.TCP_MAXSEG,300)
+ server_address = (ip, port)
+ print 'connecting to %s port %s' % server_address
+
+ sock.connect(server_address)
+ self.connection=sock;
+ self.pcapr=pcapr; # save the info
+
+ try:
+
+ self.process(False);
+ finally:
+ if self.connection :
+ self.connection.close()
+ self.connection = None
+
+
+def test_file_load ():
+ pcapr= CPcapFileReader("delay_10_http_browsing_0.pcap")
+ pcapr.check_tcp_flow ()
+ pcapr.check_one_flow ()
+ pcapr.dump_info();
+
+
+def process_options ():
+ parser = argparse.ArgumentParser(usage="""
+ sock [-s|-c] -f file_name
+
+ """,
+ description="offline process a pcap file",
+ epilog=" written by hhaim");
+
+ parser.add_argument("-f", dest="file_name",
+ help=""" the file name to process """,
+ required=True)
+
+ parser.add_argument('-c', action='store_true',
+ help='client side')
+
+ parser.add_argument('-s', action='store_true',
+ help='server side ')
+
+ parser.add_argument('--fix-time', action='store_true',
+ help='fix_time ')
+
+ parser.add_argument('--port', type=int, default=1000,
+ help='server_port ')
+
+ parser.add_argument('--ip', default='127.0.0.1',
+ help='socket ip ')
+
+ parser.add_argument('--debug', action='store_true',
+ help='debug mode')
+
+ parser.add_argument('--version', action='version',
+ version=H_SCRIPT_VER )
+
+
+
+ sock_driver.args = parser.parse_args();
+
+ if sock_driver.args.fix_time :
+ return ;
+ if (sock_driver.args.c ^ sock_driver.args.s) ==0:
+ raise Exception ("you must set either client or server mode");
+
+def load_pcap_file ():
+ pcapr= CPcapFileReader(sock_driver.args.file_name)
+ pcapr.check_tcp_flow ()
+ pcapr.check_one_flow ()
+ pcapr.dump_info();
+ return pcapr
+
+def run_client_side ():
+ pcapr=load_pcap_file ()
+ socket_client = CClient(pcapr,sock_driver.args.ip,sock_driver.args.port);
+
+
+def run_server_side ():
+ pcapr=load_pcap_file ()
+ socket_server = CServer(pcapr,sock_driver.args.port);
+
+
+class CPktWithTime:
+ def __init__ (self,pkt,ts):
+ self.pkt=pkt;
+ self.ts=ts
+ def __cmp__ (self,other):
+ return cmp(self.ts,other.ts);
+
+ def __repr__ (self):
+ s=" %x:%d" %(self.pkt,self.ts)
+ return s;
+
+
+class CPcapFixTime:
+ def __init__ (self,in_file_name,
+ out_file_name):
+ self.in_file_name = in_file_name;
+ self.out_file_name = out_file_name;
+ self.tuple=None;
+ self.swap=False;
+ self.rtt =0;
+ self.rtt_syn_ack_ack =0; # ack on the syn ack
+ self.pkts=[]
+
+ def calc_rtt (self):
+ f = open(self.in_file_name)
+ pcap = dpkt.pcap.Reader(f)
+ cnt=0;
+ first_time_set=False;
+ first_time=0;
+ last_syn_time=0;
+ rtt=0;
+ rtt_syn_ack_ack=0;
+
+ for ts, buf in pcap:
+ eth = dpkt.ethernet.Ethernet(buf)
+ ip = eth.data
+ tcp = ip.data
+
+ if first_time_set ==False:
+ first_time=ts;
+ first_time_set=True;
+ else:
+ rtt=ts-first_time;
+
+ if ip.p != 6 :
+ raise Exception("not a TCP flow ..");
+
+ if cnt==0 or cnt==1:
+ if (tcp.flags & dpkt.tcp.TH_SYN) != dpkt.tcp.TH_SYN :
+ raise Exception("first packet should be with SYN");
+
+ if cnt==1:
+ last_syn_time=ts;
+
+ if cnt==2:
+ rtt_syn_ack_ack=ts-last_syn_time;
+
+ if cnt > 1 :
+ break;
+ cnt = cnt +1;
+
+ f.close();
+ self.rtt_syn_ack_ack = rtt_syn_ack_ack;
+ return (rtt);
+
+ def is_client_side (self,swap):
+ if self.swap ==swap:
+ return (True);
+ else:
+ return (False);
+
+ def calc_timing (self):
+ self.rtt=self.calc_rtt ();
+
+ def fix_timing (self):
+
+ rtt=self.calc_rtt ();
+ print "RTT is %f msec" % (rtt*1000)
+
+ if (rtt/2)*1000<5:
+ raise Exception ("RTT is less than 5msec, you should replay it");
+
+ time_to_center=rtt/4;
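+ # shift client packets forward and server packets back by rtt/4 each,
+ # so the capture appears to be taken at the mid-point between client and server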
+
+ f = open(self.in_file_name)
+ fo = open(self.out_file_name,"wb")
+ pcap = dpkt.pcap.Reader(f)
+ pcap_out = dpkt.pcap.Writer(fo)
+
+ for ts, buf in pcap:
+ eth = dpkt.ethernet.Ethernet(buf)
+ ip = eth.data
+ tcp = ip.data
+ pld = tcp.data;
+
+ pkt_swap=False
+ if nl(ip.src) > nl(ip.dst):
+ pkt_swap=True
+ tuple= (nl(ip.dst),nl(ip.src), tcp.dport ,tcp.sport,ip.p );
+ else:
+ tuple= (nl(ip.src),nl(ip.dst) ,tcp.sport,tcp.dport,ip.p );
+
+ if self.tuple == None:
+ self.swap=pkt_swap
+ self.tuple=tuple
+ else:
+ if self.tuple != tuple:
+ raise Exception("More than one flow - can't process this flow");
+
+ if self.is_client_side (pkt_swap):
+ self.pkts.append(CPktWithTime( buf,ts+time_to_center));
+ else:
+ self.pkts.append(CPktWithTime( buf,ts-time_to_center));
+
+ self.pkts.sort();
+ for pkt in self.pkts:
+ pcap_out.writepkt(pkt.pkt, pkt.ts)
+
+ f.close()
+ fo.close();
+
+
+
+def main ():
+ process_options ()
+
+ if sock_driver.args.fix_time:
+ pcap = CPcapFixTime(sock_driver.args.file_name ,sock_driver.args.file_name+".fix.pcap")
+ pcap.fix_timing ()
+
+ if sock_driver.args.c:
+ run_client_side ();
+
+ if sock_driver.args.s:
+ run_server_side ();
+
+
+files_to_convert=[
+'citrix_0',
+'exchange_0',
+'http_browsing_0',
+'http_get_0',
+'http_post_0',
+'https_0',
+'mail_pop_0',
+'mail_pop_1',
+'mail_pop_2',
+'oracle_0',
+'rtsp_0',
+'smtp_0',
+'smtp_1',
+'smtp_2'
+];
+
+
+#files_to_convert=[
+#'http_browsing_0',
+#];
+
+def test_pcap_file ():
+ for file in files_to_convert:
+ fn='tun_'+file+'.pcap';
+ fno='_tun_'+file+'_fixed.pcap';
+ print "convert ",fn
+ pcap = CPcapFixTime(fn,fno)
+ pcap.fix_timing ()
+
+
+
+
+class CPcapFileState:
+ def __init__ (self,file_name):
+ self.file_name = file_name
+ self.is_one_tcp_flow = False;
+ self.is_rtt_valid = False;
+ self.rtt=0;
+ self.rtt_ack=0;
+
+ def calc_stats (self):
+ file = CPcapFileReader(self.file_name);
+ try:
+ file.check_tcp_flow()
+ file.check_one_flow ()
+ self.is_one_tcp_flow = True;
+ except Exception :
+ self.is_one_tcp_flow = False;
+
+ print self.is_one_tcp_flow
+ if self.is_one_tcp_flow :
+ pcap= CPcapFixTime(self.file_name,"");
+ try:
+ pcap.calc_timing ()
+ print "rtt : %d %d \n" % (pcap.rtt*1000,pcap.rtt_syn_ack_ack*1000);
+ if (pcap.rtt*1000) > 10 and (pcap.rtt_syn_ack_ack*1000) >0.0 and (pcap.rtt_syn_ack_ack*1000) <2.0 :
+ self.is_rtt_valid = True
+ self.rtt = pcap.rtt*1000;
+ self.rtt_ack =pcap.rtt_syn_ack_ack*1000;
+ except Exception :
+ pass;
+
+
+def test_pcap_file (file_name):
+ p= CPcapFileState(file_name)
+ p.calc_stats();
+ if p.is_rtt_valid:
+ return True
+ else:
+ return False
+
+def iterate_tree_files (dirwalk,path_to):
+ fl=open("res.csv","w+");
+ cnt=0;
+ cnt_valid=0
+ for root, _, files in os.walk(dirwalk):
+ for f in files:
+ fullpath = os.path.join(root, f)
+ p= CPcapFileState(fullpath)
+ p.calc_stats();
+
+ valid=test_pcap_file (fullpath)
+ s='%s,%d,%d,%d \n' %(fullpath,p.is_rtt_valid,p.rtt,p.rtt_ack)
+ cnt = cnt +1 ;
+ if p.is_rtt_valid:
+ cnt_valid = cnt_valid +1;
+ diro=path_to+"/"+root;
+ fo = os.path.join(diro, f)
+ os.system("mkdir -p "+ diro);
+ pcap = CPcapFixTime(fullpath,fo)
+ pcap.fix_timing ()
+
+ print s
+ fl.write(s);
+ print " %d %% %d valids \n" % (100*cnt_valid/cnt,cnt);
+ fl.close();
+
+path_code="/scratch/tftp/pFidelity/pcap_repository"
+
+iterate_tree_files (path_code,"output")
+#test_pcap_file ()
+#test_pcap_file ()
+#main();
+
diff --git a/scripts/automation/trex_control_plane/unit_tests/test.py b/scripts/automation/trex_control_plane/unit_tests/test.py
new file mode 100755
index 00000000..dac765d6
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/test.py
@@ -0,0 +1,36 @@
+from mininet.topo import Topo
+from mininet.link import TCLink
+from mininet.net import Mininet
+from mininet.node import CPULimitedHost
+from mininet.link import TCLink
+from mininet.util import dumpNodeConnections
+from mininet.log import setLogLevel
+
+class MyTopo( Topo ):
+ "Simple topology example."
+
+ def __init__( self ):
+ "Create custom topo."
+
+ # Initialize topology
+ Topo.__init__( self )
+
+ # Add hosts and switches
+ leftHost = self.addHost( 'h1' )
+ rightHost = self.addHost( 'h2' )
+ Switch = self.addSwitch( 's1' )
+
+ # Add links
+ self.addLink( leftHost, Switch ,bw=10, delay='5ms')
+ self.addLink( Switch, rightHost )
+
+
+topos = { 'mytopo': ( lambda: MyTopo() ) }
+
+# 1. http server example
+#
+#mininet> h1 python -m SimpleHTTPServer 80 &
+#mininet> h2 wget -O - h1
+# 2. limit mss example
+#decrease the MTU ifconfig eth0 mtu 488
+
diff --git a/scripts/automation/trex_perf.py b/scripts/automation/trex_perf.py
new file mode 100755
index 00000000..08d15caf
--- /dev/null
+++ b/scripts/automation/trex_perf.py
@@ -0,0 +1,1266 @@
+#!/router/bin/python-2.7.4
+import h_avc
+
+
+import ConfigParser
+import threading
+import time,signal
+import argparse
+import sys
+import os
+sys.path.append(os.path.join('trex_control_plane', 'stf', 'trex_stf_lib'))
+from trex_client import CTRexClient
+import subprocess
+from time import sleep
+import signal
+import textwrap
+import getpass
+import random
+import datetime
+from datetime import timedelta
+import traceback
+import math
+import re
+import termios
+import errno
+import smtplib
+from email.MIMEMultipart import MIMEMultipart
+from email.MIMEBase import MIMEBase
+from email.MIMEText import MIMEText
+from email.Utils import COMMASPACE, formatdate
+from email import Encoders
+from email.mime.image import MIMEImage
+
+from distutils.version import StrictVersion
+
+class TrexRunException(Exception):
+ def __init__ (self, reason, cmd = None, std_log = None, err_log = None):
+ self.reason = reason
+ self.std_log = std_log
+ self.err_log = err_log
+ # generate the error message
+ self.message = "\nSummary of error:\n\n %s\n" % (reason)
+
+ if std_log:
+ self.message += "\nConsole Log:\n\n %s\n" % (self.std_log)
+
+ if err_log:
+ self.message += "\nStd Error Log:\n\n %s\n" % (self.err_log)
+
+ def __str__(self):
+ return self.message
+
+
+############################# utility functions start #################################
+
+def verify_glibc_version ():
+ x = subprocess.check_output("/usr/bin/ldd --version", shell=True)
+ m = re.match("^ldd \([^\)]+\) (.*)", x)
+ if not m:
+ raise Exception("Cannot determine LDD version")
+ current_version = m.group(1)
+
+ if StrictVersion(current_version) < StrictVersion("2.5"):
+ raise Exception("GNU ldd version required for graph plotting is at least 2.5, system is %s - please run simple 'find'" % current_version)
+
+def get_median(numericValues):
+ theValues = sorted(numericValues)
+ if len(theValues) % 2 == 1:
+ return theValues[(len(theValues)+1)/2-1]
+ else:
+ lower = theValues[len(theValues)/2-1]
+ upper = theValues[len(theValues)/2]
+ return (float(lower + upper)) / 2
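+
+# e.g. get_median([1, 3, 2]) -> 2 ; get_median([1, 2, 3, 4]) -> 2.5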
+
+def list_to_clusters(l, n):
+ for i in xrange(0, len(l), n):
+ yield l[i:i+n]
+
+def cpu_histo_to_str (cpu_histo):
+ s = "\nCPU Samplings:\n\n"
+ period = 0
+
+ clusters = list(list_to_clusters(cpu_histo, 10))
+
+ for cluster in clusters:
+ period += 10
+ line = "%3s Seconds: [" % period
+
+ cluster += (10 - len(cluster)) * [None]
+
+ for x in cluster:
+ if (x != None):
+ line += "%5.1f%%, " % x
+ else:
+ line += " "
+
+ line = line[:-2] # trim the comma and space
+ line += " " # return the space
+
+ line += "]\n"
+
+ s += line
+
+ return s
+
+# Terminal Manager Class
+class TermMng:
+ def __enter__(self):
+ self.fd = sys.stdin.fileno()
+ self.old = termios.tcgetattr(self.fd)
+
+ # copy new and remove echo
+ new = self.old[:]
+ new[3] &= ~termios.ECHO
+
+ self.tcsetattr_flags = termios.TCSAFLUSH
+ if hasattr(termios, 'TCSASOFT'):
+ self.tcsetattr_flags |= termios.TCSASOFT
+
+ termios.tcsetattr(self.fd, self.tcsetattr_flags, new)
+
+ def __exit__ (self ,type, value, traceback):
+ termios.tcsetattr(self.fd, self.tcsetattr_flags, self.old)
+
+############################# utility functions stop #################################
+
+def send_mail(send_from, send_to, subject, html_text, txt_attachments=[], images=[], server="localhost"):
+ assert isinstance(send_to, list)
+ assert isinstance(txt_attachments, list)
+ assert isinstance(images, list)
+
+ # create a multi part message
+ msg = MIMEMultipart()
+ msg['From'] = send_from
+ msg['To'] = COMMASPACE.join(send_to)
+ msg['Date'] = formatdate(localtime=True)
+ msg['Subject'] = subject
+ msg['Cc'] = "imarom@cisco.com"
+
+ # add all images to the text as embedded images
+ for image in images:
+ html_text += '<br><img src="cid:{0}"><br>'.format(image)
+ fp = open(image, 'rb')
+ image_object = MIMEImage(fp.read())
+ fp.close()
+ image_object.add_header('Content-ID', image)
+ msg.attach(image_object)
+
+ # attach the main report as embedded HTML
+ msg.attach( MIMEText(html_text, 'html') )
+
+ # attach regular txt files
+ for f in txt_attachments:
+ part = MIMEBase('application', "octet-stream")
+ part.set_payload( open(f,"rb").read() )
+ Encoders.encode_base64(part)
+ part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
+ msg.attach(part)
+
+ smtp = smtplib.SMTP(server)
+ smtp.sendmail(send_from, send_to, msg.as_string())
+ smtp.close()
+
+# convert HTML to image - returning a image file as a string
+def html2image (html_filename, image_filename):
+ cmd = "./phantom/phantomjs ./phantom/rasterize.js {0} {1}".format(html_filename, image_filename)
+ subprocess.call(cmd, shell=True)
+
+ assert os.path.exists(image_filename)
+
+ return (image_filename)
+
+# convert results of run to a string
+def run_results_to_str (results, cond_type):
+ output = ""
+
+ output += "M: {0:<12.6f}\n".format(results['m'])
+ output += "BW: {0:<12,.2f} [Mbps]\n".format(results['tx'])
+ output += "PPS: {0:<12,} [pkts]\n".format(int(results['total-pps']))
+ output += "CPU: {0:.4f} %\n".format(results['cpu_util'])
+ output += "Maximum Latency: {0:<12,} [usec]\n".format(int(results['maximum-latency']))
+ output += "Average Latency: {0:<12,} [usec]\n".format(int(results['average-latency']))
+ output += "Pkt Drop: {0:<12,} [pkts]\n".format(int(results['total-pkt-drop']))
+ output += "Condition: {0:<12} ({1})\n".format("Passed" if check_condition(cond_type, results) else "Failed", cond_type_to_str(cond_type))
+
+ return (output)
+
+############################# classes #################################
+class ErrorHandler(object):
+ def __init__ (self, exception, traceback):
+
+ if isinstance(exception, TrexRunException):
+ logger.log("\n*** Script Terminated Due To Trex Failure")
+ logger.log("\n********************** TRex Error - Report **************************\n")
+ logger.log(str(exception))
+ logger.flush()
+
+ elif isinstance(exception, IOError):
+ logger.log("\n*** Script Terminated Due To IO Error")
+ logger.log("\nEither Router address or the Trex config is bad or some file is missing - check traceback below")
+ logger.log("\n********************** IO Error - Report **************************\n")
+ logger.log(str(exception))
+ logger.log(str(traceback))
+ logger.flush()
+
+
+ else:
+ logger.log("\n*** Script Terminated Due To Fatal Error")
+ logger.log("\n********************** Internal Error - Report **************************\n")
+ logger.log(str(exception) + "\n")
+ logger.log(str(traceback))
+ logger.flush()
+
+
+ # call the handler
+ g_kill_cause = "error"
+ os.kill(os.getpid(), signal.SIGUSR1)
+
+
+# simple HTML table
+class HTMLTable:
+ def __init__ (self):
+ self.table_rows = []
+
+ def add_row (self, param, value):
+ self.table_rows.append([param, value])
+
+ def generate_table(self):
+ txt = '<table class="myWideTable" style="width:50%">'
+ txt += "<tr><th>Parameter</th><th>Results</th></tr>"
+
+ for row in self.table_rows:
+ txt += "<tr><td>{0}</td><td>{1}</td></tr>".format(row[0], row[1])
+
+ txt += "</table>"
+
+ return txt
+
+# process results and dispatch it
+class JobReporter:
+ def __init__ (self, job_summary):
+ self.job_summary = job_summary
+ pass
+
+ def __plot_results_to_str (self, plot_results):
+ output = "\nPlotted Points: \n\n"
+ for p in plot_results:
+ output += "BW : {0:8.2f}, ".format(p['tx'])
+ output += "PPS : {0:8,} ".format(int(p['total-pps']))
+ output += "CPU : {0:8.2f} %, ".format(p['cpu_util'])
+ output += "Max Latency : {0:10,}, ".format(int(p['maximum-latency']))
+ output += "Avg Latency : {0:10,}, ".format(int(p['average-latency']))
+ output += "Pkt Drop : {0:12,}, \n".format(int(p['total-pkt-drop']))
+
+ return (output + "\n")
+
+ def __summary_to_string (self):
+ output = ""
+
+ output += "\n-== Job Completed Successfully ==-\n\n"
+ output += "Job Report:\n\n"
+ output += "Job Name: {0}\n".format(self.job_summary['job_name'])
+ output += "YAML file: {0}\n".format(self.job_summary['yaml'])
+ output += "Job Type: {0}\n".format(self.job_summary['job_type_str'])
+ output += "Condition: {0}\n".format(self.job_summary['cond_name'])
+ output += "Job Dir: {0}\n".format(self.job_summary['job_dir'])
+ output += "Job Log: {0}\n".format(self.job_summary['log_filename'])
+ output += "Email Report: {0}\n".format(self.job_summary['email'])
+ output += "Job Total Time: {0}\n\n".format(self.job_summary['total_run_time'])
+
+ if self.job_summary.get('find_results') is not None:
+ find_results = self.job_summary['find_results']
+ output += ("Maximum BW Point Details:\n\n")
+ output += run_results_to_str(find_results, self.job_summary['cond_type'])
+
+ if self.job_summary.get('plot_results') is not None:
+ plot_results = self.job_summary['plot_results']
+ output += self.__plot_results_to_str(plot_results)
+
+ return output
+
+
+ # simple print to screen of the job summary
+ def print_summary (self):
+ summary = self.__summary_to_string()
+ logger.log(summary)
+
+ def __generate_graph_report (self, plot_results):
+ graph_data = str( [ [x['tx'], x['cpu_util']/100, x['maximum-latency'], x['average-latency']] for x in plot_results ] )
+ table_data = str( [ [x['tx'], x['total-pps'], x['cpu_util']/100, x['norm_cpu'], x['maximum-latency'], x['average-latency'], x['total-pkt-drop']] for x in plot_results ] )
+
+ with open ("graph_template.html", "r") as myfile:
+ data = myfile.read()
+ data = data.replace("!@#$template_fill_head!@#$", self.job_summary['yaml'])
+ data = data.replace("!@#$template_fill_graph!@#$", graph_data[1:(len(graph_data) - 1)])
+ data = data.replace("!@#$template_fill_table!@#$", table_data[1:(len(table_data) - 1)])
+
+ # generate HTML report
+ graph_filename = self.job_summary['graph_filename']
+ with open(graph_filename, "w") as text_file:
+ text_file.write(str(data))
+
+ return graph_filename
+
+ def __generate_body_report (self):
+ job_setup_table = HTMLTable()
+
+ job_setup_table.add_row("User Name", self.job_summary['user'])
+ job_setup_table.add_row("Job Name", self.job_summary['job_name'])
+ job_setup_table.add_row("Job Type", self.job_summary['job_type_str'])
+ job_setup_table.add_row("Test Condition", self.job_summary['cond_name'])
+ job_setup_table.add_row("YAML File", self.job_summary['yaml'])
+ job_setup_table.add_row("Job Total Time", "{0}".format(self.job_summary['total_run_time']))
+
+ job_summary_table = HTMLTable()
+
+ find_results = self.job_summary['find_results']
+
+ if find_results is not None:
+ job_summary_table.add_row("Maximum Bandwidth", "{0:,.2f} [Mbps]".format(find_results['tx']))
+ job_summary_table.add_row("Maximum PPS", "{0:,} [pkts]".format(int(find_results['total-pps'])))
+ job_summary_table.add_row("CPU Util.", "{0:.2f}%".format(find_results['cpu_util']))
+ job_summary_table.add_row("Maximum Latency", "{0:,} [usec]".format(int(find_results['maximum-latency'])))
+ job_summary_table.add_row("Average Latency", "{0:,} [usec]".format(int(find_results['average-latency'])))
+ job_summary_table.add_row("Total Pkt Drop", "{0:,} [pkts]".format(int(find_results['total-pkt-drop'])))
+
+ with open ("report_template.html", "r") as myfile:
+ data = myfile.read()
+ data = data.replace("!@#$template_fill_job_setup_table!@#$", job_setup_table.generate_table())
+ data = data.replace("!@#$template_fill_job_summary_table!@#$", job_summary_table.generate_table())
+
+ return data
+
+ # create an email report and send to the user
+ def send_email_report (self):
+ images = []
+
+ logger.log("\nCreating E-Mail Report...\n")
+
+ # generate main report
+ report_str = self.__generate_body_report()
+
+ # generate graph report (if exists)
+ plot_results = self.job_summary['plot_results']
+ if plot_results:
+ logger.log("Generating Plot Results HTML ...\n")
+ graph_filename = self.__generate_graph_report(plot_results)
+ logger.log("Converting HTML to image ...\n")
+ images.append(html2image(graph_filename, graph_filename + ".png"))
+
+ else:
+ graph_filename = None
+
+ # create email
+ from_addr = 'TrexReporter@cisco.com'
+ to_addr = []
+ to_addr.append(self.job_summary['email'])
+ to_addr.append('imarom@cisco.com')
+
+ attachments = []
+ attachments.append(self.job_summary['log_filename'])
+ logger.log("Attaching log {0}...".format(self.job_summary['log_filename']))
+
+ if graph_filename:
+ attachments.append(graph_filename)
+ logger.log("Attaching plotting report {0}...".format(graph_filename))
+
+ logger.flush()
+
+ send_mail(from_addr, to_addr, "TRex Performance Report", report_str, attachments, images)
+ logger.log("\nE-mail sent successfully to: " + self.job_summary['email'])
+
+# dummy logger in case logger creation failed
+class DummyLogger(object):
+ def __init__(self):
+ pass
+
+ def log(self, text, force = False, newline = True):
+ text_out = (text + "\n") if newline else text
+ sys.stdout.write(text_out)
+
+ def console(self, text, force = False, newline = True):
+ self.log(text, force, newline)
+
+ def flush (self):
+ pass
+
+# logger object
+class MyLogger(object):
+
+ def __init__(self, log_filename):
+ # Store the original stdout and stderr
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ self.stdout_fd = os.dup(sys.stdout.fileno())
+ self.devnull = os.open('/dev/null', os.O_WRONLY)
+ self.log_file = open(log_filename, 'w')
+ self.silenced = False
+ self.pending_log_file_prints = 0
+ self.active = True
+
+ def shutdown (self):
+ self.active = False
+
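+ # re-enable logging after shutdown()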
+ def reactive (self):
+ self.active = True
+
+ # silence all prints from stdout
+ def silence(self):
+ os.dup2(self.devnull, sys.stdout.fileno())
+ self.silenced = True
+
+ # restore stdout status
+ def restore(self):
+ sys.stdout.flush()
+ sys.stderr.flush()
+ # Restore normal stdout
+ os.dup2(self.stdout_fd, sys.stdout.fileno())
+ self.silenced = False
+
+ # print a message to the log (both stdout and the log file)
+ def log(self, text, force = False, newline = True):
+ if not self.active:
+ return
+
+ self.log_file.write((text + "\n") if newline else text)
+ self.pending_log_file_prints += 1
+
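+ # flush the log file to disk every 10 writes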
+ if (self.pending_log_file_prints >= 10):
+ self.log_file.flush()
+ self.pending_log_file_prints = 0
+
+ self.console(text, force, newline)
+
+ # print a message to the console alone
+ def console(self, text, force = False, newline = True):
+ if not self.active:
+ return
+
+ _text = (text + "\n") if newline else text
+
+ # if we are silenced and not forced - go home
+ if self.silenced and not force:
+ return
+
+ if self.silenced:
+ os.write(self.stdout_fd, _text)
+ else:
+ sys.stdout.write(_text)
+
+ sys.stdout.flush()
+
+ # flush
+ def flush(self):
+ sys.stdout.flush()
+ self.log_file.flush()
+
+ def __del__(self):
+ os.close(self.devnull)
+ if self.log_file:
+ self.log_file.flush()
+ self.log_file.close()
+
+
+# simple progress bar
+class ProgressBar(threading.Thread):
+ def __init__(self, time_sec, router):
+ super(ProgressBar, self).__init__()
+ self.active = True
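+ # pad the nominal duration so the bar can outlive the run itself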
+ self.time_sec = time_sec + 15
+ self.router = router
+
+ def run (self):
+ global g_stop
+
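+ # progress bar width in characters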
+ col = 40
+ delta_for_sec = float(col) / self.time_sec
+
+ accu = 0.0
+
+ for i in range(self.time_sec):
+ if not self.active:
+ # print 100% - done
+ bar = "\r[" + ('#' * col) + "] {0:.2f} %".format(100)
+ logger.console(bar, force = True, newline = False)
+ break
+
+ if g_stop:
+ break
+
+ sleep(1)
+ accu += delta_for_sec
+ bar = "\r[" + ('#' * int(accu)) + (' ' * (col - int(accu))) + "] {0:.2f} %".format( (accu/col) * 100 )
+ bar += " / Router CPU: {0:.2f} %".format(self.router.get_last_cpu_util())
+ logger.console(bar, force = True, newline = False)
+
+ logger.console("\r\n", force = True, newline = False)
+ logger.flush()
+
+ def stop (self):
+ self.active = False
+ self.join()
+
+# global vars
+
+g_stop = False
+logger = DummyLogger()
+
+# cleanup list is a list of callables to be run when Ctrl+C is caught
+cleanup_list = []
+
+################ threads ########################
+
+# router statistics sampler - polls the router once per second
+class Sample_Thread (threading.Thread):
+ def __init__(self, threadID, router):
+
+ threading.Thread.__init__(self)
+ self.threadID = threadID
+ self.router = router
+ self.stop = False
+
+ def run(self):
+ self.router.clear_sampling_stats()
+
+ try:
+ while not self.stop and not g_stop:
+ self.router.sample_stats()
+ time.sleep(1)
+ except Exception as e:
+ ErrorHandler(e, traceback.format_exc())
+
+ def do_stop(self):
+ self.stop = True
+
+
+def general_cleanup_on_error ():
+ global g_stop
+ global cleanup_list
+
+ # mark all the threads to finish
+ g_stop = True
+
+ # shutdown and flush the logger
+ if logger:
+ logger.shutdown()
+ logger.flush()
+
+ # execute the registered callables
+ for c in cleanup_list:
+ c()
+
+ # dummy wait for threads to finish (TODO: make this smarter)
+ time.sleep(2)
+ exit(-1)
+
+# just a dummy for preventing chain calls
+def signal_handler_dummy (sig_id, frame):
+ pass
+
+def error_signal_handler (sig_id, frame):
+ # make sure no chain of calls
+ signal.signal(signal.SIGUSR1, signal_handler_dummy)
+ signal.signal(signal.SIGINT, signal_handler_dummy)
+
+ general_cleanup_on_error()
+
+def int_signal_handler(sig_id, frame):
+ # make sure no chain of calls
+ signal.signal(signal.SIGINT, signal_handler_dummy)
+ signal.signal(signal.SIGUSR1, signal_handler_dummy)
+
+ logger.log("\n\nCaught Cntrl+C... Cleaning up!\n\n")
+
+ general_cleanup_on_error()
+
+
+# TRex run wrapped with router statistics sampling
+class CTRexWithRouter:
+ def __init__(self, trex, trex_params):
+ self.trex = trex
+ self.trex_params = trex_params
+
+ if self.trex_params['router_type'] == "ASR":
+ self.router = h_avc.ASR1k(self.trex_params['router_interface'], self.trex_params['router_port'], self.trex_params['router_password'])
+ elif self.trex_params['router_type'] == "ISR":
+ self.router = h_avc.ISR(self.trex_params['router_interface'], self.trex_params['router_port'], self.trex_params['router_password'])
+ else:
+ raise Exception("unknown router type in config file")
+
+ self.router.connect()
+
+ def get_router (self):
+ return self.router
+
+ def run(self, m, duration):
+
+ self.sample_thread = Sample_Thread(1, self.router)
+ self.sample_thread.start()
+
+ # launch trex
+ try:
+ self.trex.start_trex(c = self.trex_params['trex_cores'],
+ m = m,
+ d = duration,
+ f = self.trex_params['trex_yaml_file'],
+ nc = True,
+ l = self.trex_params['trex_latency'],
+ limit_ports = self.trex_params['trex_limit_ports'])
+ self.trex.sample_to_run_finish(20) # collect trex-sample every 20 seconds.
+ except Exception:
+ self.sample_thread.do_stop() # signal the thread to stop
+ self.sample_thread.join() # wait for it to really stop
+ raise
+
+ self.sample_thread.do_stop() # signal the thread to stop
+ self.sample_thread.join() # wait for it to really stop
+
+ self.res = self.trex.get_result_obj()
+
+ results = {}
+ results['status'] = True
+ results['trex_results'] = self.res
+ results['avc_results'] = self.router.get_stats()
+
+ return (results)
+
+# sanity checks to verify the run really went well
+def sanity_test_run (trex_r, avc_r):
+ pass
+ #if (sum(avc_r['cpu_histo']) == 0):
+ #raise h_trex.TrexRunException("CPU utilization from router is zero, check connectivity")
+
+def _trex_run (job_summary, m, duration):
+
+ trex_thread = job_summary['trex_thread']
+
+ p = ProgressBar(duration, trex_thread.get_router())
+ p.start()
+
+ try:
+ results = trex_thread.run(m, duration)
+ except Exception:
+ p.stop()
+ raise
+
+ p.stop()
+
+ if results is None:
+ raise Exception("Failed to run Trex")
+
+ # fetch values
+ trex_r = results['trex_results']
+ avc_r = results['avc_results']
+
+ sanity_test_run(trex_r, avc_r)
+
+ res_dict = {}
+
+ res_dict['m'] = m
+ total_tx_bps = trex_r.get_last_value("trex-global.data.m_tx_bps")
+ res_dict['tx'] = total_tx_bps / (1000 * 1000) # extracted in bps, stored in Mbps
+
+ res_dict['cpu_util'] = avc_r['cpu_util']
+
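+ # normalized CPU: Mbps of traffic per 100% of router CPU (guard against division by zero)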
+ if int(res_dict['cpu_util']) == 0:
+ res_dict['norm_cpu'] = 1
+ else:
+ res_dict['norm_cpu'] = (res_dict['tx'] / res_dict['cpu_util']) * 100
+
+ res_dict['maximum-latency'] = max( trex_r.get_max_latency().values() )
+ res_dict['average-latency'] = trex_r.get_avg_latency()['all']
+
+ logger.log(cpu_histo_to_str(avc_r['cpu_histo']))
+
+ res_dict['total-pkt-drop'] = trex_r.get_total_drops()
+ res_dict['expected-bps'] = trex_r.get_expected_tx_rate()['m_tx_expected_bps']
+ res_dict['total-pps'] = get_median( trex_r.get_value_list("trex-global.data.m_tx_pps") )
+ res_dict['m_total_pkt'] = trex_r.get_last_value("trex-global.data.m_total_tx_pkts")
+
+ res_dict['latency_condition'] = job_summary['trex_params']['trex_latency_condition']
+
+ return res_dict
+
+def trex_run (job_summary, m, duration):
+ res = _trex_run (job_summary, m, duration)
+ return res
+
+
+def m_to_mbps (job_summary, m):
+ return (m * job_summary['base_m_unit'])
+
+# find the correct range of M
+def find_m_range (job_summary):
+
+ trex = job_summary['trex']
+ trex_config = job_summary['trex_params']
+
+ # if not provided - guess the correct range of bandwidth
+ if not job_summary['m_range']:
+ m_range = [0.0, 0.0]
+ # 1 Mbps -> 1 Gbps
+ LOW_TX = 1.0 * 1000 * 1000
+ MAX_TX = 1.0 * 1000 * 1000 * 1000
+
+ # for 10g go to 10g
+ if trex_config['trex_machine_type'] == "10G":
+ MAX_TX *= 10
+
+ # dual injection can potentially reach X2 speed
+ if trex_config['trex_is_dual']:
+ MAX_TX *= 2
+
+ else:
+ m_range = job_summary['m_range']
+ LOW_TX = m_range[0] * 1000 * 1000
+ MAX_TX = m_range[1] * 1000 * 1000
+
+
+ logger.log("\nSystem Settings - Min: {0:,} Mbps / Max: {1:,} Mbps".format(LOW_TX / (1000 * 1000), MAX_TX / (1000 * 1000)))
+ logger.log("\nTrying to get system minimum M and maximum M...")
+
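+ # short calibration run (M=1 for 30 seconds) to measure the expected bps of a single M unit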
+ res_dict = trex_run(job_summary, 1, 30)
+
+ # figure out low / high M
+ m_range[0] = LOW_TX / res_dict['expected-bps']
+ m_range[1] = MAX_TX / res_dict['expected-bps']
+
+
+ # return both the m_range and the base m unit for future calculation
+ results = {}
+ results['m_range'] = m_range
+ results['base_m_unit'] = res_dict['expected-bps'] /(1000 * 1000)
+
+ return (results)
+
+# calculate points between m_range[0] and m_range[1]
+def calculate_plot_points (job_summary, m_range, plot_count):
+
+ cond_type = job_summary['cond_type']
+ delta_m = (m_range[1] - m_range[0]) / plot_count
+
+ m_current = m_range[0]
+ m_end = m_range[1]
+
+ logger.log("\nStarting Plot Graph Task ...\n")
+ logger.log("Plotting Range Is From: {0:.2f} [Mbps] To: {1:.2f} [Mbps] Over {2} Points".format(m_to_mbps(job_summary, m_range[0]),
+ m_to_mbps(job_summary, m_range[1]),
+ plot_count))
+ logger.log("Delta Between Points is {0:.2f} [Mbps]".format(m_to_mbps(job_summary, delta_m)))
+ plot_points = []
+
+ duration = 180
+
+ point_num = 1
+
+ while point_num <= plot_count:
+ logger.log("\nPlotting Point [{0}/{1}]:\n".format(point_num, plot_count))
+ logger.log("Estimated BW ~= {0:,.2f} [Mbps]\n".format(m_to_mbps(job_summary, m_current)))
+ logger.log("M = {0:.6f}".format(m_current))
+ logger.log("Duration = {0} seconds\n".format(duration))
+
+ res_dict = trex_run(job_summary, m_current, duration)
+ print_trex_results(res_dict, cond_type)
+
+ plot_points.append(dict(res_dict))
+
+ m_current += delta_m
+ point_num += 1
+
+ # last point - make sure it's the maximum point
+ if point_num == plot_count:
+ m_current = m_range[1]
+
+ #print "waiting for system to stabilize ..."
+ #time.sleep(30);
+
+ return plot_points
+
+
+def cond_type_to_str (cond_type):
+ return "Max Latency" if cond_type=='latency' else "Pkt Drop"
+
+# success condition (latency or drop)
+def check_condition (cond_type, res_dict):
+ if cond_type == 'latency':
+ return res_dict['maximum-latency'] < res_dict['latency_condition']
+
+ # the drop condition is more complex - failure requires both high latency (condition + 2 msec) and more than 0.2% packet drop
+ elif cond_type == 'drop':
+ if (res_dict['maximum-latency'] > (res_dict['latency_condition']+2000) ) and (res_dict['total-pkt-drop'] > (0.002 * res_dict['m_total_pkt'])):
+ return False
+ else:
+ return True
+
+ assert 0, "unknown condition type: {0}".format(cond_type)
+
+def print_trex_results (res_dict, cond_type):
+ logger.log("\nRun Results:\n")
+ output = run_results_to_str(res_dict, cond_type)
+ logger.log(output)
+
+
+######################## describe a find job ########################
+class FindJob:
+ # init a job object with min / max
+ def __init__ (self, min_m, max_m, job_summary):
+ self.min = float(min_m)
+ self.max = float(max_m)
+ self.job_summary = job_summary
+ self.cond_type = job_summary['cond_type']
+ self.success_points = []
+ self.iter_num = 1
+ self.found = False
+ self.iter_duration = 300
+
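+ # relative distance between the current min and max M values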
+ def _distance (self):
+ return ( (self.max - self.min) / min(self.max, self.min) )
+
+ def time_to_end (self):
+ time_in_sec = (self.iters_to_end() * self.iter_duration)
+ return timedelta(seconds = time_in_sec)
+
+ def iters_to_end (self):
+ # simulate the remaining bisection: each step's direction is unknown,
+ # so pick randomly until the range narrows below 2%
+ ma = self.max
+ mi = self.min
+ iterations = 0
+
+ while True:
+ dist = (ma - mi) / min(ma, mi)
+ if dist < 0.02:
+ break
+ if random.choice(["up", "down"]) == "down":
+ ma = (ma + mi) / 2
+ else:
+ mi = (ma + mi) / 2
+
+ iterations += 1
+
+ return iterations
+
+ def _cur (self):
+ return ( (self.min + self.max) / 2 )
+
+ def _add_success_point (self, res_dict):
+ self.success_points.append(res_dict.copy())
+
+ def _is_found (self):
+ return (self.found)
+
+ def _next_iter_duration (self):
+ return (self.iter_duration)
+
+ # execute one bisection iteration at the midpoint M
+ def _execute (self):
+ # reset the found var before running
+ self.found = False
+
+ # run and print results
+ res_dict = trex_run(self.job_summary, self._cur(), self.iter_duration)
+
+ self.iter_num += 1
+ cur = self._cur()
+
+ if (self._distance() < 0.02):
+ if (check_condition(self.cond_type, res_dict)):
+ # distance < 2% and success - we are done
+ self.found = True
+ else:
+ # lower to 90% of current and retry
+ self.min = cur * 0.9
+ self.max = cur
+ else:
+ # success
+ if (check_condition(self.cond_type, res_dict)):
+ self.min = cur
+ else:
+ self.max = cur
+
+ if (check_condition(self.cond_type, res_dict)):
+ self._add_success_point(res_dict)
+
+ return res_dict
+
+ # find the maximum M that still satisfies the success condition
+ def find_max_m (self):
+
+ res_dict = {}
+ while not self._is_found():
+
+ logger.log("\n-> Starting Find Iteration #{0}\n".format(self.iter_num))
+ logger.log("Estimated BW ~= {0:,.2f} [Mbps]".format(m_to_mbps(self.job_summary, self._cur())))
+ logger.log("M = {0:.6f}".format(self._cur()))
+ logger.log("Duration = {0} seconds".format(self._next_iter_duration()))
+ logger.log("Current BW Range = {0:,.2f} [Mbps] / {1:,.2f} [Mbps]".format(m_to_mbps(self.job_summary, self.min), m_to_mbps(self.job_summary, self.max)))
+ logger.log("Est. Iterations Left = {0} Iterations".format(self.iters_to_end()))
+ logger.log("Est. Time Left = {0}\n".format(self.time_to_end()))
+
+ res_dict = self._execute()
+
+ print_trex_results(res_dict, self.cond_type)
+
+ find_results = res_dict.copy()
+ find_results['max_m'] = self._cur()
+ return (find_results)
+
+######################## describe a plot job ########################
+class PlotJob:
+ def __init__(self, findjob):
+ self.job_summary = findjob.job_summary
+
+ self.plot_points = list(findjob.success_points)
+ self.plot_points.sort(key = lambda item:item['tx'])
+
+ def plot (self, duration = 300):
+ return self.plot_points
+
+ # add points if needed
+ #iter = 0
+ #for point in self.success_points:
+ #iter += 1
+ #logger.log("\nPlotting Point [{0}/{1}]:\n".format(iter, self.plot_count))
+ #logger.log("Estimated BW ~= {0:,.2f} [Mbps]\n".format(m_to_mbps(self.job_summary, point['m'])))
+ #logger.log("M = {0:.6f}".format(point['m']))
+ #logger.log("Duration = {0} seconds\n".format(duration))
+
+ #res_dict = trex_run(self.job_summary, point['m'], duration)
+ #print_trex_results(res_dict, self.job_summary['cond_type'])
+
+ #self.plot_points.append(dict(res_dict))
+
+ #self.plot_points = list(self.success_points)
+
+ #print self.plot_points
+ #self.plot_points.sort(key = lambda item:item['m'])
+ #print self.plot_points
+
+ #return self.plot_points
+
+
+def generate_job_id ():
+ return (str(int(random.getrandbits(32))))
+
+def print_header ():
+ logger.log("--== TRex Performance Tool v1.0 (2014) ==--")
+
+# print startup summary
+def log_startup_summary (job_summary):
+
+ trex = job_summary['trex']
+ trex_config = job_summary['trex_params']
+
+ logger.log("\nWork Request Details:\n")
+ logger.log("Setup Details:\n")
+ logger.log("TRex Config File: {0}".format(job_summary['config_file']))
+ logger.log("Machine Name: {0}".format(trex_config['trex_name']))
+ logger.log("TRex Type: {0}".format(trex_config['trex_machine_type']))
+ logger.log("TRex Dual Int. Tx: {0}".format(trex_config['trex_is_dual']))
+ logger.log("Router Interface: {0}".format(trex_config['router_interface']))
+
+ logger.log("\nJob Details:\n")
+ logger.log("Job Name: {0}".format(job_summary['job_name']))
+ logger.log("YAML file: {0}".format(job_summary['yaml']))
+ logger.log("Job Type: {0}".format(job_summary['job_type_str']))
+ logger.log("Condition Type: {0}".format(job_summary['cond_name']))
+ logger.log("Job Log: {0}".format(job_summary['log_filename']))
+ logger.log("Email Report: {0}".format(job_summary['email']))
+
+# logger.log("\nTrex Command Used:\n{0}".format(trex.build_cmd(1, 10)))
+
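+# expected layout of the configuration file (values below are illustrative;
+# the keys are exactly those read by load_trex_config_params below):
+#
+# [trex]
+# machine_name = my-trex
+# machine_port = 8090
+# history_size = 100
+# latency_condition = 2500
+# latency = 1000
+# limit_ports = 4
+# cores = 2
+# machine_type = 10G
+# is_dual = yes
+#
+# [router]
+# port = 23           (optional)
+# interface = 10.0.0.1
+# password = my-password
+# type = ASR
+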
+def load_trex_config_params (filename, yaml_file):
+ config = {}
+
+ parser = ConfigParser.ConfigParser()
+
+ try:
+ parser.read(filename)
+
+ config['trex_name'] = parser.get("trex", "machine_name")
+ config['trex_port'] = parser.get("trex", "machine_port")
+ config['trex_history_size'] = parser.getint("trex", "history_size")
+
+ config['trex_latency_condition'] = parser.getint("trex", "latency_condition")
+ config['trex_yaml_file'] = yaml_file
+
+ # support legacy data
+ config['trex_latency'] = parser.getint("trex", "latency")
+ config['trex_limit_ports'] = parser.getint("trex", "limit_ports")
+ config['trex_cores'] = parser.getint("trex", "cores")
+ config['trex_machine_type'] = parser.get("trex", "machine_type")
+ config['trex_is_dual'] = parser.getboolean("trex", "is_dual")
+
+ # optional Trex parameters
+ if parser.has_option("trex", "config_file"):
+ config['trex_config_file'] = parser.get("trex", "config_file")
+ else:
+ config['trex_config_file'] = None
+
+ if parser.has_option("trex", "misc_params"):
+ config['trex_misc_params'] = parser.get("trex", "misc_params")
+ else:
+ config['trex_misc_params'] = None
+
+ # router section
+
+ if parser.has_option("router", "port"):
+ config['router_port'] = parser.get("router", "port")
+ else:
+ # default telnet port
+ config['router_port'] = 23
+
+ config['router_interface'] = parser.get("router", "interface")
+ config['router_password'] = parser.get("router", "password")
+ config['router_type'] = parser.get("router", "type")
+
+ except Exception as inst:
+ raise TrexRunException("\nBad configuration file: '{0}'\n\n{1}".format(filename, inst))
+
+ return config
+
+def prepare_for_run (job_summary):
+ global logger
+
+ # generate unique id
+ job_summary['job_id'] = generate_job_id()
+ job_summary['job_dir'] = "trex_job_{0}".format(job_summary['job_id'])
+
+ job_summary['start_time'] = datetime.datetime.now()
+
+ if not job_summary['email']:
+ job_summary['user'] = getpass.getuser()
+ job_summary['email'] = "{0}@cisco.com".format(job_summary['user'])
+
+ # create dir for reports
+ try:
+ job_summary['job_dir'] = os.path.abspath( os.path.join(os.getcwd(), 'logs', job_summary['job_dir']) )
+ os.makedirs( job_summary['job_dir'] )
+
+ except OSError as err:
+ if err.errno == errno.EACCES:
+ # fall back: try creating the directory under /tmp
+ job_summary['job_dir'] = os.path.join("/tmp/", "trex_job_{0}".format(job_summary['job_id']) )
+ os.makedirs(job_summary['job_dir'])
+ else:
+ raise
+
+ job_summary['log_filename'] = os.path.join(job_summary['job_dir'], "trex_log_{0}.txt".format(job_summary['job_id']))
+ job_summary['graph_filename'] = os.path.join(job_summary['job_dir'], "trex_graph_{0}.html".format(job_summary['job_id']))
+
+ # init logger
+ logger = MyLogger(job_summary['log_filename'])
+
+ # mark those as not populated yet
+ job_summary['find_results'] = None
+ job_summary['plot_results'] = None
+
+ # create trex client instance
+ trex_params = load_trex_config_params(job_summary['config_file'], job_summary['yaml'])
+ trex = CTRexClient(trex_host = trex_params['trex_name'],
+ max_history_size = trex_params['trex_history_size'],
+ trex_daemon_port = trex_params['trex_port'])
+
+ job_summary['trex'] = trex
+ job_summary['trex_params'] = trex_params
+
+ # create trex task thread
+ job_summary['trex_thread'] = CTRexWithRouter(trex, trex_params)
+
+ # in case of an error we need to call the remote cleanup
+ cleanup_list.append(trex.stop_trex)
+
+ # signal handler
+ signal.signal(signal.SIGINT, int_signal_handler)
+ signal.signal(signal.SIGUSR1, error_signal_handler)
+
+
+def after_run (job_summary):
+
+ job_summary['total_run_time'] = datetime.datetime.now() - job_summary['start_time']
+ reporter = JobReporter(job_summary)
+ reporter.print_summary()
+ reporter.send_email_report()
+
+def launch (job_summary):
+
+ prepare_for_run(job_summary)
+
+ print_header()
+
+ log_startup_summary(job_summary)
+
+ # find the correct M range if not provided
+ range_results = find_m_range(job_summary)
+
+ job_summary['base_m_unit'] = range_results['base_m_unit']
+
+ if job_summary['m_range']:
+ m_range = job_summary['m_range']
+ else:
+ m_range = range_results['m_range']
+
+ logger.log("\nJob Bandwidth Working Range:\n")
+ logger.log("Min M = {0:.6f} / {1:,.2f} [Mbps] \nMax M = {2:.6f} / {3:,.2f} [Mbps]".format(m_range[0], m_to_mbps(job_summary, m_range[0]), m_range[1], m_to_mbps(job_summary, m_range[1])))
+
+ # job time
+ findjob = FindJob(m_range[0], m_range[1], job_summary)
+ job_summary['find_results'] = findjob.find_max_m()
+
+ if job_summary['job_type'] == "all":
+ # plot points to graph
+ plotjob = PlotJob(findjob)
+ job_summary['plot_results'] = plotjob.plot()
+
+ after_run(job_summary)
+
+
+# populate the fields for run
+def populate_fields (job_summary, args):
+ job_summary['config_file'] = args.config_file
+ job_summary['job_type'] = args.job
+ job_summary['cond_type'] = args.cond_type
+ job_summary['yaml'] = args.yaml
+
+ if args.n:
+ job_summary['job_name'] = args.n
+ else:
+ job_summary['job_name'] = "Nameless"
+
+ # did the user provide an email?
+ if args.e:
+ job_summary['email'] = args.e
+ else:
+ job_summary['email'] = None
+
+ # did the user provide a range ?
+ if args.m:
+ job_summary['m_range'] = args.m
+ else:
+ job_summary['m_range'] = None
+
+ # some pretty shows
+ job_summary['cond_name'] = 'Drop Pkt' if (args.cond_type == 'drop') else 'High Latency'
+
+ if args.job == "find":
+ job_summary['job_type_str'] = "Find Max BW"
+ elif args.job == "plot":
+ job_summary['job_type_str'] = "Plot Graph"
+ else:
+ job_summary['job_type_str'] = "Find Max BW & Plot Graph"
+
+ if args.job != "find":
+ verify_glibc_version()
+
+
+
+# verify file exists for argparse
+def is_valid_file (parser, err_msg, filename):
+ if not os.path.exists(filename):
+ parser.error("{0}: '{1}'".format(err_msg, filename))
+ else:
+ return (filename) # return the validated filename
+
+def entry ():
+
+ global g_stop
+
+ job_summary = {}
+
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument("-n", help="Job Name",
+ type = str)
+
+ parser.add_argument("-m", help="M Range [default: auto calcuation]",
+ nargs = 2,
+ type = float)
+
+ parser.add_argument("-e", help="E-Mail for report [default: whoami@cisco.com]",
+ type = str)
+
+ parser.add_argument("-c", "--cfg", dest = "config_file", required = True,
+ help = "Configuration File For Trex/Router Pair",
+ type = lambda x: is_valid_file(parser, "config file does not exists",x))
+
+ parser.add_argument("job", help = "Job type",
+ type = str,
+ choices = ['find', 'plot', 'all'])
+
+ parser.add_argument("cond_type", help="type of failure condition",
+ type = str,
+ choices = ['latency','drop'])
+
+ parser.add_argument("-f", "--yaml", dest = "yaml", required = True,
+ help="YAML file to use", type = str)
+
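+ # example invocation (script name and option values are illustrative):
+ #   ./trex_perf.py -c config/my-setup.cfg -f my-traffic.yaml -n nightly -e user@cisco.com find latency
+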
+ args = parser.parse_args()
+
+ with TermMng():
+ try:
+ populate_fields(job_summary, args)
+ launch(job_summary)
+
+ except Exception as e:
+ ErrorHandler(e, traceback.format_exc())
+
+ logger.log("\nReport bugs to imarom@cisco.com\n")
+ g_stop = True
+
+def dummy_test ():
+ job_summary = {}
+ find_results = {}
+
+ job_summary['config_file'] = 'config/trex01-1g.cfg'
+ job_summary['yaml'] = 'dummy.yaml'
+ job_summary['email'] = 'imarom@cisco.com'
+ job_summary['job_name'] = 'test'
+ job_summary['job_type_str'] = 'test'
+
+ prepare_for_run(job_summary)
+
+ time.sleep(2)
+ job_summary['yaml'] = 'dummy.yaml'
+ job_summary['job_type'] = 'find'
+ job_summary['cond_name'] = 'Drop'
+ job_summary['cond_type'] = 'drop'
+ job_summary['job_id'] = 94817231
+
+
+ find_results['tx'] = 210.23
+ find_results['m'] = 1.292812
+ find_results['total-pps'] = 1000
+ find_results['cpu_util'] = 74.0
+ find_results['maximum-latency'] = 4892
+ find_results['average-latency'] = 201
+ find_results['total-pkt-drop'] = 0
+
+
+ findjob = FindJob(1,1,job_summary)
+ plotjob = PlotJob(findjob)
+ job_summary['plot_results'] = plotjob.plot()
+
+ job_summary['find_results'] = find_results
+ job_summary['plot_results'] = [{'cpu_util': 2.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 999980.0, 'average-latency': 85.0, 'tx': 0.00207*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 221.0},
+ {'cpu_util': 8.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 48500000.0, 'average-latency': 87.0, 'tx': 0.05005*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 279.0},
+ {'cpu_util': 14.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 95990000.0, 'average-latency': 92.0, 'tx': 0.09806*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 273.0},
+ {'cpu_util': 20.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 143490000.0, 'average-latency': 95.0, 'tx': 0.14613*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 271.0},
+ {'cpu_util': 25.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 190980000.0, 'average-latency': 97.0, 'tx': 0.1933*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 302.0},
+ {'cpu_util': 31.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 238480000.0, 'average-latency': 98.0, 'tx': 0.24213*1000, 'total-pkt-drop': 1.0, 'maximum-latency': 292.0},
+ {'cpu_util': 37.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 285970000.0, 'average-latency': 99.0, 'tx': 0.29011*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 344.0},
+ {'cpu_util': 43.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 333470000.0, 'average-latency': 100.0, 'tx': 0.3382*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 351.0},
+ {'cpu_util': 48.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 380970000.0, 'average-latency': 100.0, 'tx': 0.38595*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 342.0},
+ {'cpu_util': 54.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 428460000.0, 'average-latency': 19852.0, 'tx': 0.43438*1000, 'total-pkt-drop': 1826229.0, 'maximum-latency': 25344.0}]
+
+
+
+ after_run(job_summary)
+
+if __name__ == "__main__":
+ entry ()
+
diff --git a/scripts/automation/wkhtmltopdf-amd64 b/scripts/automation/wkhtmltopdf-amd64
new file mode 100755
index 00000000..a173d2cf
--- /dev/null
+++ b/scripts/automation/wkhtmltopdf-amd64
Binary files differ