Diffstat (limited to 'scripts')
-rwxr-xr-x scripts/automation/regression/functional_tests/trex_cfg_creator_test.py | 89
-rw-r--r-- scripts/automation/regression/setups/kiwi02/benchmark.yaml | 50
-rw-r--r-- scripts/automation/regression/setups/trex07/backup/benchmark.yaml | 244
-rw-r--r-- scripts/automation/regression/setups/trex07/backup/config.yaml | 66
-rw-r--r-- scripts/automation/regression/setups/trex07/benchmark.yaml | 173
-rw-r--r-- scripts/automation/regression/setups/trex07/config.yaml | 29
-rw-r--r-- scripts/automation/regression/setups/trex08/benchmark.yaml | 50
-rw-r--r-- scripts/automation/regression/setups/trex09/benchmark.yaml | 2
-rwxr-xr-x scripts/automation/regression/stateful_tests/trex_general_test.py | 7
-rw-r--r-- scripts/automation/regression/stateless_tests/stl_client_test.py | 8
-rw-r--r-- scripts/automation/regression/stateless_tests/stl_performance_test.py | 6
-rw-r--r-- scripts/automation/regression/stateless_tests/stl_rx_test.py | 16
-rw-r--r-- scripts/automation/trex_control_plane/stl/console/trex_tui.py | 4
-rw-r--r-- scripts/automation/trex_control_plane/stl/services/scapy_server/protocols.json | 194
-rwxr-xr-x scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py | 144
-rw-r--r-- scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py | 3
-rw-r--r-- scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py | 96
-rw-r--r-- scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_utils.py | 69
-rw-r--r-- scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py | 61
-rwxr-xr-x scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py | 46
-rwxr-xr-x scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py | 18
-rwxr-xr-x scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py | 12
-rwxr-xr-x scripts/dpdk_nic_bind.py | 32
-rwxr-xr-x scripts/dpdk_setup_ports.py | 373
-rw-r--r-- scripts/dumy_libs/libibverbs.so.1 | bin 0 -> 494224 bytes
-rw-r--r-- scripts/dumy_libs/libnl-3.so.200 | bin 0 -> 110096 bytes
-rw-r--r-- scripts/dumy_libs/libnl-route-3.so.200 | bin 0 -> 293640 bytes
-rwxr-xr-x scripts/exp/dns_ipv6_rxcheck-ex.erf | bin 304 -> 304 bytes
-rw-r--r-- scripts/exp/dns_ipv6_rxcheck.erf | bin 304 -> 304 bytes
-rwxr-xr-x scripts/exp/dns_rxcheck-ex.erf | bin 256 -> 256 bytes
-rw-r--r-- scripts/exp/dns_rxcheck.erf | bin 256 -> 256 bytes
-rwxr-xr-x scripts/exp/ipv6-0-ex.erf | bin 11200 -> 11200 bytes
-rw-r--r-- scripts/exp/ipv6-0.erf | bin 11200 -> 11200 bytes
-rwxr-xr-x scripts/exp/ipv6_vlan-0-ex.erf | bin 11200 -> 11200 bytes
-rw-r--r-- scripts/exp/ipv6_vlan-0.erf | bin 11200 -> 11200 bytes
-rwxr-xr-x scripts/exp/rtsp_short1_ipv6_rxcheck-ex.erf | bin 21560 -> 21560 bytes
-rw-r--r-- scripts/exp/rtsp_short1_ipv6_rxcheck.erf | bin 21560 -> 21560 bytes
-rwxr-xr-x scripts/exp/rtsp_short1_rxcheck-ex.erf | bin 20912 -> 20912 bytes
-rw-r--r-- scripts/exp/rtsp_short1_rxcheck.erf | bin 20912 -> 20912 bytes
-rwxr-xr-x scripts/find_python.sh | 51
-rwxr-xr-x scripts/t-rex-64 | 6
-rwxr-xr-x scripts/t-rex-64-debug-gdb | 7
42 files changed, 1479 insertions(+), 377 deletions(-)
diff --git a/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py b/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py
index ab6ab6f6..66cb666c 100755
--- a/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py
+++ b/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py
@@ -25,7 +25,7 @@ def compare_lines(golden, output):
raise CompareLinesNumDiff('Number of lines on golden is: %s, in output: %s\nGolden:\n%s\nGenerated:\n%s\n' % (len(golden_lines), len(output_lines), golden, output))
for line_num, (golden_line, output_line) in enumerate(zip(golden_lines, output_lines)):
if golden_line != output_line:
- raise CompareLinesDiff('Produced YAML differs from golden at line %s.Golden: %s <-> Output: %s' % (line_num + 1, golden_line, output_line))
+ raise CompareLinesDiff('Produced YAML differs from golden at line %s.\nGolden: %s <-> Output: %s' % (line_num + 1, golden_line, output_line))
def create_config(cpu_topology, interfaces, *args, **kwargs):
config = ConfigCreator(cpu_topology, interfaces, *args, **kwargs)
@@ -102,17 +102,17 @@ class TRexCfgCreator_Test:
version: 2
interfaces: ['0b:00.0', '03:00.0']
port_info:
- - dest_mac: [0x00, 0x0c, 0x29, 0x92, 0xf1, 0xca] # MAC OF LOOPBACK TO ITS DUAL INTERFACE
- src_mac: [0x00, 0x0c, 0x29, 0x92, 0xf1, 0xd4]
- - dest_mac: [0x00, 0x0c, 0x29, 0x92, 0xf1, 0xd4]
- src_mac: [0x00, 0x0c, 0x29, 0x92, 0xf1, 0xca]
+ - dest_mac: 00:0c:29:92:f1:ca # MAC OF LOOPBACK TO ITS DUAL INTERFACE
+ src_mac: 00:0c:29:92:f1:d4
+ - dest_mac: 00:0c:29:92:f1:d4
+ src_mac: 00:0c:29:92:f1:ca
platform:
master_thread_id: 0
latency_thread_id: 1
dual_if:
- socket: 0
- threads: [2]
+ threads: [2,3,4]
'''
output = create_config(cpu_topology, interfaces)
verify_master_core0(output)
@@ -291,33 +291,33 @@ class TRexCfgCreator_Test:
interfaces: ['02:00.0', '02:00.1', '84:00.0', '84:00.1', '05:00.0', '05:00.1']
port_bandwidth_gb: 40
port_info:
- - dest_mac: [0x02, 0x00, 0x02, 0x00, 0x00, 0x00]
- src_mac: [0x01, 0x00, 0x01, 0x00, 0x00, 0x00]
- - dest_mac: [0x01, 0x00, 0x01, 0x00, 0x00, 0x00]
- src_mac: [0x02, 0x00, 0x02, 0x00, 0x00, 0x00]
+ - dest_mac: 02:00:02:00:00:00
+ src_mac: 01:00:01:00:00:00
+ - dest_mac: 01:00:01:00:00:00
+ src_mac: 02:00:02:00:00:00
- - dest_mac: [0x04, 0x00, 0x04, 0x00, 0x00, 0x00]
- src_mac: [0x03, 0x00, 0x03, 0x00, 0x00, 0x00]
- - dest_mac: [0x03, 0x00, 0x03, 0x00, 0x00, 0x00]
- src_mac: [0x04, 0x00, 0x04, 0x00, 0x00, 0x00]
+ - dest_mac: 04:00:04:00:00:00
+ src_mac: 03:00:03:00:00:00
+ - dest_mac: 03:00:03:00:00:00
+ src_mac: 04:00:04:00:00:00
- - dest_mac: [0x06, 0x00, 0x06, 0x00, 0x00, 0x00]
- src_mac: [0x05, 0x00, 0x05, 0x00, 0x00, 0x00]
- - dest_mac: [0x05, 0x00, 0x05, 0x00, 0x00, 0x00]
- src_mac: [0x06, 0x00, 0x06, 0x00, 0x00, 0x00]
+ - dest_mac: 06:00:06:00:00:00
+ src_mac: 05:00:05:00:00:00
+ - dest_mac: 05:00:05:00:00:00
+ src_mac: 06:00:06:00:00:00
platform:
master_thread_id: 0
- latency_thread_id: 16
+ latency_thread_id: 12
dual_if:
- socket: 0
- threads: [1,17,2,18,3,19,4]
+ threads: [1,2,3,16,17,18,19]
- socket: 1
- threads: [8,24,9,25,10,26,11]
+ threads: [8,9,10,11,24,25,26]
- socket: 0
- threads: [20,5,21,6,22,7,23]
+ threads: [4,5,6,7,20,21,22]
'''
output = create_config(cpu_topology, interfaces)
verify_master_core0(output)
@@ -431,25 +431,25 @@ class TRexCfgCreator_Test:
interfaces: ['02:00.0', '02:00.1', '84:00.0', '84:00.1']
port_bandwidth_gb: 40
port_info:
- - dest_mac: [0x02, 0x00, 0x02, 0x00, 0x00, 0x00]
- src_mac: [0x01, 0x00, 0x01, 0x00, 0x00, 0x00]
- - dest_mac: [0x01, 0x00, 0x01, 0x00, 0x00, 0x00]
- src_mac: [0x02, 0x00, 0x02, 0x00, 0x00, 0x00]
+ - dest_mac: 02:00:02:00:00:00
+ src_mac: 01:00:01:00:00:00
+ - dest_mac: 01:00:01:00:00:00
+ src_mac: 02:00:02:00:00:00
- - dest_mac: [0x04, 0x00, 0x04, 0x00, 0x00, 0x00]
- src_mac: [0x03, 0x00, 0x03, 0x00, 0x00, 0x00]
- - dest_mac: [0x03, 0x00, 0x03, 0x00, 0x00, 0x00]
- src_mac: [0x04, 0x00, 0x04, 0x00, 0x00, 0x00]
+ - dest_mac: 04:00:04:00:00:00
+ src_mac: 03:00:03:00:00:00
+ - dest_mac: 03:00:03:00:00:00
+ src_mac: 04:00:04:00:00:00
platform:
master_thread_id: 0
latency_thread_id: 31
dual_if:
- socket: 0
- threads: [1,17,2,18,3,19,4,20,5,21,6,22,7,23,16]
+ threads: [1,2,3,4,5,6,7,16,17,18,19,20,21,22,23]
- socket: 1
- threads: [8,24,9,25,10,26,11,27,12,28,13,29,14,30,15]
+ threads: [8,9,10,11,12,13,14,15,24,25,26,27,28,29,30]
'''
output = create_config(cpu_topology, interfaces)
verify_master_core0(output)
@@ -563,25 +563,25 @@ class TRexCfgCreator_Test:
interfaces: ['02:00.0', '02:00.1', '05:00.0', '05:00.1']
port_bandwidth_gb: 40
port_info:
- - dest_mac: [0x02, 0x00, 0x02, 0x00, 0x00, 0x00]
- src_mac: [0x01, 0x00, 0x01, 0x00, 0x00, 0x00]
- - dest_mac: [0x01, 0x00, 0x01, 0x00, 0x00, 0x00]
- src_mac: [0x02, 0x00, 0x02, 0x00, 0x00, 0x00]
+ - dest_mac: 02:00:02:00:00:00
+ src_mac: 01:00:01:00:00:00
+ - dest_mac: 01:00:01:00:00:00
+ src_mac: 02:00:02:00:00:00
- - dest_mac: [0x04, 0x00, 0x04, 0x00, 0x00, 0x00]
- src_mac: [0x03, 0x00, 0x03, 0x00, 0x00, 0x00]
- - dest_mac: [0x03, 0x00, 0x03, 0x00, 0x00, 0x00]
- src_mac: [0x04, 0x00, 0x04, 0x00, 0x00, 0x00]
+ - dest_mac: 04:00:04:00:00:00
+ src_mac: 03:00:03:00:00:00
+ - dest_mac: 03:00:03:00:00:00
+ src_mac: 04:00:04:00:00:00
platform:
master_thread_id: 0
- latency_thread_id: 16
+ latency_thread_id: 8
dual_if:
- socket: 0
- threads: [1,17,2,18,3,19,4]
+ threads: [1,2,3,16,17,18,19]
- socket: 0
- threads: [20,5,21,6,22,7,23]
+ threads: [4,5,6,7,20,21,22]
'''
output = create_config(cpu_topology, interfaces)
verify_master_core0(output)
@@ -694,5 +694,6 @@ class TRexCfgCreator_Test:
@classmethod
def tearDownClass(cls):
- sys.path.remove(CTRexScenario.scripts_path)
+ if CTRexScenario.scripts_path in sys.path:
+ sys.path.remove(CTRexScenario.scripts_path)
del sys.modules['dpdk_setup_ports']
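
Note: the golden configs in this test now expect colon-separated MAC strings instead of YAML byte lists. A minimal sketch of that conversion (a hypothetical helper, not the actual ConfigCreator code):

def mac_bytes_to_str(mac_bytes):
    # [0x00, 0x0c, 0x29, 0x92, 0xf1, 0xca] -> '00:0c:29:92:f1:ca'
    return ':'.join('%02x' % b for b in mac_bytes)

assert mac_bytes_to_str([0x00, 0x0c, 0x29, 0x92, 0xf1, 0xca]) == '00:0c:29:92:f1:ca'
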
diff --git a/scripts/automation/regression/setups/kiwi02/benchmark.yaml b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
index e6621085..41688906 100644
--- a/scripts/automation/regression/setups/kiwi02/benchmark.yaml
+++ b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
@@ -246,3 +246,53 @@ test_CPU_benchmark:
bw_per_core : 1
+
+test_performance_vm_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 11.5
+ max: 13.1
+
+
+test_performance_vm_single_cpu_cached:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 22.0
+ max: 25.0
+
+
+
+test_performance_syn_attack_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 9.5
+ max: 11.5
+
+test_performance_vm_multi_cpus:
+ cfg:
+ core_count : 4
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 9.7
+ max: 12.5
+
+
+test_performance_vm_multi_cpus_cached:
+ cfg:
+ core_count : 4
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 19.0
+ max: 22.0
+
+test_performance_syn_attack_multi_cpus:
+ cfg:
+ core_count : 4
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 8.5
+ max: 10.5
+
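
Each new test_performance_* entry above defines a golden mpps-per-core window. A sketch of how a runner might enforce it, assuming the mapping comes straight from benchmark.yaml (check_golden is a hypothetical name):

def check_golden(avg_mpps_per_core, golden):
    # golden is the mpps_per_core_golden mapping loaded from benchmark.yaml
    if not (golden['min'] <= avg_mpps_per_core <= golden['max']):
        raise AssertionError('%.2f mpps/core is outside the golden window [%s, %s]'
                             % (avg_mpps_per_core, golden['min'], golden['max']))

check_golden(12.0, {'min': 11.5, 'max': 13.1})  # within the kiwi02 single-CPU window
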
diff --git a/scripts/automation/regression/setups/trex07/backup/benchmark.yaml b/scripts/automation/regression/setups/trex07/backup/benchmark.yaml
new file mode 100644
index 00000000..0dc340b0
--- /dev/null
+++ b/scripts/automation/regression/setups/trex07/backup/benchmark.yaml
@@ -0,0 +1,244 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+#### common templates ###
+
+stat_route_dict: &stat_route_dict
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+
+nat_dict: &nat_dict
+ clients_net_start : 16.0.0.0
+ client_acl_wildcard_mask : 0.0.0.255
+ dual_port_mask : 1.0.0.0
+ pool_start : 200.0.0.0
+ pool_netmask : 255.255.255.0
+
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 17
+ cores : 1
+ bw_per_core : 543.232
+
+
+test_routing_imix:
+ multiplier : 10
+ cores : 1
+ bw_per_core : 34.128
+
+
+test_routing_imix_64:
+ multiplier : 430
+ cores : 1
+ bw_per_core : 5.893
+
+
+test_static_routing_imix: &test_static_routing_imix
+ stat_route_dict : *stat_route_dict
+ multiplier : 8
+ cores : 1
+ bw_per_core : 34.339
+
+test_static_routing_imix_asymmetric: *test_static_routing_imix
+
+
+test_ipv6_simple:
+ multiplier : 9
+ cores : 2
+ bw_per_core : 19.064
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 6000
+ cores : 1
+ nat_opened : 500000
+ allow_timeout_dev : True
+ bw_per_core : 44.445
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning: *test_nat_simple
+
+
+test_nbar_simple:
+ multiplier : 7.5
+ cores : 2
+ bw_per_core : 17.174
+ nbar_classification:
+ rtp : 32.57
+ http : 30.25
+ oracle_sqlnet : 11.23
+ exchange : 10.80
+ citrix : 5.62
+ rtsp : 2.84
+ dns : 1.95
+ smtp : 0.57
+ pop3 : 0.36
+ ssl : 0.17
+ sctp : 0.13
+ sip : 0.09
+ unknown : 3.41
+
+
+test_rx_check_http: &rx_http
+ multiplier : 15000
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 39.560
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 49.237
+
+test_rx_check_http_negative_disabled:
+ << : *rx_http
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 10
+ cores : 3
+ rx_sample_rate : 16
+ bw_per_core : 16.082
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 19.198
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
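
The file above leans on YAML anchors (&name), aliases (*name) and merge keys (<<), so e.g. test_nat_simple_mode2 inherits every key of test_nat_simple_mode1. PyYAML resolves all of these at load time; a self-contained sketch:

import yaml

doc = '''
base: &rx_http
  multiplier     : 15000
  rx_sample_rate : 16
derived:
  << : *rx_http
  bw_per_core : 49.237
'''
data = yaml.safe_load(doc)
assert data['derived']['multiplier'] == 15000    # inherited via the merge key
assert data['derived']['bw_per_core'] == 49.237  # local override
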
diff --git a/scripts/automation/regression/setups/trex07/backup/config.yaml b/scripts/automation/regression/setups/trex07/backup/config.yaml
new file mode 100644
index 00000000..db6e9bf8
--- /dev/null
+++ b/scripts/automation/regression/setups/trex07/backup/config.yaml
@@ -0,0 +1,66 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - DNS name or IP of the TRex machine, used to ssh into the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback etc.)
+# * loopback - TRex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests; latency can spike)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname as it appears in the ______# CLI prefix
+# ip_address - the router's IP address, used to communicate with it
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interface configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+# The "clean" configuration is mandatory; the router loads it to run the basic test bench
+
+### TFTP configuration:
+# hostname - the TFTP server hostname
+# ip_address - the TFTP server's IP address
+# images_path - the relative path on the TFTP server where the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) expected to be received in the test
+
+trex:
+ hostname : csi-trex-07
+ cores : 4
+
+router:
+ model : ASR1001x
+ hostname : csi-asr-01
+ ip_address : 10.56.216.120
+ image : asr1001x-universalk9.03.13.02.S.154-3.S2-ext.SPA.bin
+ line_password : cisco
+ en_password : cisco
+ mgmt_interface : GigabitEthernet0
+ clean_config : clean_config.cfg
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : Te0/0/0
+ src_mac_addr : 0000.0001.0002
+ dest_mac_addr : 0000.0001.0001
+ server :
+ name : Te0/0/1
+ src_mac_addr : 0000.0002.0002
+ dest_mac_addr : 0000.0002.0001
+ vrf_name : null
+
+tftp:
+ hostname : ats-asr-srv-1
+ ip_address : 10.56.217.7
+ root_dir : /scratch/tftp/
+ images_path : /asr1001x/
diff --git a/scripts/automation/regression/setups/trex07/benchmark.yaml b/scripts/automation/regression/setups/trex07/benchmark.yaml
index 0dc340b0..6e861836 100644
--- a/scripts/automation/regression/setups/trex07/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex07/benchmark.yaml
@@ -4,120 +4,57 @@
#### common templates ###
-stat_route_dict: &stat_route_dict
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
-
-nat_dict: &nat_dict
- clients_net_start : 16.0.0.0
- client_acl_wildcard_mask : 0.0.0.255
- dual_port_mask : 1.0.0.0
- pool_start : 200.0.0.0
- pool_netmask : 255.255.255.0
-
-
-### stateful ###
-
test_jumbo:
- multiplier : 17
- cores : 1
- bw_per_core : 543.232
+ multiplier : 120
+ cores : 2
+ bw_per_core : 962.464
test_routing_imix:
- multiplier : 10
- cores : 1
- bw_per_core : 34.128
+ multiplier : 60
+ cores : 4
+ bw_per_core : 48.130
test_routing_imix_64:
- multiplier : 430
- cores : 1
- bw_per_core : 5.893
-
+ multiplier : 4000
+ cores : 7
+ bw_per_core : 12.699
-test_static_routing_imix: &test_static_routing_imix
- stat_route_dict : *stat_route_dict
- multiplier : 8
- cores : 1
- bw_per_core : 34.339
-test_static_routing_imix_asymmetric: *test_static_routing_imix
+test_static_routing_imix_asymmetric:
+ multiplier : 50
+ cores : 3
+ bw_per_core : 50.561
test_ipv6_simple:
- multiplier : 9
- cores : 2
- bw_per_core : 19.064
-
-
-test_nat_simple_mode1: &test_nat_simple
- stat_route_dict : *stat_route_dict
- nat_dict : *nat_dict
- multiplier : 6000
- cores : 1
- nat_opened : 500000
- allow_timeout_dev : True
- bw_per_core : 44.445
-
-test_nat_simple_mode2: *test_nat_simple
-
-test_nat_simple_mode3: *test_nat_simple
-
-test_nat_learning: *test_nat_simple
-
-
-test_nbar_simple:
- multiplier : 7.5
- cores : 2
- bw_per_core : 17.174
- nbar_classification:
- rtp : 32.57
- http : 30.25
- oracle_sqlnet : 11.23
- exchange : 10.80
- citrix : 5.62
- rtsp : 2.84
- dns : 1.95
- smtp : 0.57
- pop3 : 0.36
- ssl : 0.17
- sctp : 0.13
- sip : 0.09
- unknown : 3.41
+ multiplier : 50
+ cores : 7
+ bw_per_core : 19.5
test_rx_check_http: &rx_http
- multiplier : 15000
- cores : 1
- rx_sample_rate : 16
- bw_per_core : 39.560
+ multiplier : 99000
+ cores : 7
+ rx_sample_rate : 128
+ bw_per_core : 49.464
test_rx_check_http_ipv6:
<< : *rx_http
bw_per_core : 49.237
-test_rx_check_http_negative_disabled:
- << : *rx_http
- stat_route_dict : *stat_route_dict
- nat_dict : *nat_dict
-
-
test_rx_check_sfr: &rx_sfr
- multiplier : 10
- cores : 3
- rx_sample_rate : 16
- bw_per_core : 16.082
+ multiplier : 35
+ cores : 7
+ rx_sample_rate : 128
+ bw_per_core : 20.871
test_rx_check_sfr_ipv6:
<< : *rx_sfr
bw_per_core : 19.198
-
### stateless ###
test_CPU_benchmark:
@@ -178,10 +115,10 @@ test_CPU_benchmark:
cpu_util : 1
bw_per_core : 1
- - name : stl/udp_for_benchmarks.py
- kwargs : {packet_len: 9000, stream_count: 100}
- cpu_util : 1
- bw_per_core : 1
+ #- name : stl/udp_for_benchmarks.py
+ # kwargs : {packet_len: 9000, stream_count: 100}
+ # cpu_util : 1
+ # bw_per_core : 1
# not enough memory + queue full if memory increase
# - name : stl/udp_for_benchmarks.py
@@ -241,4 +178,56 @@ test_CPU_benchmark:
cpu_util : 1
bw_per_core : 1
-
+test_performance_vm_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 9.6
+ max: 13.3
+
+
+test_performance_vm_single_cpu_cached:
+ cfg:
+ mult : "10%"
+ mpps_per_core_golden :
+ min: 16.0
+ max: 25.0
+
+
+
+test_performance_syn_attack_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 9.0
+ max: 14.0
+
+test_performance_vm_multi_cpus:
+ cfg:
+ core_count : 7
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 8.5
+ max: 12.0
+
+
+test_performance_vm_multi_cpus_cached:
+ cfg:
+ core_count : 7
+ mult : "35%"
+ mpps_per_core_golden :
+ min: 9.0
+ max: 15.0
+
+test_performance_syn_attack_multi_cpus:
+ cfg:
+ core_count : 7
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 8.0
+ max: 16.0
+
+
+test_all_profiles :
+ mult : "5%"
+
diff --git a/scripts/automation/regression/setups/trex07/config.yaml b/scripts/automation/regression/setups/trex07/config.yaml
index db6e9bf8..10472c4f 100644
--- a/scripts/automation/regression/setups/trex07/config.yaml
+++ b/scripts/automation/regression/setups/trex07/config.yaml
@@ -35,32 +35,7 @@
trex:
hostname : csi-trex-07
- cores : 4
+ cores : 8
+ modes : ['loopback']
-router:
- model : ASR1001x
- hostname : csi-asr-01
- ip_address : 10.56.216.120
- image : asr1001x-universalk9.03.13.02.S.154-3.S2-ext.SPA.bin
- line_password : cisco
- en_password : cisco
- mgmt_interface : GigabitEthernet0
- clean_config : clean_config.cfg
- intf_masking : 255.255.255.0
- ipv6_mask : 64
- interfaces :
- - client :
- name : Te0/0/0
- src_mac_addr : 0000.0001.0002
- dest_mac_addr : 0000.0001.0001
- server :
- name : Te0/0/1
- src_mac_addr : 0000.0002.0002
- dest_mac_addr : 0000.0002.0001
- vrf_name : null
-tftp:
- hostname : ats-asr-srv-1
- ip_address : 10.56.217.7
- root_dir : /scratch/tftp/
- images_path : /asr1001x/
diff --git a/scripts/automation/regression/setups/trex08/benchmark.yaml b/scripts/automation/regression/setups/trex08/benchmark.yaml
index 8f83e8f9..935b3e55 100644
--- a/scripts/automation/regression/setups/trex08/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex08/benchmark.yaml
@@ -179,3 +179,53 @@ test_CPU_benchmark:
bw_per_core : 1
+test_performance_vm_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 15.1
+ max: 20.3
+
+
+test_performance_vm_single_cpu_cached:
+ cfg:
+ mult : "10%"
+ mpps_per_core_golden :
+ min: 29.1
+ max: 32.0
+
+
+
+test_performance_syn_attack_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 13.2
+ max: 15.0
+
+test_performance_vm_multi_cpus:
+ cfg:
+ core_count : 7
+ mult : "40%"
+ mpps_per_core_golden :
+ min: 15.0
+ max: 20.0
+
+
+test_performance_vm_multi_cpus_cached:
+ cfg:
+ core_count : 7
+ mult : "40%"
+ mpps_per_core_golden :
+ min: 29.0
+ max: 34.0
+
+test_performance_syn_attack_multi_cpus:
+ cfg:
+ core_count : 7
+ mult : "40%"
+ mpps_per_core_golden :
+ min: 13.0
+ max: 17.0
+
+
diff --git a/scripts/automation/regression/setups/trex09/benchmark.yaml b/scripts/automation/regression/setups/trex09/benchmark.yaml
index 86f169ed..d1f5f56c 100644
--- a/scripts/automation/regression/setups/trex09/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex09/benchmark.yaml
@@ -204,7 +204,7 @@ test_performance_syn_attack_single_cpu:
cfg:
mult : "90%"
mpps_per_core_golden :
- min: 13.8
+ min: 12.9
max: 14.5
test_performance_vm_multi_cpus:
diff --git a/scripts/automation/regression/stateful_tests/trex_general_test.py b/scripts/automation/regression/stateful_tests/trex_general_test.py
index e968d380..1843af00 100755
--- a/scripts/automation/regression/stateful_tests/trex_general_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_general_test.py
@@ -198,11 +198,14 @@ class CTRexGeneral_Test(unittest.TestCase):
def check_for_trex_crash(self):
pass
- def get_benchmark_param (self, param, sub_param = None, test_name = None):
+    def get_benchmark_param (self, param, sub_param = None, test_name = None, default = None):
if not test_name:
test_name = self.get_name()
if test_name not in self.benchmark:
- self.skip('No data in benchmark.yaml for test: %s, param: %s. Skipping.' % (test_name, param))
+            if default == None:
+ self.skip('No data in benchmark.yaml for test: %s, param: %s. Skipping.' % (test_name, param))
+ else:
+ return default
if sub_param:
return self.benchmark[test_name][param].get(sub_param)
else:
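
The new default argument turns a hard skip into a soft fallback when a test has no benchmark.yaml entry. A standalone sketch of the behavior (simplified, not the real class):

from unittest import SkipTest

benchmark = {'test_jumbo': {'multiplier': 17}}

def get_benchmark_param(test_name, param, default=None):
    if test_name not in benchmark:
        if default is None:
            raise SkipTest('No data in benchmark.yaml for test: %s' % test_name)
        return default  # fall back instead of skipping
    return benchmark[test_name].get(param, default)

assert get_benchmark_param('no_such_test', 'mult', default='30%') == '30%'
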
diff --git a/scripts/automation/regression/stateless_tests/stl_client_test.py b/scripts/automation/regression/stateless_tests/stl_client_test.py
index 36ac0ee1..acf5dc61 100644
--- a/scripts/automation/regression/stateless_tests/stl_client_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_client_test.py
@@ -240,8 +240,12 @@ class STLClient_Test(CStlGeneral_Test):
self.skip('skipping profile tests for virtual / non loopback')
return
+        default_mult = self.get_benchmark_param('mult', default = "30%")
+
try:
-
+            print("\n")
+
+
for profile in self.profiles:
print("now testing profile {0}...\n".format(profile))
@@ -269,7 +273,7 @@ class STLClient_Test(CStlGeneral_Test):
self.c.clear_stats()
- self.c.start(ports = [self.tx_port, self.rx_port], mult = "30%")
+ self.c.start(ports = [self.tx_port, self.rx_port], mult = default_mult)
time.sleep(100 / 1000.0)
if p1.is_pauseable() and p2.is_pauseable():
diff --git a/scripts/automation/regression/stateless_tests/stl_performance_test.py b/scripts/automation/regression/stateless_tests/stl_performance_test.py
index e5cecc03..641f0a33 100644
--- a/scripts/automation/regression/stateless_tests/stl_performance_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_performance_test.py
@@ -61,7 +61,7 @@ class PerformanceReport(object):
SetupName = self.machine_name,
TestType = 'performance',
Mppspc = self.avg_mpps_per_core,
- ActionNumber = '<fill_me>',
+ ActionNumber = os.getenv("BUILD_ID","n/a"),
GoldenMin = golden_mpps['min'],
GoldenMax = golden_mpps['max'])
@@ -296,6 +296,10 @@ class STLPerformance_Test(CStlGeneral_Test):
# sample bps/pps
for _ in range(0, 20):
stats = self.c.get_stats(ports = 0)
+            if stats['global']['queue_full'] > 10000:
+                assert 0, "Queue is full - need to tune the multiplier down"
+
+            # under queue-full pressure the CPU results are not valid and cannot be used
samples['bps'].append(stats[0]['tx_bps'])
samples['pps'].append(stats[0]['tx_pps'])
time.sleep(1)
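
The added guard aborts sampling once the TX queue backs up, since samples taken under queue-full pressure measure the queue rather than the device. A sketch of the loop with that guard, assuming client exposes the STLClient stats layout used above:

import time

def sample_tx(client, seconds=20, queue_full_limit=10000):
    samples = {'bps': [], 'pps': []}
    for _ in range(seconds):
        stats = client.get_stats(ports=0)
        if stats['global']['queue_full'] > queue_full_limit:
            # results under back-pressure are invalid - fail loudly
            raise AssertionError('Queue is full - tune the multiplier down')
        samples['bps'].append(stats[0]['tx_bps'])
        samples['pps'].append(stats[0]['tx_pps'])
        time.sleep(1)
    return samples
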
diff --git a/scripts/automation/regression/stateless_tests/stl_rx_test.py b/scripts/automation/regression/stateless_tests/stl_rx_test.py
index 524ad4bf..d28fca54 100644
--- a/scripts/automation/regression/stateless_tests/stl_rx_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_rx_test.py
@@ -51,6 +51,17 @@ class STLRX_Test(CStlGeneral_Test):
'latency_9k_enable': False,
'allow_packets_drop_num': 1, # allow 1 pkt drop
},
+
+ 'librte_pmd_mlx5': {
+ 'rate_percent': 80,
+ 'total_pkts': 1000,
+ 'rate_latency': 1,
+ 'latency_9k_enable': True,
+ 'latency_9k_max_average': 100,
+ 'latency_9k_max_latency': 250,
+ },
+
+
}
CStlGeneral_Test.setUp(self)
@@ -63,7 +74,6 @@ class STLRX_Test(CStlGeneral_Test):
port_info = self.c.get_port_info(ports = self.rx_port)[0]
self.speed = port_info['speed']
-
cap = port_info['rx']['caps']
if "flow_stats" not in cap or "latency" not in cap:
self.skip('port {0} does not support RX'.format(self.rx_port))
@@ -400,12 +410,14 @@ class STLRX_Test(CStlGeneral_Test):
s_port=random.sample(all_ports, random.randint(1, len(all_ports)) )
s_port=sorted(s_port)
- if self.speed == 40 :
+
+ if ((self.speed == 40) or (self.speed == 100)):
            # the NIC does not support full line rate when both ports transmit, so filter out the odd ports
s_port=list(filter(lambda x: x % 2==0, s_port))
if len(s_port)==0:
s_port=[0];
+
error=1;
for j in range(0,5):
print(" {4} - duration {0} pgid {1} pkt_size {2} s_port {3} ".format(duration,pgid,pkt_size,s_port,j));
diff --git a/scripts/automation/trex_control_plane/stl/console/trex_tui.py b/scripts/automation/trex_control_plane/stl/console/trex_tui.py
index d7db6d30..bf6ed164 100644
--- a/scripts/automation/trex_control_plane/stl/console/trex_tui.py
+++ b/scripts/automation/trex_control_plane/stl/console/trex_tui.py
@@ -645,14 +645,14 @@ class TrexTUI():
# regular state
if self.state == self.STATE_ACTIVE:
            # if no connectivity - move to lost connectivity
- if not self.stateless_client.async_client.is_alive():
+ if not self.stateless_client.async_client.is_active():
self.stateless_client._invalidate_stats(self.pm.ports)
self.state = self.STATE_LOST_CONT
# lost connectivity
elif self.state == self.STATE_LOST_CONT:
- # got it back
+            # if the async is alive (might be a zombie, but alive) try to reconnect
if self.stateless_client.async_client.is_alive():
# move to state reconnect
self.state = self.STATE_RECONNECT
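
The TUI now distinguishes is_active() (thread running, data flowing, not a zombie) from is_alive() (data flowing, zombie or not). A compact sketch of the resulting state machine:

STATE_ACTIVE, STATE_LOST_CONT, STATE_RECONNECT = range(3)

def next_state(state, async_client):
    # the active view demands a non-zombie subscriber
    if state == STATE_ACTIVE and not async_client.is_active():
        return STATE_LOST_CONT
    # any sign of life (even from a zombie) triggers a reconnect attempt
    if state == STATE_LOST_CONT and async_client.is_alive():
        return STATE_RECONNECT
    return state
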
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/protocols.json b/scripts/automation/trex_control_plane/stl/services/scapy_server/protocols.json
new file mode 100644
index 00000000..f685c06f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/protocols.json
@@ -0,0 +1,194 @@
+[
+ {
+ "id": "Ether",
+ "name": "Ethernet II",
+ "fields": [
+ {
+ "id": "dst",
+ "name": "Destination",
+ "type": "MAC_ADDRESS",
+ "regex": "^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$"
+ },
+ {
+ "id": "src",
+ "name": "Source",
+ "type": "MAC_ADDRESS",
+ "regex": "^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$"
+ },
+ {
+ "id": "type",
+ "name": "Type"
+ }
+ ],
+ "payload": ["IP", "IPv6", "Dot1Q", "Raw"]
+ },
+ {
+ "id": "IP",
+ "name": "IPv4",
+ "fields": [
+ {
+ "id": "version",
+ "name": "Version"
+ },
+ {
+ "id": "ihl",
+ "name": "IHL",
+ "type": "NUMBER",
+ "auto": true
+ },
+ {
+ "id": "tos",
+ "name": "TOS",
+ "type": "NUMBER"
+ },
+ {
+ "id": "len",
+ "name": "Total Length",
+ "type": "NUMBER",
+ "auto": true
+ },
+ {
+ "id": "id",
+ "name": "Identification",
+ "type": "NUMBER"
+ },
+ {
+ "id": "flags",
+ "name": "Flags",
+ "type": "BITMASK",
+ "bits": [
+ {"name": "Reserved", "mask": 4, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 4}]},
+ {"name": "Fragment", "mask": 2, "values":[{"name":"May fragment (0)", "value": 0}, {"name":"Don't fragment (1)", "value": 2}]},
+ {"name": "More Fragments(MF)", "mask": 1, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 1}]}
+ ]
+ },
+ {
+ "id": "frag",
+ "name": "Fragment offset",
+ "type": "NUMBER"
+ },
+ {
+ "id": "ttl",
+ "name": "TTL",
+ "type": "NUMBER",
+ "min": 1,
+ "max": 255
+
+ },
+ {
+ "id": "proto",
+ "name": "Protocol"
+ },
+ {
+ "id": "chksum",
+ "name": "Checksum",
+ "type": "STRING",
+ "auto": true
+ },
+ {
+ "id": "src",
+ "name": "Source address",
+ "type": "IP_ADDRESS"
+ },
+ {
+ "id": "dst",
+ "name": "Destination address",
+ "type": "IP_ADDRESS"
+ },
+ {
+ "id": "options",
+ "name": "Options",
+ "type": "IP_OPTIONS"
+ }
+ ],
+ "payload": ["TCP", "UDP", "ICMP", "Raw"]
+ },
+ {
+ "id": "TCP",
+ "name": "TCP",
+ "fields": [
+ {
+ "id": "sport",
+ "name": "Source port",
+ "type": "NUMBER",
+ "min": 0,
+ "max": 65535
+
+ },
+ {
+ "id": "dport",
+ "name": "Destination port",
+ "type": "NUMBER",
+ "min": 0,
+ "max": 65535
+ },
+ {
+ "id": "seq",
+ "name": "Sequence number",
+ "type": "NUMBER"
+ },
+ {
+ "id": "ack",
+ "name": "Acknowledgment number",
+ "type": "NUMBER"
+ },
+ {
+ "id": "dataofs",
+ "name": "Data offset",
+ "type": "NUMBER"
+ },
+ {
+ "id": "reserved",
+ "name": "Reserved",
+ "type": "NUMBER"
+ },
+ {
+ "id": "flags",
+ "name": "Flags",
+ "auto": false,
+ "type": "BITMASK",
+ "bits": [
+ {"name": "URG", "mask": 32, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 32}]},
+ {"name": "ACK", "mask": 16, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 16}]},
+ {"name": "PSH", "mask": 8, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 8}]},
+ {"name": "RST", "mask": 4, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 4}]},
+ {"name": "SYN", "mask": 2, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 2}]},
+ {"name": "FIN", "mask": 1, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 1}]}
+ ]
+ },
+ {
+ "id": "window",
+ "name": "Window size",
+ "type": "NUMBER"
+ },
+ {
+ "id": "chksum",
+ "name": "Checksum",
+ "auto": true,
+ "type": "NUMBER"
+ },
+ {
+ "id": "urgptr",
+ "name": "Urgent pointer",
+ "type": "NUMBER"
+ },
+ {
+ "id": "options",
+ "name": "Options",
+ "type": "TCP_OPTIONS"
+ }
+ ]
+ },
+ {
+ "id": "Raw",
+ "name": "Raw",
+ "fields": [
+ {
+ "id": "load",
+ "name": "Payload",
+ "type": "BYTES"
+ }
+ ]
+ }
+]
+
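
protocols.json supplies what scapy introspection cannot: display names, field types, value regexes, and the preferred payload order. A sketch of consuming it the way the service does (the file is assumed to sit in the working directory, as in _load_definitions_from_json below):

import json
import re

with open('protocols.json') as f:
    defs = {p['id']: p for p in json.load(f)}

ether = defs['Ether']
dst_regex = next(fld['regex'] for fld in ether['fields'] if fld['id'] == 'dst')
assert re.match(dst_regex, '00:0c:29:92:f1:ca')
assert ether['payload'][0] == 'IP'  # payload order as served to clients
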
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py
index 91257596..88514aa8 100755
--- a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py
@@ -9,6 +9,7 @@ import tempfile
import hashlib
import base64
import numbers
+import random
import inspect
import json
from pprint import pprint
@@ -279,6 +280,64 @@ def get_sample_field_val(scapy_layer, fieldId):
except:
pass
+def generate_random_bytes(sz, seed, start, end):
+ # generate bytes of specified range with a fixed seed and size
+ rnd = random.Random()
+ n = end - start + 1
+ if is_python(2):
+ rnd = random.Random(seed)
+ res = [start + int(rnd.random()*n) for _i in range(sz)]
+ return ''.join(chr(x) for x in res)
+ else:
+ rnd = random.Random()
+ # to generate same random sequence as 2.x
+ rnd.seed(seed, version=1)
+ res = [start + int(rnd.random()*n) for _i in range(sz)]
+ return bytes(res)
+
+def generate_bytes_from_template(sz, template):
+ # generate bytes by repeating a template
+ res = str_to_bytes('') # new bytes array
+ if len(template) == 0:
+ return res
+ while len(res) < sz:
+ res = res + template
+ return res[:sz]
+
+def parse_template_code(template_code):
+ template_code = re.sub("0[xX]", '', template_code) # remove 0x
+ template_code = re.sub("[\s]", '', template_code) # remove spaces
+ return bytearray.fromhex(template_code)
+
+def verify_payload_size(size):
+ assert(size != None)
+ if (size > (1<<20)): # 1Mb ought to be enough for anybody
+ raise ValueError('size is too large')
+
+def generate_bytes(bytes_definition):
+ # accepts a bytes definition object
+ # {generate: random_bytes or random_ascii, seed: <seed_number>, size: <size_bytes>}
+ # {generate: template, template_base64: '<base64str>', size: <size_bytes>}
+ # {generate: template_code, template_text_code: '<template_code_str>', size: <size_bytes>}
+ gen_type = bytes_definition.get('generate')
+ if gen_type == None:
+ return b64_to_bytes(bytes_definition['base64'])
+ elif gen_type == 'template_code':
+ code = parse_template_code(bytes_definition["template_code"])
+ bytes_size = int(bytes_definition.get('size') or len(code))
+ verify_payload_size(bytes_size)
+ return generate_bytes_from_template(bytes_size, code)
+ else:
+ bytes_size = int(bytes_definition['size']) # required
+ seed = int(bytes_definition.get('seed') or 12345) # optional
+ verify_payload_size(bytes_size)
+ if gen_type == 'random_bytes':
+ return generate_random_bytes(bytes_size, seed, 0, 0xFF)
+ elif gen_type == 'random_ascii':
+ return generate_random_bytes(bytes_size, seed, 0x20, 0x7E)
+ elif gen_type == 'template':
+ return generate_bytes_from_template(bytes_size, b64_to_bytes(bytes_definition["template_base64"]))
+
class ScapyException(Exception): pass
class Scapy_service(Scapy_service_api):
@@ -312,7 +371,16 @@ class Scapy_service(Scapy_service_api):
self.version_major = '1'
self.version_minor = '01'
self.server_v_hashed = self._generate_version_hash(self.version_major,self.version_minor)
-
+        self.protocol_definitions = {} # protocolId -> protocol definition overrides data
+ self._load_definitions_from_json()
+
+ def _load_definitions_from_json(self):
+ # load protocol definitions from a json file
+ self.protocol_definitions = {}
+ with open('protocols.json', 'r') as f:
+ protocols = json.load(f)
+ for protocol in protocols:
+ self.protocol_definitions[ protocol['id'] ] = protocol
def _all_protocol_structs(self):
old_stdout = sys.stdout
@@ -370,9 +438,9 @@ class Scapy_service(Scapy_service_api):
if type(val) == type({}):
value_type = val['vtype']
if value_type == 'EXPRESSION':
- return eval(val['expr'], {})
+ return eval(val['expr'], scapy.all.__dict__)
elif value_type == 'BYTES': # bytes payload(ex Raw.load)
- return b64_to_bytes(val['base64'])
+ return generate_bytes(val)
elif value_type == 'OBJECT':
return val['value']
else:
@@ -382,7 +450,7 @@ class Scapy_service(Scapy_service_api):
else:
return val
- def _field_value_from_def(self, layer, fieldId, val):
+ def _field_value_from_def(self, scapy_pkt, layer, fieldId, val):
field_desc = layer.get_field(fieldId)
sample_val = get_sample_field_val(layer, fieldId)
# extensions for field values
@@ -394,6 +462,16 @@ class Scapy_service(Scapy_service_api):
return field_desc.randval()
elif value_type == 'MACHINE': # internal machine field repr
return field_desc.m2i(layer, b64_to_bytes(val['base64']))
+ elif value_type == 'BYTES':
+ if 'total_size' in val: # custom case for total pkt size
+ gen = {}
+ gen.update(val)
+ total_sz = gen['total_size']
+ del gen['total_size']
+ gen['size'] = total_sz - len(scapy_pkt)
+ return generate_bytes(gen)
+ else:
+ return generate_bytes(val)
if is_number(sample_val) and is_string(val):
# human-value. guess the type and convert to internal value
# seems setfieldval already does this for some fields,
@@ -583,22 +661,24 @@ class Scapy_service(Scapy_service_api):
def _verify_version_handler(self,client_v_handler):
return (self.server_v_hashed == client_v_handler)
- def _parse_packet_dict(self,layer,scapy_layers,scapy_layer_names):
- class_name = scapy_layer_names.index(layer['id'])
- class_p = scapy_layers[class_name] # class pointer
+ def _parse_packet_dict(self, layer, layer_classes, base_layer):
+ class_p = layer_classes[layer['id']] # class id -> class dict
scapy_layer = class_p()
if isinstance(scapy_layer, Raw):
scapy_layer.load = str_to_bytes("dummy")
+ if base_layer == None:
+ base_layer = scapy_layer
if 'fields' in layer:
- self._modify_layer(scapy_layer, layer['fields'])
+ self._modify_layer(base_layer, scapy_layer, layer['fields'])
return scapy_layer
def _packet_model_to_scapy_packet(self,data):
- layers = Packet.__subclasses__()
- layer_names = [ layer.__name__ for layer in layers]
- base_layer = self._parse_packet_dict(data[0],layers,layer_names)
+ layer_classes = {}
+ for layer_class in Packet.__subclasses__():
+ layer_classes[layer_class.__name__] = layer_class
+ base_layer = self._parse_packet_dict(data[0], layer_classes, None)
for i in range(1,len(data),1):
- packet_layer = self._parse_packet_dict(data[i],layers,layer_names)
+ packet_layer = self._parse_packet_dict(data[i], layer_classes, base_layer)
base_layer = base_layer/packet_layer
return base_layer
@@ -654,10 +734,9 @@ class Scapy_service(Scapy_service_api):
return pkt_class()
- def _get_payload_classes(self, pkt):
+ def _get_payload_classes(self, pkt_class):
# tries to find, which subclasses allowed.
# this can take long time, since it tries to build packets with all subclasses(O(N))
- pkt_class = type(pkt)
allowed_subclasses = []
for pkt_subclass in conf.layers:
if self._is_packet_class(pkt_subclass):
@@ -671,16 +750,29 @@ class Scapy_service(Scapy_service_api):
pass
return allowed_subclasses
- def _get_fields_definition(self, pkt_class):
+ def _get_fields_definition(self, pkt_class, fieldsDef):
+ # fieldsDef - array of field definitions(or empty array)
fields = []
for field_desc in pkt_class.fields_desc:
+ fieldId = field_desc.name
field_data = {
- "id": field_desc.name,
+ "id": fieldId,
"name": field_desc.name
}
+ for fieldDef in fieldsDef:
+ if fieldDef['id'] == fieldId:
+ field_data.update(fieldDef)
if isinstance(field_desc, EnumField):
try:
field_data["values_dict"] = field_desc.s2i
+ if field_data.get("type") == None:
+                    if len(field_data["values_dict"]) > 0:
+ field_data["type"] = "ENUM"
+ elif fieldId == 'load':
+ field_data["type"] = "BYTES"
+ else:
+ field_data["type"] = "STRING"
+ field_data["values_dict"] = field_desc.s2i
except:
# MultiEnumField doesn't have s2i. need better handling
pass
@@ -696,17 +788,23 @@ class Scapy_service(Scapy_service_api):
for pkt_class in all_classes:
if self._is_packet_class(pkt_class):
# enumerate all non-abstract Packet classes
+ protocolId = pkt_class.__name__
+ protoDef = self.protocol_definitions.get(protocolId) or {}
protocols.append({
- "id": pkt_class.__name__,
- "name": pkt_class.name,
- "fields": self._get_fields_definition(pkt_class)
+ "id": protocolId,
+ "name": protoDef.get('name') or pkt_class.name,
+ "fields": self._get_fields_definition(pkt_class, protoDef.get('fields') or [])
})
res = {"protocols": protocols}
return res
def get_payload_classes(self,client_v_handler, pkt_model_descriptor):
pkt = self._packet_model_to_scapy_packet(pkt_model_descriptor)
- return [c.__name__ for c in self._get_payload_classes(pkt)]
+ pkt_class = type(pkt.lastlayer())
+ protocolDef = self.protocol_definitions.get(pkt_class.__name__)
+ if protocolDef and protocolDef.get('payload'):
+ return protocolDef['payload']
+ return [c.__name__ for c in self._get_payload_classes(pkt_class)]
#input in string encoded base64
def check_update_of_dbs(self,client_v_handler,db_md5,field_md5):
@@ -725,10 +823,10 @@ class Scapy_service(Scapy_service_api):
else:
raise ScapyException("Fields DB is not up to date")
- def _modify_layer(self, scapy_layer, fields):
+ def _modify_layer(self, scapy_pkt, scapy_layer, fields):
for field in fields:
fieldId = str(field['id'])
- fieldval = self._field_value_from_def(scapy_layer, fieldId, field['value'])
+ fieldval = self._field_value_from_def(scapy_pkt, scapy_layer, fieldId, field['value'])
if fieldval is not None:
scapy_layer.setfieldval(fieldId, fieldval)
else:
@@ -767,7 +865,7 @@ class Scapy_service(Scapy_service_api):
# TODO: support replacing payload, instead of breaking
raise ScapyException("Protocol id inconsistent")
if 'fields' in model_layer:
- self._modify_layer(scapy_layer, model_layer['fields'])
+ self._modify_layer(scapy_pkt, scapy_layer, model_layer['fields'])
return self._pkt_data(scapy_pkt)
def read_pcap(self,client_v_handler,pcap_base64):
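
The 'total_size' branch sizes a generated Raw payload so the finished packet hits an exact length: the requested total minus whatever the existing layers already occupy. A worked sketch of that arithmetic (54 bytes is the usual Ether+IP+TCP header total):

def payload_size_for(total_size, current_pkt_len):
    size = total_size - current_pkt_len
    if size < 0:
        raise ValueError('existing layers already exceed total_size')
    return size

# 64-byte target with Ether(14) + IP(20) + TCP(20) = 54 header bytes
assert payload_size_for(64, 54) == 10
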
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py
index 17dd304a..1db2c62b 100644
--- a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py
@@ -62,6 +62,9 @@ def reconstruct_pkt(bytes_b64, model_def):
def get_definitions(def_filter):
return pass_result(service.get_definitions(v_handler, def_filter))
+def get_definition_of(scapy_classname):
+ return pass_result(service.get_definitions(v_handler, [scapy_classname]))['protocols'][0]
+
def get_payload_classes(def_filter):
return pass_result(service.get_payload_classes(v_handler, def_filter))
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py
index 9cd473d7..d1207ca5 100644
--- a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py
@@ -78,6 +78,35 @@ def test_build_Raw():
])
assert(str(pkt[Raw].load == "hi"))
+def test_build_fixed_pkt_size_bytes_gen():
+ pkt = build_pkt_get_scapy([
+ layer_def("Ether"),
+ layer_def("IP"),
+ layer_def("TCP"),
+ layer_def("Raw", load={
+ "vtype": "BYTES",
+ "generate": "template",
+ "total_size": 64,
+ "template_base64": bytes_to_b64(b"hi")
+ })
+ ])
+ print(len(pkt))
+ assert(len(pkt) == 64)
+
+def test_build_fixed_pkt_size_random_ascii_gen():
+ pkt = build_pkt_get_scapy([
+ layer_def("Ether"),
+ layer_def("IP"),
+ layer_def("TCP"),
+ layer_def("Raw", load={
+ "vtype": "BYTES",
+ "generate": "random_ascii",
+ "total_size": 256
+ })
+ ])
+ print(len(pkt))
+ assert(len(pkt) == 256)
+
def test_get_all():
service.get_all(v_handler)
@@ -98,6 +127,16 @@ def test_get_payload_classes():
assert("IP" in eth_payloads)
assert("Dot1Q" in eth_payloads)
assert("TCP" not in eth_payloads)
+    assert(eth_payloads[0] == "IP") # order (based on protocols.json)
+
+def test_get_tcp_payload_classes():
+ payloads = get_payload_classes([{"id":"TCP"}])
+ assert("Raw" in payloads)
+
+def test_get_dot1q_payload_classes():
+ payloads = get_payload_classes([{"id":"Dot1Q"}])
+ assert("Dot1Q" in payloads)
+ assert("IP" in payloads)
def test_pcap_read_and_write():
pkts_to_write = [bytes_to_b64(bytes(TEST_PKT))]
@@ -120,6 +159,28 @@ def test_layer_random_value():
ether_fields = fields_to_map(res['data'][0]['fields'])
assert(re.match(RE_MAC, ether_fields['src']['value']))
+def test_IP_options():
+ options_expr = "[IPOption_SSRR(copy_flag=0, routers=['1.2.3.4', '5.6.7.8'])]"
+ res = build_pkt([
+ layer_def("Ether"),
+ layer_def("IP", options={"vtype": "EXPRESSION", "expr": options_expr}),
+ ])
+ pkt = build_pkt_to_scapy(res)
+ options = pkt[IP].options
+ assert(options[0].__class__.__name__ == 'IPOption_SSRR')
+ assert(options[0].copy_flag == 0)
+ assert(options[0].routers == ['1.2.3.4', '5.6.7.8'])
+
+def test_TCP_options():
+ options_expr = "[('MSS', 1460), ('NOP', None), ('NOP', None), ('SAckOK', b'')]"
+ pkt = build_pkt_get_scapy([
+ layer_def("Ether"),
+ layer_def("IP"),
+ layer_def("TCP", options={"vtype": "EXPRESSION", "expr": options_expr}),
+ ])
+ options = pkt[TCP].options
+ assert(options[0] == ('MSS', 1460) )
+
def test_layer_wrong_structure():
payload = [
layer_def("Ether"),
@@ -153,3 +214,38 @@ def test_layer_wrong_structure():
assert(real_structure == ["Ether", "IP", "Raw", None, None])
assert(valid_structure_flags == [True, True, True, False, False])
+def test_ether_definitions():
+ etherDef = get_definition_of("Ether")
+ assert(etherDef['name'] == "Ethernet II")
+ etherFields = etherDef['fields']
+ assert(etherFields[0]['id'] == 'dst')
+ assert(etherFields[0]['name'] == 'Destination')
+ assert(etherFields[1]['id'] == 'src')
+ assert(etherFields[1]['name'] == 'Source')
+ assert(etherFields[2]['id'] == 'type')
+ assert(etherFields[2]['name'] == 'Type')
+
+def test_icmp_definitions():
+ pdef = get_definition_of("ICMP")
+ assert(pdef['id'] == "ICMP")
+ assert(pdef['name'])
+ assert(pdef['fields'])
+
+def test_ip_definitions():
+ pdef = get_definition_of("IP")
+ fields = pdef['fields']
+ assert(fields[0]['id'] == 'version')
+
+ assert(fields[1]['id'] == 'ihl')
+ assert(fields[1]['auto'] == True)
+
+ assert(fields[3]['id'] == 'len')
+ assert(fields[3]['auto'] == True)
+
+ assert(fields[5]['id'] == 'flags')
+ assert(fields[5]['type'] == 'BITMASK')
+ assert(fields[5]['bits'][0]['name'] == 'Reserved')
+
+ assert(fields[9]['id'] == 'chksum')
+ assert(fields[9]['auto'] == True)
+
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_utils.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_utils.py
new file mode 100644
index 00000000..ceb88b47
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_utils.py
@@ -0,0 +1,69 @@
+# run with 'nosetests' utility
+
+from basetest import *
+from scapy_service import *
+
+def test_generate_random_bytes():
+ res = generate_random_bytes(10, 333, ord('0'), ord('9'))
+ print(res)
+ assert(len(res) == 10)
+ assert(res == b'5390532937') # random value with this seed
+
+def test_generate_bytes_from_template_empty():
+ res = generate_bytes_from_template(5, b"")
+ print(res)
+ assert(res == b"")
+
+def test_generate_bytes_from_template_neg():
+ res = generate_bytes_from_template(-5, b"qwe")
+ assert(res == b"")
+
+def test_generate_bytes_from_template_less():
+ res = generate_bytes_from_template(5, b"qwe")
+ print(res)
+ assert(res == b"qweqw")
+
+def test_generate_bytes_from_template_same():
+ res = generate_bytes_from_template(5, b"qwert")
+ print(res)
+ assert(res == b"qwert")
+
+def test_generate_bytes_from_template_more():
+ res = generate_bytes_from_template(5, b"qwerty")
+ print(res)
+ assert(res == b"qwert")
+
+def test_parse_template_code_with_trash():
+ res = parse_template_code("0xDE AD\n be ef \t0xDEAD")
+ print(res)
+ assert(res == bytearray.fromhex('DEADBEEFDEAD'))
+
+def test_generate_bytes():
+ res = generate_bytes({"generate":"random_bytes", "seed": 123, "size": 12})
+ print(res)
+ assert(len(res) == 12)
+
+def test_generate_ascii_default_seed():
+ res = generate_bytes({"generate":"random_ascii", "size": 14})
+ print(res)
+ assert(len(res) == 14)
+
+
+def test_generate_template_code_no_size():
+ res = generate_bytes({"generate":"template_code", "template_code": "BE EF"})
+ assert(res == bytearray.fromhex('BE EF'))
+
+def test_generate_template_code_less():
+ res = generate_bytes({"generate":"template_code", "template_code": "DE AD BE EF", "size": 2})
+ assert(res == bytearray.fromhex('DE AD'))
+
+def test_generate_template_code_more():
+ res = generate_bytes({"generate":"template_code", "template_code": "0xDEAD 0xBEEF", "size": 6})
+ assert(res == bytearray.fromhex('DE AD BE EF DE AD'))
+
+def test_generate_template_base64():
+ res = generate_bytes({"generate":"template", "template_base64": bytes_to_b64(b'hi'), "size": 5})
+ print(res)
+ assert(res == b'hihih')
+
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py
index 2c95844b..11e87592 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py
@@ -137,6 +137,10 @@ class CTRexAsyncStatsManager():
class CTRexAsyncClient():
+ THREAD_STATE_ACTIVE = 1
+ THREAD_STATE_ZOMBIE = 2
+ THREAD_STATE_DEAD = 3
+
def __init__ (self, server, port, stateless_client):
self.port = port
@@ -159,7 +163,10 @@ class CTRexAsyncClient():
self.connected = False
self.zipped = ZippedMsg()
-
+
+ self.t_state = self.THREAD_STATE_DEAD
+
+
# connects the async channel
def connect (self):
@@ -173,8 +180,8 @@ class CTRexAsyncClient():
self.socket = self.context.socket(zmq.SUB)
- # before running the thread - mark as active
- self.active = True
+ # before running the thread - mark as active
+ self.t_state = self.THREAD_STATE_ACTIVE
self.t = threading.Thread(target = self._run)
# kill this thread on exit and don't add it to the join list
@@ -198,26 +205,26 @@ class CTRexAsyncClient():
return RC_OK()
-
-
# disconnect
def disconnect (self):
if not self.connected:
return
# mark for join
- self.active = False
-
- # signal that the context was destroyed (exit the thread loop)
+ self.t_state = self.THREAD_STATE_DEAD
self.context.term()
-
- # join
self.t.join()
+
# done
self.connected = False
+ # set the thread as a zombie (in case of server death)
+ def set_as_zombie (self):
+ self.last_data_recv_ts = None
+ self.t_state = self.THREAD_STATE_ZOMBIE
+
# thread function
def _run (self):
@@ -231,12 +238,19 @@ class CTRexAsyncClient():
self.monitor.reset()
- while self.active:
+ while self.t_state != self.THREAD_STATE_DEAD:
try:
with self.monitor:
line = self.socket.recv()
+                # timestamp of the last received data
+ self.last_data_recv_ts = time.time()
+
+                # if thread was marked as zombie - it does nothing besides fetching messages
+ if self.t_state == self.THREAD_STATE_ZOMBIE:
+ continue
+
self.monitor.on_recv_msg(line)
                # try to decompress
@@ -246,7 +260,6 @@ class CTRexAsyncClient():
line = line.decode()
- self.last_data_recv_ts = time.time()
# signal once
if not got_data:
@@ -259,13 +272,14 @@ class CTRexAsyncClient():
# signal once
if got_data:
self.event_handler.on_async_dead()
+ self.set_as_zombie()
got_data = False
continue
except zmq.ContextTerminated:
# outside thread signaled us to exit
- assert(not self.active)
+ assert(self.t_state != self.THREAD_STATE_ACTIVE)
break
msg = json.loads(line)
@@ -283,16 +297,29 @@ class CTRexAsyncClient():
# closing of socket must be from the same thread
self.socket.close(linger = 0)
- def is_thread_alive (self):
- return self.t.is_alive()
-
- # did we get info for the last 3 seconds ?
+
+ # return True if the subscriber got data in the last 3 seconds
+    # even a zombie will return True if data was received
def is_alive (self):
+
+ # maybe the thread has exited with exception
+ if not self.t.is_alive():
+ return False
+
+ # simply no data
if self.last_data_recv_ts == None:
return False
+ # timeout of data
return ( (time.time() - self.last_data_recv_ts) < 3 )
+
+    # stricter than is_alive - the thread state must be ACTIVE and data must be flowing
+    # zombies will return False
+ def is_active (self):
+ return self.is_alive() and self.t_state == self.THREAD_STATE_ACTIVE
+
+
def get_stats (self):
return self.stats
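
The three thread states replace the old boolean active flag: DEAD (joinable), ACTIVE (normal operation), and ZOMBIE (still draining messages after a server death, but otherwise ignored). A minimal model of the two predicates (not the real class):

import threading
import time

class SubscriberSketch:
    ACTIVE, ZOMBIE, DEAD = 1, 2, 3

    def __init__(self):
        self.t_state = self.DEAD
        self.last_data_recv_ts = None
        self.t = threading.Thread(target=lambda: None)

    def is_alive(self):
        # thread running and data seen within the last 3 seconds
        if not self.t.is_alive() or self.last_data_recv_ts is None:
            return False
        return (time.time() - self.last_data_recv_ts) < 3

    def is_active(self):
        # stricter: alive AND never demoted to zombie
        return self.is_alive() and self.t_state == self.ACTIVE
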
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
index 9290acbf..cf328d2e 100755
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
@@ -177,8 +177,8 @@ class EventsHandler(object):
def on_async_dead (self):
if self.client.connected:
msg = 'Lost connection to server'
- self.__add_event_log('local', 'info', msg, True)
self.client.connected = False
+ self.__add_event_log('local', 'info', msg, True)
def on_async_alive (self):
@@ -346,6 +346,8 @@ class EventsHandler(object):
# server stopped
elif (event_type == 100):
ev = "Server has stopped"
+ # to avoid any new messages on async
+ self.client.async_client.set_as_zombie()
self.__async_event_server_stopped()
show_event = True
@@ -2518,7 +2520,7 @@ class STLClient(object):
slave = port ^ 0x1
if slave in ports:
- raise STLError("dual mode: cannot provide adjacent ports ({0}, {1}) in a batch".format(master, slave))
+ raise STLError("dual mode: please specify only one of adjacent ports ({0}, {1}) in a batch".format(master, slave))
if not slave in self.get_acquired_ports():
raise STLError("dual mode: adjacent port {0} must be owned during dual mode".format(slave))
@@ -2567,7 +2569,7 @@ class STLClient(object):
self.logger.post_cmd(RC_ERR(e))
raise
- all_ports = ports + [p ^ 0x1 for p in ports]
+ all_ports = ports + [p ^ 0x1 for p in ports if profile_b]
self.remove_all_streams(ports = all_ports)
@@ -2576,7 +2578,8 @@ class STLClient(object):
slave = port ^ 0x1
self.add_streams(profile_a.get_streams(), master)
- self.add_streams(profile_b.get_streams(), slave)
+ if profile_b:
+ self.add_streams(profile_b.get_streams(), slave)
return self.start(ports = all_ports, duration = duration)
@@ -2738,7 +2741,7 @@ class STLClient(object):
while set(self.get_active_ports()).intersection(ports):
# make sure ASYNC thread is still alive - otherwise we will be stuck forever
- if not self.async_client.is_thread_alive():
+ if not self.async_client.is_active():
raise STLError("subscriber thread is dead")
time.sleep(0.01)
@@ -3521,21 +3524,28 @@ class STLClient(object):
@__console
def push_line (self, line):
'''Push a pcap file '''
+ args = [self,
+ "push",
+ self.push_line.__doc__,
+ parsing_opts.REMOTE_FILE,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.COUNT,
+ parsing_opts.DURATION,
+ parsing_opts.IPG,
+ parsing_opts.SPEEDUP,
+ parsing_opts.FORCE,
+ parsing_opts.DUAL]
+
+ parser = parsing_opts.gen_parser(*(args + [parsing_opts.FILE_PATH_NO_CHECK]))
+ opts = parser.parse_args(line.split(), verify_acquired = True)
- parser = parsing_opts.gen_parser(self,
- "push",
- self.push_line.__doc__,
- parsing_opts.FILE_PATH,
- parsing_opts.REMOTE_FILE,
- parsing_opts.PORT_LIST_WITH_ALL,
- parsing_opts.COUNT,
- parsing_opts.DURATION,
- parsing_opts.IPG,
- parsing_opts.SPEEDUP,
- parsing_opts.FORCE,
- parsing_opts.DUAL)
+ if not opts:
+ return opts
+
+ if not opts.remote:
+ parser = parsing_opts.gen_parser(*(args + [parsing_opts.FILE_PATH]))
+ opts = parser.parse_args(line.split(), verify_acquired = True)
- opts = parser.parse_args(line.split(), verify_acquired = True)
if not opts:
return opts
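The rework of push_line above is a two-pass parse: the line is first parsed with the unvalidated -f (FILE_PATH_NO_CHECK) so the remote flag can be inspected, and only local pushes are re-parsed with the validating FILE_PATH, since a remote pcap path exists on the server rather than on this machine. A standalone sketch of the idea in plain argparse, with a hypothetical is_valid_file checker and --remote simplified to a store_true flag:

    import argparse, os

    def is_valid_file(path):
        # validating 'type' callable - rejects paths that do not exist locally
        if not os.path.isfile(path):
            raise argparse.ArgumentTypeError('file does not exist: %s' % path)
        return path

    def build_parser(check_file):
        parser = argparse.ArgumentParser(prog='push')
        parser.add_argument('-f', dest='file', required=True,
                            type=is_valid_file if check_file else str)
        parser.add_argument('--remote', action='store_true')
        return parser

    def parse_push(line):
        # pass 1: accept any path string, just to learn whether this is a remote push
        opts = build_parser(check_file=False).parse_args(line.split())
        if not opts.remote:
            # pass 2: local push - now the path must exist on this machine
            opts = build_parser(check_file=True).parse_args(line.split())
        return opts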
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
index e63f9125..aa797773 100755
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
@@ -1046,6 +1046,17 @@ class STLProfile(object):
else:
pkts_a, pkts_b = PCAPReader(pcap_file).read_all(split_mode = split_mode)
+ # swap the packets if a is empty, or the ts of first packet in b is earlier
+ if not pkts_a:
+ pkts_a, pkts_b = pkts_b, pkts_a
+ elif (ipg_usec is None) and pkts_b:
+ meta = pkts_a[0][1]
+ start_time_a = meta[0] * 1e6 + meta[1]
+ meta = pkts_b[0][1]
+ start_time_b = meta[0] * 1e6 + meta[1]
+ if start_time_b < start_time_a:
+ pkts_a, pkts_b = pkts_b, pkts_a
+
profile_a = STLProfile.__pkts_to_streams(pkts_a,
ipg_usec,
speedup,
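The swap above guarantees that side A is the side that transmits first. ERF/pcap packet metadata carries the timestamp as (seconds, microseconds), so each side's first packet is reduced to microseconds and compared. A sketch under that meta-layout assumption:

    def order_sides(pkts_a, pkts_b, ipg_usec=None):
        # each element is (raw_packet, meta) where meta[0] is seconds, meta[1] is microseconds
        if not pkts_a:
            return pkts_b, pkts_a
        if ipg_usec is None and pkts_b:
            start_a = pkts_a[0][1][0] * 1e6 + pkts_a[0][1][1]
            start_b = pkts_b[0][1][0] * 1e6 + pkts_b[0][1][1]
            if start_b < start_a:
                return pkts_b, pkts_a
        return pkts_a, pkts_b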
@@ -1073,6 +1084,8 @@ class STLProfile(object):
def __pkts_to_streams (pkts, ipg_usec, speedup, loop_count, vm, packet_hook, start_delay_usec = 0):
streams = []
+ if speedup == 0:
+ raise STLError('Speedup should not be 0')
# 10 ms delay before starting the PCAP
last_ts_usec = -(start_delay_usec)
@@ -1084,7 +1097,10 @@ class STLProfile(object):
for i, (cap, meta) in enumerate(pkts, start = 1):
# IPG - if not provided, take from cap
if ipg_usec == None:
- ts_usec = (meta[0] * 1e6 + meta[1]) / float(speedup)
+ packet_time = meta[0] * 1e6 + meta[1]
+ if i == 1:
+ base_time = packet_time
+ ts_usec = (packet_time - base_time) / float(speedup)
else:
ts_usec = (ipg_usec * i) / float(speedup)
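This hunk fixes two issues in __pkts_to_streams: a speedup of 0 now fails fast instead of dividing by zero, and capture timestamps become relative to the first packet (base_time), so a capture whose clock starts at a large absolute value no longer opens with a huge delay. The timestamp math, condensed:

    def packet_times(pkts, ipg_usec, speedup):
        # yields the scheduled time (usec) of each packet, relative to the first one
        if speedup == 0:
            raise ValueError('Speedup should not be 0')
        base_time = None
        for i, (cap, meta) in enumerate(pkts, start=1):
            if ipg_usec is None:
                packet_time = meta[0] * 1e6 + meta[1]
                if i == 1:
                    base_time = packet_time   # anchor at the first packet
                yield (packet_time - base_time) / float(speedup)
            else:
                yield (ipg_usec * i) / float(speedup)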
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py
index 97c9035a..e7f04546 100755
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py
@@ -43,7 +43,7 @@ CORE_MASK = 26
DUAL = 27
FLOW_CTRL = 28
SUPPORTED = 29
-RX_FILTER_MODE = 30
+FILE_PATH_NO_CHECK = 30
OUTPUT_FILENAME = 31
ALL_FILES = 32
@@ -54,6 +54,8 @@ IPV4 = 35
DEST = 36
RETRIES = 37
+RX_FILTER_MODE = 38
+
GLOBAL_STATS = 50
PORT_STATS = 51
PORT_STATUS = 52
@@ -440,6 +442,14 @@ OPTIONS_DB = {MULTIPLIER: ArgumentPack(['-m', '--multiplier'],
'type': is_valid_file,
'help': "File path to load"}),
+ FILE_PATH_NO_CHECK: ArgumentPack(['-f'],
+ {'metavar': 'FILE',
+ 'dest': 'file',
+ 'nargs': 1,
+ 'required': True,
+ 'type': str,
+ 'help': "File path to load"}),
+
FILE_FROM_DB: ArgumentPack(['--db'],
{'metavar': 'LOADED_STREAM_PACK',
'help': "A stream pack which already loaded into console cache."}),
diff --git a/scripts/dpdk_nic_bind.py b/scripts/dpdk_nic_bind.py
index a85ecb71..e797666b 100755
--- a/scripts/dpdk_nic_bind.py
+++ b/scripts/dpdk_nic_bind.py
@@ -54,7 +54,11 @@ if needed_path not in PATH:
# Each device within this is itself a dictionary of device properties
devices = {}
# list of supported DPDK drivers
-dpdk_drivers = [ "igb_uio", "vfio-pci", "uio_pci_generic" ]
+# drivers that serve both DPDK and the kernel (e.g. Mellanox NICs stay bound to them)
+
+dpdk_and_kernel = ["mlx5_core", "mlx5_ib"]
+
+dpdk_drivers = ["igb_uio", "vfio-pci", "uio_pci_generic" ]
# command-line arg flags
b_flag = None
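With the new dpdk_and_kernel list, a Mellanox port bound to mlx5_core counts as both DPDK-usable and kernel-visible, which is why show_status() below appends such devices to both categories. The classification rule, in isolation:

    dpdk_and_kernel = ["mlx5_core", "mlx5_ib"]
    dpdk_drivers = ["igb_uio", "vfio-pci", "uio_pci_generic"]

    def classify(driver_str):
        # returns the status categories a device belongs to, by its kernel driver
        if driver_str in dpdk_and_kernel:
            return ['dpdk', 'kernel']   # Mellanox: usable by DPDK, still a kernel netdev
        if driver_str in dpdk_drivers:
            return ['dpdk']
        return ['kernel']

    assert classify('mlx5_core') == ['dpdk', 'kernel']
    assert classify('igb_uio') == ['dpdk']
    assert classify('ixgbe') == ['kernel']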
@@ -568,10 +572,15 @@ def show_status():
if not has_driver(d):
no_drv.append(devices[d])
continue
- if devices[d]["Driver_str"] in dpdk_drivers:
+
+ if devices[d]["Driver_str"] in dpdk_and_kernel:
dpdk_drv.append(devices[d])
- else:
kernel_drv.append(devices[d])
+ else:
+ if devices[d]["Driver_str"] in dpdk_drivers:
+ dpdk_drv.append(devices[d])
+ else:
+ kernel_drv.append(devices[d])
# print each category separately, so we can clearly see what's used by DPDK
display_devices("Network devices using DPDK-compatible driver", dpdk_drv, \
@@ -609,7 +618,7 @@ def get_info_from_trex(pci_addr_list):
pci_info_dict[pci]['TRex_Driver'] = match.group(3)
return pci_info_dict
-def show_table():
+def show_table(get_macs = True):
'''Function called when the script is passed the "--table" option.
Similar to show_status() function, but shows more info: NUMA etc.'''
global dpdk_drivers
@@ -617,15 +626,16 @@ def show_table():
get_nic_details()
dpdk_drv = []
for d in devices.keys():
- if devices[d].get("Driver_str") in dpdk_drivers:
+ if devices[d].get("Driver_str") in (dpdk_drivers+dpdk_and_kernel):
dpdk_drv.append(d)
- for pci, info in get_info_from_trex(dpdk_drv).items():
- if pci not in dpdk_drv: # sanity check, should not happen
- print('Internal error while getting MACs of DPDK bound interfaces, unknown PCI: %s' % pci)
- return
- devices[pci].update(info)
-
+ if get_macs:
+ for pci, info in get_info_from_trex(dpdk_drv).items():
+ if pci not in dpdk_drv: # sanity check, should not happen
+ print('Internal error while getting MACs of DPDK bound interfaces, unknown PCI: %s' % pci)
+ return
+ devices[pci].update(info)
+
table = texttable.Texttable(max_width=-1)
table.header(['ID', 'NUMA', 'PCI', 'MAC', 'Name', 'Driver', 'Linux IF', 'Active'])
for id, pci in enumerate(sorted(devices.keys())):
diff --git a/scripts/dpdk_setup_ports.py b/scripts/dpdk_setup_ports.py
index 538d0700..8475bdee 100755
--- a/scripts/dpdk_setup_ports.py
+++ b/scripts/dpdk_setup_ports.py
@@ -14,6 +14,7 @@ import traceback
from collections import defaultdict, OrderedDict
from distutils.util import strtobool
import getpass
+import subprocess
class ConfigCreator(object):
mandatory_interface_fields = ['Slot_str', 'Device_str', 'NUMA']
@@ -41,51 +42,52 @@ class ConfigCreator(object):
cores[core] = cores[core][:1]
include_lcores = [int(x) for x in include_lcores]
exclude_lcores = [int(x) for x in exclude_lcores]
+
self.has_zero_lcore = False
+ self.lcores_per_numa = {}
+ total_lcores = 0
for numa, cores in self.cpu_topology.items():
+ self.lcores_per_numa[numa] = {'main': [], 'siblings': [], 'all': []}
for core, lcores in cores.items():
- for lcore in copy.copy(lcores):
+ total_lcores += len(lcores)
+ for lcore in list(lcores):
if include_lcores and lcore not in include_lcores:
cores[core].remove(lcore)
if exclude_lcores and lcore in exclude_lcores:
cores[core].remove(lcore)
if 0 in lcores:
self.has_zero_lcore = True
- cores[core].remove(0)
- zero_lcore_numa = numa
- zero_lcore_core = core
- zero_lcore_siblings = cores[core]
- if self.has_zero_lcore:
- del self.cpu_topology[zero_lcore_numa][zero_lcore_core]
- self.cpu_topology[zero_lcore_numa][zero_lcore_core] = zero_lcore_siblings
+ lcores.remove(0)
+ self.lcores_per_numa[numa]['siblings'].extend(lcores)
+ else:
+ self.lcores_per_numa[numa]['main'].extend(lcores[:1])
+ self.lcores_per_numa[numa]['siblings'].extend(lcores[1:])
+ self.lcores_per_numa[numa]['all'].extend(lcores)
+
for interface in self.interfaces:
for mandatory_interface_field in ConfigCreator.mandatory_interface_fields:
if mandatory_interface_field not in interface:
raise DpdkSetup("Expected '%s' field in interface dictionary, got: %s" % (mandatory_interface_field, interface))
+
Device_str = self._verify_devices_same_type(self.interfaces)
if '40Gb' in Device_str:
self.speed = 40
else:
self.speed = 10
- lcores_per_numa = OrderedDict()
- system_lcores = int(self.has_zero_lcore)
- for numa, core in self.cpu_topology.items():
- for lcores in core.values():
- if numa not in lcores_per_numa:
- lcores_per_numa[numa] = []
- lcores_per_numa[numa].extend(lcores)
- system_lcores += len(lcores)
- minimum_required_lcores = len(self.interfaces) / 2 + 2
- if system_lcores < minimum_required_lcores:
+
+ minimum_required_lcores = len(self.interfaces) // 2 + 2
+ if total_lcores < minimum_required_lcores:
raise DpdkSetup('Your system should have at least %s cores for %s interfaces, and it has: %s.' %
- (minimum_required_lcores, len(self.interfaces), system_lcores + (0 if self.has_zero_lcore else 1)))
+ (minimum_required_lcores, len(self.interfaces), total_lcores))
interfaces_per_numa = defaultdict(int)
+
for i in range(0, len(self.interfaces), 2):
- if self.interfaces[i]['NUMA'] != self.interfaces[i+1]['NUMA'] and not ignore_numa:
+ numa = self.interfaces[i]['NUMA']
+ if numa != self.interfaces[i+1]['NUMA'] and not ignore_numa:
raise DpdkSetup('NUMA of each pair of interfaces should be the same. Got NUMA %s for client interface %s, NUMA %s for server interface %s' %
- (self.interfaces[i]['NUMA'], self.interfaces[i]['Slot_str'], self.interfaces[i+1]['NUMA'], self.interfaces[i+1]['Slot_str']))
- interfaces_per_numa[self.interfaces[i]['NUMA']] += 2
- self.lcores_per_numa = lcores_per_numa
+ (numa, self.interfaces[i]['Slot_str'], self.interfaces[i+1]['NUMA'], self.interfaces[i+1]['Slot_str']))
+ interfaces_per_numa[numa] += 2
+
self.interfaces_per_numa = interfaces_per_numa
self.prefix = prefix
self.zmq_pub_port = zmq_pub_port
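The rewritten constructor replaces the old flat per-NUMA lcore list with three views per NUMA node: 'main' (the first hyper-thread of each physical core), 'siblings' (the remaining hyper-threads), and 'all'. A sketch of that bookkeeping over a toy cpu_topology of shape {numa: {core: [lcores]}}, ignoring the include/exclude filters:

    def split_lcores(cpu_topology):
        lcores_per_numa = {}
        has_zero_lcore = False
        for numa, cores in cpu_topology.items():
            lcores_per_numa[numa] = {'main': [], 'siblings': [], 'all': []}
            for core, lcores in cores.items():
                lcores = list(lcores)
                if 0 in lcores:
                    # lcore 0 is reserved for the master thread
                    has_zero_lcore = True
                    lcores.remove(0)
                    lcores_per_numa[numa]['siblings'].extend(lcores)
                else:
                    lcores_per_numa[numa]['main'].extend(lcores[:1])
                    lcores_per_numa[numa]['siblings'].extend(lcores[1:])
                lcores_per_numa[numa]['all'].extend(lcores)
        return lcores_per_numa, has_zero_lcore

    # two NUMA nodes, two physical cores each, two hyper-threads per core
    topo = {0: {0: [0, 8], 1: [1, 9]}, 1: {2: [2, 10], 3: [3, 11]}}
    per_numa, has_zero = split_lcores(topo)
    assert per_numa[0] == {'main': [1], 'siblings': [8, 9], 'all': [8, 1, 9]}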
@@ -93,16 +95,15 @@ class ConfigCreator(object):
self.ignore_numa = ignore_numa
@staticmethod
- def _convert_mac(mac_string):
+ def verify_mac(mac_string):
if not ConfigCreator.mac_re.match(mac_string):
raise DpdkSetup('MAC address should be in format of 12:34:56:78:9a:bc, got: %s' % mac_string)
- return ', '.join([('0x%s' % elem).lower() for elem in mac_string.split(':')])
+ return mac_string.lower()
@staticmethod
def _exit_if_bad_ip(ip):
if not ConfigCreator._verify_ip(ip):
- print("Got bad IP %s" % ip)
- sys.exit(1)
+ raise DpdkSetup("Got bad IP %s" % ip)
@staticmethod
def _verify_ip(ip):
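verify_mac() replaces _convert_mac(): the generated config now keeps the plain colon-separated MAC, so validation only checks the format and lower-cases it instead of rewriting it into a [0x..] byte list. A sketch with an assumed regex (the actual mac_re pattern is defined outside this hunk):

    import re

    mac_re = re.compile('^([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$')  # assumed pattern

    def verify_mac(mac_string):
        if not mac_re.match(mac_string):
            raise ValueError('MAC address should be in format of 12:34:56:78:9a:bc, got: %s' % mac_string)
        return mac_string.lower()

    assert verify_mac('12:34:56:78:9A:BC') == '12:34:56:78:9a:bc'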
@@ -146,24 +147,28 @@ class ConfigCreator(object):
config_str += ' '*6 + '- ip: %s\n' % interface['ip']
config_str += ' '*8 + 'default_gw: %s\n' % interface['def_gw']
else:
- config_str += ' '*6 + '- dest_mac: [%s]' % self._convert_mac(interface['dest_mac'])
+ config_str += ' '*6 + '- dest_mac: %s' % self.verify_mac(interface['dest_mac'])
if interface.get('loopback_dest'):
config_str += " # MAC OF LOOPBACK TO IT'S DUAL INTERFACE\n"
else:
config_str += '\n'
- config_str += ' '*8 + 'src_mac: [%s]\n' % self._convert_mac(interface['src_mac'])
+ config_str += ' '*8 + 'src_mac: %s\n' % self.verify_mac(interface['src_mac'])
if index % 2:
config_str += '\n' # dual if barrier
+
if not self.ignore_numa:
config_str += ' platform:\n'
- if len(self.interfaces_per_numa.keys()) == 1 and -1 in self.interfaces_per_numa: # VM, use any cores, 1 core per dual_if
- lcores_pool = sorted([lcore for lcores in self.lcores_per_numa.values() for lcore in lcores])
- config_str += ' '*6 + 'master_thread_id: %s\n' % (0 if self.has_zero_lcore else lcores_pool.pop())
+ if len(self.interfaces_per_numa.keys()) == 1 and -1 in self.interfaces_per_numa: # VM, use any cores
+ lcores_pool = sorted([lcore for lcores in self.lcores_per_numa.values() for lcore in lcores['all']])
+ config_str += ' '*6 + 'master_thread_id: %s\n' % (0 if self.has_zero_lcore else lcores_pool.pop(0))
config_str += ' '*6 + 'latency_thread_id: %s\n' % lcores_pool.pop(0)
- lcores_per_dual_if = int(len(lcores_pool) / len(self.interfaces))
+ lcores_per_dual_if = int(len(lcores_pool) * 2 / len(self.interfaces))
config_str += ' '*6 + 'dual_if:\n'
for i in range(0, len(self.interfaces), 2):
- lcores_for_this_dual_if = [str(lcores_pool.pop(0)) for _ in range(lcores_per_dual_if)]
+ lcores_for_this_dual_if = list(map(str, sorted(lcores_pool[:lcores_per_dual_if])))
+ lcores_pool = lcores_pool[lcores_per_dual_if:]
+ if not lcores_for_this_dual_if:
+ raise DpdkSetup('lcores_for_this_dual_if is empty (internal bug, please report with details of setup)')
config_str += ' '*8 + '- socket: 0\n'
config_str += ' '*10 + 'threads: [%s]\n\n' % ','.join(lcores_for_this_dual_if)
else:
@@ -171,26 +176,46 @@ class ConfigCreator(object):
lcores_per_dual_if = 99
extra_lcores = 1 if self.has_zero_lcore else 2
# worst case 3 iterations, to ensure master and "rx" have cores left
- while (lcores_per_dual_if * sum(self.interfaces_per_numa.values()) / 2) + extra_lcores > sum([len(lcores) for lcores in self.lcores_per_numa.values()]):
+ while (lcores_per_dual_if * sum(self.interfaces_per_numa.values()) / 2) + extra_lcores > sum([len(lcores['all']) for lcores in self.lcores_per_numa.values()]):
lcores_per_dual_if -= 1
- for numa, cores in self.lcores_per_numa.items():
+ for numa, lcores_dict in self.lcores_per_numa.items():
if not self.interfaces_per_numa[numa]:
continue
- lcores_per_dual_if = min(lcores_per_dual_if, int(2 * len(cores) / self.interfaces_per_numa[numa]))
+ lcores_per_dual_if = min(lcores_per_dual_if, int(2 * len(lcores_dict['all']) / self.interfaces_per_numa[numa]))
lcores_pool = copy.deepcopy(self.lcores_per_numa)
# first, allocate lcores for dual_if section
dual_if_section = ' '*6 + 'dual_if:\n'
for i in range(0, len(self.interfaces), 2):
numa = self.interfaces[i]['NUMA']
dual_if_section += ' '*8 + '- socket: %s\n' % numa
- lcores_for_this_dual_if = [str(lcores_pool[numa].pop(0)) for _ in range(lcores_per_dual_if)]
+ lcores_for_this_dual_if = lcores_pool[numa]['all'][:lcores_per_dual_if]
+ lcores_pool[numa]['all'] = lcores_pool[numa]['all'][lcores_per_dual_if:]
+ for lcore in lcores_for_this_dual_if:
+ if lcore in lcores_pool[numa]['main']:
+ lcores_pool[numa]['main'].remove(lcore)
+ elif lcore in lcores_pool[numa]['siblings']:
+ lcores_pool[numa]['siblings'].remove(lcore)
+ else:
+ raise DpdkSetup('lcore not in main nor in siblings list (internal bug, please report with details of setup)')
if not lcores_for_this_dual_if:
raise DpdkSetup('Not enough cores at NUMA %s. This NUMA has %s processing units and %s interfaces.' % (numa, len(self.lcores_per_numa[numa]), self.interfaces_per_numa[numa]))
- dual_if_section += ' '*10 + 'threads: [%s]\n\n' % ','.join(lcores_for_this_dual_if)
+ dual_if_section += ' '*10 + 'threads: [%s]\n\n' % ','.join(list(map(str, sorted(lcores_for_this_dual_if))))
+
# take the cores left to master and rx
- lcores_pool_left = [lcore for lcores in lcores_pool.values() for lcore in lcores]
- config_str += ' '*6 + 'master_thread_id: %s\n' % (0 if self.has_zero_lcore else lcores_pool_left.pop(0))
- config_str += ' '*6 + 'latency_thread_id: %s\n' % lcores_pool_left.pop(0)
+ mains_left = [lcore for lcores in lcores_pool.values() for lcore in lcores['main']]
+ siblings_left = [lcore for lcores in lcores_pool.values() for lcore in lcores['siblings']]
+ if mains_left:
+ rx_core = mains_left.pop(0)
+ else:
+ rx_core = siblings_left.pop(0)
+ if self.has_zero_lcore:
+ master_core = 0
+ elif mains_left:
+ master_core = mains_left.pop(0)
+ else:
+ master_core = siblings_left.pop(0)
+ config_str += ' '*6 + 'master_thread_id: %s\n' % master_core
+ config_str += ' '*6 + 'latency_thread_id: %s\n' % rx_core
# add the dual_if section
config_str += dual_if_section
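After each dual_if takes its slice, the leftovers are split back into 'main' and 'siblings' pools, and the latency ("rx") and master threads prefer main cores: a whole physical core beats a hyper-thread sibling for these roles, and lcore 0, when present, is always the master. The preference order, in isolation:

    def pick_master_and_rx(mains_left, siblings_left, has_zero_lcore):
        # latency ("rx") thread: prefer a main (physical) core over a sibling
        rx_core = mains_left.pop(0) if mains_left else siblings_left.pop(0)
        # master thread: lcore 0 if it exists, else the same preference order
        if has_zero_lcore:
            master_core = 0
        elif mains_left:
            master_core = mains_left.pop(0)
        else:
            master_core = siblings_left.pop(0)
        return master_core, rx_core

    assert pick_master_and_rx([4, 5], [12, 13], has_zero_lcore=True) == (0, 4)
    assert pick_master_and_rx([], [12, 13], has_zero_lcore=False) == (13, 12)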
@@ -228,6 +253,7 @@ class CIfMap:
self.m_cfg_file =cfg_file;
self.m_cfg_dict={};
self.m_devices={};
+ self.m_is_mellanox_mode=False;
def dump_error (self,err):
s="""%s
@@ -265,6 +291,88 @@ Other network devices
s= self.dump_error (err)
raise DpdkSetup(s)
+ def set_only_mellanox_nics(self):
+ self.m_is_mellanox_mode=True;
+
+ def get_only_mellanox_nics(self):
+ return self.m_is_mellanox_mode
+
+
+ def read_pci (self,pci_id,reg_id):
+ out=subprocess.check_output(['setpci', '-s',pci_id, '%s.w' %(reg_id)])
+ out=out.decode(errors='replace');
+ return (out.strip());
+
+ def write_pci (self,pci_id,reg_id,val):
+ out=subprocess.check_output(['setpci','-s',pci_id, '%s.w=%s' %(reg_id,val)])
+ out=out.decode(errors='replace');
+ return (out.strip());
+
+ def tune_mlx5_device (self,pci_id):
+        # set PCIe Max Read Request to 1024 instead of 512 ... should also be added to the startup script
+ val=self.read_pci (pci_id,68)
+ if val[0]!='3':
+ val='3'+val[1:]
+ self.write_pci (pci_id,68,val)
+ assert(self.read_pci (pci_id,68)==val);
+
+ def get_mtu_mlx5 (self,dev_id):
+ if len(dev_id)>0:
+ out=subprocess.check_output(['ifconfig', dev_id])
+ out=out.decode(errors='replace');
+ obj=re.search(r'MTU:(\d+)',out,flags=re.MULTILINE|re.DOTALL);
+ if obj:
+ return int(obj.group(1));
+ else:
+ return -1
+
+ def set_mtu_mlx5 (self,dev_id,new_mtu):
+ if len(dev_id)>0:
+ out=subprocess.check_output(['ifconfig', dev_id,'mtu',str(new_mtu)])
+ out=out.decode(errors='replace');
+
+
+ def set_max_mtu_mlx5_device(self,dev_id):
+ mtu=9*1024+22
+ dev_mtu=self.get_mtu_mlx5 (dev_id);
+ if (dev_mtu>0) and (dev_mtu!=mtu):
+ self.set_mtu_mlx5(dev_id,mtu);
+ if self.get_mtu_mlx5(dev_id) != mtu:
+ print("Could not set MTU to %d" % mtu)
+ exit(-1);
+
+
+ def disable_flow_control_mlx5_device (self,dev_id):
+
+ if len(dev_id)>0:
+ my_stderr = open("/dev/null","wb")
+ cmd ='ethtool -A '+dev_id + ' rx off tx off '
+ subprocess.call(cmd, stdout=my_stderr,stderr=my_stderr, shell=True)
+ my_stderr.close();
+
+    def check_ofed_version (self):
+ ofed_info='/usr/bin/ofed_info'
+ ofed_ver= 'MLNX_OFED_LINUX-3.4-1.0.0.0'
+
+ if not os.path.isfile(ofed_info):
+ print("OFED %s is not installed on this setup" % ofed_info)
+ exit(-1);
+
+ try:
+ out = subprocess.check_output([ofed_info])
+ except Exception as e:
+ print("OFED %s can't run " % (ofed_info))
+ exit(-1);
+
+ lines=out.splitlines();
+
+ if len(lines)>1:
+ if not (ofed_ver in str(lines[0])):
+ print("installed OFED version is '%s' should be '%s' " % (lines[0],ofed_ver))
+ exit(-1);
+
+
+
def load_config_file (self):
fcfg=self.m_cfg_file
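tune_mlx5_device() above raises the PCIe Max Read Request size via a read-modify-write of a config-space word through setpci; per the comment in the code, forcing the leading hex digit of the word at offset 0x68 to '3' selects 1024 bytes instead of 512. A hedged sketch of the same sequence (requires root and the setpci tool):

    import subprocess

    def read_pci_word(pci_id, reg):
        # setpci prints the 16-bit word at hex offset <reg> as 4 hex digits
        out = subprocess.check_output(['setpci', '-s', pci_id, '%s.w' % reg])
        return out.decode(errors='replace').strip()

    def set_max_read_request(pci_id, reg='68'):
        val = read_pci_word(pci_id, reg)
        if val[0] != '3':
            new_val = '3' + val[1:]  # only the leading digit changes
            subprocess.check_output(['setpci', '-s', pci_id, '%s.w=%s' % (reg, new_val)])
            assert read_pci_word(pci_id, reg) == new_val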
@@ -300,15 +408,19 @@ Other network devices
self.raise_error ('Error: port_limit should not be higher than number of interfaces in config file: %s\n' % fcfg)
- def do_bind_one (self,key):
- cmd='%s dpdk_nic_bind.py --bind=igb_uio %s ' % (sys.executable, key)
+ def do_bind_one (self,key,mellanox):
+ if mellanox:
+ drv="mlx5_core"
+ else:
+ drv="igb_uio"
+
+ cmd='%s dpdk_nic_bind.py --bind=%s %s ' % (sys.executable, drv,key)
print(cmd)
res=os.system(cmd);
if res!=0:
raise DpdkSetup('')
-
def pci_name_to_full_name (self,pci_name):
c='[0-9A-Fa-f]';
sp='[:]'
@@ -331,7 +443,7 @@ Other network devices
dpdk_nic_bind.get_nic_details()
self.m_devices= dpdk_nic_bind.devices
- def do_run (self):
+ def do_run (self,only_check_all_mlx=False):
self.run_dpdk_lspci ()
if map_driver.dump_interfaces is None or (map_driver.dump_interfaces == [] and map_driver.parent_cfg):
self.load_config_file()
@@ -344,27 +456,74 @@ Other network devices
if_list.append(dev['Slot'])
if_list = list(map(self.pci_name_to_full_name, if_list))
+
+
+ # check how many mellanox cards we have
+ Mellanox_cnt=0;
for key in if_list:
if key not in self.m_devices:
err=" %s does not exist " %key;
raise DpdkSetup(err)
+ if 'Vendor_str' not in self.m_devices[key]:
+ err=" %s does not have Vendor_str " %key;
+ raise DpdkSetup(err)
- if 'Driver_str' in self.m_devices[key]:
- if self.m_devices[key]['Driver_str'] not in dpdk_nic_bind.dpdk_drivers :
- self.do_bind_one (key)
+ if self.m_devices[key]['Vendor_str'].find("Mellanox")>-1 :
+ Mellanox_cnt=Mellanox_cnt+1
+
+
+ if not map_driver.dump_interfaces :
+ if ((Mellanox_cnt>0) and (Mellanox_cnt!= len(if_list))):
+ err=" All driver should be from one vendor. you have at least one driver from Mellanox but not all ";
+ raise DpdkSetup(err)
+
+
+ if not map_driver.dump_interfaces :
+ if Mellanox_cnt>0 :
+ self.set_only_mellanox_nics()
+
+ if self.get_only_mellanox_nics():
+            self.check_ofed_version ()
+ for key in if_list:
+ pci_id=self.m_devices[key]['Slot_str']
+ self.tune_mlx5_device (pci_id)
+ if 'Interface' in self.m_devices[key]:
+ dev_id=self.m_devices[key]['Interface']
+ self.disable_flow_control_mlx5_device (dev_id)
+ self.set_max_mtu_mlx5_device(dev_id)
+
+
+ if only_check_all_mlx:
+ if Mellanox_cnt >0:
+ exit(1);
else:
- self.do_bind_one (key)
+ exit(0);
- if if_list and map_driver.args.parent and dpdk_nic_bind.get_igb_uio_usage():
- pid = dpdk_nic_bind.get_pid_using_pci(if_list)
- if pid:
- cmdline = dpdk_nic_bind.read_pid_cmdline(pid)
- print('Some or all of given interfaces are in use by following process:\npid: %s, cmd: %s' % (pid, cmdline))
- if not dpdk_nic_bind.confirm('Ignore and proceed (y/N):'):
- sys.exit(1)
+ for key in if_list:
+ if key not in self.m_devices:
+ err=" %s does not exist " %key;
+ raise DpdkSetup(err)
+
+ if 'Driver_str' in self.m_devices[key]:
+ if self.m_devices[key]['Driver_str'] not in (dpdk_nic_bind.dpdk_drivers+dpdk_nic_bind.dpdk_and_kernel) :
+ self.do_bind_one (key,(Mellanox_cnt>0))
else:
- print('WARNING: Some other program is using DPDK driver.\nIf it is TRex and you did not configure it for dual run, current command will fail.')
+ self.do_bind_one (key,(Mellanox_cnt>0))
+
+ if (Mellanox_cnt==0):
+            # not the Mellanox case; the check below (another process using the DPDK driver) applies only to Intel NICs
+ if if_list and map_driver.args.parent and (dpdk_nic_bind.get_igb_uio_usage()):
+ pid = dpdk_nic_bind.get_pid_using_pci(if_list)
+ if pid:
+ cmdline = dpdk_nic_bind.read_pid_cmdline(pid)
+ print('Some or all of given interfaces are in use by following process:\npid: %s, cmd: %s' % (pid, cmdline))
+ if not dpdk_nic_bind.confirm('Ignore and proceed (y/N):'):
+ sys.exit(1)
+ else:
+ print('WARNING: Some other program is using DPDK driver.\nIf it is TRex and you did not configure it for dual run, current command will fail.')
def do_return_to_linux(self):
if not self.m_devices:
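The flow above enforces an all-or-nothing vendor rule: mixing Mellanox with other NICs in one run is rejected, because the presence of any Mellanox port switches the whole setup into Mellanox mode (mlx5_core binding, OFED check, PCIe/MTU tuning). Reduced to its core, the check is:

    def check_single_vendor(devices, if_list):
        # count interfaces whose PCI vendor string mentions Mellanox
        mlx_cnt = sum(1 for key in if_list
                      if devices[key]['Vendor_str'].find('Mellanox') > -1)
        if 0 < mlx_cnt < len(if_list):
            raise Exception('All drivers should be from one vendor. '
                            'You have at least one Mellanox interface but not all interfaces are Mellanox.')
        return mlx_cnt > 0   # True -> Mellanox mode

    devs = {'0000:03:00.0': {'Vendor_str': 'Mellanox Technologies'},
            '0000:03:00.1': {'Vendor_str': 'Mellanox Technologies'}}
    assert check_single_vendor(devs, list(devs)) is True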
@@ -432,7 +591,7 @@ Other network devices
# input: list of different descriptions of interfaces: index, pci, name etc.
# Binds to dpdk wanted interfaces, not bound to any driver.
# output: list of maps of devices in dpdk_* format (self.m_devices.values())
- def _get_wanted_interfaces(self, input_interfaces):
+ def _get_wanted_interfaces(self, input_interfaces, get_macs = True):
if type(input_interfaces) is not list:
raise DpdkSetup('type of input interfaces should be list')
if not len(input_interfaces):
@@ -460,47 +619,52 @@ Other network devices
dev['Interface_argv'] = interface
wanted_interfaces.append(dev)
- unbound = []
- dpdk_bound = []
- for interface in wanted_interfaces:
- if 'Driver_str' not in interface:
- unbound.append(interface['Slot'])
- elif interface.get('Driver_str') in dpdk_nic_bind.dpdk_drivers:
- dpdk_bound.append(interface['Slot'])
- if unbound or dpdk_bound:
- for pci, info in dpdk_nic_bind.get_info_from_trex(unbound + dpdk_bound).items():
- if pci not in self.m_devices:
- raise DpdkSetup('Internal error: PCI %s is not found among devices' % pci)
- self.m_devices[pci].update(info)
+ if get_macs:
+ unbound = []
+ dpdk_bound = []
+ for interface in wanted_interfaces:
+ if 'Driver_str' not in interface:
+ unbound.append(interface['Slot'])
+ elif interface.get('Driver_str') in dpdk_nic_bind.dpdk_drivers:
+ dpdk_bound.append(interface['Slot'])
+ if unbound or dpdk_bound:
+ for pci, info in dpdk_nic_bind.get_info_from_trex(unbound + dpdk_bound).items():
+ if pci not in self.m_devices:
+ raise DpdkSetup('Internal error: PCI %s is not found among devices' % pci)
+ self.m_devices[pci].update(info)
return wanted_interfaces
def do_create(self):
- # gather info about NICS from dpdk_nic_bind.py
- if not self.m_devices:
- self.run_dpdk_lspci()
- wanted_interfaces = self._get_wanted_interfaces(map_driver.args.create_interfaces)
ips = map_driver.args.ips
def_gws = map_driver.args.def_gws
dest_macs = map_driver.args.dest_macs
- if ips:
+ if map_driver.args.force_macs:
+ ip_config = False
+ if ips:
+ raise DpdkSetup("If using --force-macs, should not specify ips")
+ if def_gws:
+ raise DpdkSetup("If using --force-macs, should not specify default gateways")
+ elif ips:
ip_config = True
if not def_gws:
- print("If specifying ips, must specify also def-gws")
- sys.exit(1)
+ raise DpdkSetup("If specifying ips, must specify also def-gws")
if dest_macs:
- print("If specifying ips, should not specify dest--macs")
- sys.exit(1)
- if len(ips) != len(def_gws) or len(ips) != len(wanted_interfaces):
- print("Number of given IPs should equal number of given def-gws and number of interfaces")
- sys.exit(1)
+ raise DpdkSetup("If specifying ips, should not specify dest--macs")
+ if len(ips) != len(def_gws) or len(ips) != len(map_driver.args.create_interfaces):
+ raise DpdkSetup("Number of given IPs should equal number of given def-gws and number of interfaces")
else:
if dest_macs:
ip_config = False
else:
ip_config = True
+ # gather info about NICS from dpdk_nic_bind.py
+ if not self.m_devices:
+ self.run_dpdk_lspci()
+ wanted_interfaces = self._get_wanted_interfaces(map_driver.args.create_interfaces, get_macs = not ip_config)
+
for i, interface in enumerate(wanted_interfaces):
dual_index = i + 1 - (i % 2) * 2
if ip_config:
@@ -513,15 +677,17 @@ Other network devices
else:
interface['def_gw'] = ".".join(list(str(dual_index+1))*4)
else:
+ dual_if = wanted_interfaces[dual_index]
if 'MAC' not in interface:
raise DpdkSetup('Could not determine MAC of interface: %s. Please verify with -t flag.' % interface['Interface_argv'])
+ if 'MAC' not in dual_if:
+ raise DpdkSetup('Could not determine MAC of interface: %s. Please verify with -t flag.' % dual_if['Interface_argv'])
interface['src_mac'] = interface['MAC']
if isinstance(dest_macs, list) and len(dest_macs) > i:
interface['dest_mac'] = dest_macs[i]
-
- if 'dest_mac' not in wanted_interfaces[dual_index]:
- wanted_interfaces[dual_index]['dest_mac'] = interface['MAC'] # loopback
- wanted_interfaces[dual_index]['loopback_dest'] = True
+ else:
+ interface['dest_mac'] = dual_if['MAC']
+ interface['loopback_dest'] = True
config = ConfigCreator(self._get_cpu_topology(), wanted_interfaces, include_lcores = map_driver.args.create_include, exclude_lcores = map_driver.args.create_exclude,
only_first_thread = map_driver.args.no_ht, ignore_numa = map_driver.args.ignore_numa,
@@ -533,8 +699,7 @@ Other network devices
cpu_topology = self._get_cpu_topology()
total_lcores = sum([len(lcores) for cores in cpu_topology.values() for lcores in cores.values()])
if total_lcores < 1:
- print('Script could not determine number of cores of the system, exiting.')
- sys.exit(1)
+ raise DpdkSetup('Script could not determine number of cores of the system, exiting.')
elif total_lcores < 2:
if dpdk_nic_bind.confirm("You only have 1 core and can't run TRex at all. Ignore and continue? (y/N): "):
ignore_numa = True
@@ -545,9 +710,18 @@ Other network devices
ignore_numa = True
else:
sys.exit(1)
+
+ if map_driver.args.force_macs:
+ ip_based = False
+ elif dpdk_nic_bind.confirm("By default, IP based configuration file will be created. Do you want to use MAC based config? (y/N)"):
+ ip_based = False
+ else:
+ ip_based = True
+ ip_addr_digit = 1
+
if not self.m_devices:
self.run_dpdk_lspci()
- dpdk_nic_bind.show_table()
+ dpdk_nic_bind.show_table(get_macs = not ip_based)
print('Please choose even number of interfaces from the list above, either by ID , PCI or Linux IF')
print('Stateful will use order of interfaces: Client1 Server1 Client2 Server2 etc. for flows.')
print('Stateless can be in any order.')
@@ -576,12 +750,6 @@ Other network devices
if not dpdk_nic_bind.confirm('Ignore and continue? (y/N): '):
sys.exit(1)
- if dpdk_nic_bind.confirm("By default, IP based configuration file will be created. Do you want to change to MAC based config? (y/N)"):
- ip_based = False
- else:
- ip_based = True
- ip_addr_digit = 1
-
for i, interface in enumerate(wanted_interfaces):
if not ip_based:
if 'MAC' not in interface:
@@ -598,7 +766,6 @@ Other network devices
return
if ip_based:
- loopback_dest = True
if ip_addr_digit % 2 == 0:
dual_ip_digit = ip_addr_digit - 1
else:
@@ -677,7 +844,7 @@ To return to Linux the DPDK bound interfaces (for ifconfig etc.)
sudo ./dpdk_set_ports.py -l
To create TRex config file using interactive mode
- sudo ./dpdk_set_ports.py -l
+    sudo ./dpdk_setup_ports.py -i
To create a default config file (example1)
sudo ./dpdk_setup_ports.py -c 02:00.0 02:00.1 -o /etc/trex_cfg.yaml
@@ -737,6 +904,10 @@ To see more detailed info on interfaces (table):
help="""Destination MACs to be used in created yaml file. Without them, will assume loopback (0<->1, 2<->3 etc.)""",
)
+ parser.add_argument("--force-macs", default=False, action='store_true',
+ help="""Use MACs in created config file.""",
+ )
+
parser.add_argument("--ips", nargs='*', default=[], action='store',
help="""IP addresses to be used in created yaml file. Without them, will assume loopback (0<->1, 2<->3 etc.)""",
)
@@ -786,6 +957,9 @@ To see more detailed info on interfaces (table):
def main ():
try:
+ if getpass.getuser() != 'root':
+ raise DpdkSetup('Please run this program as root/with sudo')
+
process_options ()
if map_driver.args.show:
@@ -816,8 +990,5 @@ def main ():
exit(-1)
if __name__ == '__main__':
- if getpass.getuser() != 'root':
- print('Please run this program as root/with sudo')
- exit(1)
main()
diff --git a/scripts/dumy_libs/libibverbs.so.1 b/scripts/dumy_libs/libibverbs.so.1
new file mode 100644
index 00000000..bd93569d
--- /dev/null
+++ b/scripts/dumy_libs/libibverbs.so.1
Binary files differ
diff --git a/scripts/dumy_libs/libnl-3.so.200 b/scripts/dumy_libs/libnl-3.so.200
new file mode 100644
index 00000000..9eaaac9d
--- /dev/null
+++ b/scripts/dumy_libs/libnl-3.so.200
Binary files differ
diff --git a/scripts/dumy_libs/libnl-route-3.so.200 b/scripts/dumy_libs/libnl-route-3.so.200
new file mode 100644
index 00000000..9e1835f1
--- /dev/null
+++ b/scripts/dumy_libs/libnl-route-3.so.200
Binary files differ
diff --git a/scripts/exp/dns_ipv6_rxcheck-ex.erf b/scripts/exp/dns_ipv6_rxcheck-ex.erf
index ebf95197..3984c0ef 100755
--- a/scripts/exp/dns_ipv6_rxcheck-ex.erf
+++ b/scripts/exp/dns_ipv6_rxcheck-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_ipv6_rxcheck.erf b/scripts/exp/dns_ipv6_rxcheck.erf
index ebf95197..3984c0ef 100644
--- a/scripts/exp/dns_ipv6_rxcheck.erf
+++ b/scripts/exp/dns_ipv6_rxcheck.erf
Binary files differ
diff --git a/scripts/exp/dns_rxcheck-ex.erf b/scripts/exp/dns_rxcheck-ex.erf
index 21a610a6..a6135f34 100755
--- a/scripts/exp/dns_rxcheck-ex.erf
+++ b/scripts/exp/dns_rxcheck-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_rxcheck.erf b/scripts/exp/dns_rxcheck.erf
index 21a610a6..a6135f34 100644
--- a/scripts/exp/dns_rxcheck.erf
+++ b/scripts/exp/dns_rxcheck.erf
Binary files differ
diff --git a/scripts/exp/ipv6-0-ex.erf b/scripts/exp/ipv6-0-ex.erf
index 21e0eabb..1e102856 100755
--- a/scripts/exp/ipv6-0-ex.erf
+++ b/scripts/exp/ipv6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/ipv6-0.erf b/scripts/exp/ipv6-0.erf
index 21e0eabb..1e102856 100644
--- a/scripts/exp/ipv6-0.erf
+++ b/scripts/exp/ipv6-0.erf
Binary files differ
diff --git a/scripts/exp/ipv6_vlan-0-ex.erf b/scripts/exp/ipv6_vlan-0-ex.erf
index b97d760b..f7c82833 100755
--- a/scripts/exp/ipv6_vlan-0-ex.erf
+++ b/scripts/exp/ipv6_vlan-0-ex.erf
Binary files differ
diff --git a/scripts/exp/ipv6_vlan-0.erf b/scripts/exp/ipv6_vlan-0.erf
index b97d760b..f7c82833 100644
--- a/scripts/exp/ipv6_vlan-0.erf
+++ b/scripts/exp/ipv6_vlan-0.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1_ipv6_rxcheck-ex.erf b/scripts/exp/rtsp_short1_ipv6_rxcheck-ex.erf
index 06ac6484..a35e9f47 100755
--- a/scripts/exp/rtsp_short1_ipv6_rxcheck-ex.erf
+++ b/scripts/exp/rtsp_short1_ipv6_rxcheck-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1_ipv6_rxcheck.erf b/scripts/exp/rtsp_short1_ipv6_rxcheck.erf
index 06ac6484..a35e9f47 100644
--- a/scripts/exp/rtsp_short1_ipv6_rxcheck.erf
+++ b/scripts/exp/rtsp_short1_ipv6_rxcheck.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1_rxcheck-ex.erf b/scripts/exp/rtsp_short1_rxcheck-ex.erf
index cd120df9..cfc3726a 100755
--- a/scripts/exp/rtsp_short1_rxcheck-ex.erf
+++ b/scripts/exp/rtsp_short1_rxcheck-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1_rxcheck.erf b/scripts/exp/rtsp_short1_rxcheck.erf
index cd120df9..cfc3726a 100644
--- a/scripts/exp/rtsp_short1_rxcheck.erf
+++ b/scripts/exp/rtsp_short1_rxcheck.erf
Binary files differ
diff --git a/scripts/find_python.sh b/scripts/find_python.sh
index 4a3452cb..12b173be 100755
--- a/scripts/find_python.sh
+++ b/scripts/find_python.sh
@@ -4,31 +4,21 @@ set +e
# function finds python2
function find_python2 {
- # two candidates - machine python and cisco linux python
if [ -n "$PYTHON" ]; then #
return;
fi
-
-
- MACHINE_PYTHON=python
- CEL_PYTHON=/router/bin/python-2.7.4
- # try the machine python
- PYTHON=$MACHINE_PYTHON
- PCHECK=`$PYTHON -c "import sys; ver = sys.version_info[0] * 10 + sys.version_info[1];sys.exit(ver < 27)" > /dev/null 2>&1 `
- if [ $? -eq 0 ]; then
- return
- fi
-
- # try the CEL python
- PYTHON=$CEL_PYTHON
- PCHECK=`$PYTHON -c "import sys; ver = sys.version_info[0] * 10 + sys.version_info[1];sys.exit(ver < 27)" > /dev/null 2>&1 `
- if [ $? -eq 0 ]; then
- return
- fi
+ # try different Python paths
+ PYTHONS=( python /usr/bin/python /router/bin/python-2.7.4 )
+ for PYTHON in ${PYTHONS[*]}; do
+ $PYTHON -c "import sys; ver = sys.version_info[0] * 10 + sys.version_info[1];sys.exit(ver < 27)" > /dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ return
+ fi
+ done;
- echo "*** $MACHINE_PYTHON - python version is too old, 2.7 at least is required"
+ echo "*** Python version is too old, 2.7 at least is required"
exit -1
}
@@ -40,19 +30,16 @@ function find_python3 {
return;
fi
- MACHINE_PYTHON=python3
- ITAY_PYTHON=/auto/proj-pcube-b/apps/PL-b/tools/python3.4/bin/python3
- PYTHON=$MACHINE_PYTHON
- PCHECK=`$PYTHON -c "import sys; ver = sys.version_info[0] * 10 + sys.version_info[1];sys.exit(ver != 34 and ver != 35)" > /dev/null 2>&1 `
- if [ $? -eq 0 ]; then
- return
- fi
- PYTHON=$ITAY_PYTHON
- PCHECK=`$PYTHON -c "import sys; ver = sys.version_info[0] * 10 + sys.version_info[1];sys.exit(ver != 34 and ver != 35)" > /dev/null 2>&1 `
- if [ $? -eq 0 ]; then
- return
- fi
- echo "*** $MACHINE_PYTHON - python version does not match, 3.4 or 3.5 is required"
+ # try different Python3 paths
+ PYTHONS=( python3 /usr/bin/python3 /auto/proj-pcube-b/apps/PL-b/tools/python3.4/bin/python3 )
+ for PYTHON in ${PYTHONS[*]}; do
+ $PYTHON -c "import sys; ver = sys.version_info[0] * 10 + sys.version_info[1];sys.exit(ver != 34 and ver != 35)" > /dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ return
+ fi
+ done;
+
+ echo "*** Python3 version does not match, 3.4 or 3.5 is required"
exit -1
}
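Both shell functions above delegate the actual version test to each candidate interpreter via a python -c one-liner, using the process exit code as the verdict. The embedded expression, unpacked (major and minor are packed into one integer: 27 for 2.7, 34 for 3.4):

    import sys

    # same encoding as the one-liners above: 2.7 -> 27, 3.4 -> 34, 3.5 -> 35
    ver = sys.version_info[0] * 10 + sys.version_info[1]

    # find_python2 fails (non-zero exit) when older than 2.7:
    # sys.exit(ver < 27)

    # find_python3 fails unless the version is exactly 3.4 or 3.5:
    sys.exit(ver != 34 and ver != 35)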
diff --git a/scripts/t-rex-64 b/scripts/t-rex-64
index d2388cfd..5515ba03 100755
--- a/scripts/t-rex-64
+++ b/scripts/t-rex-64
@@ -25,6 +25,12 @@ done <<< "$($PYTHON dpdk_setup_ports.py --dump-pci-description)"
cd $(dirname $0)
export LD_LIBRARY_PATH=$PWD
+# Add dummy libs in case the real ones are not found, e.g. when no OFED is installed
+if ldd _t-rex-64 | grep "libibverbs.so" | grep -q "not found"; then
+export LD_LIBRARY_PATH=$PWD:$PWD/dumy_libs
+fi
+
+
if [ -t 0 ] && [ -t 1 ]; then
export is_tty=true
saveterm="$(stty -g)"
diff --git a/scripts/t-rex-64-debug-gdb b/scripts/t-rex-64-debug-gdb
index 1e019407..83ab82e0 100755
--- a/scripts/t-rex-64-debug-gdb
+++ b/scripts/t-rex-64-debug-gdb
@@ -1,4 +1,11 @@
#! /bin/bash
export LD_LIBRARY_PATH=`pwd`
+
+# Add dummy libs in case the real ones are not found, e.g. when no OFED is installed
+if ldd _t-rex-64 | grep "libibverbs.so" | grep -q "not found"; then
+export LD_LIBRARY_PATH=$PWD:$PWD/dumy_libs
+fi
+
+
/usr/bin/gdb --args ./_t-rex-64-debug $@