summaryrefslogtreecommitdiffstats
path: root/scripts/automation
diff options
context:
space:
mode:
Diffstat (limited to 'scripts/automation')
-rwxr-xr-xscripts/automation/regression/functional_tests/trex_cfg_creator_test.py89
-rw-r--r--scripts/automation/regression/setups/kiwi02/benchmark.yaml50
-rw-r--r--scripts/automation/regression/setups/trex07/backup/benchmark.yaml244
-rw-r--r--scripts/automation/regression/setups/trex07/backup/config.yaml66
-rw-r--r--scripts/automation/regression/setups/trex07/benchmark.yaml173
-rw-r--r--scripts/automation/regression/setups/trex07/config.yaml29
-rw-r--r--scripts/automation/regression/setups/trex08/benchmark.yaml50
-rw-r--r--scripts/automation/regression/setups/trex09/benchmark.yaml2
-rwxr-xr-xscripts/automation/regression/stateful_tests/trex_general_test.py7
-rw-r--r--scripts/automation/regression/stateless_tests/stl_client_test.py8
-rw-r--r--scripts/automation/regression/stateless_tests/stl_performance_test.py6
-rw-r--r--scripts/automation/regression/stateless_tests/stl_rx_test.py16
-rw-r--r--scripts/automation/trex_control_plane/stl/console/trex_tui.py4
-rw-r--r--scripts/automation/trex_control_plane/stl/services/scapy_server/protocols.json194
-rwxr-xr-xscripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py144
-rw-r--r--scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py3
-rw-r--r--scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py96
-rw-r--r--scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_utils.py69
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py61
-rwxr-xr-xscripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py46
-rwxr-xr-xscripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py18
-rwxr-xr-xscripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py12
22 files changed, 1154 insertions, 233 deletions
diff --git a/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py b/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py
index ab6ab6f6..66cb666c 100755
--- a/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py
+++ b/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py
@@ -25,7 +25,7 @@ def compare_lines(golden, output):
raise CompareLinesNumDiff('Number of lines on golden is: %s, in output: %s\nGolden:\n%s\nGenerated:\n%s\n' % (len(golden_lines), len(output_lines), golden, output))
for line_num, (golden_line, output_line) in enumerate(zip(golden_lines, output_lines)):
if golden_line != output_line:
- raise CompareLinesDiff('Produced YAML differs from golden at line %s.Golden: %s <-> Output: %s' % (line_num + 1, golden_line, output_line))
+ raise CompareLinesDiff('Produced YAML differs from golden at line %s.\nGolden: %s <-> Output: %s' % (line_num + 1, golden_line, output_line))
def create_config(cpu_topology, interfaces, *args, **kwargs):
config = ConfigCreator(cpu_topology, interfaces, *args, **kwargs)
@@ -102,17 +102,17 @@ class TRexCfgCreator_Test:
version: 2
interfaces: ['0b:00.0', '03:00.0']
port_info:
- - dest_mac: [0x00, 0x0c, 0x29, 0x92, 0xf1, 0xca] # MAC OF LOOPBACK TO IT'S DUAL INTERFACE
- src_mac: [0x00, 0x0c, 0x29, 0x92, 0xf1, 0xd4]
- - dest_mac: [0x00, 0x0c, 0x29, 0x92, 0xf1, 0xd4]
- src_mac: [0x00, 0x0c, 0x29, 0x92, 0xf1, 0xca]
+ - dest_mac: 00:0c:29:92:f1:ca # MAC OF LOOPBACK TO IT'S DUAL INTERFACE
+ src_mac: 00:0c:29:92:f1:d4
+ - dest_mac: 00:0c:29:92:f1:d4
+ src_mac: 00:0c:29:92:f1:ca
platform:
master_thread_id: 0
latency_thread_id: 1
dual_if:
- socket: 0
- threads: [2]
+ threads: [2,3,4]
'''
output = create_config(cpu_topology, interfaces)
verify_master_core0(output)
@@ -291,33 +291,33 @@ class TRexCfgCreator_Test:
interfaces: ['02:00.0', '02:00.1', '84:00.0', '84:00.1', '05:00.0', '05:00.1']
port_bandwidth_gb: 40
port_info:
- - dest_mac: [0x02, 0x00, 0x02, 0x00, 0x00, 0x00]
- src_mac: [0x01, 0x00, 0x01, 0x00, 0x00, 0x00]
- - dest_mac: [0x01, 0x00, 0x01, 0x00, 0x00, 0x00]
- src_mac: [0x02, 0x00, 0x02, 0x00, 0x00, 0x00]
+ - dest_mac: 02:00:02:00:00:00
+ src_mac: 01:00:01:00:00:00
+ - dest_mac: 01:00:01:00:00:00
+ src_mac: 02:00:02:00:00:00
- - dest_mac: [0x04, 0x00, 0x04, 0x00, 0x00, 0x00]
- src_mac: [0x03, 0x00, 0x03, 0x00, 0x00, 0x00]
- - dest_mac: [0x03, 0x00, 0x03, 0x00, 0x00, 0x00]
- src_mac: [0x04, 0x00, 0x04, 0x00, 0x00, 0x00]
+ - dest_mac: 04:00:04:00:00:00
+ src_mac: 03:00:03:00:00:00
+ - dest_mac: 03:00:03:00:00:00
+ src_mac: 04:00:04:00:00:00
- - dest_mac: [0x06, 0x00, 0x06, 0x00, 0x00, 0x00]
- src_mac: [0x05, 0x00, 0x05, 0x00, 0x00, 0x00]
- - dest_mac: [0x05, 0x00, 0x05, 0x00, 0x00, 0x00]
- src_mac: [0x06, 0x00, 0x06, 0x00, 0x00, 0x00]
+ - dest_mac: 06:00:06:00:00:00
+ src_mac: 05:00:05:00:00:00
+ - dest_mac: 05:00:05:00:00:00
+ src_mac: 06:00:06:00:00:00
platform:
master_thread_id: 0
- latency_thread_id: 16
+ latency_thread_id: 12
dual_if:
- socket: 0
- threads: [1,17,2,18,3,19,4]
+ threads: [1,2,3,16,17,18,19]
- socket: 1
- threads: [8,24,9,25,10,26,11]
+ threads: [8,9,10,11,24,25,26]
- socket: 0
- threads: [20,5,21,6,22,7,23]
+ threads: [4,5,6,7,20,21,22]
'''
output = create_config(cpu_topology, interfaces)
verify_master_core0(output)
@@ -431,25 +431,25 @@ class TRexCfgCreator_Test:
interfaces: ['02:00.0', '02:00.1', '84:00.0', '84:00.1']
port_bandwidth_gb: 40
port_info:
- - dest_mac: [0x02, 0x00, 0x02, 0x00, 0x00, 0x00]
- src_mac: [0x01, 0x00, 0x01, 0x00, 0x00, 0x00]
- - dest_mac: [0x01, 0x00, 0x01, 0x00, 0x00, 0x00]
- src_mac: [0x02, 0x00, 0x02, 0x00, 0x00, 0x00]
+ - dest_mac: 02:00:02:00:00:00
+ src_mac: 01:00:01:00:00:00
+ - dest_mac: 01:00:01:00:00:00
+ src_mac: 02:00:02:00:00:00
- - dest_mac: [0x04, 0x00, 0x04, 0x00, 0x00, 0x00]
- src_mac: [0x03, 0x00, 0x03, 0x00, 0x00, 0x00]
- - dest_mac: [0x03, 0x00, 0x03, 0x00, 0x00, 0x00]
- src_mac: [0x04, 0x00, 0x04, 0x00, 0x00, 0x00]
+ - dest_mac: 04:00:04:00:00:00
+ src_mac: 03:00:03:00:00:00
+ - dest_mac: 03:00:03:00:00:00
+ src_mac: 04:00:04:00:00:00
platform:
master_thread_id: 0
latency_thread_id: 31
dual_if:
- socket: 0
- threads: [1,17,2,18,3,19,4,20,5,21,6,22,7,23,16]
+ threads: [1,2,3,4,5,6,7,16,17,18,19,20,21,22,23]
- socket: 1
- threads: [8,24,9,25,10,26,11,27,12,28,13,29,14,30,15]
+ threads: [8,9,10,11,12,13,14,15,24,25,26,27,28,29,30]
'''
output = create_config(cpu_topology, interfaces)
verify_master_core0(output)
@@ -563,25 +563,25 @@ class TRexCfgCreator_Test:
interfaces: ['02:00.0', '02:00.1', '05:00.0', '05:00.1']
port_bandwidth_gb: 40
port_info:
- - dest_mac: [0x02, 0x00, 0x02, 0x00, 0x00, 0x00]
- src_mac: [0x01, 0x00, 0x01, 0x00, 0x00, 0x00]
- - dest_mac: [0x01, 0x00, 0x01, 0x00, 0x00, 0x00]
- src_mac: [0x02, 0x00, 0x02, 0x00, 0x00, 0x00]
+ - dest_mac: 02:00:02:00:00:00
+ src_mac: 01:00:01:00:00:00
+ - dest_mac: 01:00:01:00:00:00
+ src_mac: 02:00:02:00:00:00
- - dest_mac: [0x04, 0x00, 0x04, 0x00, 0x00, 0x00]
- src_mac: [0x03, 0x00, 0x03, 0x00, 0x00, 0x00]
- - dest_mac: [0x03, 0x00, 0x03, 0x00, 0x00, 0x00]
- src_mac: [0x04, 0x00, 0x04, 0x00, 0x00, 0x00]
+ - dest_mac: 04:00:04:00:00:00
+ src_mac: 03:00:03:00:00:00
+ - dest_mac: 03:00:03:00:00:00
+ src_mac: 04:00:04:00:00:00
platform:
master_thread_id: 0
- latency_thread_id: 16
+ latency_thread_id: 8
dual_if:
- socket: 0
- threads: [1,17,2,18,3,19,4]
+ threads: [1,2,3,16,17,18,19]
- socket: 0
- threads: [20,5,21,6,22,7,23]
+ threads: [4,5,6,7,20,21,22]
'''
output = create_config(cpu_topology, interfaces)
verify_master_core0(output)
@@ -694,5 +694,6 @@ class TRexCfgCreator_Test:
@classmethod
def tearDownClass(cls):
- sys.path.remove(CTRexScenario.scripts_path)
+ if CTRexScenario.scripts_path in sys.path:
+ sys.path.remove(CTRexScenario.scripts_path)
del sys.modules['dpdk_setup_ports']
diff --git a/scripts/automation/regression/setups/kiwi02/benchmark.yaml b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
index e6621085..41688906 100644
--- a/scripts/automation/regression/setups/kiwi02/benchmark.yaml
+++ b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
@@ -246,3 +246,53 @@ test_CPU_benchmark:
bw_per_core : 1
+
+test_performance_vm_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 11.5
+ max: 13.1
+
+
+test_performance_vm_single_cpu_cached:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 22.0
+ max: 25.0
+
+
+
+test_performance_syn_attack_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 9.5
+ max: 11.5
+
+test_performance_vm_multi_cpus:
+ cfg:
+ core_count : 4
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 9.7
+ max: 12.5
+
+
+test_performance_vm_multi_cpus_cached:
+ cfg:
+ core_count : 4
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 19.0
+ max: 22.0
+
+test_performance_syn_attack_multi_cpus:
+ cfg:
+ core_count : 4
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 8.5
+ max: 10.5
+
diff --git a/scripts/automation/regression/setups/trex07/backup/benchmark.yaml b/scripts/automation/regression/setups/trex07/backup/benchmark.yaml
new file mode 100644
index 00000000..0dc340b0
--- /dev/null
+++ b/scripts/automation/regression/setups/trex07/backup/benchmark.yaml
@@ -0,0 +1,244 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+#### common templates ###
+
+stat_route_dict: &stat_route_dict
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+
+nat_dict: &nat_dict
+ clients_net_start : 16.0.0.0
+ client_acl_wildcard_mask : 0.0.0.255
+ dual_port_mask : 1.0.0.0
+ pool_start : 200.0.0.0
+ pool_netmask : 255.255.255.0
+
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 17
+ cores : 1
+ bw_per_core : 543.232
+
+
+test_routing_imix:
+ multiplier : 10
+ cores : 1
+ bw_per_core : 34.128
+
+
+test_routing_imix_64:
+ multiplier : 430
+ cores : 1
+ bw_per_core : 5.893
+
+
+test_static_routing_imix: &test_static_routing_imix
+ stat_route_dict : *stat_route_dict
+ multiplier : 8
+ cores : 1
+ bw_per_core : 34.339
+
+test_static_routing_imix_asymmetric: *test_static_routing_imix
+
+
+test_ipv6_simple:
+ multiplier : 9
+ cores : 2
+ bw_per_core : 19.064
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 6000
+ cores : 1
+ nat_opened : 500000
+ allow_timeout_dev : True
+ bw_per_core : 44.445
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning: *test_nat_simple
+
+
+test_nbar_simple:
+ multiplier : 7.5
+ cores : 2
+ bw_per_core : 17.174
+ nbar_classification:
+ rtp : 32.57
+ http : 30.25
+ oracle_sqlnet : 11.23
+ exchange : 10.80
+ citrix : 5.62
+ rtsp : 2.84
+ dns : 1.95
+ smtp : 0.57
+ pop3 : 0.36
+ ssl : 0.17
+ sctp : 0.13
+ sip : 0.09
+ unknown : 3.41
+
+
+test_rx_check_http: &rx_http
+ multiplier : 15000
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 39.560
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 49.237
+
+test_rx_check_http_negative_disabled:
+ << : *rx_http
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 10
+ cores : 3
+ rx_sample_rate : 16
+ bw_per_core : 16.082
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 19.198
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex07/backup/config.yaml b/scripts/automation/regression/setups/trex07/backup/config.yaml
new file mode 100644
index 00000000..db6e9bf8
--- /dev/null
+++ b/scripts/automation/regression/setups/trex07/backup/config.yaml
@@ -0,0 +1,66 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname as apears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired imaged wished to be loaded as the router's running config
+# line_password - router password when access via Telent
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly loaded into the router during the test.
+# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : csi-trex-07
+ cores : 4
+
+router:
+ model : ASR1001x
+ hostname : csi-asr-01
+ ip_address : 10.56.216.120
+ image : asr1001x-universalk9.03.13.02.S.154-3.S2-ext.SPA.bin
+ line_password : cisco
+ en_password : cisco
+ mgmt_interface : GigabitEthernet0
+ clean_config : clean_config.cfg
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : Te0/0/0
+ src_mac_addr : 0000.0001.0002
+ dest_mac_addr : 0000.0001.0001
+ server :
+ name : Te0/0/1
+ src_mac_addr : 0000.0002.0002
+ dest_mac_addr : 0000.0002.0001
+ vrf_name : null
+
+tftp:
+ hostname : ats-asr-srv-1
+ ip_address : 10.56.217.7
+ root_dir : /scratch/tftp/
+ images_path : /asr1001x/
diff --git a/scripts/automation/regression/setups/trex07/benchmark.yaml b/scripts/automation/regression/setups/trex07/benchmark.yaml
index 0dc340b0..6e861836 100644
--- a/scripts/automation/regression/setups/trex07/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex07/benchmark.yaml
@@ -4,120 +4,57 @@
#### common templates ###
-stat_route_dict: &stat_route_dict
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
-
-nat_dict: &nat_dict
- clients_net_start : 16.0.0.0
- client_acl_wildcard_mask : 0.0.0.255
- dual_port_mask : 1.0.0.0
- pool_start : 200.0.0.0
- pool_netmask : 255.255.255.0
-
-
-### stateful ###
-
test_jumbo:
- multiplier : 17
- cores : 1
- bw_per_core : 543.232
+ multiplier : 120
+ cores : 2
+ bw_per_core : 962.464
test_routing_imix:
- multiplier : 10
- cores : 1
- bw_per_core : 34.128
+ multiplier : 60
+ cores : 4
+ bw_per_core : 48.130
test_routing_imix_64:
- multiplier : 430
- cores : 1
- bw_per_core : 5.893
-
+ multiplier : 4000
+ cores : 7
+ bw_per_core : 12.699
-test_static_routing_imix: &test_static_routing_imix
- stat_route_dict : *stat_route_dict
- multiplier : 8
- cores : 1
- bw_per_core : 34.339
-test_static_routing_imix_asymmetric: *test_static_routing_imix
+test_static_routing_imix_asymmetric:
+ multiplier : 50
+ cores : 3
+ bw_per_core : 50.561
test_ipv6_simple:
- multiplier : 9
- cores : 2
- bw_per_core : 19.064
-
-
-test_nat_simple_mode1: &test_nat_simple
- stat_route_dict : *stat_route_dict
- nat_dict : *nat_dict
- multiplier : 6000
- cores : 1
- nat_opened : 500000
- allow_timeout_dev : True
- bw_per_core : 44.445
-
-test_nat_simple_mode2: *test_nat_simple
-
-test_nat_simple_mode3: *test_nat_simple
-
-test_nat_learning: *test_nat_simple
-
-
-test_nbar_simple:
- multiplier : 7.5
- cores : 2
- bw_per_core : 17.174
- nbar_classification:
- rtp : 32.57
- http : 30.25
- oracle_sqlnet : 11.23
- exchange : 10.80
- citrix : 5.62
- rtsp : 2.84
- dns : 1.95
- smtp : 0.57
- pop3 : 0.36
- ssl : 0.17
- sctp : 0.13
- sip : 0.09
- unknown : 3.41
+ multiplier : 50
+ cores : 7
+ bw_per_core : 19.5
test_rx_check_http: &rx_http
- multiplier : 15000
- cores : 1
- rx_sample_rate : 16
- bw_per_core : 39.560
+ multiplier : 99000
+ cores : 7
+ rx_sample_rate : 128
+ bw_per_core : 49.464
test_rx_check_http_ipv6:
<< : *rx_http
bw_per_core : 49.237
-test_rx_check_http_negative_disabled:
- << : *rx_http
- stat_route_dict : *stat_route_dict
- nat_dict : *nat_dict
-
-
test_rx_check_sfr: &rx_sfr
- multiplier : 10
- cores : 3
- rx_sample_rate : 16
- bw_per_core : 16.082
+ multiplier : 35
+ cores : 7
+ rx_sample_rate : 128
+ bw_per_core : 20.871
test_rx_check_sfr_ipv6:
<< : *rx_sfr
bw_per_core : 19.198
-
### stateless ###
test_CPU_benchmark:
@@ -178,10 +115,10 @@ test_CPU_benchmark:
cpu_util : 1
bw_per_core : 1
- - name : stl/udp_for_benchmarks.py
- kwargs : {packet_len: 9000, stream_count: 100}
- cpu_util : 1
- bw_per_core : 1
+ #- name : stl/udp_for_benchmarks.py
+ # kwargs : {packet_len: 9000, stream_count: 100}
+ # cpu_util : 1
+ # bw_per_core : 1
# not enough memory + queue full if memory increase
# - name : stl/udp_for_benchmarks.py
@@ -241,4 +178,56 @@ test_CPU_benchmark:
cpu_util : 1
bw_per_core : 1
-
+test_performance_vm_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 9.6
+ max: 13.3
+
+
+test_performance_vm_single_cpu_cached:
+ cfg:
+ mult : "10%"
+ mpps_per_core_golden :
+ min: 16.0
+ max: 25.0
+
+
+
+test_performance_syn_attack_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 9.0
+ max: 14.0
+
+test_performance_vm_multi_cpus:
+ cfg:
+ core_count : 7
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 8.5
+ max: 12.0
+
+
+test_performance_vm_multi_cpus_cached:
+ cfg:
+ core_count : 7
+ mult : "35%"
+ mpps_per_core_golden :
+ min: 9.0
+ max: 15.0
+
+test_performance_syn_attack_multi_cpus:
+ cfg:
+ core_count : 7
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 8.0
+ max: 16.0
+
+
+test_all_profiles :
+ mult : "5%"
+
diff --git a/scripts/automation/regression/setups/trex07/config.yaml b/scripts/automation/regression/setups/trex07/config.yaml
index db6e9bf8..10472c4f 100644
--- a/scripts/automation/regression/setups/trex07/config.yaml
+++ b/scripts/automation/regression/setups/trex07/config.yaml
@@ -35,32 +35,7 @@
trex:
hostname : csi-trex-07
- cores : 4
+ cores : 8
+ modes : ['loopback']
-router:
- model : ASR1001x
- hostname : csi-asr-01
- ip_address : 10.56.216.120
- image : asr1001x-universalk9.03.13.02.S.154-3.S2-ext.SPA.bin
- line_password : cisco
- en_password : cisco
- mgmt_interface : GigabitEthernet0
- clean_config : clean_config.cfg
- intf_masking : 255.255.255.0
- ipv6_mask : 64
- interfaces :
- - client :
- name : Te0/0/0
- src_mac_addr : 0000.0001.0002
- dest_mac_addr : 0000.0001.0001
- server :
- name : Te0/0/1
- src_mac_addr : 0000.0002.0002
- dest_mac_addr : 0000.0002.0001
- vrf_name : null
-tftp:
- hostname : ats-asr-srv-1
- ip_address : 10.56.217.7
- root_dir : /scratch/tftp/
- images_path : /asr1001x/
diff --git a/scripts/automation/regression/setups/trex08/benchmark.yaml b/scripts/automation/regression/setups/trex08/benchmark.yaml
index 8f83e8f9..935b3e55 100644
--- a/scripts/automation/regression/setups/trex08/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex08/benchmark.yaml
@@ -179,3 +179,53 @@ test_CPU_benchmark:
bw_per_core : 1
+test_performance_vm_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 15.1
+ max: 20.3
+
+
+test_performance_vm_single_cpu_cached:
+ cfg:
+ mult : "10%"
+ mpps_per_core_golden :
+ min: 29.1
+ max: 32.0
+
+
+
+test_performance_syn_attack_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 13.2
+ max: 15.0
+
+test_performance_vm_multi_cpus:
+ cfg:
+ core_count : 7
+ mult : "40%"
+ mpps_per_core_golden :
+ min: 15.0
+ max: 20.0
+
+
+test_performance_vm_multi_cpus_cached:
+ cfg:
+ core_count : 7
+ mult : "40%"
+ mpps_per_core_golden :
+ min: 29.0
+ max: 34.0
+
+test_performance_syn_attack_multi_cpus:
+ cfg:
+ core_count : 7
+ mult : "40%"
+ mpps_per_core_golden :
+ min: 13.0
+ max: 17.0
+
+
diff --git a/scripts/automation/regression/setups/trex09/benchmark.yaml b/scripts/automation/regression/setups/trex09/benchmark.yaml
index 86f169ed..d1f5f56c 100644
--- a/scripts/automation/regression/setups/trex09/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex09/benchmark.yaml
@@ -204,7 +204,7 @@ test_performance_syn_attack_single_cpu:
cfg:
mult : "90%"
mpps_per_core_golden :
- min: 13.8
+ min: 12.9
max: 14.5
test_performance_vm_multi_cpus:
diff --git a/scripts/automation/regression/stateful_tests/trex_general_test.py b/scripts/automation/regression/stateful_tests/trex_general_test.py
index e968d380..1843af00 100755
--- a/scripts/automation/regression/stateful_tests/trex_general_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_general_test.py
@@ -198,11 +198,14 @@ class CTRexGeneral_Test(unittest.TestCase):
def check_for_trex_crash(self):
pass
- def get_benchmark_param (self, param, sub_param = None, test_name = None):
+ def get_benchmark_param (self, param, sub_param = None, test_name = None,default=None):
if not test_name:
test_name = self.get_name()
if test_name not in self.benchmark:
- self.skip('No data in benchmark.yaml for test: %s, param: %s. Skipping.' % (test_name, param))
+ if default ==None:
+ self.skip('No data in benchmark.yaml for test: %s, param: %s. Skipping.' % (test_name, param))
+ else:
+ return default
if sub_param:
return self.benchmark[test_name][param].get(sub_param)
else:
diff --git a/scripts/automation/regression/stateless_tests/stl_client_test.py b/scripts/automation/regression/stateless_tests/stl_client_test.py
index 36ac0ee1..acf5dc61 100644
--- a/scripts/automation/regression/stateless_tests/stl_client_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_client_test.py
@@ -240,8 +240,12 @@ class STLClient_Test(CStlGeneral_Test):
self.skip('skipping profile tests for virtual / non loopback')
return
+ default_mult = self.get_benchmark_param('mult',default="30%")
+
try:
-
+ print("\n");
+
+
for profile in self.profiles:
print("now testing profile {0}...\n".format(profile))
@@ -269,7 +273,7 @@ class STLClient_Test(CStlGeneral_Test):
self.c.clear_stats()
- self.c.start(ports = [self.tx_port, self.rx_port], mult = "30%")
+ self.c.start(ports = [self.tx_port, self.rx_port], mult = default_mult)
time.sleep(100 / 1000.0)
if p1.is_pauseable() and p2.is_pauseable():
diff --git a/scripts/automation/regression/stateless_tests/stl_performance_test.py b/scripts/automation/regression/stateless_tests/stl_performance_test.py
index e5cecc03..641f0a33 100644
--- a/scripts/automation/regression/stateless_tests/stl_performance_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_performance_test.py
@@ -61,7 +61,7 @@ class PerformanceReport(object):
SetupName = self.machine_name,
TestType = 'performance',
Mppspc = self.avg_mpps_per_core,
- ActionNumber = '<fill_me>',
+ ActionNumber = os.getenv("BUILD_ID","n/a"),
GoldenMin = golden_mpps['min'],
GoldenMax = golden_mpps['max'])
@@ -296,6 +296,10 @@ class STLPerformance_Test(CStlGeneral_Test):
# sample bps/pps
for _ in range(0, 20):
stats = self.c.get_stats(ports = 0)
+ if stats['global'][ 'queue_full']>10000:
+ assert 0, "Queue is full need to tune the multiplier"
+
+ # CPU results are not valid cannot use them
samples['bps'].append(stats[0]['tx_bps'])
samples['pps'].append(stats[0]['tx_pps'])
time.sleep(1)
diff --git a/scripts/automation/regression/stateless_tests/stl_rx_test.py b/scripts/automation/regression/stateless_tests/stl_rx_test.py
index 524ad4bf..d28fca54 100644
--- a/scripts/automation/regression/stateless_tests/stl_rx_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_rx_test.py
@@ -51,6 +51,17 @@ class STLRX_Test(CStlGeneral_Test):
'latency_9k_enable': False,
'allow_packets_drop_num': 1, # allow 1 pkt drop
},
+
+ 'librte_pmd_mlx5': {
+ 'rate_percent': 80,
+ 'total_pkts': 1000,
+ 'rate_latency': 1,
+ 'latency_9k_enable': True,
+ 'latency_9k_max_average': 100,
+ 'latency_9k_max_latency': 250,
+ },
+
+
}
CStlGeneral_Test.setUp(self)
@@ -63,7 +74,6 @@ class STLRX_Test(CStlGeneral_Test):
port_info = self.c.get_port_info(ports = self.rx_port)[0]
self.speed = port_info['speed']
-
cap = port_info['rx']['caps']
if "flow_stats" not in cap or "latency" not in cap:
self.skip('port {0} does not support RX'.format(self.rx_port))
@@ -400,12 +410,14 @@ class STLRX_Test(CStlGeneral_Test):
s_port=random.sample(all_ports, random.randint(1, len(all_ports)) )
s_port=sorted(s_port)
- if self.speed == 40 :
+
+ if ((self.speed == 40) or (self.speed == 100)):
# the NIC does not support all full rate in case both port works let's filter odd ports
s_port=list(filter(lambda x: x % 2==0, s_port))
if len(s_port)==0:
s_port=[0];
+
error=1;
for j in range(0,5):
print(" {4} - duration {0} pgid {1} pkt_size {2} s_port {3} ".format(duration,pgid,pkt_size,s_port,j));
diff --git a/scripts/automation/trex_control_plane/stl/console/trex_tui.py b/scripts/automation/trex_control_plane/stl/console/trex_tui.py
index d7db6d30..bf6ed164 100644
--- a/scripts/automation/trex_control_plane/stl/console/trex_tui.py
+++ b/scripts/automation/trex_control_plane/stl/console/trex_tui.py
@@ -645,14 +645,14 @@ class TrexTUI():
# regular state
if self.state == self.STATE_ACTIVE:
# if no connectivity - move to lost connecitivty
- if not self.stateless_client.async_client.is_alive():
+ if not self.stateless_client.async_client.is_active():
self.stateless_client._invalidate_stats(self.pm.ports)
self.state = self.STATE_LOST_CONT
# lost connectivity
elif self.state == self.STATE_LOST_CONT:
- # got it back
+ # if the async is alive (might be zomibe, but alive) try to reconnect
if self.stateless_client.async_client.is_alive():
# move to state reconnect
self.state = self.STATE_RECONNECT
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/protocols.json b/scripts/automation/trex_control_plane/stl/services/scapy_server/protocols.json
new file mode 100644
index 00000000..f685c06f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/protocols.json
@@ -0,0 +1,194 @@
+[
+ {
+ "id": "Ether",
+ "name": "Ethernet II",
+ "fields": [
+ {
+ "id": "dst",
+ "name": "Destination",
+ "type": "MAC_ADDRESS",
+ "regex": "^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$"
+ },
+ {
+ "id": "src",
+ "name": "Source",
+ "type": "MAC_ADDRESS",
+ "regex": "^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$"
+ },
+ {
+ "id": "type",
+ "name": "Type"
+ }
+ ],
+ "payload": ["IP", "IPv6", "Dot1Q", "Raw"]
+ },
+ {
+ "id": "IP",
+ "name": "IPv4",
+ "fields": [
+ {
+ "id": "version",
+ "name": "Version"
+ },
+ {
+ "id": "ihl",
+ "name": "IHL",
+ "type": "NUMBER",
+ "auto": true
+ },
+ {
+ "id": "tos",
+ "name": "TOS",
+ "type": "NUMBER"
+ },
+ {
+ "id": "len",
+ "name": "Total Length",
+ "type": "NUMBER",
+ "auto": true
+ },
+ {
+ "id": "id",
+ "name": "Identification",
+ "type": "NUMBER"
+ },
+ {
+ "id": "flags",
+ "name": "Flags",
+ "type": "BITMASK",
+ "bits": [
+ {"name": "Reserved", "mask": 4, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 4}]},
+ {"name": "Fragment", "mask": 2, "values":[{"name":"May fragment (0)", "value": 0}, {"name":"Don't fragment (1)", "value": 2}]},
+ {"name": "More Fragments(MF)", "mask": 1, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 1}]}
+ ]
+ },
+ {
+ "id": "frag",
+ "name": "Fragment offset",
+ "type": "NUMBER"
+ },
+ {
+ "id": "ttl",
+ "name": "TTL",
+ "type": "NUMBER",
+ "min": 1,
+ "max": 255
+
+ },
+ {
+ "id": "proto",
+ "name": "Protocol"
+ },
+ {
+ "id": "chksum",
+ "name": "Checksum",
+ "type": "STRING",
+ "auto": true
+ },
+ {
+ "id": "src",
+ "name": "Source address",
+ "type": "IP_ADDRESS"
+ },
+ {
+ "id": "dst",
+ "name": "Destination address",
+ "type": "IP_ADDRESS"
+ },
+ {
+ "id": "options",
+ "name": "Options",
+ "type": "IP_OPTIONS"
+ }
+ ],
+ "payload": ["TCP", "UDP", "ICMP", "Raw"]
+ },
+ {
+ "id": "TCP",
+ "name": "TCP",
+ "fields": [
+ {
+ "id": "sport",
+ "name": "Source port",
+ "type": "NUMBER",
+ "min": 0,
+ "max": 65535
+
+ },
+ {
+ "id": "dport",
+ "name": "Destination port",
+ "type": "NUMBER",
+ "min": 0,
+ "max": 65535
+ },
+ {
+ "id": "seq",
+ "name": "Sequence number",
+ "type": "NUMBER"
+ },
+ {
+ "id": "ack",
+ "name": "Acknowledgment number",
+ "type": "NUMBER"
+ },
+ {
+ "id": "dataofs",
+ "name": "Data offset",
+ "type": "NUMBER"
+ },
+ {
+ "id": "reserved",
+ "name": "Reserved",
+ "type": "NUMBER"
+ },
+ {
+ "id": "flags",
+ "name": "Flags",
+ "auto": false,
+ "type": "BITMASK",
+ "bits": [
+ {"name": "URG", "mask": 32, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 32}]},
+ {"name": "ACK", "mask": 16, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 16}]},
+ {"name": "PSH", "mask": 8, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 8}]},
+ {"name": "RST", "mask": 4, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 4}]},
+ {"name": "SYN", "mask": 2, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 2}]},
+ {"name": "FIN", "mask": 1, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 1}]}
+ ]
+ },
+ {
+ "id": "window",
+ "name": "Window size",
+ "type": "NUMBER"
+ },
+ {
+ "id": "chksum",
+ "name": "Checksum",
+ "auto": true,
+ "type": "NUMBER"
+ },
+ {
+ "id": "urgptr",
+ "name": "Urgent pointer",
+ "type": "NUMBER"
+ },
+ {
+ "id": "options",
+ "name": "Options",
+ "type": "TCP_OPTIONS"
+ }
+ ]
+ },
+ {
+ "id": "Raw",
+ "name": "Raw",
+ "fields": [
+ {
+ "id": "load",
+ "name": "Payload",
+ "type": "BYTES"
+ }
+ ]
+ }
+]
+
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py
index 91257596..88514aa8 100755
--- a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py
@@ -9,6 +9,7 @@ import tempfile
import hashlib
import base64
import numbers
+import random
import inspect
import json
from pprint import pprint
@@ -279,6 +280,64 @@ def get_sample_field_val(scapy_layer, fieldId):
except:
pass
+def generate_random_bytes(sz, seed, start, end):
+ # generate bytes of specified range with a fixed seed and size
+    # seed differently per python version so both produce the same byte sequence
+ n = end - start + 1
+ if is_python(2):
+ rnd = random.Random(seed)
+ res = [start + int(rnd.random()*n) for _i in range(sz)]
+ return ''.join(chr(x) for x in res)
+ else:
+ rnd = random.Random()
+ # to generate same random sequence as 2.x
+ rnd.seed(seed, version=1)
+ res = [start + int(rnd.random()*n) for _i in range(sz)]
+ return bytes(res)
+
+def generate_bytes_from_template(sz, template):
+ # generate bytes by repeating a template
+ res = str_to_bytes('') # new bytes array
+ if len(template) == 0:
+ return res
+ while len(res) < sz:
+ res = res + template
+ return res[:sz]
+
+def parse_template_code(template_code):
+ template_code = re.sub("0[xX]", '', template_code) # remove 0x
+    template_code = re.sub(r"\s", '', template_code) # remove spaces
+ return bytearray.fromhex(template_code)
+
+def verify_payload_size(size):
+ assert(size != None)
+ if (size > (1<<20)): # 1Mb ought to be enough for anybody
+ raise ValueError('size is too large')
+
+def generate_bytes(bytes_definition):
+ # accepts a bytes definition object
+ # {generate: random_bytes or random_ascii, seed: <seed_number>, size: <size_bytes>}
+ # {generate: template, template_base64: '<base64str>', size: <size_bytes>}
+ # {generate: template_code, template_text_code: '<template_code_str>', size: <size_bytes>}
+ gen_type = bytes_definition.get('generate')
+ if gen_type == None:
+ return b64_to_bytes(bytes_definition['base64'])
+ elif gen_type == 'template_code':
+ code = parse_template_code(bytes_definition["template_code"])
+ bytes_size = int(bytes_definition.get('size') or len(code))
+ verify_payload_size(bytes_size)
+ return generate_bytes_from_template(bytes_size, code)
+ else:
+ bytes_size = int(bytes_definition['size']) # required
+ seed = int(bytes_definition.get('seed') or 12345) # optional
+ verify_payload_size(bytes_size)
+ if gen_type == 'random_bytes':
+ return generate_random_bytes(bytes_size, seed, 0, 0xFF)
+ elif gen_type == 'random_ascii':
+ return generate_random_bytes(bytes_size, seed, 0x20, 0x7E)
+ elif gen_type == 'template':
+ return generate_bytes_from_template(bytes_size, b64_to_bytes(bytes_definition["template_base64"]))
+
class ScapyException(Exception): pass
class Scapy_service(Scapy_service_api):
@@ -312,7 +371,16 @@ class Scapy_service(Scapy_service_api):
self.version_major = '1'
self.version_minor = '01'
self.server_v_hashed = self._generate_version_hash(self.version_major,self.version_minor)
-
+        self.protocol_definitions = {} # protocolId -> protocol definition overrides data
+ self._load_definitions_from_json()
+
+ def _load_definitions_from_json(self):
+ # load protocol definitions from a json file
+ self.protocol_definitions = {}
+ with open('protocols.json', 'r') as f:
+ protocols = json.load(f)
+ for protocol in protocols:
+ self.protocol_definitions[ protocol['id'] ] = protocol
def _all_protocol_structs(self):
old_stdout = sys.stdout
@@ -370,9 +438,9 @@ class Scapy_service(Scapy_service_api):
if type(val) == type({}):
value_type = val['vtype']
if value_type == 'EXPRESSION':
- return eval(val['expr'], {})
+ return eval(val['expr'], scapy.all.__dict__)
elif value_type == 'BYTES': # bytes payload(ex Raw.load)
- return b64_to_bytes(val['base64'])
+ return generate_bytes(val)
elif value_type == 'OBJECT':
return val['value']
else:
@@ -382,7 +450,7 @@ class Scapy_service(Scapy_service_api):
else:
return val
- def _field_value_from_def(self, layer, fieldId, val):
+ def _field_value_from_def(self, scapy_pkt, layer, fieldId, val):
field_desc = layer.get_field(fieldId)
sample_val = get_sample_field_val(layer, fieldId)
# extensions for field values
@@ -394,6 +462,16 @@ class Scapy_service(Scapy_service_api):
return field_desc.randval()
elif value_type == 'MACHINE': # internal machine field repr
return field_desc.m2i(layer, b64_to_bytes(val['base64']))
+ elif value_type == 'BYTES':
+ if 'total_size' in val: # custom case for total pkt size
+ gen = {}
+ gen.update(val)
+ total_sz = gen['total_size']
+ del gen['total_size']
+ gen['size'] = total_sz - len(scapy_pkt)
+ return generate_bytes(gen)
+ else:
+ return generate_bytes(val)
if is_number(sample_val) and is_string(val):
# human-value. guess the type and convert to internal value
# seems setfieldval already does this for some fields,
@@ -583,22 +661,24 @@ class Scapy_service(Scapy_service_api):
def _verify_version_handler(self,client_v_handler):
return (self.server_v_hashed == client_v_handler)
- def _parse_packet_dict(self,layer,scapy_layers,scapy_layer_names):
- class_name = scapy_layer_names.index(layer['id'])
- class_p = scapy_layers[class_name] # class pointer
+ def _parse_packet_dict(self, layer, layer_classes, base_layer):
+ class_p = layer_classes[layer['id']] # class id -> class dict
scapy_layer = class_p()
if isinstance(scapy_layer, Raw):
scapy_layer.load = str_to_bytes("dummy")
+ if base_layer == None:
+ base_layer = scapy_layer
if 'fields' in layer:
- self._modify_layer(scapy_layer, layer['fields'])
+ self._modify_layer(base_layer, scapy_layer, layer['fields'])
return scapy_layer
def _packet_model_to_scapy_packet(self,data):
- layers = Packet.__subclasses__()
- layer_names = [ layer.__name__ for layer in layers]
- base_layer = self._parse_packet_dict(data[0],layers,layer_names)
+ layer_classes = {}
+ for layer_class in Packet.__subclasses__():
+ layer_classes[layer_class.__name__] = layer_class
+ base_layer = self._parse_packet_dict(data[0], layer_classes, None)
for i in range(1,len(data),1):
- packet_layer = self._parse_packet_dict(data[i],layers,layer_names)
+ packet_layer = self._parse_packet_dict(data[i], layer_classes, base_layer)
base_layer = base_layer/packet_layer
return base_layer
@@ -654,10 +734,9 @@ class Scapy_service(Scapy_service_api):
return pkt_class()
- def _get_payload_classes(self, pkt):
+ def _get_payload_classes(self, pkt_class):
# tries to find, which subclasses allowed.
# this can take long time, since it tries to build packets with all subclasses(O(N))
- pkt_class = type(pkt)
allowed_subclasses = []
for pkt_subclass in conf.layers:
if self._is_packet_class(pkt_subclass):
@@ -671,16 +750,29 @@ class Scapy_service(Scapy_service_api):
pass
return allowed_subclasses
- def _get_fields_definition(self, pkt_class):
+ def _get_fields_definition(self, pkt_class, fieldsDef):
+ # fieldsDef - array of field definitions(or empty array)
fields = []
for field_desc in pkt_class.fields_desc:
+ fieldId = field_desc.name
field_data = {
- "id": field_desc.name,
+ "id": fieldId,
"name": field_desc.name
}
+ for fieldDef in fieldsDef:
+ if fieldDef['id'] == fieldId:
+ field_data.update(fieldDef)
if isinstance(field_desc, EnumField):
try:
field_data["values_dict"] = field_desc.s2i
+ if field_data.get("type") == None:
+                    if len(field_data["values_dict"]) > 0:
+ field_data["type"] = "ENUM"
+ elif fieldId == 'load':
+ field_data["type"] = "BYTES"
+ else:
+ field_data["type"] = "STRING"
+ field_data["values_dict"] = field_desc.s2i
except:
# MultiEnumField doesn't have s2i. need better handling
pass
@@ -696,17 +788,23 @@ class Scapy_service(Scapy_service_api):
for pkt_class in all_classes:
if self._is_packet_class(pkt_class):
# enumerate all non-abstract Packet classes
+ protocolId = pkt_class.__name__
+ protoDef = self.protocol_definitions.get(protocolId) or {}
protocols.append({
- "id": pkt_class.__name__,
- "name": pkt_class.name,
- "fields": self._get_fields_definition(pkt_class)
+ "id": protocolId,
+ "name": protoDef.get('name') or pkt_class.name,
+ "fields": self._get_fields_definition(pkt_class, protoDef.get('fields') or [])
})
res = {"protocols": protocols}
return res
def get_payload_classes(self,client_v_handler, pkt_model_descriptor):
pkt = self._packet_model_to_scapy_packet(pkt_model_descriptor)
- return [c.__name__ for c in self._get_payload_classes(pkt)]
+ pkt_class = type(pkt.lastlayer())
+ protocolDef = self.protocol_definitions.get(pkt_class.__name__)
+ if protocolDef and protocolDef.get('payload'):
+ return protocolDef['payload']
+ return [c.__name__ for c in self._get_payload_classes(pkt_class)]
#input in string encoded base64
def check_update_of_dbs(self,client_v_handler,db_md5,field_md5):
@@ -725,10 +823,10 @@ class Scapy_service(Scapy_service_api):
else:
raise ScapyException("Fields DB is not up to date")
- def _modify_layer(self, scapy_layer, fields):
+ def _modify_layer(self, scapy_pkt, scapy_layer, fields):
for field in fields:
fieldId = str(field['id'])
- fieldval = self._field_value_from_def(scapy_layer, fieldId, field['value'])
+ fieldval = self._field_value_from_def(scapy_pkt, scapy_layer, fieldId, field['value'])
if fieldval is not None:
scapy_layer.setfieldval(fieldId, fieldval)
else:
@@ -767,7 +865,7 @@ class Scapy_service(Scapy_service_api):
# TODO: support replacing payload, instead of breaking
raise ScapyException("Protocol id inconsistent")
if 'fields' in model_layer:
- self._modify_layer(scapy_layer, model_layer['fields'])
+ self._modify_layer(scapy_pkt, scapy_layer, model_layer['fields'])
return self._pkt_data(scapy_pkt)
def read_pcap(self,client_v_handler,pcap_base64):
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py
index 17dd304a..1db2c62b 100644
--- a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py
@@ -62,6 +62,9 @@ def reconstruct_pkt(bytes_b64, model_def):
def get_definitions(def_filter):
return pass_result(service.get_definitions(v_handler, def_filter))
+def get_definition_of(scapy_classname):
+ return pass_result(service.get_definitions(v_handler, [scapy_classname]))['protocols'][0]
+
def get_payload_classes(def_filter):
return pass_result(service.get_payload_classes(v_handler, def_filter))
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py
index 9cd473d7..d1207ca5 100644
--- a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py
@@ -78,6 +78,35 @@ def test_build_Raw():
])
assert(str(pkt[Raw].load == "hi"))
+def test_build_fixed_pkt_size_bytes_gen():
+ pkt = build_pkt_get_scapy([
+ layer_def("Ether"),
+ layer_def("IP"),
+ layer_def("TCP"),
+ layer_def("Raw", load={
+ "vtype": "BYTES",
+ "generate": "template",
+ "total_size": 64,
+ "template_base64": bytes_to_b64(b"hi")
+ })
+ ])
+ print(len(pkt))
+ assert(len(pkt) == 64)
+
+def test_build_fixed_pkt_size_random_ascii_gen():
+ pkt = build_pkt_get_scapy([
+ layer_def("Ether"),
+ layer_def("IP"),
+ layer_def("TCP"),
+ layer_def("Raw", load={
+ "vtype": "BYTES",
+ "generate": "random_ascii",
+ "total_size": 256
+ })
+ ])
+ print(len(pkt))
+ assert(len(pkt) == 256)
+
def test_get_all():
service.get_all(v_handler)
@@ -98,6 +127,16 @@ def test_get_payload_classes():
assert("IP" in eth_payloads)
assert("Dot1Q" in eth_payloads)
assert("TCP" not in eth_payloads)
+    assert(eth_payloads[0] == "IP") # order (based on protocols.json)
+
+def test_get_tcp_payload_classes():
+ payloads = get_payload_classes([{"id":"TCP"}])
+ assert("Raw" in payloads)
+
+def test_get_dot1q_payload_classes():
+ payloads = get_payload_classes([{"id":"Dot1Q"}])
+ assert("Dot1Q" in payloads)
+ assert("IP" in payloads)
def test_pcap_read_and_write():
pkts_to_write = [bytes_to_b64(bytes(TEST_PKT))]
@@ -120,6 +159,28 @@ def test_layer_random_value():
ether_fields = fields_to_map(res['data'][0]['fields'])
assert(re.match(RE_MAC, ether_fields['src']['value']))
+def test_IP_options():
+ options_expr = "[IPOption_SSRR(copy_flag=0, routers=['1.2.3.4', '5.6.7.8'])]"
+ res = build_pkt([
+ layer_def("Ether"),
+ layer_def("IP", options={"vtype": "EXPRESSION", "expr": options_expr}),
+ ])
+ pkt = build_pkt_to_scapy(res)
+ options = pkt[IP].options
+ assert(options[0].__class__.__name__ == 'IPOption_SSRR')
+ assert(options[0].copy_flag == 0)
+ assert(options[0].routers == ['1.2.3.4', '5.6.7.8'])
+
+def test_TCP_options():
+ options_expr = "[('MSS', 1460), ('NOP', None), ('NOP', None), ('SAckOK', b'')]"
+ pkt = build_pkt_get_scapy([
+ layer_def("Ether"),
+ layer_def("IP"),
+ layer_def("TCP", options={"vtype": "EXPRESSION", "expr": options_expr}),
+ ])
+ options = pkt[TCP].options
+ assert(options[0] == ('MSS', 1460) )
+
def test_layer_wrong_structure():
payload = [
layer_def("Ether"),
@@ -153,3 +214,38 @@ def test_layer_wrong_structure():
assert(real_structure == ["Ether", "IP", "Raw", None, None])
assert(valid_structure_flags == [True, True, True, False, False])
+def test_ether_definitions():
+ etherDef = get_definition_of("Ether")
+ assert(etherDef['name'] == "Ethernet II")
+ etherFields = etherDef['fields']
+ assert(etherFields[0]['id'] == 'dst')
+ assert(etherFields[0]['name'] == 'Destination')
+ assert(etherFields[1]['id'] == 'src')
+ assert(etherFields[1]['name'] == 'Source')
+ assert(etherFields[2]['id'] == 'type')
+ assert(etherFields[2]['name'] == 'Type')
+
+def test_icmp_definitions():
+ pdef = get_definition_of("ICMP")
+ assert(pdef['id'] == "ICMP")
+ assert(pdef['name'])
+ assert(pdef['fields'])
+
+def test_ip_definitions():
+ pdef = get_definition_of("IP")
+ fields = pdef['fields']
+ assert(fields[0]['id'] == 'version')
+
+ assert(fields[1]['id'] == 'ihl')
+ assert(fields[1]['auto'] == True)
+
+ assert(fields[3]['id'] == 'len')
+ assert(fields[3]['auto'] == True)
+
+ assert(fields[5]['id'] == 'flags')
+ assert(fields[5]['type'] == 'BITMASK')
+ assert(fields[5]['bits'][0]['name'] == 'Reserved')
+
+ assert(fields[9]['id'] == 'chksum')
+ assert(fields[9]['auto'] == True)
+
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_utils.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_utils.py
new file mode 100644
index 00000000..ceb88b47
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_utils.py
@@ -0,0 +1,69 @@
+# run with 'nosetests' utility
+
+from basetest import *
+from scapy_service import *
+
+def test_generate_random_bytes():
+ res = generate_random_bytes(10, 333, ord('0'), ord('9'))
+ print(res)
+ assert(len(res) == 10)
+ assert(res == b'5390532937') # random value with this seed
+
+def test_generate_bytes_from_template_empty():
+ res = generate_bytes_from_template(5, b"")
+ print(res)
+ assert(res == b"")
+
+def test_generate_bytes_from_template_neg():
+ res = generate_bytes_from_template(-5, b"qwe")
+ assert(res == b"")
+
+def test_generate_bytes_from_template_less():
+ res = generate_bytes_from_template(5, b"qwe")
+ print(res)
+ assert(res == b"qweqw")
+
+def test_generate_bytes_from_template_same():
+ res = generate_bytes_from_template(5, b"qwert")
+ print(res)
+ assert(res == b"qwert")
+
+def test_generate_bytes_from_template_more():
+ res = generate_bytes_from_template(5, b"qwerty")
+ print(res)
+ assert(res == b"qwert")
+
+def test_parse_template_code_with_trash():
+ res = parse_template_code("0xDE AD\n be ef \t0xDEAD")
+ print(res)
+ assert(res == bytearray.fromhex('DEADBEEFDEAD'))
+
+def test_generate_bytes():
+ res = generate_bytes({"generate":"random_bytes", "seed": 123, "size": 12})
+ print(res)
+ assert(len(res) == 12)
+
+def test_generate_ascii_default_seed():
+ res = generate_bytes({"generate":"random_ascii", "size": 14})
+ print(res)
+ assert(len(res) == 14)
+
+
+def test_generate_template_code_no_size():
+ res = generate_bytes({"generate":"template_code", "template_code": "BE EF"})
+ assert(res == bytearray.fromhex('BE EF'))
+
+def test_generate_template_code_less():
+ res = generate_bytes({"generate":"template_code", "template_code": "DE AD BE EF", "size": 2})
+ assert(res == bytearray.fromhex('DE AD'))
+
+def test_generate_template_code_more():
+ res = generate_bytes({"generate":"template_code", "template_code": "0xDEAD 0xBEEF", "size": 6})
+ assert(res == bytearray.fromhex('DE AD BE EF DE AD'))
+
+def test_generate_template_base64():
+ res = generate_bytes({"generate":"template", "template_base64": bytes_to_b64(b'hi'), "size": 5})
+ print(res)
+ assert(res == b'hihih')
+
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py
index 2c95844b..11e87592 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py
@@ -137,6 +137,10 @@ class CTRexAsyncStatsManager():
class CTRexAsyncClient():
+ THREAD_STATE_ACTIVE = 1
+ THREAD_STATE_ZOMBIE = 2
+ THREAD_STATE_DEAD = 3
+
def __init__ (self, server, port, stateless_client):
self.port = port
@@ -159,7 +163,10 @@ class CTRexAsyncClient():
self.connected = False
self.zipped = ZippedMsg()
-
+
+ self.t_state = self.THREAD_STATE_DEAD
+
+
# connects the async channel
def connect (self):
@@ -173,8 +180,8 @@ class CTRexAsyncClient():
self.socket = self.context.socket(zmq.SUB)
- # before running the thread - mark as active
- self.active = True
+ # before running the thread - mark as active
+ self.t_state = self.THREAD_STATE_ACTIVE
self.t = threading.Thread(target = self._run)
# kill this thread on exit and don't add it to the join list
@@ -198,26 +205,26 @@ class CTRexAsyncClient():
return RC_OK()
-
-
# disconnect
def disconnect (self):
if not self.connected:
return
# mark for join
- self.active = False
-
- # signal that the context was destroyed (exit the thread loop)
+ self.t_state = self.THREAD_STATE_DEAD
self.context.term()
-
- # join
self.t.join()
+
# done
self.connected = False
+ # set the thread as a zombie (in case of server death)
+ def set_as_zombie (self):
+ self.last_data_recv_ts = None
+ self.t_state = self.THREAD_STATE_ZOMBIE
+
# thread function
def _run (self):
@@ -231,12 +238,19 @@ class CTRexAsyncClient():
self.monitor.reset()
- while self.active:
+ while self.t_state != self.THREAD_STATE_DEAD:
try:
with self.monitor:
line = self.socket.recv()
+ # last data recv.
+ self.last_data_recv_ts = time.time()
+
+                    # if thread was marked as zombie - it does nothing besides fetching messages
+ if self.t_state == self.THREAD_STATE_ZOMBIE:
+ continue
+
self.monitor.on_recv_msg(line)
# try to decomrpess
@@ -246,7 +260,6 @@ class CTRexAsyncClient():
line = line.decode()
- self.last_data_recv_ts = time.time()
# signal once
if not got_data:
@@ -259,13 +272,14 @@ class CTRexAsyncClient():
# signal once
if got_data:
self.event_handler.on_async_dead()
+ self.set_as_zombie()
got_data = False
continue
except zmq.ContextTerminated:
# outside thread signaled us to exit
- assert(not self.active)
+ assert(self.t_state != self.THREAD_STATE_ACTIVE)
break
msg = json.loads(line)
@@ -283,16 +297,29 @@ class CTRexAsyncClient():
# closing of socket must be from the same thread
self.socket.close(linger = 0)
- def is_thread_alive (self):
- return self.t.is_alive()
-
- # did we get info for the last 3 seconds ?
+
+ # return True if the subscriber got data in the last 3 seconds
+ # even if zombie - will return true if got data
def is_alive (self):
+
+ # maybe the thread has exited with exception
+ if not self.t.is_alive():
+ return False
+
+ # simply no data
if self.last_data_recv_ts == None:
return False
+ # timeout of data
return ( (time.time() - self.last_data_recv_ts) < 3 )
+
+    # more granular than is_alive - it means the thread state is active and we are getting data
+    # zombies will return false
+ def is_active (self):
+ return self.is_alive() and self.t_state == self.THREAD_STATE_ACTIVE
+
+
def get_stats (self):
return self.stats
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
index 9290acbf..cf328d2e 100755
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
@@ -177,8 +177,8 @@ class EventsHandler(object):
def on_async_dead (self):
if self.client.connected:
msg = 'Lost connection to server'
- self.__add_event_log('local', 'info', msg, True)
self.client.connected = False
+ self.__add_event_log('local', 'info', msg, True)
def on_async_alive (self):
@@ -346,6 +346,8 @@ class EventsHandler(object):
# server stopped
elif (event_type == 100):
ev = "Server has stopped"
+ # to avoid any new messages on async
+ self.client.async_client.set_as_zombie()
self.__async_event_server_stopped()
show_event = True
@@ -2518,7 +2520,7 @@ class STLClient(object):
slave = port ^ 0x1
if slave in ports:
- raise STLError("dual mode: cannot provide adjacent ports ({0}, {1}) in a batch".format(master, slave))
+ raise STLError("dual mode: please specify only one of adjacent ports ({0}, {1}) in a batch".format(master, slave))
if not slave in self.get_acquired_ports():
raise STLError("dual mode: adjacent port {0} must be owned during dual mode".format(slave))
@@ -2567,7 +2569,7 @@ class STLClient(object):
self.logger.post_cmd(RC_ERR(e))
raise
- all_ports = ports + [p ^ 0x1 for p in ports]
+ all_ports = ports + [p ^ 0x1 for p in ports if profile_b]
self.remove_all_streams(ports = all_ports)
@@ -2576,7 +2578,8 @@ class STLClient(object):
slave = port ^ 0x1
self.add_streams(profile_a.get_streams(), master)
- self.add_streams(profile_b.get_streams(), slave)
+ if profile_b:
+ self.add_streams(profile_b.get_streams(), slave)
return self.start(ports = all_ports, duration = duration)
@@ -2738,7 +2741,7 @@ class STLClient(object):
while set(self.get_active_ports()).intersection(ports):
# make sure ASYNC thread is still alive - otherwise we will be stuck forever
- if not self.async_client.is_thread_alive():
+ if not self.async_client.is_active():
raise STLError("subscriber thread is dead")
time.sleep(0.01)
@@ -3521,21 +3524,28 @@ class STLClient(object):
@__console
def push_line (self, line):
'''Push a pcap file '''
+ args = [self,
+ "push",
+ self.push_line.__doc__,
+ parsing_opts.REMOTE_FILE,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.COUNT,
+ parsing_opts.DURATION,
+ parsing_opts.IPG,
+ parsing_opts.SPEEDUP,
+ parsing_opts.FORCE,
+ parsing_opts.DUAL]
+
+ parser = parsing_opts.gen_parser(*(args + [parsing_opts.FILE_PATH_NO_CHECK]))
+ opts = parser.parse_args(line.split(), verify_acquired = True)
- parser = parsing_opts.gen_parser(self,
- "push",
- self.push_line.__doc__,
- parsing_opts.FILE_PATH,
- parsing_opts.REMOTE_FILE,
- parsing_opts.PORT_LIST_WITH_ALL,
- parsing_opts.COUNT,
- parsing_opts.DURATION,
- parsing_opts.IPG,
- parsing_opts.SPEEDUP,
- parsing_opts.FORCE,
- parsing_opts.DUAL)
+ if not opts:
+ return opts
+
+ if not opts.remote:
+ parser = parsing_opts.gen_parser(*(args + [parsing_opts.FILE_PATH]))
+ opts = parser.parse_args(line.split(), verify_acquired = True)
- opts = parser.parse_args(line.split(), verify_acquired = True)
if not opts:
return opts
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
index e63f9125..aa797773 100755
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
@@ -1046,6 +1046,17 @@ class STLProfile(object):
else:
pkts_a, pkts_b = PCAPReader(pcap_file).read_all(split_mode = split_mode)
+ # swap the packets if a is empty, or the ts of first packet in b is earlier
+ if not pkts_a:
+ pkts_a, pkts_b = pkts_b, pkts_a
+ elif (ipg_usec is None) and pkts_b:
+ meta = pkts_a[0][1]
+ start_time_a = meta[0] * 1e6 + meta[1]
+ meta = pkts_b[0][1]
+ start_time_b = meta[0] * 1e6 + meta[1]
+ if start_time_b < start_time_a:
+ pkts_a, pkts_b = pkts_b, pkts_a
+
profile_a = STLProfile.__pkts_to_streams(pkts_a,
ipg_usec,
speedup,
@@ -1073,6 +1084,8 @@ class STLProfile(object):
def __pkts_to_streams (pkts, ipg_usec, speedup, loop_count, vm, packet_hook, start_delay_usec = 0):
streams = []
+ if speedup == 0:
+ raise STLError('Speedup should not be 0')
# 10 ms delay before starting the PCAP
last_ts_usec = -(start_delay_usec)
@@ -1084,7 +1097,10 @@ class STLProfile(object):
for i, (cap, meta) in enumerate(pkts, start = 1):
# IPG - if not provided, take from cap
if ipg_usec == None:
- ts_usec = (meta[0] * 1e6 + meta[1]) / float(speedup)
+ packet_time = meta[0] * 1e6 + meta[1]
+ if i == 1:
+ base_time = packet_time
+ ts_usec = (packet_time - base_time) / float(speedup)
else:
ts_usec = (ipg_usec * i) / float(speedup)
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py
index 97c9035a..e7f04546 100755
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py
@@ -43,7 +43,7 @@ CORE_MASK = 26
DUAL = 27
FLOW_CTRL = 28
SUPPORTED = 29
-RX_FILTER_MODE = 30
+FILE_PATH_NO_CHECK = 30
OUTPUT_FILENAME = 31
ALL_FILES = 32
@@ -54,6 +54,8 @@ IPV4 = 35
DEST = 36
RETRIES = 37
+RX_FILTER_MODE = 38
+
GLOBAL_STATS = 50
PORT_STATS = 51
PORT_STATUS = 52
@@ -440,6 +442,14 @@ OPTIONS_DB = {MULTIPLIER: ArgumentPack(['-m', '--multiplier'],
'type': is_valid_file,
'help': "File path to load"}),
+ FILE_PATH_NO_CHECK: ArgumentPack(['-f'],
+ {'metavar': 'FILE',
+ 'dest': 'file',
+ 'nargs': 1,
+ 'required': True,
+ 'type': str,
+ 'help': "File path to load"}),
+
FILE_FROM_DB: ArgumentPack(['--db'],
{'metavar': 'LOADED_STREAM_PACK',
'help': "A stream pack which already loaded into console cache."}),