Diffstat (limited to 'scripts/automation/regression/setups/kiwi02')
-rw-r--r--  scripts/automation/regression/setups/kiwi02/benchmark.yaml | 298
-rw-r--r--  scripts/automation/regression/setups/kiwi02/config.yaml    |  95
2 files changed, 393 insertions(+), 0 deletions(-)
diff --git a/scripts/automation/regression/setups/kiwi02/benchmark.yaml b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
new file mode 100644
index 00000000..41688906
--- /dev/null
+++ b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
@@ -0,0 +1,298 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+### common templates ###
+
+stat_route_dict: &stat_route_dict
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+
+nat_dict: &nat_dict
+ clients_net_start : 16.0.0.0
+ client_acl_wildcard_mask : 0.0.0.255
+ dual_port_mask : 1.0.0.0
+ pool_start : 200.0.0.0
+ pool_netmask : 255.255.255.0
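+
+# The "&" entries above define YAML anchors; the tests below reference them
+# with aliases (*stat_route_dict, *nat_dict) so these address blocks are written once.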
+
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 55
+ cores : 1
+ bw_per_core : 647.305
+
+
+test_routing_imix:
+ multiplier : 32
+ cores : 2
+ bw_per_core : 39.131
+
+
+test_routing_imix_64:
+ multiplier : 2500
+ cores : 4
+ bw_per_core : 7.427
+
+
+test_static_routing_imix:
+ stat_route_dict : *stat_route_dict
+ multiplier : 32
+ cores : 2
+ bw_per_core : 39.039
+
+
+test_static_routing_imix_asymmetric:
+ stat_route_dict : *stat_route_dict
+ multiplier : 16
+ cores : 1
+ bw_per_core : 38.796
+
+
+test_ipv6_simple:
+ multiplier : 32
+ cores : 4
+ bw_per_core : 19.283
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 10000
+ cores : 1
+ allow_timeout_dev : True
+ bw_per_core : 45.304
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning:
+ << : *test_nat_simple
+ nat_opened : 100000
+
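+# test_nat_simple_mode2/3 above reuse the anchored mapping as-is, while the
+# merge key (<<) in test_nat_learning merges it and then adds or overrides keys.
+# Illustrative expansion of test_nat_learning (commented out; pure YAML semantics):
+# test_nat_learning:
+#     stat_route_dict   : *stat_route_dict
+#     nat_dict          : *nat_dict
+#     multiplier        : 10000
+#     cores             : 1
+#     allow_timeout_dev : True
+#     bw_per_core       : 45.304
+#     nat_opened        : 100000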
+
+test_nbar_simple:
+ multiplier : 20
+ cores : 2
+ bw_per_core : 18.243
+ nbar_classification:
+ http : 30.41
+ rtp_audio : 21.22
+ rtp : 11.4
+ oracle_sqlnet : 11.3
+ exchange : 10.95
+ citrix : 5.65
+ rtsp : 2.67
+ dns : 1.95
+ smtp : 0.57
+ pop3 : 0.36
+ sctp : 0.09
+ sip : 0.09
+ ssl : 0.06
+ unknown : 3.2
+
+
+test_rx_check_http: &rx_http
+ multiplier : 40000
+ cores : 2
+ rx_sample_rate : 32
+ error_tolerance : 0.01
+ bw_per_core : 38.071
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 46.733
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 25
+ cores : 4
+ rx_sample_rate : 32
+ error_tolerance : 0.01
+ bw_per_core : 16.915
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 20.323
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# disabled: causes queue full with stream_count 1000
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# disabled: not enough memory; queue full if memory is increased
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
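+
+# Each profiles entry above follows the same schema (illustrative, commented out):
+# - name        : path to a stateless profile script
+#   kwargs      : optional keyword arguments forwarded to the profile, e.g. {packet_len: 64}
+#   cpu_util    : reference CPU-utilization figure for the benchmark (assumed meaning)
+#   bw_per_core : reference bandwidth-per-core figure (assumed meaning)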
+
+
+
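+# The min/max pairs under mpps_per_core_golden below presumably bound the
+# measured MPPS per core; a result outside [min, max] fails the benchmark.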
+test_performance_vm_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 11.5
+ max: 13.1
+
+
+test_performance_vm_single_cpu_cached:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 22.0
+ max: 25.0
+
+
+
+test_performance_syn_attack_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 9.5
+ max: 11.5
+
+test_performance_vm_multi_cpus:
+ cfg:
+ core_count : 4
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 9.7
+ max: 12.5
+
+
+test_performance_vm_multi_cpus_cached:
+ cfg:
+ core_count : 4
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 19.0
+ max: 22.0
+
+test_performance_syn_attack_multi_cpus:
+ cfg:
+ core_count : 4
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 8.5
+ max: 10.5
+
diff --git a/scripts/automation/regression/setups/kiwi02/config.yaml b/scripts/automation/regression/setups/kiwi02/config.yaml
new file mode 100644
index 00000000..d6c13a22
--- /dev/null
+++ b/scripts/automation/regression/setups/kiwi02/config.yaml
@@ -0,0 +1,95 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname       - DNS name or IP address of the TRex machine, used to ssh into the box
+# password       - root password for the TRex machine
+# is_dual        - whether TRex should inject with -p
+# version_path   - path to the TRex version and executable
+# cores          - how many cores should be used
+# latency        - rate of latency packets injected by TRex
+# modes          - list of mode tags for this setup (loopback, virtual etc.); an illustrative example follows the list below
+#       * loopback  - TRex runs in loopback; router and TFTP configurations may be skipped.
+#       * VM        - virtual OS (accept low CPU utilization in tests; latency can spike)
+#       * virt_nics - NICs are virtual (VMXNET3 etc.)
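+#       e.g. (illustrative only; the trex section of this setup leaves modes unset):
+#       modes : ['loopback', 'virt_nics']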
+
+### Router configuration:
+# hostname       - the router hostname, as it appears in the ______# CLI prefix
+# ip_address     - the router's IP address, used to communicate with it
+# image          - the image to be loaded as the router's running config
+# line_password  - router password when accessed via Telnet
+# en_password    - router password when changing to "enable" mode
+# interfaces     - an array of client-server pairs, representing the router's interface configurations
+# configurations - an array of configurations that may be loaded into the router during the test.
+#                  The "clean" configuration is mandatory; the router loads it to run the basic test bench
+
+### TFTP configuration:
+# hostname    - the TFTP server hostname
+# ip_address  - the TFTP server's IP address
+# root_dir    - the TFTP server's root directory
+# images_path - the relative path under which the router images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) expected from the test
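+
+# An illustrative, commented-out trex section using the keys documented above.
+# All values are placeholders, not real hosts or paths:
+# trex:
+#   hostname     : <DNS name or IP>
+#   password     : <root password>
+#   is_dual      : yes
+#   version_path : <path to the TRex version under test>
+#   cores        : 4
+#   modes        : []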
+
+trex:
+ hostname : 10.56.217.210 #10.56.192.189
+ cores : 4
+
+router:
+ model : ESP100
+ hostname : csi-mcp-asr1k-40
+ ip_address : 10.56.192.57
+    image           : BLD_V155_2_S_XE315_THROTTLE_LATEST_20150424_100040-std.bin # resides on the router's hard disk
+ #image : asr1000rp2-adventerprisek9.2014-11-10_18.33_etis.bin
+ line_password : cisco
+ en_password : cisco
+ mgmt_interface : GigabitEthernet0
+ clean_config : /tmp/asr1001_TRex_clean_config.cfg
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : TenGigabitEthernet0/0/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet0/1/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name : duplicate
+ - client :
+ name : TenGigabitEthernet0/2/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet0/3/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name : duplicate
+ - client :
+ name : TenGigabitEthernet1/0/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet1/1/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name :
+ - client :
+ name : TenGigabitEthernet1/2/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet1/3/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name :
+
+
+tftp:
+ hostname : kiwi02_tftp_server
+ ip_address : 10.56.217.7
+ root_dir : /scratch/tftp/
+ images_path : hhaim/