Diffstat (limited to 'scripts/automation/regression/setups/trex25')
 scripts/automation/regression/setups/trex25/benchmark.yaml | 252 ++++++++++++++++++++++++++++
 scripts/automation/regression/setups/trex25/config.yaml    |  93 ++++++++++
 2 files changed, 345 insertions(+), 0 deletions(-)
diff --git a/scripts/automation/regression/setups/trex25/benchmark.yaml b/scripts/automation/regression/setups/trex25/benchmark.yaml
new file mode 100644
index 00000000..f87759f9
--- /dev/null
+++ b/scripts/automation/regression/setups/trex25/benchmark.yaml
@@ -0,0 +1,252 @@
+###############################################################
+####         TRex benchmark configuration file            ####
+###############################################################
+
+#### common templates ###
+
+stat_route_dict: &stat_route_dict
+    clients_start           : 16.0.0.1
+    servers_start           : 48.0.0.1
+    dual_port_mask          : 1.0.0.0
+    client_destination_mask : 255.0.0.0
+    server_destination_mask : 255.0.0.0
+
+nat_dict: &nat_dict
+    clients_net_start        : 16.0.0.0
+    client_acl_wildcard_mask : 0.0.0.255
+    dual_port_mask           : 1.0.0.0
+    pool_start               : 200.0.0.0
+    pool_netmask             : 255.255.255.0
+
+
+### stateful ###
+
+test_jumbo:
+    multiplier  : 6
+    cores       : 1
+    bw_per_core : 443.970
+
+
+test_routing_imix:
+    multiplier  : 4
+    cores       : 1
+    bw_per_core : 26.509
+
+
+test_routing_imix_64:
+    multiplier  : 600
+    cores       : 1
+    bw_per_core : 6.391
+
+
+test_static_routing_imix:
+    stat_route_dict : *stat_route_dict
+    multiplier      : 2.8
+    cores           : 1
+    bw_per_core     : 24.510
+
+
+test_static_routing_imix_asymmetric:
+    stat_route_dict : *stat_route_dict
+    multiplier      : 3.2
+    cores           : 1
+    bw_per_core     : 28.229
+
+
+test_ipv6_simple:
+    multiplier  : 6
+    cores       : 1
+    bw_per_core : 19.185
+
+
+test_nat_simple_mode1: &test_nat_simple
+    stat_route_dict   : *stat_route_dict
+    nat_dict          : *nat_dict
+    multiplier        : 2200
+    cores             : 1
+    allow_timeout_dev : True
+    bw_per_core       : 32.171
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_learning:
+    << : *test_nat_simple
+    nat_opened : 40000
+
+
+test_nbar_simple:
+    multiplier  : 6
+    cores       : 1
+    bw_per_core : 16.645
+    nbar_classification:
+        http        : 24.55
+        rtp         : 19.15
+        sqlnet      : 10.38
+        secure-http : 5.11
+        citrix      : 4.68
+        mapi        : 4.04
+        dns         : 1.56
+        sctp        : 0.66
+        smtp        : 0.48
+        pop3        : 0.30
+        novadigm    : 0.09
+        sip         : 0.08
+        h323        : 0.05
+        rtsp        : 0.04
+        unknown     : 28.52
+
+
+test_rx_check_http: &rx_http
+    multiplier     : 8800
+    cores          : 1
+    rx_sample_rate : 16
+    bw_per_core    : 31.389
+
+test_rx_check_http_ipv6:
+    << : *rx_http
+    bw_per_core : 37.114
+
+test_rx_check_http_negative:
+    << : *rx_http
+    stat_route_dict : *stat_route_dict
+    nat_dict        : *nat_dict
+
+
+test_rx_check_sfr: &rx_sfr
+    multiplier     : 6.8
+    cores          : 1
+    rx_sample_rate : 16
+    bw_per_core    : 16.063
+
+test_rx_check_sfr_ipv6:
+    << : *rx_sfr
+    bw_per_core : 19.663
+
+
+### stateless ###
+
+test_CPU_benchmark:
+    profiles:
+      - name        : stl/udp_1pkt_simple.py
+        kwargs      : {packet_len: 64}
+        cpu_util    : 1
+        bw_per_core : 1
+
+      - name        : stl/udp_1pkt_simple.py
+        kwargs      : {packet_len: 64, packet_count: 10}
+        cpu_util    : 1
+        bw_per_core : 1
+
+      - name        : stl/udp_1pkt_simple.py
+        kwargs      : {packet_len: 64, packet_count: 100}
+        cpu_util    : 1
+        bw_per_core : 1
+
+# causes queue full
+#      - name        : stl/udp_1pkt_simple.py
+#        kwargs      : {packet_len: 64, packet_count: 1000}
+#        cpu_util    : 1
+#        bw_per_core : 1
+
+      - name        : stl/udp_1pkt_simple.py
+        kwargs      : {packet_len: 128}
+        cpu_util    : 1
+        bw_per_core : 1
+
+      - name        : stl/udp_1pkt_simple.py
+        kwargs      : {packet_len: 256}
+        cpu_util    : 1
+        bw_per_core : 1
+
+      - name        : stl/udp_1pkt_simple.py
+        kwargs      : {packet_len: 512}
+        cpu_util    : 1
+        bw_per_core : 1
+
+      - name        : stl/udp_1pkt_simple.py
+        kwargs      : {packet_len: 1500}
+        cpu_util    : 1
+        bw_per_core : 1
+
+      - name        : stl/udp_1pkt_simple.py
+        kwargs      : {packet_len: 4000}
+        cpu_util    : 1
+        bw_per_core : 1
+
+      - name        : stl/udp_1pkt_simple.py
+        kwargs      : {packet_len: 9000}
+        cpu_util    : 1
+        bw_per_core : 1
+
+# problem stabilizing CPU utilization at this setup
+#      - name        : stl/udp_1pkt_simple.py
+#        kwargs      : {packet_len: 9000, packet_count: 10}
+#        cpu_util    : 1
+#        bw_per_core : 1
+
+# problem stabilizing CPU utilization at this setup
+#      - name        : stl/udp_1pkt_simple.py
+#        kwargs      : {packet_len: 9000, packet_count: 100}
+#        cpu_util    : 1
+#        bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+#      - name        : stl/udp_1pkt_simple.py
+#        kwargs      : {packet_len: 9000, packet_count: 1000}
+#        cpu_util    : 1
+#        bw_per_core : 1
+
+      - name        : stl/imix.py
+        cpu_util    : 1
+        bw_per_core : 1
+
+      - name        : stl/udp_1pkt_tuple_gen.py
+        kwargs      : {packet_len: 64}
+        cpu_util    : 1
+        bw_per_core : 1
+
+      - name        : stl/udp_1pkt_tuple_gen.py
+        kwargs      : {packet_len: 128}
+        cpu_util    : 1
+        bw_per_core : 1
+
+      - name        : stl/udp_1pkt_tuple_gen.py
+        kwargs      : {packet_len: 256}
+        cpu_util    : 1
+        bw_per_core : 1
+
+      - name        : stl/udp_1pkt_tuple_gen.py
+        kwargs      : {packet_len: 512}
+        cpu_util    : 1
+        bw_per_core : 1
+
+      - name        : stl/udp_1pkt_tuple_gen.py
+        kwargs      : {packet_len: 1500}
+        cpu_util    : 1
+        bw_per_core : 1
+
+      - name        : stl/udp_1pkt_tuple_gen.py
+        kwargs      : {packet_len: 4000}
+        cpu_util    : 1
+        bw_per_core : 1
+
+      - name        : stl/udp_1pkt_tuple_gen.py
+        kwargs      : {packet_len: 9000}
+        cpu_util    : 1
+        bw_per_core : 1
+
+      - name        : stl/pcap.py
+        kwargs      : {ipg_usec: 2, loop_count: 0}
+        cpu_util    : 1
+        bw_per_core : 1
+
+      - name        : stl/udp_rand_len_9k.py
+        cpu_util    : 1
+        bw_per_core : 1
+
+      - name        : stl/hlt/hlt_udp_rand_len_9k.py
+        cpu_util    : 1
+        bw_per_core : 1
+
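The benchmark file above leans on YAML node reuse: "&name" defines an anchor, "*name" aliases the whole anchored node (so test_nat_simple_mode2 carries exactly the same settings as test_nat_simple_mode1), and "<< : *name" merges the anchored mapping into the current one so individual keys can be added or overridden (test_nat_learning adds nat_opened on top of the shared NAT settings; test_rx_check_http_ipv6 overrides only bw_per_core while inheriting the rest of rx_http). A minimal sketch of how these resolve on load, assuming PyYAML (the yaml import is an assumption, not necessarily the loader the regression scripts use):

import yaml  # PyYAML resolves anchors, aliases and "<<" merge keys during loading

doc = """
stat_route_dict: &stat_route_dict
  clients_start : 16.0.0.1
  servers_start : 48.0.0.1

test_nat_simple_mode1: &test_nat_simple
  stat_route_dict : *stat_route_dict
  multiplier      : 2200

test_nat_simple_mode2: *test_nat_simple   # whole-node alias: same content as mode1

test_nat_learning:
  << : *test_nat_simple                   # merge the shared keys...
  nat_opened : 40000                      # ...then add one of our own
"""

data = yaml.safe_load(doc)
assert data['test_nat_simple_mode2'] == data['test_nat_simple_mode1']
assert data['test_nat_learning']['multiplier'] == 2200
assert data['test_nat_learning']['nat_opened'] == 40000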
diff --git a/scripts/automation/regression/setups/trex25/config.yaml b/scripts/automation/regression/setups/trex25/config.yaml
new file mode 100644
index 00000000..821208a5
--- /dev/null
+++ b/scripts/automation/regression/setups/trex25/config.yaml
@@ -0,0 +1,93 @@
+################################################################
+####        T-Rex nightly test configuration file           ####
+################################################################
+
+
+### T-Rex configuration:
+# hostname       - DNS name or IP of the TRex machine, used to ssh to the box
+# is_dual        - should TRex inject with -p ?
+# version_path   - path to the t-rex version and executable
+# cores          - how many cores should be used
+# latency        - rate of latency packets injected by TRex
+# modes          - list of modes (tagging) of this setup (loopback, virtual etc.)
+#    * loopback  - TRex works via loopback; router and TFTP configurations may be skipped
+#    * VM        - virtual OS (accept low CPU utilization in tests; latency can get spikes)
+#    * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname       - the router hostname, as it appears in the ______# CLI prefix
+# ip_address     - the router's IP address, used to communicate with it
+# image          - the image to be loaded as the router's running config
+# line_password  - router password when accessed via Telnet
+# en_password    - router password when changing to "enable" mode
+# interfaces     - an array of client-server pairs, representing the interface configuration of the router
+# configurations - an array of configurations that may be loaded into the router during the test.
+# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench + +### TFTP configuration: +# hostname - the tftp hostname +# ip_address - the tftp's ip address +# images_path - the tftp's relative path in which the router's images are located + +### Test_misc configuration: +# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test + +trex: + hostname : csi-trex-25 + cores : 2 + modes : ['1G'] + +router: + model : ASR1004(RP2) + hostname : csi-mcp-asr1k-4ru-12 + ip_address : 10.56.217.181 + image : asr1000rp2-adventerprisek9.BLD_V151_1_S_XE32_THROTTLE_LATEST_20100926_034325_2.bin + line_password : cisco + en_password : cisco + mgmt_interface : GigabitEthernet0/0/0 + clean_config : clean_config.cfg + intf_masking : 255.255.255.0 + ipv6_mask : 64 + interfaces : + - client : + name : GigabitEthernet0/1/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : GigabitEthernet0/1/1 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : + - client : + name : GigabitEthernet0/1/2 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : GigabitEthernet0/1/4 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : + - client : + name : GigabitEthernet0/1/5 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : GigabitEthernet0/1/3 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : + - client : + name : GigabitEthernet0/1/6 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : GigabitEthernet0/1/7 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : + +tftp: + hostname : ats-asr-srv-1 + ip_address : 10.56.128.23 + root_dir : /auto/avc-devtest/ + images_path : /images/1RU/ |