author    itraviv <itraviv@cisco.com>    2016-07-31 11:56:41 +0300
committer itraviv <itraviv@cisco.com>    2016-07-31 11:56:41 +0300
commit    893d0feef9ba6fa3fb36c49f4b5bcad47cb2bf60 (patch)
tree      689a09fa656f990672d2d62143dc173a46fe0316 /scripts/automation/regression
parent    abf329075bd14f5f41c3753d560260ac809ec4f3 (diff)
parent    dceb010b01e9f8a0e9c905370d39f149f01cab7e (diff)
Merge branch 'master' into scapy_server
Diffstat (limited to 'scripts/automation/regression')
-rwxr-xr-x  scripts/automation/regression/CPlatform.py                            |   7
-rwxr-xr-x  scripts/automation/regression/aggregate_results.py                    |   5
-rw-r--r--  scripts/automation/regression/functional_tests/stl_basic_tests.py     |   8
-rw-r--r--  scripts/automation/regression/setups/kiwi02/benchmark.yaml            |   2
-rw-r--r--  scripts/automation/regression/setups/trex-dan/benchmark.yaml          |   2
-rw-r--r--  scripts/automation/regression/setups/trex07/benchmark.yaml            | 374
-rw-r--r--  scripts/automation/regression/setups/trex14/benchmark.yaml            |  12
-rw-r--r--  scripts/automation/regression/setups/trex15/benchmark.yaml            | 155
-rw-r--r--  scripts/automation/regression/setups/trex15/config.yaml               |  39
-rw-r--r--  scripts/automation/regression/setups/trex25/benchmark.yaml            |   2
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_general_test.py     |  71
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_nat_test.py         |   3
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_rx_test.py          |   2
-rwxr-xr-x  scripts/automation/regression/stateless_tests/stl_examples_test.py    |   2
-rw-r--r--  scripts/automation/regression/stateless_tests/stl_general_test.py     |  47
-rw-r--r--  scripts/automation/regression/stateless_tests/stl_rx_test.py          | 337
-rwxr-xr-x  scripts/automation/regression/stateless_tests/trex_client_pkg_test.py |   6
-rw-r--r--  scripts/automation/regression/trex.py                                  |   3
-rwxr-xr-x  scripts/automation/regression/trex_unit_test.py                       |  65
19 files changed, 853 insertions, 289 deletions
diff --git a/scripts/automation/regression/CPlatform.py b/scripts/automation/regression/CPlatform.py
index de1c22ce..dc5418cb 100755
--- a/scripts/automation/regression/CPlatform.py
+++ b/scripts/automation/regression/CPlatform.py
@@ -20,7 +20,7 @@ class CPlatform(object):
self.tftp_cfg = None
self.config_history = { 'basic_if_config' : False, 'tftp_server_config' : False }
- def configure_basic_interfaces(self, mtu = 4000):
+ def configure_basic_interfaces(self, mtu = 9050):
cache = CCommandCache()
for dual_if in self.if_mngr.get_dual_if_list():
@@ -46,7 +46,7 @@ class CPlatform(object):
- def configure_basic_filtered_interfaces(self, intf_list, mtu = 4000):
+ def configure_basic_filtered_interfaces(self, intf_list, mtu = 9050):
cache = CCommandCache()
for intf in intf_list:
@@ -70,11 +70,10 @@ class CPlatform(object):
res = self.cmd_link.run_single_command(cache)
if 'Rollback Done' not in res:
print('Failed to load clean config, trying again')
+ time.sleep(2)
if i < 4:
continue
raise Exception('Could not load clean config, response: %s' % res)
- if i > 0: # were errors, better to wait
- time.sleep(2)
def config_pbr (self, mode = 'config'):
idx = 1
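A minimal sketch of the retry flow after this change, assuming the surrounding CPlatform code as shown in the hunk (the standalone helper itself is hypothetical); the point of the fix is that time.sleep(2) now runs before each retry instead of after the loop has already given up:

    import time

    def load_clean_config_with_retry(cmd_link, cache, retries=5):
        # ask the router to roll back to the clean config, retrying a few times
        for i in range(retries):
            res = cmd_link.run_single_command(cache)
            if 'Rollback Done' in res:
                return res
            print('Failed to load clean config, trying again')
            time.sleep(2)  # give the router a moment before the next attempt
        raise Exception('Could not load clean config, response: %s' % res)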
diff --git a/scripts/automation/regression/aggregate_results.py b/scripts/automation/regression/aggregate_results.py
index eb0632ec..c7c61ea6 100755
--- a/scripts/automation/regression/aggregate_results.py
+++ b/scripts/automation/regression/aggregate_results.py
@@ -622,11 +622,13 @@ if __name__ == '__main__':
last_status = category_dict_status.get(scenario, 'Successful') # assume last is passed if no history
if err or len(error_tests): # has fails
+ exit_status = 1
if is_good_status(last_status):
current_status = 'Failure'
else:
current_status = 'Still Failing'
else:
+ exit_status = 0
if is_good_status(last_status):
current_status = 'Successful'
else:
@@ -652,3 +654,6 @@ if __name__ == '__main__':
with open(args.output_titlefile, 'w') as f:
print('Writing output file: %s' % args.output_titlefile)
f.write(mailtitle_output)
+
+# exit with overall status, so callers (e.g. CI) can detect failures
+ sys.exit(exit_status)
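With sys.exit(exit_status) in place, the aggregated result is visible in the process return code, so a caller no longer has to parse the report files to know whether the run failed. A minimal sketch of a hypothetical CI wrapper built on that (script path from this repo; the script's real arguments are elided):

    import subprocess, sys

    rc = subprocess.call([sys.executable,
                          'scripts/automation/regression/aggregate_results.py'])
    # non-zero now means the aggregated results contain failures
    sys.exit(rc)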
diff --git a/scripts/automation/regression/functional_tests/stl_basic_tests.py b/scripts/automation/regression/functional_tests/stl_basic_tests.py
index 863307f1..e03c0742 100644
--- a/scripts/automation/regression/functional_tests/stl_basic_tests.py
+++ b/scripts/automation/regression/functional_tests/stl_basic_tests.py
@@ -85,7 +85,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
pkts1 = list(RawPcapReader(output))
pkts2 = list(RawPcapReader(golden))
- assert_equal(len(pkts1), len(pkts2))
+ assert_equal(len(pkts1), len(pkts2), 'Lengths of generated pcap (%s) and golden (%s) are different' % (output, golden))
for pkt1, pkt2, i in zip(pkts1, pkts2, range(1, len(pkts1))):
ts1 = float(pkt1[1][0]) + (float(pkt1[1][1]) / 1e6)
@@ -143,7 +143,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
os.unlink(output_cap)
try:
rc = self.run_sim(input_file, output_cap, options, silent)
- assert_equal(rc, True)
+ assert_equal(rc, True, 'Simulation on profile %s failed.' % profile)
#s='cp '+output_cap+' '+golden_file;
#print s
#os.system(s)
@@ -165,7 +165,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
profile.dump_to_code(generated_filename)
rc = self.run_sim(generated_filename, output_cap, options, silent)
- assert_equal(rc, True)
+ assert_equal(rc, True, 'Simulation on profile %s (generated) failed.' % profile)
if compare:
self.compare_caps(output_cap, golden_file)
@@ -201,6 +201,8 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
["multi_burst_2st_1000pkt.py","-m 1 -l 100",True],
["pcap.py", "-m 1", True],
["pcap_with_vm.py", "-m 1", True],
+ ["flow_stats.py", "-m 1 -l 1", True],
+ ["flow_stats_latency.py", "-m 1 -l 1", True],
# YAML test
["yaml/burst_1000_pkt.yaml","-m 1 -l 100",True],
diff --git a/scripts/automation/regression/setups/kiwi02/benchmark.yaml b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
index 3332aa5e..e6621085 100644
--- a/scripts/automation/regression/setups/kiwi02/benchmark.yaml
+++ b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
@@ -69,6 +69,8 @@ test_nat_simple_mode1: &test_nat_simple
test_nat_simple_mode2: *test_nat_simple
+test_nat_simple_mode3: *test_nat_simple
+
test_nat_learning:
<< : *test_nat_simple
nat_opened : 100000
diff --git a/scripts/automation/regression/setups/trex-dan/benchmark.yaml b/scripts/automation/regression/setups/trex-dan/benchmark.yaml
index e9f12c45..de56089b 100644
--- a/scripts/automation/regression/setups/trex-dan/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex-dan/benchmark.yaml
@@ -69,6 +69,8 @@ test_nat_simple_mode1: &test_nat_simple
test_nat_simple_mode2: *test_nat_simple
+test_nat_simple_mode3: *test_nat_simple
+
test_nat_learning:
<< : *test_nat_simple
bw_per_core : 7.377
diff --git a/scripts/automation/regression/setups/trex07/benchmark.yaml b/scripts/automation/regression/setups/trex07/benchmark.yaml
index 4778de91..0dc340b0 100644
--- a/scripts/automation/regression/setups/trex07/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex07/benchmark.yaml
@@ -2,169 +2,243 @@
#### TRex benchmark configuration file ####
###############################################################
-test_nbar_simple :
- multiplier : 7.5
- cores : 2
- exp_gbps : 3.5
- cpu_to_core_ratio : 20800000
- cpu2core_custom_dev: YES
- cpu2core_dev : 0.07
- exp_max_latency : 1000
-
- nbar_classification:
- rtp : 32.57
- http : 30.25
- oracle-sqlnet : 11.23
- exchange : 10.80
- citrix : 5.62
- rtsp : 2.84
- dns : 1.95
- smtp : 0.57
- pop3 : 0.36
- ssl : 0.17
- sctp : 0.13
- sip : 0.09
- unknown : 3.41
-
-test_rx_check :
- multiplier : 13
- cores : 3
- rx_sample_rate : 128
- exp_gbps : 6
- cpu_to_core_ratio : 37270000
- exp_bw : 13
- exp_latency : 1
-
-test_nat_simple : &test_nat_simple
- stat_route_dict :
+#### common templates ####
+
+stat_route_dict: &stat_route_dict
clients_start : 16.0.0.1
servers_start : 48.0.0.1
dual_port_mask : 1.0.0.0
client_destination_mask : 255.0.0.0
server_destination_mask : 255.0.0.0
- nat_dict :
+
+nat_dict: &nat_dict
clients_net_start : 16.0.0.0
client_acl_wildcard_mask : 0.0.0.255
dual_port_mask : 1.0.0.0
pool_start : 200.0.0.0
pool_netmask : 255.255.255.0
- multiplier : 12000
- cores : 1
- cpu_to_core_ratio : 37270000
- exp_bw : 1
- exp_latency : 1
- allow_timeout_dev : YES
-
-test_nat_simple_mode1 : *test_nat_simple
-test_nat_simple_mode2 : *test_nat_simple
-
-test_nat_learning :
- stat_route_dict :
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
- multiplier : 12000
- cores : 1
- nat_opened : 40000
- cpu_to_core_ratio : 270
- exp_bw : 8
- exp_latency : 1
- allow_timeout_dev : YES
-
-test_routing_imix_64 :
- multiplier : 430
- cores : 1
- cpu_to_core_ratio : 280
- exp_latency : 1
-
-test_routing_imix :
- multiplier : 10
- cores : 1
- cpu_to_core_ratio : 1800
- exp_latency : 1
-
-test_static_routing_imix :
- stat_route_dict :
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
- multiplier : 8
- cores : 1
- cpu_to_core_ratio : 1800
- exp_latency : 1
-test_static_routing_imix_asymmetric:
- stat_route_dict :
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
- multiplier : 8
- cores : 1
- cpu_to_core_ratio : 1800
- exp_latency : 1
-
-test_ipv6_simple :
- multiplier : 9
- cores : 2
- cpu_to_core_ratio : 30070000
- cpu2core_custom_dev: YES
- cpu2core_dev : 0.07
-
-
-test_rx_check_sfr:
- multiplier : 10
- cores : 2
- rx_sample_rate : 16
- # allow 0.03% errors, bad router
- error_tolerance : 0.03
-
-test_rx_check_http:
- multiplier : 15000
- cores : 1
- rx_sample_rate : 16
- # allow 0.03% errors, bad routerifconfig
- error_tolerance : 0.03
-test_rx_check_sfr_ipv6:
- multiplier : 10
- cores : 2
- rx_sample_rate : 16
- # allow 0.03% errors, bad router
- error_tolerance : 0.03
+### stateful ###
+
+test_jumbo:
+ multiplier : 17
+ cores : 1
+ bw_per_core : 543.232
+
+
+test_routing_imix:
+ multiplier : 10
+ cores : 1
+ bw_per_core : 34.128
+
+
+test_routing_imix_64:
+ multiplier : 430
+ cores : 1
+ bw_per_core : 5.893
+
+
+test_static_routing_imix: &test_static_routing_imix
+ stat_route_dict : *stat_route_dict
+ multiplier : 8
+ cores : 1
+ bw_per_core : 34.339
+
+test_static_routing_imix_asymmetric: *test_static_routing_imix
+
+
+test_ipv6_simple:
+ multiplier : 9
+ cores : 2
+ bw_per_core : 19.064
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 6000
+ cores : 1
+ nat_opened : 500000
+ allow_timeout_dev : True
+ bw_per_core : 44.445
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning: *test_nat_simple
+
+
+test_nbar_simple:
+ multiplier : 7.5
+ cores : 2
+ bw_per_core : 17.174
+ nbar_classification:
+ rtp : 32.57
+ http : 30.25
+ oracle_sqlnet : 11.23
+ exchange : 10.80
+ citrix : 5.62
+ rtsp : 2.84
+ dns : 1.95
+ smtp : 0.57
+ pop3 : 0.36
+ ssl : 0.17
+ sctp : 0.13
+ sip : 0.09
+ unknown : 3.41
+
+
+test_rx_check_http: &rx_http
+ multiplier : 15000
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 39.560
test_rx_check_http_ipv6:
- multiplier : 15000
- cores : 1
- rx_sample_rate : 16
- # allow 0.03% errors, bad router
- error_tolerance : 0.03
-
-test_rx_check_http_negative:
- multiplier : 13000
- cores : 1
- rx_sample_rate : 16
- # allow 0.03% errors, bad router
- error_tolerance : 0.03
- stat_route_dict :
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
- nat_dict :
- clients_net_start : 16.0.0.0
- client_acl_wildcard_mask : 0.0.0.255
- dual_port_mask : 1.0.0.0
- pool_start : 200.0.0.0
- pool_netmask : 255.255.255.0
+ << : *rx_http
+ bw_per_core : 49.237
+
+test_rx_check_http_negative_disabled:
+ << : *rx_http
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 10
+ cores : 3
+ rx_sample_rate : 16
+ bw_per_core : 16.082
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 19.198
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory; queue full if memory is increased
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
-test_jumbo:
- multiplier : 17
- cores : 1
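The rewritten benchmark file leans on YAML anchors (&name), aliases (*name) and merge keys (<<:) to define the stat_route_dict, nat_dict and test templates once and reuse them. A minimal sketch of how a loader resolves these, assuming PyYAML (any YAML 1.1 loader with merge-key support behaves the same):

    import textwrap, yaml

    doc = textwrap.dedent("""
        test_nat_simple_mode1: &test_nat_simple
          multiplier: 6000
          cores: 1
        test_nat_simple_mode2: *test_nat_simple    # identical copy
        test_nat_learning:
          <<: *test_nat_simple                     # inherit, then override
          cores: 2
        """)
    print(yaml.safe_load(doc))
    # {'test_nat_simple_mode1': {'multiplier': 6000, 'cores': 1},
    #  'test_nat_simple_mode2': {'multiplier': 6000, 'cores': 1},
    #  'test_nat_learning': {'multiplier': 6000, 'cores': 2}}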
diff --git a/scripts/automation/regression/setups/trex14/benchmark.yaml b/scripts/automation/regression/setups/trex14/benchmark.yaml
index aa4ac2d4..04f13e79 100644
--- a/scripts/automation/regression/setups/trex14/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex14/benchmark.yaml
@@ -57,17 +57,17 @@ test_ipv6_simple:
test_nat_simple_mode1: &test_nat_simple
stat_route_dict : *stat_route_dict
nat_dict : *nat_dict
- multiplier : 12000
+ multiplier : 6000
cores : 1
- nat_opened : 40000
+ nat_opened : 500000
allow_timeout_dev : True
bw_per_core : 44.445
test_nat_simple_mode2: *test_nat_simple
-test_nat_learning:
- << : *test_nat_simple
- nat_opened : 40000
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning: *test_nat_simple
test_nbar_simple:
@@ -101,7 +101,7 @@ test_rx_check_http_ipv6:
<< : *rx_http
bw_per_core : 49.237
-test_rx_check_http_negative:
+test_rx_check_http_negative_disabled:
<< : *rx_http
stat_route_dict : *stat_route_dict
nat_dict : *nat_dict
diff --git a/scripts/automation/regression/setups/trex15/benchmark.yaml b/scripts/automation/regression/setups/trex15/benchmark.yaml
new file mode 100644
index 00000000..b366b3fb
--- /dev/null
+++ b/scripts/automation/regression/setups/trex15/benchmark.yaml
@@ -0,0 +1,155 @@
+################################################################
+#### TRex benchmark configuration file ####
+################################################################
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 2.8
+ cores : 1
+ bw_per_core : 106.652
+
+
+test_routing_imix:
+ multiplier : 0.5
+ cores : 1
+ bw_per_core : 11.577
+
+
+test_routing_imix_64:
+ multiplier : 28
+ cores : 1
+ bw_per_core : 2.030
+
+
+test_static_routing_imix_asymmetric:
+ multiplier : 0.8
+ cores : 1
+ bw_per_core : 13.742
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory; queue full if memory is increased
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 4, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex15/config.yaml b/scripts/automation/regression/setups/trex15/config.yaml
new file mode 100644
index 00000000..c5fc3b22
--- /dev/null
+++ b/scripts/automation/regression/setups/trex15/config.yaml
@@ -0,0 +1,39 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by TRex
+# modes - list of modes (tagging) of this setup (loopback, virtual etc.)
+# * loopback - TRex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc. have their limitations in tests)
+
+### Router configuration:
+# hostname - the router hostname as appears in the ______# cli prefix
+# ip_address - the router's IP address, used to communicate with it
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the router's interface configurations
+# configurations - an array of configurations that may be loaded into the router during the test.
+# The "clean" configuration is mandatory; the router loads it to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) expected from the test
+
+trex:
+ hostname : csi-trex-15
+ cores : 1
+ modes : [loopback, virt_nics, VM]
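The modes list tags what this setup can do; tests check the tags and skip accordingly (compare the new 'Skip this for virtual NICs' guards in stl_rx_test.py below). A minimal sketch of the pattern, with hypothetical helper names:

    # modes as configured above for csi-trex-15
    modes = {'loopback', 'virt_nics', 'VM'}

    def is_virt_nics():
        return 'virt_nics' in modes

    if is_virt_nics():
        print('skipping: NICs are virtual')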
diff --git a/scripts/automation/regression/setups/trex25/benchmark.yaml b/scripts/automation/regression/setups/trex25/benchmark.yaml
index 19fab1fe..ccbdf6f5 100644
--- a/scripts/automation/regression/setups/trex25/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex25/benchmark.yaml
@@ -70,6 +70,8 @@ test_nat_simple_mode1: &test_nat_simple
test_nat_simple_mode2: *test_nat_simple
+test_nat_simple_mode3: *test_nat_simple
+
test_nat_learning:
<< : *test_nat_simple
nat_opened : 40000
diff --git a/scripts/automation/regression/stateful_tests/trex_general_test.py b/scripts/automation/regression/stateful_tests/trex_general_test.py
index 82b1d9d1..8ff4fdaf 100755
--- a/scripts/automation/regression/stateful_tests/trex_general_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_general_test.py
@@ -139,41 +139,42 @@ class CTRexGeneral_Test(unittest.TestCase):
if res[name] != float(val):
self.fail('TRex results[%s]==%f and not as expected %f ' % (name, res[name], val))
- def check_CPU_benchmark (self, trex_res, err = 25, minimal_cpu = 30, maximal_cpu = 85):
- #cpu_util = float(trex_res.get_last_value("trex-global.data.m_cpu_util"))
- cpu_util = sum(trex_res.get_value_list("trex-global.data.m_cpu_util")[-4:-1]) / 3.0 # mean of 3 values before last
-
- if '1G' in self.modes:
- minimal_cpu /= 10.0
-
- if not self.is_virt_nics:
- if cpu_util > maximal_cpu:
- self.fail("CPU is too high (%s%%), probably queue full." % cpu_util )
- #if cpu_util < minimal_cpu:
- # self.fail("CPU is too low (%s%%), can't verify performance in such low CPU%%." % cpu_util )
-
- test_norm_cpu = sum(trex_res.get_value_list("trex-global.data.m_bw_per_core")[-4:-1]) / 3.0
-
- print("TRex CPU utilization: %g%%, norm_cpu is : %g Gb/core" % (round(cpu_util, 2), round(test_norm_cpu)))
-
- expected_norm_cpu = self.get_benchmark_param('bw_per_core')
- if not expected_norm_cpu:
- expected_norm_cpu = 1
-
- calc_error_precent = abs(100.0 * test_norm_cpu / expected_norm_cpu - 100)
- print('Err percent: %s' % calc_error_precent)
- #if calc_error_precent > err and cpu_util > 10:
- # self.fail('Excepted bw_per_core ratio: %s, got: %g' % (expected_norm_cpu, round(test_norm_cpu)))
-
- # report benchmarks
- if self.GAManager:
- try:
- setup_test = '%s.%s' % (CTRexScenario.setup_name, self.get_name())
- self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core', value = int(test_norm_cpu))
- self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core_exp', value = int(expected_norm_cpu))
- self.GAManager.emptyAndReportQ()
- except Exception as e:
- print('Sending GA failed: %s' % e)
+ def check_CPU_benchmark (self, trex_res, err = 25, minimal_cpu = 10, maximal_cpu = 85):
+ cpu_util = trex_res.get_avg_steady_state_value('trex-global.data.m_cpu_util_raw')
+ trex_tx_bps = trex_res.get_avg_steady_state_value('trex-global.data.m_tx_bps')
+ expected_norm_cpu = self.get_benchmark_param('bw_per_core')
+ cores = self.get_benchmark_param('cores')
+ ports_count = trex_res.get_ports_count()
+ test_norm_cpu = trex_tx_bps / (cpu_util * ports_count * cores * 2.5e6)
+
+ if '1G' in self.modes:
+ minimal_cpu /= 10.0
+
+ if not self.is_virt_nics:
+ if cpu_util > maximal_cpu:
+ self.fail("CPU is too high (%s%%), probably queue full." % cpu_util )
+ #if cpu_util < minimal_cpu:
+ # self.fail("CPU is too low (%s%%), can't verify performance in such low CPU%%." % cpu_util )
+
+ print("TRex CPU utilization: %g%%, norm_cpu is : %g Gb/core" % (round(cpu_util, 2), round(test_norm_cpu, 2)))
+
+ if not expected_norm_cpu:
+ expected_norm_cpu = 1
+
+ calc_error_percent = abs(100.0 * test_norm_cpu / expected_norm_cpu - 100)
+ print('Err percent: %s' % calc_error_percent)
+ #if calc_error_percent > err and cpu_util > 10:
+ # self.fail('Expected bw_per_core ratio: %s, got: %g' % (expected_norm_cpu, round(test_norm_cpu)))
+
+ # report benchmarks
+ if self.GAManager:
+ try:
+ setup_test = '%s.%s' % (CTRexScenario.setup_name, self.get_name())
+ self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core', value = int(test_norm_cpu))
+ self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core_exp', value = int(expected_norm_cpu))
+ self.GAManager.emptyAndReportQ()
+ except Exception as e:
+ print('Sending GA failed: %s' % e)
def check_results_gt (self, res, name, val):
if res is None:
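A worked example of the new normalization in check_CPU_benchmark (the formula and the 2.5e6 scale factor are taken from the diff; the sample numbers are hypothetical): TX bits per second are divided by raw CPU percent, port count and core count to give Gb per core.

    trex_tx_bps = 20e9   # 20 Gb/s total TX (hypothetical)
    cpu_util    = 80.0   # raw CPU percent (hypothetical)
    ports_count = 2
    cores       = 1

    test_norm_cpu = trex_tx_bps / (cpu_util * ports_count * cores * 2.5e6)
    print('%g Gb/core' % test_norm_cpu)   # -> 50 Gb/core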
diff --git a/scripts/automation/regression/stateful_tests/trex_nat_test.py b/scripts/automation/regression/stateful_tests/trex_nat_test.py
index 6e030ffe..c23f67c4 100755
--- a/scripts/automation/regression/stateful_tests/trex_nat_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_nat_test.py
@@ -93,6 +93,9 @@ class CTRexNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
def test_nat_simple_mode2(self):
self.nat_simple_helper(learn_mode=2)
+ def test_nat_simple_mode3(self):
+ self.nat_simple_helper(learn_mode=3)
+
def nat_simple_helper(self, learn_mode=1):
# test initialization
self.router.configure_basic_interfaces()
diff --git a/scripts/automation/regression/stateful_tests/trex_rx_test.py b/scripts/automation/regression/stateful_tests/trex_rx_test.py
index 40528d16..c08ad1ea 100755
--- a/scripts/automation/regression/stateful_tests/trex_rx_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_rx_test.py
@@ -250,7 +250,7 @@ class CTRexRx_Test(CTRexGeneral_Test):
print('Run until finish, expect errors')
old_errors = copy.deepcopy(self.fail_reasons)
- nat_dict = self.get_benchmark_param('nat_dict', test_name = 'test_nat_simple')
+ nat_dict = self.get_benchmark_param('nat_dict', test_name = 'test_nat_simple_mode1')
nat_obj = CNatConfig(nat_dict)
self.router.config_nat(nat_obj)
self.router.config_zbf()
diff --git a/scripts/automation/regression/stateless_tests/stl_examples_test.py b/scripts/automation/regression/stateless_tests/stl_examples_test.py
index d8b85dfc..71fc3287 100755
--- a/scripts/automation/regression/stateless_tests/stl_examples_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_examples_test.py
@@ -10,14 +10,12 @@ class STLExamples_Test(CStlGeneral_Test):
def explicitSetUp(self):
# examples connect by their own
if self.is_connected():
- self.recover_after_trex_210_issue()
CTRexScenario.stl_trex.disconnect()
def explicitTearDown(self):
# connect back at end of tests
if not self.is_connected():
self.stl_trex.connect()
- self.recover_after_trex_210_issue()
def test_stl_examples(self):
examples_dir = '../trex_control_plane/stl/examples'
diff --git a/scripts/automation/regression/stateless_tests/stl_general_test.py b/scripts/automation/regression/stateless_tests/stl_general_test.py
index 82738f96..5ae2b326 100644
--- a/scripts/automation/regression/stateless_tests/stl_general_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_general_test.py
@@ -6,10 +6,6 @@ from trex_stl_lib.api import *
import time
from nose.tools import nottest
-def setUpModule():
- if CTRexScenario.stl_trex.is_connected():
- CStlGeneral_Test.recover_after_trex_210_issue()
-
class CStlGeneral_Test(CTRexGeneral_Test):
"""This class defines the general stateless testcase of the TRex traffic generator"""
@@ -20,21 +16,6 @@ class CStlGeneral_Test(CTRexGeneral_Test):
if CTRexScenario.stl_init_error:
self.skip(CTRexScenario.stl_init_error)
- # workaround of http://trex-tgn.cisco.com/youtrack/issue/trex-210
- @staticmethod
- def recover_after_trex_210_issue():
- return
- for i in range(20):
- try:
- stl_map_ports(CTRexScenario.stl_trex)
- break
- except:
- CTRexScenario.stl_trex.disconnect()
- time.sleep(0.5)
- CTRexScenario.stl_trex.connect()
- # verify problem is solved
- stl_map_ports(CTRexScenario.stl_trex)
-
def connect(self, timeout = 100):
# need delay and check only because TRex process might be still starting
sys.stdout.write('Connecting')
@@ -85,18 +66,22 @@ class STLBasic_Test(CStlGeneral_Test):
@nottest
def test_connectivity(self):
if not self.is_loopback:
- if CTRexScenario.router_cfg['forceImageReload']:
- CTRexScenario.router.load_clean_config()
- CTRexScenario.router.configure_basic_interfaces()
- CTRexScenario.router.config_pbr(mode = "config")
-
- err = 'Client could not connect'
- CTRexScenario.stl_init_error = err
+ try:
+ if CTRexScenario.router_cfg['forceImageReload']:
+ CTRexScenario.router.load_clean_config()
+ CTRexScenario.router.configure_basic_interfaces()
+ CTRexScenario.router.config_pbr(mode = "config")
+ except Exception as e:
+ CTRexScenario.stl_init_error = 'Could not configure device, err: %s' % e
+ self.fail(CTRexScenario.stl_init_error)
+ return
if not self.connect():
- self.fail(err)
- err = 'Client could not map ports'
- CTRexScenario.stl_init_error = err
+ CTRexScenario.stl_init_error = 'Client could not connect'
+ self.fail(CTRexScenario.stl_init_error)
+ return
+ print('Connected')
if not self.map_ports():
- self.fail(err)
+ CTRexScenario.stl_init_error = 'Client could not map ports'
+ self.fail(CTRexScenario.stl_init_error)
+ return
print('Got ports mapping: %s' % CTRexScenario.stl_ports_map)
- CTRexScenario.stl_init_error = None
diff --git a/scripts/automation/regression/stateless_tests/stl_rx_test.py b/scripts/automation/regression/stateless_tests/stl_rx_test.py
index 80e6bee6..090261ff 100644
--- a/scripts/automation/regression/stateless_tests/stl_rx_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_rx_test.py
@@ -3,16 +3,14 @@ from .stl_general_test import CStlGeneral_Test, CTRexScenario
from trex_stl_lib.api import *
import os, sys
+ERROR_LATENCY_TOO_HIGH = 1
+
class STLRX_Test(CStlGeneral_Test):
"""Tests for RX feature"""
def setUp(self):
- #if CTRexScenario.setup_name in ('trex08', 'trex09'):
- # self.skip('This test makes trex08 and trex09 sick. Fix those ASAP.')
- if self.is_virt_nics:
- self.skip('Skip this for virtual NICs for now')
- per_driver_params = {"rte_vmxnet3_pmd": [1, 50, 1], "rte_ixgbe_pmd": [30, 5000, 1], "rte_i40e_pmd": [80, 5000, 1],
- "rte_igb_pmd": [80, 500, 1], "rte_em_pmd": [1, 50, 1], "rte_virtio_pmd": [1, 50, 1]}
+ per_driver_params = {"rte_vmxnet3_pmd": [1, 50, 1, False], "rte_ixgbe_pmd": [30, 1000, 1, True, 300, 400], "rte_i40e_pmd": [80, 1000, 1, True, 100, 250],
+ "rte_igb_pmd": [80, 500, 1, False], "rte_em_pmd": [1, 50, 1, False], "rte_virtio_pmd": [1, 50, 1, False]}
CStlGeneral_Test.setUp(self)
assert 'bi' in CTRexScenario.stl_ports_map
@@ -22,22 +20,51 @@ class STLRX_Test(CStlGeneral_Test):
self.tx_port, self.rx_port = CTRexScenario.stl_ports_map['bi'][0]
port_info = self.c.get_port_info(ports = self.rx_port)[0]
+ self.speed = port_info['speed']
+
+
cap = port_info['rx']['caps']
if "flow_stats" not in cap or "latency" not in cap:
self.skip('port {0} does not support RX'.format(self.rx_port))
self.cap = cap
- self.rate_percent = per_driver_params[port_info['driver']][0]
- self.total_pkts = per_driver_params[port_info['driver']][1]
- if len(per_driver_params[port_info['driver']]) > 2:
- self.rate_lat = per_driver_params[port_info['driver']][2]
+ drv_name = port_info['driver']
+ self.rate_percent = per_driver_params[drv_name][0]
+ self.total_pkts = per_driver_params[drv_name][1]
+ if len(per_driver_params[drv_name]) > 2:
+ self.rate_lat = per_driver_params[drv_name][2]
else:
self.rate_lat = self.rate_percent
+ self.lat_pps = 1000
self.drops_expected = False
self.c.reset(ports = [self.tx_port, self.rx_port])
+ vm = STLScVmRaw( [ STLVmFlowVar ( "ip_src", min_value="10.0.0.1",
+ max_value="10.0.0.255", size=4, step=1,op="inc"),
+ STLVmWrFlowVar (fv_name="ip_src", pkt_offset= "IP.src" ), # write ip to packet IP.src
+ STLVmFixIpv4(offset = "IP") # fix checksum
+ ]
+ # Latency is bound to one core. We test that this option is not causing trouble
+ ,split_by_field = "ip_src"
+ ,cache_size =255 # Cache is ignored by latency flows. Need to test it is not crashing.
+ );
+
self.pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('Your_paylaod_comes_here'))
self.large_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*1000))
+ self.pkt_9k = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*9000))
+ self.vm_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")
+ / UDP(dport=12,sport=1025)/('Your_paylaod_comes_here')
+ , vm = vm)
+ self.vm_large_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*1000)
+ , vm = vm)
+ self.vm_9k_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*9000)
+ ,vm = vm)
+
+ self.latency_9k_enable=per_driver_params[drv_name][3]
+ if self.latency_9k_enable:
+ self.latency_9k_max_average = per_driver_params[drv_name][4]
+ self.latency_9k_max_latency = per_driver_params[drv_name][5]
+
@classmethod
def tearDownClass(cls):
@@ -48,6 +75,36 @@ class STLRX_Test(CStlGeneral_Test):
CTRexScenario.stl_trex.connect()
+ def __verify_latency (self, latency_stats,max_latency,max_average):
+
+ error=0;
+ err_latency = latency_stats['err_cntrs']
+ latency = latency_stats['latency']
+
+ for key in err_latency :
+ error +=err_latency[key]
+ if error !=0 :
+ pprint.pprint(err_latency)
+ tmp = 'RX pkts ERROR - at least one error counter is non-zero'
+ print(tmp)
+ assert False, tmp
+
+ if latency['average']> max_average:
+ pprint.pprint(latency_stats)
+ tmp = 'Average latency is too high {0} {1} '.format(latency['average'], max_average)
+ print(tmp)
+ return ERROR_LATENCY_TOO_HIGH
+
+ if latency['total_max']> max_latency:
+ pprint.pprint(latency_stats)
+ tmp = 'Max latency is too high {0} {1} '.format(latency['total_max'], max_latency)
+ print(tmp)
+ return ERROR_LATENCY_TOO_HIGH
+
+ return 0
+
+
+
def __verify_flow (self, pg_id, total_pkts, pkt_len, stats):
flow_stats = stats['flow_stats'].get(pg_id)
latency_stats = stats['latency'].get(pg_id)
@@ -113,7 +170,7 @@ class STLRX_Test(CStlGeneral_Test):
# one stream on TX --> RX
def test_one_stream(self):
- total_pkts = self.total_pkts * 10
+ total_pkts = self.total_pkts
try:
s1 = STLStream(name = 'rx',
@@ -128,7 +185,7 @@ class STLRX_Test(CStlGeneral_Test):
print("\ninjecting {0} packets on port {1}\n".format(total_pkts, self.tx_port))
- exp = {'pg_id': 5, 'total_pkts': total_pkts, 'pkt_len': self.pkt.get_pkt_len()}
+ exp = {'pg_id': 5, 'total_pkts': total_pkts, 'pkt_len': s1.get_pkt_len()}
self.__rx_iteration( [exp] )
@@ -138,6 +195,9 @@ class STLRX_Test(CStlGeneral_Test):
def test_multiple_streams(self):
+ if self.is_virt_nics:
+ self.skip('Skip this for virtual NICs')
+
num_latency_streams = 128
num_flow_stat_streams = 127
total_pkts = int(self.total_pkts / (num_latency_streams + num_flow_stat_streams))
@@ -156,7 +216,7 @@ class STLRX_Test(CStlGeneral_Test):
flow_stats = STLFlowLatencyStats(pg_id = pg_id),
mode = STLTXSingleBurst(total_pkts = total_pkts+pg_id, percentage = percent)))
- exp.append({'pg_id': pg_id, 'total_pkts': total_pkts+pg_id, 'pkt_len': self.pkt.get_pkt_len()})
+ exp.append({'pg_id': pg_id, 'total_pkts': total_pkts+pg_id, 'pkt_len': streams[-1].get_pkt_len()})
for pg_id in range(num_latency_streams + 1, num_latency_streams + num_flow_stat_streams):
@@ -165,7 +225,7 @@ class STLRX_Test(CStlGeneral_Test):
flow_stats = STLFlowStats(pg_id = pg_id),
mode = STLTXSingleBurst(total_pkts = total_pkts+pg_id, percentage = percent)))
- exp.append({'pg_id': pg_id, 'total_pkts': total_pkts+pg_id, 'pkt_len': self.pkt.get_pkt_len()})
+ exp.append({'pg_id': pg_id, 'total_pkts': total_pkts+pg_id, 'pkt_len': streams[-1].get_pkt_len()})
# add both streams to ports
self.c.add_streams(streams, ports = [self.tx_port])
@@ -180,45 +240,244 @@ class STLRX_Test(CStlGeneral_Test):
total_pkts = self.total_pkts
try:
- s1 = STLStream(name = 'rx',
- packet = self.pkt,
- flow_stats = STLFlowStats(pg_id = 5),
- mode = STLTXSingleBurst(total_pkts = total_pkts,
- percentage = self.rate_percent
- ))
+ streams_data = [
+ {'name': 'Flow stat. No latency', 'pkt': self.pkt, 'lat': False},
+ {'name': 'Latency, no field engine', 'pkt': self.pkt, 'lat': True},
+ {'name': 'Latency, short packet with field engine', 'pkt': self.vm_pkt, 'lat': True},
+ {'name': 'Latency, large packet with field engine', 'pkt': self.vm_large_pkt, 'lat': True}
+ ]
+ if self.latency_9k_enable:
+ streams_data.append({'name': 'Latency, 9k packet with field engine', 'pkt': self.vm_9k_pkt, 'lat': True})
- s_lat = STLStream(name = 'rx',
- packet = self.pkt,
- flow_stats = STLFlowLatencyStats(pg_id = 5),
- mode = STLTXSingleBurst(total_pkts = total_pkts,
- percentage = self.rate_lat
- ))
+ streams = []
+ for data in streams_data:
+ if data['lat']:
+ flow_stats = STLFlowLatencyStats(pg_id = 5)
+ mode = STLTXSingleBurst(total_pkts = total_pkts, percentage = self.rate_percent)
+ else:
+ flow_stats = STLFlowStats(pg_id = 5)
+ mode = STLTXSingleBurst(total_pkts = total_pkts, pps = self.lat_pps)
+
+ s = STLStream(name = data['name'],
+ packet = data['pkt'],
+ flow_stats = flow_stats,
+ mode = mode
+ )
+ streams.append(s)
+
+ print("\ninjecting {0} packets on port {1}".format(total_pkts, self.tx_port))
+ exp = {'pg_id': 5, 'total_pkts': total_pkts}
+
+ for stream in streams:
+ self.c.add_streams([stream], ports = [self.tx_port])
+ print("Stream: {0}".format(stream.name))
+ exp['pkt_len'] = stream.get_pkt_len()
+ for i in range(0, 10):
+ print("Iteration {0}".format(i))
+ self.__rx_iteration( [exp] )
+ self.c.remove_all_streams(ports = [self.tx_port])
- print("\ninjecting {0} packets on port {1}\n".format(total_pkts, self.tx_port))
- exp = {'pg_id': 5, 'total_pkts': total_pkts, 'pkt_len': self.pkt.get_pkt_len()}
- exp_lat = {'pg_id': 5, 'total_pkts': total_pkts, 'pkt_len': self.pkt.get_pkt_len()}
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
- self.c.add_streams([s1], ports = [self.tx_port])
- for i in range(0, 10):
- print("starting iteration {0}".format(i))
- self.__rx_iteration( [exp] )
- self.c.remove_all_streams(ports = [self.tx_port])
- self.c.add_streams([s_lat], ports = [self.tx_port])
- for i in range(0, 10):
- print("starting iteration {0} latency".format(i))
- self.__rx_iteration( [exp_lat] )
+ def __9k_stream(self, pgid, ports, percent, max_latency, avg_latency, duration, pkt_size):
+ my_pg_id=pgid
+ s_ports=ports;
+ all_ports=list(CTRexScenario.stl_ports_map['map'].keys());
+ if ports == None:
+ s_ports=all_ports
+ assert( type(s_ports)==list)
+
+ stream_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*pkt_size))
+
+ try:
+ # reset all ports
+ self.c.reset(ports = all_ports)
+
+
+ for pid in s_ports:
+ s1 = STLStream(name = 'rx',
+ packet = self.pkt,
+ flow_stats = STLFlowLatencyStats(pg_id = my_pg_id+pid),
+ mode = STLTXCont(pps = 1000))
+
+ s2 = STLStream(name = 'bulk',
+ packet = stream_pkt,
+ mode = STLTXCont(percentage = percent))
+
+
+ # add both streams to ports
+ self.c.add_streams([s1,s2], ports = [pid])
+
+ self.c.clear_stats()
+
+ self.c.start(ports = s_ports,duration = duration)
+ self.c.wait_on_traffic(ports = s_ports,timeout = duration+10,rx_delay_ms = 100)
+ stats = self.c.get_stats()
+
+ for pid in s_ports:
+ latency_stats = stats['latency'].get(my_pg_id+pid)
+ #pprint.pprint(latency_stats)
+ if self.__verify_latency (latency_stats,max_latency,avg_latency) !=0:
+ return (ERROR_LATENCY_TOO_HIGH);
+
+ return 0
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+
+
+
+ # check that latency stays low while a 9K packet stream is running
+ def test_9k_stream(self):
+ if self.is_virt_nics:
+ self.skip('Skip this for virtual NICs')
+
+ if self.latency_9k_enable == False:
+ print("SKIP")
+ return
+
+ for i in range(0,5):
+ print("Iteration {0}".format(i));
+ duration=random.randint(10, 70);
+ pgid=random.randint(1, 65000);
+ pkt_size=random.randint(1000, 9000);
+ all_ports = list(CTRexScenario.stl_ports_map['map'].keys());
+
+
+ s_port=random.sample(all_ports, random.randint(1, len(all_ports)) )
+ s_port=sorted(s_port)
+ if self.speed == 40 :
+ # the NIC does not support full rate when both ports transmit, so keep only even ports
+ s_port=list(filter(lambda x: x % 2==0, s_port))
+ if len(s_port)==0:
+ s_port=[0];
+
+ error=1;
+ for j in range(0,5):
+ print(" {4} - duration {0} pgid {1} pkt_size {2} s_port {3} ".format(duration,pgid,pkt_size,s_port,j));
+ if self.__9k_stream(pgid,
+ s_port,90,
+ self.latency_9k_max_latency,
+ self.latency_9k_max_average,
+ duration,
+ pkt_size)==0:
+ error=0;
+ break;
+
+ if error:
+ assert False , "Latency too high"
+ else:
+ print("===>Iteration {0} PASS {1}".format(i,j));
+
+
+ def check_stats (self,stats,a,b,err):
+ if a != b:
+ tmp = 'ERROR field : {0}, read : {1} != expected : {2} '.format(err,a,b)
+ pprint.pprint(stats)
+ assert False,tmp
+
+
+
+ def send_1_burst(self,from_port,is_latency,pkts):
+
+ pid = from_port
+ base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+
+ pad = (60 - len(base_pkt)) * 'x'
+
+ stream_pkt = STLPktBuilder(pkt = base_pkt/pad)
+
+ all_ports=list(CTRexScenario.stl_ports_map['map'].keys());
+
+ dpid = CTRexScenario.stl_ports_map['map'][pid]
+ s_ports =[pid]
+
+ try:
+ # reset all ports
+ self.c.reset(ports = all_ports)
+
+
+ for pid in s_ports:
+ if is_latency:
+ s1 = STLStream(name = 'rx',
+ packet = stream_pkt,
+ flow_stats = STLFlowLatencyStats(pg_id = 5 + pid),
+ mode = STLTXSingleBurst(total_pkts = pkts,pps = 1000))
+ else:
+ s1 = STLStream(name = 'rx',
+ packet = stream_pkt,
+ mode = STLTXSingleBurst(total_pkts = pkts,pps = 1000))
+
+
+ # add both streams to ports
+ self.c.add_streams(s1, ports = [pid])
+
+ self.c.clear_stats()
+
+ self.c.start(ports = s_ports)
+ self.c.wait_on_traffic(ports = s_ports)
+
+ stats = self.c.get_stats()
+
+ ips = stats[dpid]
+ ops = stats[pid]
+ tps = stats['total']
+ tbytes = pkts*64
+
+ self.check_stats (stats,ops["obytes"], tbytes,"ops[obytes]")
+ self.check_stats (stats,ops["opackets"], pkts,"ops[opackets]")
+
+ self.check_stats (stats,ips["ibytes"], tbytes,"ips[ibytes]")
+ self.check_stats (stats,ips["ipackets"], pkts,"ips[ipackets]")
+
+ self.check_stats (stats,tps['ibytes'], tbytes,"tps[ibytes]")
+ self.check_stats (stats,tps['obytes'], tbytes,"tps[obytes]")
+ self.check_stats (stats,tps['ipackets'], pkts,"tps[ipackets]")
+ self.check_stats (stats,tps['opackets'], pkts,"tps[opackets]")
+
+ if is_latency:
+ ls=stats['flow_stats'][5+ pid]
+ self.check_stats (stats,ls['rx_pkts']['total'], pkts,"ls['rx_pkts']['total']")
+ self.check_stats (stats,ls['rx_pkts'][dpid], pkts,"ls['rx_pkts'][dpid]")
+
+ self.check_stats (stats,ls['tx_pkts']['total'], pkts,"ls['tx_pkts']['total']")
+ self.check_stats (stats,ls['tx_pkts'][pid], pkts,"ls['tx_pkts'][pid]")
+
+ self.check_stats (stats,ls['tx_bytes']['total'], tbytes,"ls['tx_bytes']['total']")
+ self.check_stats (stats,ls['tx_bytes'][pid], tbytes,"ls['tx_bytes'][pid]")
+
+
+ return 0
except STLError as e:
assert False , '{0}'.format(e)
+
+ def test_fcs_stream(self):
+ """ this test send 1 64 byte packet with latency and check that all counters are reported as 64 bytes"""
+
+ if self.is_virt_nics:
+ self.skip('Skip this for virtual NICs')
+
+ all_ports=list(CTRexScenario.stl_ports_map['map'].keys());
+ for port in all_ports:
+ for l in [True,False]:
+ print(" test port {0} latency : {1} ".format(port,l))
+ self.send_1_burst(port,l,100)
+
# this test adds more and more latency streams and re-test with incremental
def test_incremental_latency_streams (self):
+ if self.is_virt_nics:
+ self.skip('Skip this for virtual NICs')
total_pkts = self.total_pkts
percent = 0.5
@@ -251,7 +510,7 @@ class STLRX_Test(CStlGeneral_Test):
print("port {0} : {1} streams at {2}% of line rate\n".format(self.tx_port, i, total_percent))
- exp.append({'pg_id': i, 'total_pkts': total_pkts, 'pkt_len': my_pkt.get_pkt_len()})
+ exp.append({'pg_id': i, 'total_pkts': total_pkts, 'pkt_len': s1.get_pkt_len()})
self.__rx_iteration( exp )
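The per_driver_params lists in setUp() above are positional: index 0 is the rate percent, 1 the burst size, 2 the latency rate, 3 whether the 9k-latency test is enabled, and 4/5 (present only when 3 is True) its average and max latency limits. A hypothetical named view, for readability only:

    from collections import namedtuple

    # field meanings inferred from how setUp() indexes the lists;
    # this helper does not exist in the code
    DriverParams = namedtuple('DriverParams',
                              ['rate_percent', 'total_pkts', 'rate_lat',
                               'latency_9k_enable', 'lat_9k_max_avg', 'lat_9k_max_lat'])

    ixgbe   = DriverParams(30, 1000, 1, True, 300, 400)   # values from the diff
    vmxnet3 = DriverParams(1, 50, 1, False, None, None)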
diff --git a/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py b/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py
index 905882fe..14ef36f7 100755
--- a/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py
+++ b/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py
@@ -3,9 +3,6 @@ from .stl_general_test import CStlGeneral_Test, CTRexScenario
from misc_methods import run_command
from nose.plugins.attrib import attr
-def setUpModule():
- CStlGeneral_Test.unzip_client_package()
-
@attr('client_package')
class CTRexClientPKG_Test(CStlGeneral_Test):
"""This class tests TRex client package"""
@@ -14,14 +11,13 @@ class CTRexClientPKG_Test(CStlGeneral_Test):
CStlGeneral_Test.setUp(self)
# examples connect by their own
if CTRexScenario.stl_trex.is_connected():
- self.recover_after_trex_210_issue()
CTRexScenario.stl_trex.disconnect()
+ CStlGeneral_Test.unzip_client_package()
def tearDown(self):
# connect back at end of tests
if not CTRexScenario.stl_trex.is_connected():
CTRexScenario.stl_trex.connect()
- self.recover_after_trex_210_issue()
CStlGeneral_Test.tearDown(self)
def run_client_package_stl_example(self, python_version):
diff --git a/scripts/automation/regression/trex.py b/scripts/automation/regression/trex.py
index 9541ad76..aad8f041 100644
--- a/scripts/automation/regression/trex.py
+++ b/scripts/automation/regression/trex.py
@@ -38,7 +38,8 @@ class CTRexScenario:
is_copied = False
GAManager = None
no_daemon = False
- router_image = None
+ debug_image = False
+ test = None
class CTRexRunner:
"""This is an instance for generating a CTRexRunner"""
diff --git a/scripts/automation/regression/trex_unit_test.py b/scripts/automation/regression/trex_unit_test.py
index 83650164..4f13a50f 100755
--- a/scripts/automation/regression/trex_unit_test.py
+++ b/scripts/automation/regression/trex_unit_test.py
@@ -30,7 +30,7 @@ import outer_packages
import nose
from nose.plugins import Plugin
-import logging
+from nose.selector import Selector
import CustomLogger
import misc_methods
from rednose import RedNose
@@ -43,11 +43,37 @@ from trex_stl_lib.utils.GAObjClass import GAmanager
import trex
import socket
from pprint import pprint
-import subprocess
-import re
import time
from distutils.dir_util import mkpath
+# nose overrides
+
+# option to select a test by bare name, without file, class etc.
+def new_Selector_wantMethod(self, method, orig_Selector_wantMethod = Selector.wantMethod):
+ result = orig_Selector_wantMethod(self, method)
+ return result and (not CTRexScenario.test or CTRexScenario.test in getattr(method, '__name__', ''))
+
+Selector.wantMethod = new_Selector_wantMethod
+
+def new_Selector_wantFunction(self, function, orig_Selector_wantFunction = Selector.wantFunction):
+ result = orig_Selector_wantFunction(self, function)
+ return result and (not CTRexScenario.test or CTRexScenario.test in getattr(function, '__name__', ''))
+
+Selector.wantFunction = new_Selector_wantFunction
+
+# override nose's strange representation of setUpClass errors
+def __suite_repr__(self):
+ if hasattr(self.context, '__module__'): # inside class, setUpClass etc.
+ class_repr = nose.suite._strclass(self.context)
+ else: # outside of class, setUpModule etc.
+ class_repr = nose.suite._strclass(self.__class__)
+ return '%s.%s' % (class_repr, getattr(self.context, '__name__', self.context))
+
+nose.suite.ContextSuite.__repr__ = __suite_repr__
+nose.suite.ContextSuite.__str__ = __suite_repr__
+
+# /nose overrides
+
def check_trex_path(trex_path):
if os.path.isfile('%s/trex_daemon_server' % trex_path):
return os.path.abspath(trex_path)
@@ -67,7 +93,7 @@ def get_trex_path():
def address_to_ip(address):
- for i in range(10):
+ for i in range(5):
try:
return socket.gethostbyname(address)
except:
@@ -132,6 +158,14 @@ class CTRexTestConfiguringPlugin(Plugin):
parser.add_option('--no-daemon', action="store_true", default = False,
dest="no_daemon",
help="Flag that specifies to use running stl server, no need daemons.")
+ parser.add_option('--debug-image', action="store_true", default = False,
+ dest="debug_image",
+ help="Flag that specifies to use t-rex-64-debug as TRex executable.")
+ parser.add_option('--trex-args', action='store', default = '',
+ dest="trex_args",
+ help="Additional TRex arguments (--no-watchdog etc.).")
+ parser.add_option('-t', '--test', action='store', default = '', dest='test',
+ help='Test name to run (without file, class etc.)')
def configure(self, options, conf):
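Together, the Selector overrides and the new -t/--test option let a single test be selected by bare name, e.g. ./trex_unit_test.py --stl -t test_jumbo (the invocation is illustrative). A minimal sketch of the substring filter the overrides apply, mirroring new_Selector_wantMethod above:

    # CTRexScenario.test holds the -t/--test value; an empty string means no filter
    def want(test_name, selected):
        return not selected or selected in test_name

    assert want('test_jumbo', '')             # no filter: everything runs
    assert want('test_jumbo', 'jumbo')        # substring match
    assert not want('test_rx_check', 'jumbo')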
@@ -143,6 +177,7 @@ class CTRexTestConfiguringPlugin(Plugin):
self.json_verbose = options.json_verbose
self.telnet_verbose = options.telnet_verbose
self.no_daemon = options.no_daemon
+ CTRexScenario.test = options.test
if self.collect_only or self.functional:
return
if CTRexScenario.setup_dir and options.config_path:
@@ -164,13 +199,16 @@ class CTRexTestConfiguringPlugin(Plugin):
self.loggerPath = options.log_path
# initialize CTRexScenario global testing class, to be used by all tests
CTRexScenario.configuration = self.configuration
- CTRexScenario.no_daemon = self.no_daemon
+ CTRexScenario.no_daemon = options.no_daemon
CTRexScenario.benchmark = self.benchmark
CTRexScenario.modes = set(self.modes)
CTRexScenario.server_logs = self.server_logs
+ CTRexScenario.debug_image = options.debug_image
if not self.no_daemon:
- CTRexScenario.trex = CTRexClient(trex_host = self.configuration.trex['trex_name'],
- verbose = self.json_verbose)
+ CTRexScenario.trex = CTRexClient(trex_host = self.configuration.trex['trex_name'],
+ verbose = self.json_verbose,
+ debug_image = options.debug_image,
+ trex_args = options.trex_args)
if not CTRexScenario.trex.check_master_connectivity():
print('Could not connect to master daemon')
sys.exit(-1)
@@ -202,6 +240,7 @@ class CTRexTestConfiguringPlugin(Plugin):
if not res:
print('Could not restart TRex daemon server')
sys.exit(-1)
+ print('Restarted.')
if self.kill_running:
CTRexScenario.trex.kill_all_trexes()
@@ -255,6 +294,9 @@ def save_setup_info():
setup_info += 'Server: %s, Modes: %s' % (cfg.trex.get('trex_name'), cfg.trex.get('modes'))
if cfg.router:
setup_info += '\nRouter: Model: %s, Image: %s' % (cfg.router.get('model'), CTRexScenario.router_image)
+ if CTRexScenario.debug_image:
+ setup_info += '\nDebug image: %s' % CTRexScenario.debug_image
+
with open('%s/report_%s.info' % (CTRexScenario.report_dir, CTRexScenario.setup_name), 'w') as f:
f.write(setup_info)
except Exception as err:
@@ -323,15 +365,14 @@ if __name__ == "__main__":
nose_argv += sys_args
- config_plugin = CTRexTestConfiguringPlugin()
- red_nose = RedNose()
+ addplugins = [RedNose(), CTRexTestConfiguringPlugin()]
result = True
try:
if len(CTRexScenario.test_types['functional_tests']):
additional_args = ['--func'] + CTRexScenario.test_types['functional_tests']
if xml_arg:
additional_args += ['--with-xunit', xml_arg.replace('.xml', '_functional.xml')]
- result = nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin])
+ result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins)
if len(CTRexScenario.test_types['stateful_tests']):
additional_args = ['--stf']
if '--warmup' in sys.argv:
@@ -341,14 +382,14 @@ if __name__ == "__main__":
additional_args.extend(['-a', '!client_package'])
if xml_arg:
additional_args += ['--with-xunit', xml_arg.replace('.xml', '_stateful.xml')]
- result = nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin]) and result
+ result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins) and result
if len(CTRexScenario.test_types['stateless_tests']):
additional_args = ['--stl', 'stateless_tests/stl_general_test.py:STLBasic_Test.test_connectivity'] + CTRexScenario.test_types['stateless_tests']
if not test_client_package:
additional_args.extend(['-a', '!client_package'])
if xml_arg:
additional_args += ['--with-xunit', xml_arg.replace('.xml', '_stateless.xml')]
- result = nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin]) and result
+ result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins) and result
#except Exception as e:
# result = False
# print(e)