Diffstat (limited to 'scripts/automation/regression')
-rwxr-xr-x  scripts/automation/regression/CPlatform.py                            |   7
-rw-r--r--  scripts/automation/regression/functional_tests/stl_basic_tests.py     |   6
-rw-r--r--  scripts/automation/regression/setups/kiwi02/benchmark.yaml            |   2
-rw-r--r--  scripts/automation/regression/setups/trex-dan/benchmark.yaml          |   2
-rw-r--r--  scripts/automation/regression/setups/trex07/benchmark.yaml            | 374
-rw-r--r--  scripts/automation/regression/setups/trex14/benchmark.yaml            |  12
-rw-r--r--  scripts/automation/regression/setups/trex25/benchmark.yaml            |   2
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_general_test.py     |  71
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_nat_test.py         |   3
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_rx_test.py          |   2
-rwxr-xr-x  scripts/automation/regression/stateless_tests/stl_examples_test.py    |   2
-rw-r--r--  scripts/automation/regression/stateless_tests/stl_general_test.py     |  47
-rw-r--r--  scripts/automation/regression/stateless_tests/stl_rx_test.py          |  10
-rwxr-xr-x  scripts/automation/regression/stateless_tests/trex_client_pkg_test.py |   6
-rw-r--r--  scripts/automation/regression/trex.py                                 |   2
-rwxr-xr-x  scripts/automation/regression/trex_unit_test.py                       |  37
16 files changed, 333 insertions, 252 deletions
diff --git a/scripts/automation/regression/CPlatform.py b/scripts/automation/regression/CPlatform.py
index de1c22ce..dc5418cb 100755
--- a/scripts/automation/regression/CPlatform.py
+++ b/scripts/automation/regression/CPlatform.py
@@ -20,7 +20,7 @@ class CPlatform(object):
self.tftp_cfg = None
self.config_history = { 'basic_if_config' : False, 'tftp_server_config' : False }
- def configure_basic_interfaces(self, mtu = 4000):
+ def configure_basic_interfaces(self, mtu = 9050):
cache = CCommandCache()
for dual_if in self.if_mngr.get_dual_if_list():
@@ -46,7 +46,7 @@ class CPlatform(object):
- def configure_basic_filtered_interfaces(self, intf_list, mtu = 4000):
+ def configure_basic_filtered_interfaces(self, intf_list, mtu = 9050):
cache = CCommandCache()
for intf in intf_list:
@@ -70,11 +70,10 @@ class CPlatform(object):
res = self.cmd_link.run_single_command(cache)
if 'Rollback Done' not in res:
print('Failed to load clean config, trying again')
+ time.sleep(2)
if i < 4:
continue
raise Exception('Could not load clean config, response: %s' % res)
- if i > 0: # were errors, better to wait
- time.sleep(2)
def config_pbr (self, mode = 'config'):
idx = 1
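Note on the CPlatform.py hunk above: the 2-second wait now happens after every failed rollback attempt, before retrying, instead of only after the loop had already seen an error. A minimal sketch of the resulting control flow (the attempt count of 5 is inferred from the 'if i < 4' guard; the command objects are stand-ins):

    import time

    MAX_ATTEMPTS = 5  # inferred from the 'if i < 4' guard in the hunk

    def load_clean_config(cmd_link, cache):
        for i in range(MAX_ATTEMPTS):
            res = cmd_link.run_single_command(cache)
            if 'Rollback Done' in res:
                return                       # success: no trailing sleep anymore
            print('Failed to load clean config, trying again')
            time.sleep(2)                    # wait *before* the next attempt
            if i < MAX_ATTEMPTS - 1:
                continue
            raise Exception('Could not load clean config, response: %s' % res)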
diff --git a/scripts/automation/regression/functional_tests/stl_basic_tests.py b/scripts/automation/regression/functional_tests/stl_basic_tests.py
index a4e28ca9..e03c0742 100644
--- a/scripts/automation/regression/functional_tests/stl_basic_tests.py
+++ b/scripts/automation/regression/functional_tests/stl_basic_tests.py
@@ -85,7 +85,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
pkts1 = list(RawPcapReader(output))
pkts2 = list(RawPcapReader(golden))
- assert_equal(len(pkts1), len(pkts2))
+ assert_equal(len(pkts1), len(pkts2), 'Lengths of generated pcap (%s) and golden (%s) are different' % (output, golden))
for pkt1, pkt2, i in zip(pkts1, pkts2, range(1, len(pkts1))):
ts1 = float(pkt1[1][0]) + (float(pkt1[1][1]) / 1e6)
@@ -143,7 +143,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
os.unlink(output_cap)
try:
rc = self.run_sim(input_file, output_cap, options, silent)
- assert_equal(rc, True)
+ assert_equal(rc, True, 'Simulation on profile %s failed.' % profile)
#s='cp '+output_cap+' '+golden_file;
#print s
#os.system(s)
@@ -165,7 +165,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
profile.dump_to_code(generated_filename)
rc = self.run_sim(generated_filename, output_cap, options, silent)
- assert_equal(rc, True)
+ assert_equal(rc, True, 'Simulation on profile %s (generated) failed.' % profile)
if compare:
self.compare_caps(output_cap, golden_file)
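The three assert_equal changes above only attach failure messages; nose.tools.assert_equal forwards to unittest's assertEqual, so the third argument surfaces in the test report. A tiny illustration (the pcap names are stand-ins):

    from nose.tools import assert_equal

    pkts1, pkts2 = [1, 2, 3], [1, 2]   # stand-ins for parsed pcap packet lists
    assert_equal(len(pkts1), len(pkts2),
                 'Lengths of generated pcap (%s) and golden (%s) are different'
                 % ('out.pcap', 'golden.pcap'))
    # AssertionError: Lengths of generated pcap (out.pcap) and golden (golden.pcap) are different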
diff --git a/scripts/automation/regression/setups/kiwi02/benchmark.yaml b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
index 3332aa5e..e6621085 100644
--- a/scripts/automation/regression/setups/kiwi02/benchmark.yaml
+++ b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
@@ -69,6 +69,8 @@ test_nat_simple_mode1: &test_nat_simple
test_nat_simple_mode2: *test_nat_simple
+test_nat_simple_mode3: *test_nat_simple
+
test_nat_learning:
<< : *test_nat_simple
nat_opened : 100000
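The benchmark edits here and in the other setups lean on two YAML features: a plain alias (*test_nat_simple) reuses the anchored mapping verbatim, while the merge key (<< : *test_nat_simple) copies it and lets later keys override. A quick PyYAML check of both behaviors (values shortened):

    import yaml

    doc = """
    test_nat_simple_mode1: &test_nat_simple
      multiplier : 12000
      nat_opened : 40000

    test_nat_simple_mode3: *test_nat_simple    # plain alias: identical mapping

    test_nat_learning:
      << : *test_nat_simple                    # merge, then override one key
      nat_opened : 100000
    """
    cfg = yaml.safe_load(doc)
    assert cfg['test_nat_simple_mode3'] == cfg['test_nat_simple_mode1']
    assert cfg['test_nat_learning'] == {'multiplier': 12000, 'nat_opened': 100000}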
diff --git a/scripts/automation/regression/setups/trex-dan/benchmark.yaml b/scripts/automation/regression/setups/trex-dan/benchmark.yaml
index e9f12c45..de56089b 100644
--- a/scripts/automation/regression/setups/trex-dan/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex-dan/benchmark.yaml
@@ -69,6 +69,8 @@ test_nat_simple_mode1: &test_nat_simple
test_nat_simple_mode2: *test_nat_simple
+test_nat_simple_mode3: *test_nat_simple
+
test_nat_learning:
<< : *test_nat_simple
bw_per_core : 7.377
diff --git a/scripts/automation/regression/setups/trex07/benchmark.yaml b/scripts/automation/regression/setups/trex07/benchmark.yaml
index 4778de91..0dc340b0 100644
--- a/scripts/automation/regression/setups/trex07/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex07/benchmark.yaml
@@ -2,169 +2,243 @@
#### TRex benchmark configuration file ####
###############################################################
-test_nbar_simple :
- multiplier : 7.5
- cores : 2
- exp_gbps : 3.5
- cpu_to_core_ratio : 20800000
- cpu2core_custom_dev: YES
- cpu2core_dev : 0.07
- exp_max_latency : 1000
-
- nbar_classification:
- rtp : 32.57
- http : 30.25
- oracle-sqlnet : 11.23
- exchange : 10.80
- citrix : 5.62
- rtsp : 2.84
- dns : 1.95
- smtp : 0.57
- pop3 : 0.36
- ssl : 0.17
- sctp : 0.13
- sip : 0.09
- unknown : 3.41
-
-test_rx_check :
- multiplier : 13
- cores : 3
- rx_sample_rate : 128
- exp_gbps : 6
- cpu_to_core_ratio : 37270000
- exp_bw : 13
- exp_latency : 1
-
-test_nat_simple : &test_nat_simple
- stat_route_dict :
+#### common templates ###
+
+stat_route_dict: &stat_route_dict
clients_start : 16.0.0.1
servers_start : 48.0.0.1
dual_port_mask : 1.0.0.0
client_destination_mask : 255.0.0.0
server_destination_mask : 255.0.0.0
- nat_dict :
+
+nat_dict: &nat_dict
clients_net_start : 16.0.0.0
client_acl_wildcard_mask : 0.0.0.255
dual_port_mask : 1.0.0.0
pool_start : 200.0.0.0
pool_netmask : 255.255.255.0
- multiplier : 12000
- cores : 1
- cpu_to_core_ratio : 37270000
- exp_bw : 1
- exp_latency : 1
- allow_timeout_dev : YES
-
-test_nat_simple_mode1 : *test_nat_simple
-test_nat_simple_mode2 : *test_nat_simple
-
-test_nat_learning :
- stat_route_dict :
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
- multiplier : 12000
- cores : 1
- nat_opened : 40000
- cpu_to_core_ratio : 270
- exp_bw : 8
- exp_latency : 1
- allow_timeout_dev : YES
-
-test_routing_imix_64 :
- multiplier : 430
- cores : 1
- cpu_to_core_ratio : 280
- exp_latency : 1
-
-test_routing_imix :
- multiplier : 10
- cores : 1
- cpu_to_core_ratio : 1800
- exp_latency : 1
-
-test_static_routing_imix :
- stat_route_dict :
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
- multiplier : 8
- cores : 1
- cpu_to_core_ratio : 1800
- exp_latency : 1
-test_static_routing_imix_asymmetric:
- stat_route_dict :
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
- multiplier : 8
- cores : 1
- cpu_to_core_ratio : 1800
- exp_latency : 1
-
-test_ipv6_simple :
- multiplier : 9
- cores : 2
- cpu_to_core_ratio : 30070000
- cpu2core_custom_dev: YES
- cpu2core_dev : 0.07
-
-
-test_rx_check_sfr:
- multiplier : 10
- cores : 2
- rx_sample_rate : 16
- # allow 0.03% errors, bad router
- error_tolerance : 0.03
-
-test_rx_check_http:
- multiplier : 15000
- cores : 1
- rx_sample_rate : 16
- # allow 0.03% errors, bad routerifconfig
- error_tolerance : 0.03
-test_rx_check_sfr_ipv6:
- multiplier : 10
- cores : 2
- rx_sample_rate : 16
- # allow 0.03% errors, bad router
- error_tolerance : 0.03
+### stateful ###
+
+test_jumbo:
+ multiplier : 17
+ cores : 1
+ bw_per_core : 543.232
+
+
+test_routing_imix:
+ multiplier : 10
+ cores : 1
+ bw_per_core : 34.128
+
+
+test_routing_imix_64:
+ multiplier : 430
+ cores : 1
+ bw_per_core : 5.893
+
+
+test_static_routing_imix: &test_static_routing_imix
+ stat_route_dict : *stat_route_dict
+ multiplier : 8
+ cores : 1
+ bw_per_core : 34.339
+
+test_static_routing_imix_asymmetric: *test_static_routing_imix
+
+
+test_ipv6_simple:
+ multiplier : 9
+ cores : 2
+ bw_per_core : 19.064
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 6000
+ cores : 1
+ nat_opened : 500000
+ allow_timeout_dev : True
+ bw_per_core : 44.445
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning: *test_nat_simple
+
+
+test_nbar_simple:
+ multiplier : 7.5
+ cores : 2
+ bw_per_core : 17.174
+ nbar_classification:
+ rtp : 32.57
+ http : 30.25
+ oracle_sqlnet : 11.23
+ exchange : 10.80
+ citrix : 5.62
+ rtsp : 2.84
+ dns : 1.95
+ smtp : 0.57
+ pop3 : 0.36
+ ssl : 0.17
+ sctp : 0.13
+ sip : 0.09
+ unknown : 3.41
+
+
+test_rx_check_http: &rx_http
+ multiplier : 15000
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 39.560
test_rx_check_http_ipv6:
- multiplier : 15000
- cores : 1
- rx_sample_rate : 16
- # allow 0.03% errors, bad router
- error_tolerance : 0.03
-
-test_rx_check_http_negative:
- multiplier : 13000
- cores : 1
- rx_sample_rate : 16
- # allow 0.03% errors, bad router
- error_tolerance : 0.03
- stat_route_dict :
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
- nat_dict :
- clients_net_start : 16.0.0.0
- client_acl_wildcard_mask : 0.0.0.255
- dual_port_mask : 1.0.0.0
- pool_start : 200.0.0.0
- pool_netmask : 255.255.255.0
+ << : *rx_http
+ bw_per_core : 49.237
+
+test_rx_check_http_negative_disabled:
+ << : *rx_http
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 10
+ cores : 3
+ rx_sample_rate : 16
+ bw_per_core : 16.082
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 19.198
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
-test_jumbo:
- multiplier : 17
- cores : 1
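Besides deduplicating the stateful entries via anchors, the rewritten trex07 file gains the stateless test_CPU_benchmark section: a list of profile entries, each naming a profile script, the kwargs it is built with, and the expected cpu_util / bw_per_core. One way to inspect that matrix, assuming PyYAML and the repo-relative path:

    import yaml

    with open('scripts/automation/regression/setups/trex07/benchmark.yaml') as f:
        benchmark = yaml.safe_load(f)

    for entry in benchmark['test_CPU_benchmark']['profiles']:
        # entries like stl/imix.py carry no kwargs, hence the default
        print(entry['name'], entry.get('kwargs', {}),
              entry['cpu_util'], entry['bw_per_core'])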
diff --git a/scripts/automation/regression/setups/trex14/benchmark.yaml b/scripts/automation/regression/setups/trex14/benchmark.yaml
index aa4ac2d4..04f13e79 100644
--- a/scripts/automation/regression/setups/trex14/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex14/benchmark.yaml
@@ -57,17 +57,17 @@ test_ipv6_simple:
test_nat_simple_mode1: &test_nat_simple
stat_route_dict : *stat_route_dict
nat_dict : *nat_dict
- multiplier : 12000
+ multiplier : 6000
cores : 1
- nat_opened : 40000
+ nat_opened : 500000
allow_timeout_dev : True
bw_per_core : 44.445
test_nat_simple_mode2: *test_nat_simple
-test_nat_learning:
- << : *test_nat_simple
- nat_opened : 40000
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning: *test_nat_simple
test_nbar_simple:
@@ -101,7 +101,7 @@ test_rx_check_http_ipv6:
<< : *rx_http
bw_per_core : 49.237
-test_rx_check_http_negative:
+test_rx_check_http_negative_disabled:
<< : *rx_http
stat_route_dict : *stat_route_dict
nat_dict : *nat_dict
diff --git a/scripts/automation/regression/setups/trex25/benchmark.yaml b/scripts/automation/regression/setups/trex25/benchmark.yaml
index 19fab1fe..ccbdf6f5 100644
--- a/scripts/automation/regression/setups/trex25/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex25/benchmark.yaml
@@ -70,6 +70,8 @@ test_nat_simple_mode1: &test_nat_simple
test_nat_simple_mode2: *test_nat_simple
+test_nat_simple_mode3: *test_nat_simple
+
test_nat_learning:
<< : *test_nat_simple
nat_opened : 40000
diff --git a/scripts/automation/regression/stateful_tests/trex_general_test.py b/scripts/automation/regression/stateful_tests/trex_general_test.py
index 82b1d9d1..8ff4fdaf 100755
--- a/scripts/automation/regression/stateful_tests/trex_general_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_general_test.py
@@ -139,41 +139,42 @@ class CTRexGeneral_Test(unittest.TestCase):
if res[name] != float(val):
self.fail('TRex results[%s]==%f and not as expected %f ' % (name, res[name], val))
- def check_CPU_benchmark (self, trex_res, err = 25, minimal_cpu = 30, maximal_cpu = 85):
- #cpu_util = float(trex_res.get_last_value("trex-global.data.m_cpu_util"))
- cpu_util = sum(trex_res.get_value_list("trex-global.data.m_cpu_util")[-4:-1]) / 3.0 # mean of 3 values before last
-
- if '1G' in self.modes:
- minimal_cpu /= 10.0
-
- if not self.is_virt_nics:
- if cpu_util > maximal_cpu:
- self.fail("CPU is too high (%s%%), probably queue full." % cpu_util )
- #if cpu_util < minimal_cpu:
- # self.fail("CPU is too low (%s%%), can't verify performance in such low CPU%%." % cpu_util )
-
- test_norm_cpu = sum(trex_res.get_value_list("trex-global.data.m_bw_per_core")[-4:-1]) / 3.0
-
- print("TRex CPU utilization: %g%%, norm_cpu is : %g Gb/core" % (round(cpu_util, 2), round(test_norm_cpu)))
-
- expected_norm_cpu = self.get_benchmark_param('bw_per_core')
- if not expected_norm_cpu:
- expected_norm_cpu = 1
-
- calc_error_precent = abs(100.0 * test_norm_cpu / expected_norm_cpu - 100)
- print('Err percent: %s' % calc_error_precent)
- #if calc_error_precent > err and cpu_util > 10:
- # self.fail('Excepted bw_per_core ratio: %s, got: %g' % (expected_norm_cpu, round(test_norm_cpu)))
-
- # report benchmarks
- if self.GAManager:
- try:
- setup_test = '%s.%s' % (CTRexScenario.setup_name, self.get_name())
- self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core', value = int(test_norm_cpu))
- self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core_exp', value = int(expected_norm_cpu))
- self.GAManager.emptyAndReportQ()
- except Exception as e:
- print('Sending GA failed: %s' % e)
+ def check_CPU_benchmark (self, trex_res, err = 25, minimal_cpu = 10, maximal_cpu = 85):
+ cpu_util = trex_res.get_avg_steady_state_value('trex-global.data.m_cpu_util_raw')
+ trex_tx_bps = trex_res.get_avg_steady_state_value('trex-global.data.m_tx_bps')
+ expected_norm_cpu = self.get_benchmark_param('bw_per_core')
+ cores = self.get_benchmark_param('cores')
+ ports_count = trex_res.get_ports_count()
+ test_norm_cpu = trex_tx_bps / (cpu_util * ports_count * cores * 2.5e6)
+
+ if '1G' in self.modes:
+ minimal_cpu /= 10.0
+
+ if not self.is_virt_nics:
+ if cpu_util > maximal_cpu:
+ self.fail("CPU is too high (%s%%), probably queue full." % cpu_util )
+ #if cpu_util < minimal_cpu:
+ # self.fail("CPU is too low (%s%%), can't verify performance in such low CPU%%." % cpu_util )
+
+ print("TRex CPU utilization: %g%%, norm_cpu is : %g Gb/core" % (round(cpu_util, 2), round(test_norm_cpu, 2)))
+
+ if not expected_norm_cpu:
+ expected_norm_cpu = 1
+
+ calc_error_percent = abs(100.0 * test_norm_cpu / expected_norm_cpu - 100)
+ print('Err percent: %s' % calc_error_percent)
+ #if calc_error_percent > err and cpu_util > 10:
+ # self.fail('Expected bw_per_core ratio: %s, got: %g' % (expected_norm_cpu, round(test_norm_cpu)))
+
+ # report benchmarks
+ if self.GAManager:
+ try:
+ setup_test = '%s.%s' % (CTRexScenario.setup_name, self.get_name())
+ self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core', value = int(test_norm_cpu))
+ self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core_exp', value = int(expected_norm_cpu))
+ self.GAManager.emptyAndReportQ()
+ except Exception as e:
+ print('Sending GA failed: %s' % e)
def check_results_gt (self, res, name, val):
if res is None:
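The reworked check_CPU_benchmark derives Gb/core from steady-state counters instead of trusting the reported m_bw_per_core: test_norm_cpu = tx_bps / (cpu_util_raw * ports * cores * 2.5e6). A worked example with illustrative numbers (not from a real run):

    def norm_bw_per_core(tx_bps, cpu_util_raw, ports_count, cores):
        # same expression as the new check_CPU_benchmark code above
        return tx_bps / (cpu_util_raw * ports_count * cores * 2.5e6)

    # 10 Gb/s TX at 50% raw CPU on a 2-port, 1-core setup:
    print(norm_bw_per_core(10e9, 50.0, 2, 1))   # -> 40.0 Gb/core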
diff --git a/scripts/automation/regression/stateful_tests/trex_nat_test.py b/scripts/automation/regression/stateful_tests/trex_nat_test.py
index 6e030ffe..c23f67c4 100755
--- a/scripts/automation/regression/stateful_tests/trex_nat_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_nat_test.py
@@ -93,6 +93,9 @@ class CTRexNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
def test_nat_simple_mode2(self):
self.nat_simple_helper(learn_mode=2)
+ def test_nat_simple_mode3(self):
+ self.nat_simple_helper(learn_mode=3)
+
def nat_simple_helper(self, learn_mode=1):
# test initialization
self.router.configure_basic_interfaces()
diff --git a/scripts/automation/regression/stateful_tests/trex_rx_test.py b/scripts/automation/regression/stateful_tests/trex_rx_test.py
index 40528d16..c08ad1ea 100755
--- a/scripts/automation/regression/stateful_tests/trex_rx_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_rx_test.py
@@ -250,7 +250,7 @@ class CTRexRx_Test(CTRexGeneral_Test):
print('Run until finish, expect errors')
old_errors = copy.deepcopy(self.fail_reasons)
- nat_dict = self.get_benchmark_param('nat_dict', test_name = 'test_nat_simple')
+ nat_dict = self.get_benchmark_param('nat_dict', test_name = 'test_nat_simple_mode1')
nat_obj = CNatConfig(nat_dict)
self.router.config_nat(nat_obj)
self.router.config_zbf()
diff --git a/scripts/automation/regression/stateless_tests/stl_examples_test.py b/scripts/automation/regression/stateless_tests/stl_examples_test.py
index d8b85dfc..71fc3287 100755
--- a/scripts/automation/regression/stateless_tests/stl_examples_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_examples_test.py
@@ -10,14 +10,12 @@ class STLExamples_Test(CStlGeneral_Test):
def explicitSetUp(self):
# examples connect by their own
if self.is_connected():
- self.recover_after_trex_210_issue()
CTRexScenario.stl_trex.disconnect()
def explicitTearDown(self):
# connect back at end of tests
if not self.is_connected():
self.stl_trex.connect()
- self.recover_after_trex_210_issue()
def test_stl_examples(self):
examples_dir = '../trex_control_plane/stl/examples'
diff --git a/scripts/automation/regression/stateless_tests/stl_general_test.py b/scripts/automation/regression/stateless_tests/stl_general_test.py
index 82738f96..5ae2b326 100644
--- a/scripts/automation/regression/stateless_tests/stl_general_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_general_test.py
@@ -6,10 +6,6 @@ from trex_stl_lib.api import *
import time
from nose.tools import nottest
-def setUpModule():
- if CTRexScenario.stl_trex.is_connected():
- CStlGeneral_Test.recover_after_trex_210_issue()
-
class CStlGeneral_Test(CTRexGeneral_Test):
"""This class defines the general stateless testcase of the TRex traffic generator"""
@@ -20,21 +16,6 @@ class CStlGeneral_Test(CTRexGeneral_Test):
if CTRexScenario.stl_init_error:
self.skip(CTRexScenario.stl_init_error)
- # workaround of http://trex-tgn.cisco.com/youtrack/issue/trex-210
- @staticmethod
- def recover_after_trex_210_issue():
- return
- for i in range(20):
- try:
- stl_map_ports(CTRexScenario.stl_trex)
- break
- except:
- CTRexScenario.stl_trex.disconnect()
- time.sleep(0.5)
- CTRexScenario.stl_trex.connect()
- # verify problem is solved
- stl_map_ports(CTRexScenario.stl_trex)
-
def connect(self, timeout = 100):
# need delay and check only because TRex process might be still starting
sys.stdout.write('Connecting')
@@ -85,18 +66,22 @@ class STLBasic_Test(CStlGeneral_Test):
@nottest
def test_connectivity(self):
if not self.is_loopback:
- if CTRexScenario.router_cfg['forceImageReload']:
- CTRexScenario.router.load_clean_config()
- CTRexScenario.router.configure_basic_interfaces()
- CTRexScenario.router.config_pbr(mode = "config")
-
- err = 'Client could not connect'
- CTRexScenario.stl_init_error = err
+ try:
+ if CTRexScenario.router_cfg['forceImageReload']:
+ CTRexScenario.router.load_clean_config()
+ CTRexScenario.router.configure_basic_interfaces()
+ CTRexScenario.router.config_pbr(mode = "config")
+ except Exception as e:
+ CTRexScenario.stl_init_error = 'Could not configure device, err: %s' % e
+ self.fail(CTRexScenario.stl_init_error)
+ return
if not self.connect():
- self.fail(err)
- err = 'Client could not map ports'
- CTRexScenario.stl_init_error = err
+ CTRexScenario.stl_init_error = 'Client could not connect'
+ self.fail(CTRexScenario.stl_init_error)
+ return
+ print('Connected')
if not self.map_ports():
- self.fail(err)
+ CTRexScenario.stl_init_error = 'Client could not map ports'
+ self.fail(CTRexScenario.stl_init_error)
+ return
print('Got ports mapping: %s' % CTRexScenario.stl_ports_map)
- CTRexScenario.stl_init_error = None
diff --git a/scripts/automation/regression/stateless_tests/stl_rx_test.py b/scripts/automation/regression/stateless_tests/stl_rx_test.py
index 238ff53d..090261ff 100644
--- a/scripts/automation/regression/stateless_tests/stl_rx_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_rx_test.py
@@ -9,7 +9,7 @@ class STLRX_Test(CStlGeneral_Test):
"""Tests for RX feature"""
def setUp(self):
- per_driver_params = {"rte_vmxnet3_pmd": [1, 50, 1,False], "rte_ixgbe_pmd": [30, 5000, 1,True,200,400], "rte_i40e_pmd": [80, 5000, 1,True,100,250],
+ per_driver_params = {"rte_vmxnet3_pmd": [1, 50, 1,False], "rte_ixgbe_pmd": [30, 1000, 1,True,300,400], "rte_i40e_pmd": [80, 1000, 1,True,100,250],
"rte_igb_pmd": [80, 500, 1,False], "rte_em_pmd": [1, 50, 1,False], "rte_virtio_pmd": [1, 50, 1,False]}
CStlGeneral_Test.setUp(self)
@@ -35,6 +35,7 @@ class STLRX_Test(CStlGeneral_Test):
self.rate_lat = per_driver_params[drv_name][2]
else:
self.rate_lat = self.rate_percent
+ self.lat_pps = 1000
self.drops_expected = False
self.c.reset(ports = [self.tx_port, self.rx_port])
@@ -252,15 +253,16 @@ class STLRX_Test(CStlGeneral_Test):
for data in streams_data:
if data['lat']:
flow_stats = STLFlowLatencyStats(pg_id = 5)
+ mode = STLTXSingleBurst(total_pkts = total_pkts, percentage = self.rate_percent)
else:
flow_stats = STLFlowStats(pg_id = 5)
+ mode = STLTXSingleBurst(total_pkts = total_pkts, pps = self.lat_pps)
s = STLStream(name = data['name'],
packet = data['pkt'],
flow_stats = flow_stats,
- mode = STLTXSingleBurst(total_pkts = total_pkts,
- percentage = self.rate_percent
- ))
+ mode = mode
+ )
streams.append(s)
print("\ninjecting {0} packets on port {1}".format(total_pkts, self.tx_port))
diff --git a/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py b/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py
index 905882fe..14ef36f7 100755
--- a/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py
+++ b/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py
@@ -3,9 +3,6 @@ from .stl_general_test import CStlGeneral_Test, CTRexScenario
from misc_methods import run_command
from nose.plugins.attrib import attr
-def setUpModule():
- CStlGeneral_Test.unzip_client_package()
-
@attr('client_package')
class CTRexClientPKG_Test(CStlGeneral_Test):
"""This class tests TRex client package"""
@@ -14,14 +11,13 @@ class CTRexClientPKG_Test(CStlGeneral_Test):
CStlGeneral_Test.setUp(self)
# examples connect by their own
if CTRexScenario.stl_trex.is_connected():
- self.recover_after_trex_210_issue()
CTRexScenario.stl_trex.disconnect()
+ CStlGeneral_Test.unzip_client_package()
def tearDown(self):
# connect back at end of tests
if not CTRexScenario.stl_trex.is_connected():
CTRexScenario.stl_trex.connect()
- self.recover_after_trex_210_issue()
CStlGeneral_Test.tearDown(self)
def run_client_package_stl_example(self, python_version):
diff --git a/scripts/automation/regression/trex.py b/scripts/automation/regression/trex.py
index 44f2faba..aad8f041 100644
--- a/scripts/automation/regression/trex.py
+++ b/scripts/automation/regression/trex.py
@@ -38,8 +38,8 @@ class CTRexScenario:
is_copied = False
GAManager = None
no_daemon = False
- router_image = None
debug_image = False
+ test = None
class CTRexRunner:
"""This is an instance for generating a CTRexRunner"""
diff --git a/scripts/automation/regression/trex_unit_test.py b/scripts/automation/regression/trex_unit_test.py
index 915cd682..4f13a50f 100755
--- a/scripts/automation/regression/trex_unit_test.py
+++ b/scripts/automation/regression/trex_unit_test.py
@@ -30,7 +30,7 @@ import outer_packages
import nose
from nose.plugins import Plugin
-import logging
+from nose.selector import Selector
import CustomLogger
import misc_methods
from rednose import RedNose
@@ -43,11 +43,24 @@ from trex_stl_lib.utils.GAObjClass import GAmanager
import trex
import socket
from pprint import pprint
-import subprocess
-import re
import time
from distutils.dir_util import mkpath
+# nose overrides
+
+# option to select wanted test by name without file, class etc.
+def new_Selector_wantMethod(self, method, orig_Selector_wantMethod = Selector.wantMethod):
+ result = orig_Selector_wantMethod(self, method)
+ return result and (not CTRexScenario.test or CTRexScenario.test in getattr(method, '__name__', ''))
+
+Selector.wantMethod = new_Selector_wantMethod
+
+def new_Selector_wantFunction(self, function, orig_Selector_wantFunction = Selector.wantFunction):
+ result = orig_Selector_wantFunction(self, function)
+ return result and (not CTRexScenario.test or CTRexScenario.test in getattr(function, '__name__', ''))
+
+Selector.wantFunction = new_Selector_wantFunction
+
# override nose's strange representation of setUpClass errors
def __suite_repr__(self):
if hasattr(self.context, '__module__'): # inside class, setUpClass etc.
@@ -59,6 +72,8 @@ def __suite_repr__(self):
nose.suite.ContextSuite.__repr__ = __suite_repr__
nose.suite.ContextSuite.__str__ = __suite_repr__
+# /nose overrides
+
def check_trex_path(trex_path):
if os.path.isfile('%s/trex_daemon_server' % trex_path):
return os.path.abspath(trex_path)
@@ -78,7 +93,7 @@ def get_trex_path():
def address_to_ip(address):
- for i in range(10):
+ for i in range(5):
try:
return socket.gethostbyname(address)
except:
@@ -149,6 +164,8 @@ class CTRexTestConfiguringPlugin(Plugin):
parser.add_option('--trex-args', action='store', default = '',
dest="trex_args",
help="Additional TRex arguments (--no-watchdog etc.).")
+ parser.add_option('-t', '--test', action='store', default = '', dest='test',
+ help='Test name to run (without file, class etc.)')
def configure(self, options, conf):
@@ -160,6 +177,7 @@ class CTRexTestConfiguringPlugin(Plugin):
self.json_verbose = options.json_verbose
self.telnet_verbose = options.telnet_verbose
self.no_daemon = options.no_daemon
+ CTRexScenario.test = options.test
if self.collect_only or self.functional:
return
if CTRexScenario.setup_dir and options.config_path:
@@ -181,7 +199,7 @@ class CTRexTestConfiguringPlugin(Plugin):
self.loggerPath = options.log_path
# initialize CTRexScenario global testing class, to be used by all tests
CTRexScenario.configuration = self.configuration
- CTRexScenario.no_daemon = self.no_daemon
+ CTRexScenario.no_daemon = options.no_daemon
CTRexScenario.benchmark = self.benchmark
CTRexScenario.modes = set(self.modes)
CTRexScenario.server_logs = self.server_logs
@@ -347,15 +365,14 @@ if __name__ == "__main__":
nose_argv += sys_args
- config_plugin = CTRexTestConfiguringPlugin()
- red_nose = RedNose()
+ addplugins = [RedNose(), CTRexTestConfiguringPlugin()]
result = True
try:
if len(CTRexScenario.test_types['functional_tests']):
additional_args = ['--func'] + CTRexScenario.test_types['functional_tests']
if xml_arg:
additional_args += ['--with-xunit', xml_arg.replace('.xml', '_functional.xml')]
- result = nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin])
+ result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins)
if len(CTRexScenario.test_types['stateful_tests']):
additional_args = ['--stf']
if '--warmup' in sys.argv:
@@ -365,14 +382,14 @@ if __name__ == "__main__":
additional_args.extend(['-a', '!client_package'])
if xml_arg:
additional_args += ['--with-xunit', xml_arg.replace('.xml', '_stateful.xml')]
- result = nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin]) and result
+ result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins) and result
if len(CTRexScenario.test_types['stateless_tests']):
additional_args = ['--stl', 'stateless_tests/stl_general_test.py:STLBasic_Test.test_connectivity'] + CTRexScenario.test_types['stateless_tests']
if not test_client_package:
additional_args.extend(['-a', '!client_package'])
if xml_arg:
additional_args += ['--with-xunit', xml_arg.replace('.xml', '_stateless.xml')]
- result = nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin]) and result
+ result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins) and result
#except Exception as e:
# result = False
# print(e)
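The Selector overrides near the top of trex_unit_test.py use the capture-as-default-argument idiom: the original method is bound at def time, so the wrapper can chain to it without a global, and the new -t/--test option then filters tests whose names contain the given substring (e.g. something like ./trex_unit_test.py -t nat_simple). The pattern in isolation, with a stand-in class instead of nose.selector.Selector:

    class Selector:                              # stand-in for nose.selector.Selector
        def wantMethod(self, method):
            return True

    # 'orig' is captured as a default argument when the def executes,
    # so the wrapper keeps a reference to the unpatched method.
    def new_wantMethod(self, method, orig = Selector.wantMethod):
        wanted = orig(self, method)
        return wanted and 'nat' in getattr(method, '__name__', '')

    Selector.wantMethod = new_wantMethod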