Diffstat (limited to 'scripts/automation')
-rwxr-xr-xscripts/automation/regression/CPlatform.py7
-rw-r--r--scripts/automation/regression/functional_tests/stl_basic_tests.py6
-rw-r--r--scripts/automation/regression/setups/kiwi02/benchmark.yaml2
-rw-r--r--scripts/automation/regression/setups/trex-dan/benchmark.yaml2
-rw-r--r--scripts/automation/regression/setups/trex07/benchmark.yaml374
-rw-r--r--scripts/automation/regression/setups/trex14/benchmark.yaml12
-rw-r--r--scripts/automation/regression/setups/trex25/benchmark.yaml2
-rwxr-xr-xscripts/automation/regression/stateful_tests/trex_general_test.py71
-rwxr-xr-xscripts/automation/regression/stateful_tests/trex_nat_test.py3
-rwxr-xr-xscripts/automation/regression/stateful_tests/trex_rx_test.py2
-rwxr-xr-xscripts/automation/regression/stateless_tests/stl_examples_test.py2
-rw-r--r--scripts/automation/regression/stateless_tests/stl_general_test.py47
-rw-r--r--scripts/automation/regression/stateless_tests/stl_rx_test.py10
-rwxr-xr-xscripts/automation/regression/stateless_tests/trex_client_pkg_test.py6
-rw-r--r--scripts/automation/regression/trex.py2
-rwxr-xr-xscripts/automation/regression/trex_unit_test.py37
-rwxr-xr-xscripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py126
-rwxr-xr-xscripts/automation/trex_control_plane/stl/console/trex_console.py9
-rw-r--r--scripts/automation/trex_control_plane/stl/console/trex_tui.py505
-rw-r--r--scripts/automation/trex_control_plane/stl/examples/stl_flow_latency_stats.py25
-rw-r--r--scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py45
-rw-r--r--scripts/automation/trex_control_plane/stl/examples/stl_imix.py2
-rwxr-xr-xscripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py154
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py129
-rwxr-xr-xscripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py20
25 files changed, 1172 insertions, 428 deletions
diff --git a/scripts/automation/regression/CPlatform.py b/scripts/automation/regression/CPlatform.py
index de1c22ce..dc5418cb 100755
--- a/scripts/automation/regression/CPlatform.py
+++ b/scripts/automation/regression/CPlatform.py
@@ -20,7 +20,7 @@ class CPlatform(object):
self.tftp_cfg = None
self.config_history = { 'basic_if_config' : False, 'tftp_server_config' : False }
- def configure_basic_interfaces(self, mtu = 4000):
+ def configure_basic_interfaces(self, mtu = 9050):
cache = CCommandCache()
for dual_if in self.if_mngr.get_dual_if_list():
@@ -46,7 +46,7 @@ class CPlatform(object):
- def configure_basic_filtered_interfaces(self, intf_list, mtu = 4000):
+ def configure_basic_filtered_interfaces(self, intf_list, mtu = 9050):
cache = CCommandCache()
for intf in intf_list:
@@ -70,11 +70,10 @@ class CPlatform(object):
res = self.cmd_link.run_single_command(cache)
if 'Rollback Done' not in res:
print('Failed to load clean config, trying again')
+ time.sleep(2)
if i < 4:
continue
raise Exception('Could not load clean config, response: %s' % res)
- if i > 0: # were errors, better to wait
- time.sleep(2)
def config_pbr (self, mode = 'config'):
idx = 1
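Note: with the change above the two-second pause now happens right after every failed attempt, before the retry, rather than only once errors were already detected. A generic sketch of that retry-with-delay pattern, using a hypothetical load_config callable in place of the cached command run:

    import time

    def load_clean_config(load_config, attempts=5, delay=2.0):
        # Retry loading the configuration, pausing between failed attempts.
        for i in range(attempts):
            response = load_config()
            if 'Rollback Done' in response:
                return response
            print('Failed to load clean config, trying again')
            if i == attempts - 1:
                raise Exception('Could not load clean config, response: %s' % response)
            time.sleep(delay)
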
diff --git a/scripts/automation/regression/functional_tests/stl_basic_tests.py b/scripts/automation/regression/functional_tests/stl_basic_tests.py
index a4e28ca9..e03c0742 100644
--- a/scripts/automation/regression/functional_tests/stl_basic_tests.py
+++ b/scripts/automation/regression/functional_tests/stl_basic_tests.py
@@ -85,7 +85,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
pkts1 = list(RawPcapReader(output))
pkts2 = list(RawPcapReader(golden))
- assert_equal(len(pkts1), len(pkts2))
+ assert_equal(len(pkts1), len(pkts2), 'Lengths of generated pcap (%s) and golden (%s) are different' % (output, golden))
for pkt1, pkt2, i in zip(pkts1, pkts2, range(1, len(pkts1))):
ts1 = float(pkt1[1][0]) + (float(pkt1[1][1]) / 1e6)
@@ -143,7 +143,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
os.unlink(output_cap)
try:
rc = self.run_sim(input_file, output_cap, options, silent)
- assert_equal(rc, True)
+ assert_equal(rc, True, 'Simulation on profile %s failed.' % profile)
#s='cp '+output_cap+' '+golden_file;
#print s
#os.system(s)
@@ -165,7 +165,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
profile.dump_to_code(generated_filename)
rc = self.run_sim(generated_filename, output_cap, options, silent)
- assert_equal(rc, True)
+ assert_equal(rc, True, 'Simulation on profile %s (generated) failed.' % profile)
if compare:
self.compare_caps(output_cap, golden_file)
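Note: the added assertion messages make a failed pcap comparison name the files involved instead of reporting bare lengths. A standalone sketch of the same length check, assuming scapy's RawPcapReader and placeholder file paths:

    from scapy.utils import RawPcapReader

    def compare_pcap_lengths(output, golden):
        # Compare packet counts of two captures; name both files on mismatch.
        pkts1 = list(RawPcapReader(output))
        pkts2 = list(RawPcapReader(golden))
        assert len(pkts1) == len(pkts2), (
            'Lengths of generated pcap (%s) and golden (%s) are different' % (output, golden))

    # usage (paths are placeholders):
    # compare_pcap_lengths('exp/output.pcap', 'exp/golden.pcap')
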
diff --git a/scripts/automation/regression/setups/kiwi02/benchmark.yaml b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
index 3332aa5e..e6621085 100644
--- a/scripts/automation/regression/setups/kiwi02/benchmark.yaml
+++ b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
@@ -69,6 +69,8 @@ test_nat_simple_mode1: &test_nat_simple
test_nat_simple_mode2: *test_nat_simple
+test_nat_simple_mode3: *test_nat_simple
+
test_nat_learning:
<< : *test_nat_simple
nat_opened : 100000
diff --git a/scripts/automation/regression/setups/trex-dan/benchmark.yaml b/scripts/automation/regression/setups/trex-dan/benchmark.yaml
index e9f12c45..de56089b 100644
--- a/scripts/automation/regression/setups/trex-dan/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex-dan/benchmark.yaml
@@ -69,6 +69,8 @@ test_nat_simple_mode1: &test_nat_simple
test_nat_simple_mode2: *test_nat_simple
+test_nat_simple_mode3: *test_nat_simple
+
test_nat_learning:
<< : *test_nat_simple
bw_per_core : 7.377
diff --git a/scripts/automation/regression/setups/trex07/benchmark.yaml b/scripts/automation/regression/setups/trex07/benchmark.yaml
index 4778de91..0dc340b0 100644
--- a/scripts/automation/regression/setups/trex07/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex07/benchmark.yaml
@@ -2,169 +2,243 @@
#### TRex benchmark configuration file ####
###############################################################
-test_nbar_simple :
- multiplier : 7.5
- cores : 2
- exp_gbps : 3.5
- cpu_to_core_ratio : 20800000
- cpu2core_custom_dev: YES
- cpu2core_dev : 0.07
- exp_max_latency : 1000
-
- nbar_classification:
- rtp : 32.57
- http : 30.25
- oracle-sqlnet : 11.23
- exchange : 10.80
- citrix : 5.62
- rtsp : 2.84
- dns : 1.95
- smtp : 0.57
- pop3 : 0.36
- ssl : 0.17
- sctp : 0.13
- sip : 0.09
- unknown : 3.41
-
-test_rx_check :
- multiplier : 13
- cores : 3
- rx_sample_rate : 128
- exp_gbps : 6
- cpu_to_core_ratio : 37270000
- exp_bw : 13
- exp_latency : 1
-
-test_nat_simple : &test_nat_simple
- stat_route_dict :
+#### common templates ###
+
+stat_route_dict: &stat_route_dict
clients_start : 16.0.0.1
servers_start : 48.0.0.1
dual_port_mask : 1.0.0.0
client_destination_mask : 255.0.0.0
server_destination_mask : 255.0.0.0
- nat_dict :
+
+nat_dict: &nat_dict
clients_net_start : 16.0.0.0
client_acl_wildcard_mask : 0.0.0.255
dual_port_mask : 1.0.0.0
pool_start : 200.0.0.0
pool_netmask : 255.255.255.0
- multiplier : 12000
- cores : 1
- cpu_to_core_ratio : 37270000
- exp_bw : 1
- exp_latency : 1
- allow_timeout_dev : YES
-
-test_nat_simple_mode1 : *test_nat_simple
-test_nat_simple_mode2 : *test_nat_simple
-
-test_nat_learning :
- stat_route_dict :
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
- multiplier : 12000
- cores : 1
- nat_opened : 40000
- cpu_to_core_ratio : 270
- exp_bw : 8
- exp_latency : 1
- allow_timeout_dev : YES
-
-test_routing_imix_64 :
- multiplier : 430
- cores : 1
- cpu_to_core_ratio : 280
- exp_latency : 1
-
-test_routing_imix :
- multiplier : 10
- cores : 1
- cpu_to_core_ratio : 1800
- exp_latency : 1
-
-test_static_routing_imix :
- stat_route_dict :
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
- multiplier : 8
- cores : 1
- cpu_to_core_ratio : 1800
- exp_latency : 1
-test_static_routing_imix_asymmetric:
- stat_route_dict :
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
- multiplier : 8
- cores : 1
- cpu_to_core_ratio : 1800
- exp_latency : 1
-
-test_ipv6_simple :
- multiplier : 9
- cores : 2
- cpu_to_core_ratio : 30070000
- cpu2core_custom_dev: YES
- cpu2core_dev : 0.07
-
-
-test_rx_check_sfr:
- multiplier : 10
- cores : 2
- rx_sample_rate : 16
- # allow 0.03% errors, bad router
- error_tolerance : 0.03
-
-test_rx_check_http:
- multiplier : 15000
- cores : 1
- rx_sample_rate : 16
- # allow 0.03% errors, bad routerifconfig
- error_tolerance : 0.03
-test_rx_check_sfr_ipv6:
- multiplier : 10
- cores : 2
- rx_sample_rate : 16
- # allow 0.03% errors, bad router
- error_tolerance : 0.03
+### stateful ###
+
+test_jumbo:
+ multiplier : 17
+ cores : 1
+ bw_per_core : 543.232
+
+
+test_routing_imix:
+ multiplier : 10
+ cores : 1
+ bw_per_core : 34.128
+
+
+test_routing_imix_64:
+ multiplier : 430
+ cores : 1
+ bw_per_core : 5.893
+
+
+test_static_routing_imix: &test_static_routing_imix
+ stat_route_dict : *stat_route_dict
+ multiplier : 8
+ cores : 1
+ bw_per_core : 34.339
+
+test_static_routing_imix_asymmetric: *test_static_routing_imix
+
+
+test_ipv6_simple:
+ multiplier : 9
+ cores : 2
+ bw_per_core : 19.064
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 6000
+ cores : 1
+ nat_opened : 500000
+ allow_timeout_dev : True
+ bw_per_core : 44.445
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning: *test_nat_simple
+
+
+test_nbar_simple:
+ multiplier : 7.5
+ cores : 2
+ bw_per_core : 17.174
+ nbar_classification:
+ rtp : 32.57
+ http : 30.25
+ oracle_sqlnet : 11.23
+ exchange : 10.80
+ citrix : 5.62
+ rtsp : 2.84
+ dns : 1.95
+ smtp : 0.57
+ pop3 : 0.36
+ ssl : 0.17
+ sctp : 0.13
+ sip : 0.09
+ unknown : 3.41
+
+
+test_rx_check_http: &rx_http
+ multiplier : 15000
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 39.560
test_rx_check_http_ipv6:
- multiplier : 15000
- cores : 1
- rx_sample_rate : 16
- # allow 0.03% errors, bad router
- error_tolerance : 0.03
-
-test_rx_check_http_negative:
- multiplier : 13000
- cores : 1
- rx_sample_rate : 16
- # allow 0.03% errors, bad router
- error_tolerance : 0.03
- stat_route_dict :
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
- nat_dict :
- clients_net_start : 16.0.0.0
- client_acl_wildcard_mask : 0.0.0.255
- dual_port_mask : 1.0.0.0
- pool_start : 200.0.0.0
- pool_netmask : 255.255.255.0
+ << : *rx_http
+ bw_per_core : 49.237
+
+test_rx_check_http_negative_disabled:
+ << : *rx_http
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 10
+ cores : 3
+ rx_sample_rate : 16
+ bw_per_core : 16.082
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 19.198
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
-test_jumbo:
- multiplier : 17
- cores : 1
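Note: the rewritten trex07 benchmark file relies on YAML anchors (&name), aliases (*name) and merge keys (<<:) so that variants such as test_nat_simple_mode2/mode3 or the rx_check tests reuse one definition and only override what differs. A small Python sketch showing how PyYAML resolves these constructs (the inline document is illustrative, trimmed from the real file):

    import yaml

    doc = """
    test_rx_check_http: &rx_http
      multiplier: 15000
      cores: 1
      rx_sample_rate: 16
      bw_per_core: 39.560

    test_rx_check_http_ipv6:
      <<: *rx_http               # merge key pulls in the anchored mapping...
      bw_per_core: 49.237        # ...and explicit keys override merged ones

    test_nat_simple_mode1: &test_nat_simple
      multiplier: 6000
      cores: 1

    test_nat_simple_mode2: *test_nat_simple   # plain alias: the very same mapping
    """

    data = yaml.safe_load(doc)
    assert data['test_rx_check_http_ipv6']['bw_per_core'] == 49.237
    assert data['test_rx_check_http_ipv6']['rx_sample_rate'] == 16
    assert data['test_nat_simple_mode2'] is data['test_nat_simple_mode1']
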
diff --git a/scripts/automation/regression/setups/trex14/benchmark.yaml b/scripts/automation/regression/setups/trex14/benchmark.yaml
index aa4ac2d4..04f13e79 100644
--- a/scripts/automation/regression/setups/trex14/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex14/benchmark.yaml
@@ -57,17 +57,17 @@ test_ipv6_simple:
test_nat_simple_mode1: &test_nat_simple
stat_route_dict : *stat_route_dict
nat_dict : *nat_dict
- multiplier : 12000
+ multiplier : 6000
cores : 1
- nat_opened : 40000
+ nat_opened : 500000
allow_timeout_dev : True
bw_per_core : 44.445
test_nat_simple_mode2: *test_nat_simple
-test_nat_learning:
- << : *test_nat_simple
- nat_opened : 40000
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning: *test_nat_simple
test_nbar_simple:
@@ -101,7 +101,7 @@ test_rx_check_http_ipv6:
<< : *rx_http
bw_per_core : 49.237
-test_rx_check_http_negative:
+test_rx_check_http_negative_disabled:
<< : *rx_http
stat_route_dict : *stat_route_dict
nat_dict : *nat_dict
diff --git a/scripts/automation/regression/setups/trex25/benchmark.yaml b/scripts/automation/regression/setups/trex25/benchmark.yaml
index 19fab1fe..ccbdf6f5 100644
--- a/scripts/automation/regression/setups/trex25/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex25/benchmark.yaml
@@ -70,6 +70,8 @@ test_nat_simple_mode1: &test_nat_simple
test_nat_simple_mode2: *test_nat_simple
+test_nat_simple_mode3: *test_nat_simple
+
test_nat_learning:
<< : *test_nat_simple
nat_opened : 40000
diff --git a/scripts/automation/regression/stateful_tests/trex_general_test.py b/scripts/automation/regression/stateful_tests/trex_general_test.py
index 82b1d9d1..8ff4fdaf 100755
--- a/scripts/automation/regression/stateful_tests/trex_general_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_general_test.py
@@ -139,41 +139,42 @@ class CTRexGeneral_Test(unittest.TestCase):
if res[name] != float(val):
self.fail('TRex results[%s]==%f and not as expected %f ' % (name, res[name], val))
- def check_CPU_benchmark (self, trex_res, err = 25, minimal_cpu = 30, maximal_cpu = 85):
- #cpu_util = float(trex_res.get_last_value("trex-global.data.m_cpu_util"))
- cpu_util = sum(trex_res.get_value_list("trex-global.data.m_cpu_util")[-4:-1]) / 3.0 # mean of 3 values before last
-
- if '1G' in self.modes:
- minimal_cpu /= 10.0
-
- if not self.is_virt_nics:
- if cpu_util > maximal_cpu:
- self.fail("CPU is too high (%s%%), probably queue full." % cpu_util )
- #if cpu_util < minimal_cpu:
- # self.fail("CPU is too low (%s%%), can't verify performance in such low CPU%%." % cpu_util )
-
- test_norm_cpu = sum(trex_res.get_value_list("trex-global.data.m_bw_per_core")[-4:-1]) / 3.0
-
- print("TRex CPU utilization: %g%%, norm_cpu is : %g Gb/core" % (round(cpu_util, 2), round(test_norm_cpu)))
-
- expected_norm_cpu = self.get_benchmark_param('bw_per_core')
- if not expected_norm_cpu:
- expected_norm_cpu = 1
-
- calc_error_precent = abs(100.0 * test_norm_cpu / expected_norm_cpu - 100)
- print('Err percent: %s' % calc_error_precent)
- #if calc_error_precent > err and cpu_util > 10:
- # self.fail('Excepted bw_per_core ratio: %s, got: %g' % (expected_norm_cpu, round(test_norm_cpu)))
-
- # report benchmarks
- if self.GAManager:
- try:
- setup_test = '%s.%s' % (CTRexScenario.setup_name, self.get_name())
- self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core', value = int(test_norm_cpu))
- self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core_exp', value = int(expected_norm_cpu))
- self.GAManager.emptyAndReportQ()
- except Exception as e:
- print('Sending GA failed: %s' % e)
+ def check_CPU_benchmark (self, trex_res, err = 25, minimal_cpu = 10, maximal_cpu = 85):
+ cpu_util = trex_res.get_avg_steady_state_value('trex-global.data.m_cpu_util_raw')
+ trex_tx_bps = trex_res.get_avg_steady_state_value('trex-global.data.m_tx_bps')
+ expected_norm_cpu = self.get_benchmark_param('bw_per_core')
+ cores = self.get_benchmark_param('cores')
+ ports_count = trex_res.get_ports_count()
+ test_norm_cpu = trex_tx_bps / (cpu_util * ports_count * cores * 2.5e6)
+
+ if '1G' in self.modes:
+ minimal_cpu /= 10.0
+
+ if not self.is_virt_nics:
+ if cpu_util > maximal_cpu:
+ self.fail("CPU is too high (%s%%), probably queue full." % cpu_util )
+ #if cpu_util < minimal_cpu:
+ # self.fail("CPU is too low (%s%%), can't verify performance in such low CPU%%." % cpu_util )
+
+ print("TRex CPU utilization: %g%%, norm_cpu is : %g Gb/core" % (round(cpu_util, 2), round(test_norm_cpu, 2)))
+
+ if not expected_norm_cpu:
+ expected_norm_cpu = 1
+
+ calc_error_precent = abs(100.0 * test_norm_cpu / expected_norm_cpu - 100)
+ print('Err percent: %s' % calc_error_precent)
+ #if calc_error_precent > err and cpu_util > 10:
+ # self.fail('Excepted bw_per_core ratio: %s, got: %g' % (expected_norm_cpu, round(test_norm_cpu)))
+
+ # report benchmarks
+ if self.GAManager:
+ try:
+ setup_test = '%s.%s' % (CTRexScenario.setup_name, self.get_name())
+ self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core', value = int(test_norm_cpu))
+ self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core_exp', value = int(expected_norm_cpu))
+ self.GAManager.emptyAndReportQ()
+ except Exception as e:
+ print('Sending GA failed: %s' % e)
def check_results_gt (self, res, name, val):
if res is None:
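Note: check_CPU_benchmark now derives the Gb/core figure from steady-state averages (m_tx_bps and m_cpu_util_raw after warm-up) instead of the mean of the last few samples, normalizing by port count, core count and a 2.5e6 scaling constant. A worked sketch of that arithmetic with made-up numbers:

    def bw_per_core(tx_bps, cpu_util_percent, ports_count, cores):
        # Same normalization as check_CPU_benchmark above: a Gb/core figure,
        # using the 2.5e6 scaling constant from the diff.
        return tx_bps / (cpu_util_percent * ports_count * cores * 2.5e6)

    # Illustrative numbers only (not taken from a real run):
    # 10 Gbps of TX at 45% raw CPU on a 2-port, 2-core setup.
    print(round(bw_per_core(10e9, 45.0, 2, 2), 3))   # -> 22.222 Gb/core

The resulting value is then compared against the bw_per_core entry of the setup's benchmark.yaml to compute the error percentage.
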
diff --git a/scripts/automation/regression/stateful_tests/trex_nat_test.py b/scripts/automation/regression/stateful_tests/trex_nat_test.py
index 6e030ffe..c23f67c4 100755
--- a/scripts/automation/regression/stateful_tests/trex_nat_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_nat_test.py
@@ -93,6 +93,9 @@ class CTRexNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
def test_nat_simple_mode2(self):
self.nat_simple_helper(learn_mode=2)
+ def test_nat_simple_mode3(self):
+ self.nat_simple_helper(learn_mode=3)
+
def nat_simple_helper(self, learn_mode=1):
# test initializtion
self.router.configure_basic_interfaces()
diff --git a/scripts/automation/regression/stateful_tests/trex_rx_test.py b/scripts/automation/regression/stateful_tests/trex_rx_test.py
index 40528d16..c08ad1ea 100755
--- a/scripts/automation/regression/stateful_tests/trex_rx_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_rx_test.py
@@ -250,7 +250,7 @@ class CTRexRx_Test(CTRexGeneral_Test):
print('Run until finish, expect errors')
old_errors = copy.deepcopy(self.fail_reasons)
- nat_dict = self.get_benchmark_param('nat_dict', test_name = 'test_nat_simple')
+ nat_dict = self.get_benchmark_param('nat_dict', test_name = 'test_nat_simple_mode1')
nat_obj = CNatConfig(nat_dict)
self.router.config_nat(nat_obj)
self.router.config_zbf()
diff --git a/scripts/automation/regression/stateless_tests/stl_examples_test.py b/scripts/automation/regression/stateless_tests/stl_examples_test.py
index d8b85dfc..71fc3287 100755
--- a/scripts/automation/regression/stateless_tests/stl_examples_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_examples_test.py
@@ -10,14 +10,12 @@ class STLExamples_Test(CStlGeneral_Test):
def explicitSetUp(self):
# examples connect by their own
if self.is_connected():
- self.recover_after_trex_210_issue()
CTRexScenario.stl_trex.disconnect()
def explicitTearDown(self):
# connect back at end of tests
if not self.is_connected():
self.stl_trex.connect()
- self.recover_after_trex_210_issue()
def test_stl_examples(self):
examples_dir = '../trex_control_plane/stl/examples'
diff --git a/scripts/automation/regression/stateless_tests/stl_general_test.py b/scripts/automation/regression/stateless_tests/stl_general_test.py
index 82738f96..5ae2b326 100644
--- a/scripts/automation/regression/stateless_tests/stl_general_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_general_test.py
@@ -6,10 +6,6 @@ from trex_stl_lib.api import *
import time
from nose.tools import nottest
-def setUpModule():
- if CTRexScenario.stl_trex.is_connected():
- CStlGeneral_Test.recover_after_trex_210_issue()
-
class CStlGeneral_Test(CTRexGeneral_Test):
"""This class defines the general stateless testcase of the TRex traffic generator"""
@@ -20,21 +16,6 @@ class CStlGeneral_Test(CTRexGeneral_Test):
if CTRexScenario.stl_init_error:
self.skip(CTRexScenario.stl_init_error)
- # workaround of http://trex-tgn.cisco.com/youtrack/issue/trex-210
- @staticmethod
- def recover_after_trex_210_issue():
- return
- for i in range(20):
- try:
- stl_map_ports(CTRexScenario.stl_trex)
- break
- except:
- CTRexScenario.stl_trex.disconnect()
- time.sleep(0.5)
- CTRexScenario.stl_trex.connect()
- # verify problem is solved
- stl_map_ports(CTRexScenario.stl_trex)
-
def connect(self, timeout = 100):
# need delay and check only because TRex process might be still starting
sys.stdout.write('Connecting')
@@ -85,18 +66,22 @@ class STLBasic_Test(CStlGeneral_Test):
@nottest
def test_connectivity(self):
if not self.is_loopback:
- if CTRexScenario.router_cfg['forceImageReload']:
- CTRexScenario.router.load_clean_config()
- CTRexScenario.router.configure_basic_interfaces()
- CTRexScenario.router.config_pbr(mode = "config")
-
- err = 'Client could not connect'
- CTRexScenario.stl_init_error = err
+ try:
+ if CTRexScenario.router_cfg['forceImageReload']:
+ CTRexScenario.router.load_clean_config()
+ CTRexScenario.router.configure_basic_interfaces()
+ CTRexScenario.router.config_pbr(mode = "config")
+ except Exception as e:
+ CTRexScenario.stl_init_error = 'Could not configure device, err: %s' % e
+ self.fail(CTRexScenario.stl_init_error)
+ return
if not self.connect():
- self.fail(err)
- err = 'Client could not map ports'
- CTRexScenario.stl_init_error = err
+ CTRexScenario.stl_init_error = 'Client could not connect'
+ self.fail(CTRexScenario.stl_init_error)
+ return
+ print('Connected')
if not self.map_ports():
- self.fail(err)
+ CTRexScenario.stl_init_error = 'Client could not map ports'
+ self.fail(CTRexScenario.stl_init_error)
+ return
print('Got ports mapping: %s' % CTRexScenario.stl_ports_map)
- CTRexScenario.stl_init_error = None
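Note: test_connectivity now stores the failure reason in CTRexScenario.stl_init_error only for the step that actually failed (device configuration, connect, or port mapping), and CStlGeneral_Test.setUp skips the remaining stateless tests with that message. A tiny sketch of the record-then-skip gate, with hypothetical names standing in for the scenario class:

    class Scenario(object):
        # Hypothetical stand-in for CTRexScenario's shared state.
        stl_init_error = None

    def run_step(description, step):
        # Run one connectivity step; on failure, remember why so later tests can skip.
        try:
            step()
            return True
        except Exception as e:
            Scenario.stl_init_error = '%s, err: %s' % (description, e)
            return False

    def setup_guard(skip):
        # Equivalent of the check in CStlGeneral_Test.setUp.
        if Scenario.stl_init_error:
            skip(Scenario.stl_init_error)
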
diff --git a/scripts/automation/regression/stateless_tests/stl_rx_test.py b/scripts/automation/regression/stateless_tests/stl_rx_test.py
index 238ff53d..090261ff 100644
--- a/scripts/automation/regression/stateless_tests/stl_rx_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_rx_test.py
@@ -9,7 +9,7 @@ class STLRX_Test(CStlGeneral_Test):
"""Tests for RX feature"""
def setUp(self):
- per_driver_params = {"rte_vmxnet3_pmd": [1, 50, 1,False], "rte_ixgbe_pmd": [30, 5000, 1,True,200,400], "rte_i40e_pmd": [80, 5000, 1,True,100,250],
+ per_driver_params = {"rte_vmxnet3_pmd": [1, 50, 1,False], "rte_ixgbe_pmd": [30, 1000, 1,True,300,400], "rte_i40e_pmd": [80, 1000, 1,True,100,250],
"rte_igb_pmd": [80, 500, 1,False], "rte_em_pmd": [1, 50, 1,False], "rte_virtio_pmd": [1, 50, 1,False]}
CStlGeneral_Test.setUp(self)
@@ -35,6 +35,7 @@ class STLRX_Test(CStlGeneral_Test):
self.rate_lat = per_driver_params[drv_name][2]
else:
self.rate_lat = self.rate_percent
+ self.lat_pps = 1000
self.drops_expected = False
self.c.reset(ports = [self.tx_port, self.rx_port])
@@ -252,15 +253,16 @@ class STLRX_Test(CStlGeneral_Test):
for data in streams_data:
if data['lat']:
flow_stats = STLFlowLatencyStats(pg_id = 5)
+ mode = STLTXSingleBurst(total_pkts = total_pkts, percentage = self.rate_percent)
else:
flow_stats = STLFlowStats(pg_id = 5)
+ mode = STLTXSingleBurst(total_pkts = total_pkts, pps = self.lat_pps)
s = STLStream(name = data['name'],
packet = data['pkt'],
flow_stats = flow_stats,
- mode = STLTXSingleBurst(total_pkts = total_pkts,
- percentage = self.rate_percent
- ))
+ mode = mode
+ )
streams.append(s)
print("\ninjecting {0} packets on port {1}".format(total_pkts, self.tx_port))
diff --git a/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py b/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py
index 905882fe..14ef36f7 100755
--- a/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py
+++ b/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py
@@ -3,9 +3,6 @@ from .stl_general_test import CStlGeneral_Test, CTRexScenario
from misc_methods import run_command
from nose.plugins.attrib import attr
-def setUpModule():
- CStlGeneral_Test.unzip_client_package()
-
@attr('client_package')
class CTRexClientPKG_Test(CStlGeneral_Test):
"""This class tests TRex client package"""
@@ -14,14 +11,13 @@ class CTRexClientPKG_Test(CStlGeneral_Test):
CStlGeneral_Test.setUp(self)
# examples connect by their own
if CTRexScenario.stl_trex.is_connected():
- self.recover_after_trex_210_issue()
CTRexScenario.stl_trex.disconnect()
+ CStlGeneral_Test.unzip_client_package()
def tearDown(self):
# connect back at end of tests
if not CTRexScenario.stl_trex.is_connected():
CTRexScenario.stl_trex.connect()
- self.recover_after_trex_210_issue()
CStlGeneral_Test.tearDown(self)
def run_client_package_stl_example(self, python_version):
diff --git a/scripts/automation/regression/trex.py b/scripts/automation/regression/trex.py
index 44f2faba..aad8f041 100644
--- a/scripts/automation/regression/trex.py
+++ b/scripts/automation/regression/trex.py
@@ -38,8 +38,8 @@ class CTRexScenario:
is_copied = False
GAManager = None
no_daemon = False
- router_image = None
debug_image = False
+ test = None
class CTRexRunner:
"""This is an instance for generating a CTRexRunner"""
diff --git a/scripts/automation/regression/trex_unit_test.py b/scripts/automation/regression/trex_unit_test.py
index 915cd682..4f13a50f 100755
--- a/scripts/automation/regression/trex_unit_test.py
+++ b/scripts/automation/regression/trex_unit_test.py
@@ -30,7 +30,7 @@ import outer_packages
import nose
from nose.plugins import Plugin
-import logging
+from nose.selector import Selector
import CustomLogger
import misc_methods
from rednose import RedNose
@@ -43,11 +43,24 @@ from trex_stl_lib.utils.GAObjClass import GAmanager
import trex
import socket
from pprint import pprint
-import subprocess
-import re
import time
from distutils.dir_util import mkpath
+# nose overrides
+
+# option to select wanted test by name without file, class etc.
+def new_Selector_wantMethod(self, method, orig_Selector_wantMethod = Selector.wantMethod):
+ result = orig_Selector_wantMethod(self, method)
+ return result and (not CTRexScenario.test or CTRexScenario.test in getattr(method, '__name__', ''))
+
+Selector.wantMethod = new_Selector_wantMethod
+
+def new_Selector_wantFunction(self, function, orig_Selector_wantFunction = Selector.wantFunction):
+ result = orig_Selector_wantFunction(self, function)
+ return result and (not CTRexScenario.test or CTRexScenario.test in getattr(function, '__name__', ''))
+
+Selector.wantFunction = new_Selector_wantFunction
+
# override nose's strange representation of setUpClass errors
def __suite_repr__(self):
if hasattr(self.context, '__module__'): # inside class, setUpClass etc.
@@ -59,6 +72,8 @@ def __suite_repr__(self):
nose.suite.ContextSuite.__repr__ = __suite_repr__
nose.suite.ContextSuite.__str__ = __suite_repr__
+# /nose overrides
+
def check_trex_path(trex_path):
if os.path.isfile('%s/trex_daemon_server' % trex_path):
return os.path.abspath(trex_path)
@@ -78,7 +93,7 @@ def get_trex_path():
def address_to_ip(address):
- for i in range(10):
+ for i in range(5):
try:
return socket.gethostbyname(address)
except:
@@ -149,6 +164,8 @@ class CTRexTestConfiguringPlugin(Plugin):
parser.add_option('--trex-args', action='store', default = '',
dest="trex_args",
help="Additional TRex arguments (--no-watchdog etc.).")
+ parser.add_option('-t', '--test', action='store', default = '', dest='test',
+ help='Test name to run (without file, class etc.)')
def configure(self, options, conf):
@@ -160,6 +177,7 @@ class CTRexTestConfiguringPlugin(Plugin):
self.json_verbose = options.json_verbose
self.telnet_verbose = options.telnet_verbose
self.no_daemon = options.no_daemon
+ CTRexScenario.test = options.test
if self.collect_only or self.functional:
return
if CTRexScenario.setup_dir and options.config_path:
@@ -181,7 +199,7 @@ class CTRexTestConfiguringPlugin(Plugin):
self.loggerPath = options.log_path
# initialize CTRexScenario global testing class, to be used by all tests
CTRexScenario.configuration = self.configuration
- CTRexScenario.no_daemon = self.no_daemon
+ CTRexScenario.no_daemon = options.no_daemon
CTRexScenario.benchmark = self.benchmark
CTRexScenario.modes = set(self.modes)
CTRexScenario.server_logs = self.server_logs
@@ -347,15 +365,14 @@ if __name__ == "__main__":
nose_argv += sys_args
- config_plugin = CTRexTestConfiguringPlugin()
- red_nose = RedNose()
+ addplugins = [RedNose(), CTRexTestConfiguringPlugin()]
result = True
try:
if len(CTRexScenario.test_types['functional_tests']):
additional_args = ['--func'] + CTRexScenario.test_types['functional_tests']
if xml_arg:
additional_args += ['--with-xunit', xml_arg.replace('.xml', '_functional.xml')]
- result = nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin])
+ result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins)
if len(CTRexScenario.test_types['stateful_tests']):
additional_args = ['--stf']
if '--warmup' in sys.argv:
@@ -365,14 +382,14 @@ if __name__ == "__main__":
additional_args.extend(['-a', '!client_package'])
if xml_arg:
additional_args += ['--with-xunit', xml_arg.replace('.xml', '_stateful.xml')]
- result = nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin]) and result
+ result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins) and result
if len(CTRexScenario.test_types['stateless_tests']):
additional_args = ['--stl', 'stateless_tests/stl_general_test.py:STLBasic_Test.test_connectivity'] + CTRexScenario.test_types['stateless_tests']
if not test_client_package:
additional_args.extend(['-a', '!client_package'])
if xml_arg:
additional_args += ['--with-xunit', xml_arg.replace('.xml', '_stateless.xml')]
- result = nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin]) and result
+ result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins) and result
#except Exception as e:
# result = False
# print(e)
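Note: the new -t/--test option works by wrapping nose's Selector.wantMethod and Selector.wantFunction so that, on top of the normal selection rules, a test is only wanted when its name contains the requested substring. The trick of keeping the original method as a default argument of the wrapper generalizes; a small sketch of the same pattern on a hypothetical class (not nose itself):

    class Selector(object):
        # Hypothetical stand-in for nose.selector.Selector.
        def wantMethod(self, method):
            return method.__name__.startswith('test_')

    WANTED_SUBSTRING = 'nat'   # would come from the -t/--test option

    def new_wantMethod(self, method, orig=Selector.wantMethod):
        # Keep the original behaviour, then add the substring filter on top.
        result = orig(self, method)
        return result and (not WANTED_SUBSTRING or
                           WANTED_SUBSTRING in getattr(method, '__name__', ''))

    Selector.wantMethod = new_wantMethod

    class _T(object):
        def test_nat_simple(self): pass
        def test_routing_imix(self): pass

    s = Selector()
    assert s.wantMethod(_T.test_nat_simple)        # matches the substring
    assert not s.wantMethod(_T.test_routing_imix)  # filtered out
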
diff --git a/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py
index f044f623..ecf6083b 100755
--- a/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py
@@ -73,6 +73,8 @@ class CTRexClient(object):
sets a verbose output on supported class method.
default value : **False**
+ trex_args : string
+ additional arguments passed to TRex. For example, "-w 3 --no-watchdog"
:raises:
socket errors, in case server could not be reached.
@@ -82,22 +84,23 @@ class CTRexClient(object):
self.trex_host = socket.gethostbyname(trex_host)
except: # give it another try
self.trex_host = socket.gethostbyname(trex_host)
- self.trex_daemon_port = trex_daemon_port
- self.master_daemon_port = master_daemon_port
- self.trex_zmq_port = trex_zmq_port
- self.seq = None
- self._last_sample = time.time()
- self.__default_user = get_current_user()
- self.verbose = verbose
- self.result_obj = CTRexResult(max_history_size, filtered_latency_amount)
- self.decoder = JSONDecoder()
- self.history = jsonrpclib.history.History()
- self.master_daemon_path = "http://{hostname}:{port}/".format( hostname = self.trex_host, port = master_daemon_port )
- self.master_daemon = jsonrpclib.Server(self.master_daemon_path, history = self.history)
- self.trex_server_path = "http://{hostname}:{port}/".format( hostname = self.trex_host, port = trex_daemon_port )
- self.server = jsonrpclib.Server(self.trex_server_path, history = self.history)
- self.debug_image = debug_image
- self.trex_args = trex_args
+ self.trex_daemon_port = trex_daemon_port
+ self.master_daemon_port = master_daemon_port
+ self.trex_zmq_port = trex_zmq_port
+ self.seq = None
+ self._last_sample = time.time()
+ self.__default_user = get_current_user()
+ self.verbose = verbose
+ self.result_obj = CTRexResult(max_history_size, filtered_latency_amount)
+ self.decoder = JSONDecoder()
+ self.history = jsonrpclib.history.History()
+ self.master_daemon_path = "http://{hostname}:{port}/".format( hostname = self.trex_host, port = master_daemon_port )
+ self.master_daemon = jsonrpclib.Server(self.master_daemon_path, history = self.history)
+ self.trex_server_path = "http://{hostname}:{port}/".format( hostname = self.trex_host, port = trex_daemon_port )
+ self.server = jsonrpclib.Server(self.trex_server_path, history = self.history)
+ self.debug_image = debug_image
+ self.trex_args = trex_args
+ self.sample_to_run_finish = self.sample_until_finish # alias for legacy
def add (self, x, y):
@@ -310,7 +313,7 @@ class CTRexClient(object):
for i in range(int(timeout / poll_rate)):
if not self.get_trex_cmds():
return True
- sleep(poll_rate)
+ time.sleep(poll_rate)
if self.get_trex_cmds():
return False
return True
@@ -535,7 +538,7 @@ class CTRexClient(object):
finally:
self.prompt_verbose_data()
- def sample_until_condition (self, condition_func, time_between_samples = 5):
+ def sample_until_condition (self, condition_func, time_between_samples = 1):
"""
Automatically sets ongoing sampling of TRex data, with sampling rate described by time_between_samples.
@@ -549,7 +552,7 @@ class CTRexClient(object):
time_between_samples : int
determines the time between each sample of the server
- default value : **5**
+ default value : **1**
:return:
the first result object (see :class:`CTRexResult` for further details) of the TRex run on which the condition has been met.
@@ -579,15 +582,15 @@ class CTRexClient(object):
# this could come from provided method 'condition_func'
raise
- def sample_to_run_finish (self, time_between_samples = 5):
+ def sample_until_finish (self, time_between_samples = 1):
"""
- Automatically sets automatically sampling of TRex data with sampling rate described by time_between_samples until TRex run finished.
+ Automatically samples TRex data with sampling rate described by time_between_samples until TRex run finishes.
:parameters:
time_between_samples : int
determines the time between each sample of the server
- default value : **5**
+ default value : **1**
:return:
the latest result object (see :class:`CTRexResult` for further details) with sampled data.
@@ -609,7 +612,7 @@ class CTRexClient(object):
results = self.get_result_obj()
return results
- def sample_x_seconds (self, sample_time, time_between_samples = 5):
+ def sample_x_seconds (self, sample_time, time_between_samples = 1):
"""
Automatically sets ongoing sampling of TRex data for sample_time seconds, with sampling rate described by time_between_samples.
Does not stop the TRex afterwards!
@@ -623,7 +626,7 @@ class CTRexClient(object):
time_between_samples : int
determines the time between each sample of the server
- default value : **5**
+ default value : **1**
:return:
the first result object (see :class:`CTRexResult` for further details) of the TRex run after given sample_time.
@@ -637,12 +640,12 @@ class CTRexClient(object):
"""
# make sure TRex is running. raise exceptions here if any
self.wait_until_kickoff_finish()
- elapsed_time = 0
+ end_time = time.time() + sample_time
while self.is_running():
- if elapsed_time >= sample_time:
+ if time.time() < end_time:
+ time.sleep(time_between_samples)
+ else:
return self.get_result_obj()
- time.sleep(time_between_samples)
- elapsed_time += time_between_samples
raise UserWarning("TRex has stopped at %s seconds (before expected %s seconds)\nTry increasing test duration or decreasing sample_time" % (elapsed_time, sample_time))
def get_result_obj (self, copy_obj = True):
@@ -1271,7 +1274,7 @@ class CTRexResult(object):
.. tip:: | Use '.' to enter one level deeper in dictionary hierarchy.
| Use '[i]' to access the i'th indexed object of an array.
- tree_path_to_key : regex
+ regex : regex
apply a regex to filter results out from a multiple results set.
Filter applies only on keys of dictionary type.
@@ -1299,7 +1302,7 @@ class CTRexResult(object):
.. tip:: | Use '.' to enter one level deeper in dictionary hierarchy.
| Use '[i]' to access the i'th indexed object of an array.
- tree_path_to_key : regex
+ regex : regex
apply a regex to filter results out from a multiple results set.
Filter applies only on keys of dictionary type.
@@ -1352,7 +1355,7 @@ class CTRexResult(object):
if not len(self._history):
return -1
- return len(self.__get_value_by_path(self._history[-1], 'trex-global.data', 'opackets-\d+'))
+ return len(self.get_last_value('trex-global.data', 'opackets-\d+'))
def update_result_data (self, latest_dump):
@@ -1383,14 +1386,21 @@ class CTRexResult(object):
# check for up to 2% change between expected and actual
if (self._current_tx_rate['m_tx_bps'] > 0.98 * self._expected_tx_rate['m_tx_expected_bps']):
self._done_warmup = True
+ latest_dump['warmup_barrier'] = True
# handle latency data
if self.latency_checked:
- latency_per_port = self.get_last_value("trex-latecny-v2.data", "port-")
+ # fix typos, by "pointer"
+ if 'trex-latecny-v2' in latest_dump and 'trex-latency-v2' not in latest_dump:
+ latest_dump['trex-latency-v2'] = latest_dump['trex-latecny-v2']
+ if 'trex-latecny' in latest_dump and 'trex-latency' not in latest_dump:
+ latest_dump['trex-latency'] = latest_dump['trex-latecny']
+
+ latency_per_port = self.get_last_value("trex-latency-v2.data", "port-")
self._max_latency = self.__get_filtered_max_latency(latency_per_port, self.filtered_latency_amount)
- avg_latency = self.get_last_value("trex-latecny.data", "avg-")
+ avg_latency = self.get_last_value("trex-latency.data", "avg-")
self._avg_latency = CTRexResult.__avg_all_and_rename_keys(avg_latency)
- avg_win_latency_list = self.get_value_list("trex-latecny.data", "avg-")
+ avg_win_latency_list = self.get_value_list("trex-latency.data", "avg-")
self._avg_window_latency = CTRexResult.__calc_latency_win_stats(avg_win_latency_list)
tx_pkts = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data.m_total_tx_pkts")
@@ -1427,12 +1437,12 @@ class CTRexResult(object):
for i, p in re.findall(r'(\d+)|([\w|-]+)', tree_path):
dct = dct[p or int(i)]
if regex is not None and isinstance(dct, dict):
- res = {}
- for key,val in dct.items():
- match = re.match(regex, key)
- if match:
- res[key]=val
- return res
+ res = {}
+ for key,val in dct.items():
+ match = re.match(regex, key)
+ if match:
+ res[key]=val
+ return res
else:
return dct
except (KeyError, TypeError):
@@ -1499,6 +1509,42 @@ class CTRexResult(object):
return result
+ # history iterator after warmup period
+ def _get_steady_state_history_iterator(self):
+ if not self.is_done_warmup():
+ raise Exception('Warm-up period not finished')
+ for index, res in enumerate(self._history):
+ if 'warmup_barrier' in res:
+ for steady_state_index in range(index, max(index, len(self._history) - 1)):
+ yield self._history[steady_state_index]
+ return
+ for index in range(len(self._history) - 1):
+ yield self._history[index]
+
+
+ def get_avg_steady_state_value(self, tree_path_to_key):
+ '''
+ Gets average value after warmup period.
+ For example: <result object>.get_avg_steady_state_value('trex-global.data.m_tx_bps')
+ Usually more accurate than latest history value.
+
+ :parameters:
+ tree_path_to_key : str
+ defines a path to desired data.
+
+ :return:
+ average value at steady state
+
+ :raises:
+ Exception in case steady state period was not reached or tree_path_to_key was not found in result.
+ '''
+ values_arr = [self.__get_value_by_path(res, tree_path_to_key) for res in self._get_steady_state_history_iterator()]
+ values_arr = list(filter(lambda x: x is not None, values_arr))
+ if not values_arr:
+ raise Exception('All the keys are None, probably wrong tree_path_to_key: %s' % tree_path_to_key)
+ return sum(values_arr) / float(len(values_arr))
+
+
if __name__ == "__main__":
c = CTRexClient('127.0.0.1')
print('restarting daemon')
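Note: get_avg_steady_state_value averages a metric over the history entries recorded after the warm-up barrier (the sample where measured tx bps first reaches 98% of the expected rate), which is less noisy than reading only the latest sample. Unrelated but worth flagging: the rewritten sample_x_seconds still raises a UserWarning formatted with elapsed_time, which the new end_time-based loop no longer defines, so that error path would itself fail with a NameError. A self-contained sketch of the steady-state averaging idea over a plain list of samples (names here are illustrative, not the client's API):

    def avg_steady_state(history, key):
        # Average 'key' over samples recorded at or after the warm-up barrier.
        start = 0
        for i, sample in enumerate(history):
            if sample.get('warmup_barrier'):
                start = i
                break
        values = [s[key] for s in history[start:] if s.get(key) is not None]
        if not values:
            raise Exception('No values found for key: %s' % key)
        return sum(values) / float(len(values))

    history = [{'m_tx_bps': 1e9},
               {'m_tx_bps': 9.8e9, 'warmup_barrier': True},
               {'m_tx_bps': 10.0e9},
               {'m_tx_bps': 10.2e9}]
    print(avg_steady_state(history, 'm_tx_bps'))   # -> 10000000000.0 (10 Gbps)
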
diff --git a/scripts/automation/trex_control_plane/stl/console/trex_console.py b/scripts/automation/trex_control_plane/stl/console/trex_console.py
index ab70d357..9e3f2600 100755
--- a/scripts/automation/trex_control_plane/stl/console/trex_console.py
+++ b/scripts/automation/trex_control_plane/stl/console/trex_console.py
@@ -459,7 +459,6 @@ class TRexConsole(TRexGeneralCmd):
self.stateless_client.start_line(line)
-
def help_start(self):
@@ -567,7 +566,8 @@ class TRexConsole(TRexGeneralCmd):
parser = parsing_opts.gen_parser(self,
"tui",
self.do_tui.__doc__,
- parsing_opts.XTERM)
+ parsing_opts.XTERM,
+ parsing_opts.LOCKED)
opts = parser.parse_args(line.split())
if opts is None:
@@ -590,7 +590,7 @@ class TRexConsole(TRexGeneralCmd):
with self.stateless_client.logger.supress():
- self.tui.show()
+ self.tui.show(self.stateless_client, locked = opts.locked)
def help_tui (self):
@@ -872,7 +872,8 @@ def main():
# TUI
if options.tui:
- console.do_tui("-x" if options.xtui else "")
+ console.do_tui("-x" if options.xtui else "-l")
+
else:
console.start()
diff --git a/scripts/automation/trex_control_plane/stl/console/trex_tui.py b/scripts/automation/trex_control_plane/stl/console/trex_tui.py
index d3be4435..32c6b1e0 100644
--- a/scripts/automation/trex_control_plane/stl/console/trex_tui.py
+++ b/scripts/automation/trex_control_plane/stl/console/trex_tui.py
@@ -4,6 +4,7 @@ import os
import time
from collections import OrderedDict, deque
import datetime
+import readline
if sys.version_info > (3,0):
from io import StringIO
@@ -15,6 +16,10 @@ from trex_stl_lib.utils import text_tables
from trex_stl_lib import trex_stl_stats
from trex_stl_lib.utils.filters import ToggleFilter
+class TUIQuit(Exception):
+ pass
+
+
# for STL exceptions
from trex_stl_lib.api import *
@@ -210,7 +215,7 @@ class TrexTUILatencyStats(TrexTUIPanel):
super(TrexTUILatencyStats, self).__init__(mng, "lstats")
self.key_actions = OrderedDict()
self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}
- self.key_actions['t'] = {'action': self.action_toggle_histogram, 'legend': 'toggle histogram', 'show': True}
+ self.key_actions['h'] = {'action': self.action_toggle_histogram, 'legend': 'histogram toggle', 'show': True}
self.is_histogram = False
@@ -238,6 +243,23 @@ class TrexTUILatencyStats(TrexTUIPanel):
self.stateless_client.latency_stats.clear_stats()
return ""
+
+# utilization stats
+class TrexTUIUtilizationStats(TrexTUIPanel):
+ def __init__ (self, mng):
+ super(TrexTUIUtilizationStats, self).__init__(mng, "ustats")
+ self.key_actions = {}
+
+ def show (self):
+ stats = self.stateless_client._get_formatted_stats(port_id_list = None, stats_mask = trex_stl_stats.UT_COMPAT)
+ # print stats to screen
+ for stat_type, stat_data in stats.items():
+ text_tables.print_table_with_header(stat_data.text_table, stat_type)
+
+ def get_key_actions (self):
+ return self.key_actions
+
+
# log
class TrexTUILog():
def __init__ (self):
@@ -258,24 +280,42 @@ class TrexTUILog():
print(msg)
+# a predicate to wrap function as a bool
+class Predicate(object):
+ def __init__ (self, func):
+ self.func = func
+
+ def __nonzero__ (self):
+ return True if self.func() else False
+ def __bool__ (self):
+ return True if self.func() else False
+
+
# Panels manager (contains server panels)
class TrexTUIPanelManager():
def __init__ (self, tui):
self.tui = tui
self.stateless_client = tui.stateless_client
self.ports = self.stateless_client.get_all_ports()
-
+ self.locked = False
self.panels = {}
self.panels['dashboard'] = TrexTUIDashBoard(self)
self.panels['sstats'] = TrexTUIStreamsStats(self)
self.panels['lstats'] = TrexTUILatencyStats(self)
+ self.panels['ustats'] = TrexTUIUtilizationStats(self)
self.key_actions = OrderedDict()
- self.key_actions['q'] = {'action': self.action_quit, 'legend': 'quit', 'show': True}
+
+ # we allow console only when ports are acquired
+ self.key_actions['ESC'] = {'action': self.action_none, 'legend': 'console', 'show': Predicate(lambda : not self.locked)}
+
+ self.key_actions['q'] = {'action': self.action_none, 'legend': 'quit', 'show': True}
self.key_actions['d'] = {'action': self.action_show_dash, 'legend': 'dashboard', 'show': True}
self.key_actions['s'] = {'action': self.action_show_sstats, 'legend': 'streams', 'show': True}
self.key_actions['l'] = {'action': self.action_show_lstats, 'legend': 'latency', 'show': True}
+ self.key_actions['u'] = {'action': self.action_show_ustats, 'legend': 'util', 'show': True}
+
# start with dashboard
self.main_panel = self.panels['dashboard']
@@ -290,7 +330,6 @@ class TrexTUIPanelManager():
self.show_log = False
-
def generate_legend (self):
self.legend = "\n{:<12}".format("browse:")
@@ -327,14 +366,18 @@ class TrexTUIPanelManager():
# on window switch or turn on / off of the TUI we call this
- def init (self, show_log = False):
+ def init (self, show_log = False, locked = False):
self.show_log = show_log
+ self.locked = locked
self.generate_legend()
- def show (self):
+ def show (self, show_legend):
self.main_panel.show()
self.print_connection_status()
- self.print_legend()
+
+ if show_legend:
+ self.generate_legend()
+ self.print_legend()
if self.show_log:
self.log.show()
@@ -350,21 +393,22 @@ class TrexTUIPanelManager():
msg = self.main_panel.get_key_actions()[ch]['action']()
else:
- msg = ""
+ return False
self.generate_legend()
-
- if msg == None:
- return False
- else:
- if msg:
- self.log.add_event(msg)
- return True
+ return True
+
+ #if msg == None:
+ # return False
+ #else:
+ # if msg:
+ # self.log.add_event(msg)
+ # return True
# actions
- def action_quit (self):
+ def action_none (self):
return None
def action_show_dash (self):
@@ -392,6 +436,11 @@ class TrexTUIPanelManager():
self.init(self.show_log)
return ""
+ def action_show_ustats(self):
+ self.main_panel = self.panels['ustats']
+ self.init(self.show_log)
+ return ""
+
# shows a textual top style window
class TrexTUI():
@@ -404,18 +453,7 @@ class TrexTUI():
self.stateless_client = stateless_client
self.pm = TrexTUIPanelManager(self)
-
-
-
- def handle_key_input (self):
- # try to read a single key
- ch = os.read(sys.stdin.fileno(), 1).decode()
- if ch != None and len(ch) > 0:
- return (self.pm.handle_key(ch), True)
-
- else:
- return (True, False)
-
+
def clear_screen (self):
#os.system('clear')
@@ -423,17 +461,16 @@ class TrexTUI():
sys.stdout.write("\x1b[2J\x1b[H")
+ def show (self, client, show_log = False, locked = False):
+ with AsyncKeys(client, locked) as async_keys:
+ self.async_keys = async_keys
+ self.show_internal(show_log, locked)
- def show (self, show_log = False):
- # init termios
- old_settings = termios.tcgetattr(sys.stdin)
- new_settings = termios.tcgetattr(sys.stdin)
- new_settings[3] = new_settings[3] & ~(termios.ECHO | termios.ICANON) # lflags
- new_settings[6][termios.VMIN] = 0 # cc
- new_settings[6][termios.VTIME] = 0 # cc
- termios.tcsetattr(sys.stdin, termios.TCSADRAIN, new_settings)
- self.pm.init(show_log)
+
+ def show_internal (self, show_log, locked):
+
+ self.pm.init(show_log, locked)
self.state = self.STATE_ACTIVE
self.draw_policer = 0
@@ -441,11 +478,11 @@ class TrexTUI():
try:
while True:
# draw and handle user input
- cont, force_draw = self.handle_key_input()
- self.draw_screen(force_draw)
- if not cont:
- break
- time.sleep(0.1)
+ status = self.async_keys.tick(self.pm)
+
+ self.draw_screen(status)
+ if status == AsyncKeys.STATUS_NONE:
+ time.sleep(0.001)
# regular state
if self.state == self.STATE_ACTIVE:
@@ -473,34 +510,402 @@ class TrexTUI():
self.state = self.STATE_LOST_CONT
- finally:
- # restore
- termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)
+ except TUIQuit:
+ print("\nExiting TUI...")
print("")
# draw once
- def draw_screen (self, force_draw = False):
+ def draw_screen (self, status):
- if (self.draw_policer >= 5) or (force_draw):
+ # only redraw the keys line
+ if status == AsyncKeys.STATUS_REDRAW_KEYS:
+ self.clear_screen()
+ sys.stdout.write(self.last_snap)
+ self.async_keys.draw()
+ sys.stdout.flush()
+ return
+
+ if (self.draw_policer >= 500) or (status == AsyncKeys.STATUS_REDRAW_ALL):
# capture stdout to a string
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
- self.pm.show()
+ self.pm.show(show_legend = self.async_keys.is_legend_mode())
+ self.last_snap = mystdout.getvalue()
+
+ self.async_keys.draw()
sys.stdout = old_stdout
self.clear_screen()
- print(mystdout.getvalue())
-
+ sys.stdout.write(mystdout.getvalue())
+
sys.stdout.flush()
self.draw_policer = 0
else:
self.draw_policer += 1
+
def get_state (self):
return self.state
+
+
+
+
+# handles async IO
+class AsyncKeys:
+
+ MODE_LEGEND = 1
+ MODE_CONSOLE = 2
+
+ STATUS_NONE = 0
+ STATUS_REDRAW_KEYS = 1
+ STATUS_REDRAW_ALL = 2
+
+ def __init__ (self, client, locked = False):
+ self.engine_console = AsyncKeysEngineConsole(self, client)
+ self.engine_legend = AsyncKeysEngineLegend(self)
+ self.locked = locked
+
+ if locked:
+ self.engine = self.engine_legend
+ self.locked = True
+ else:
+ self.engine = self.engine_console
+ self.locked = False
+
+ def __enter__ (self):
+ # init termios
+ self.old_settings = termios.tcgetattr(sys.stdin)
+ new_settings = termios.tcgetattr(sys.stdin)
+ new_settings[3] = new_settings[3] & ~(termios.ECHO | termios.ICANON) # lflags
+ new_settings[6][termios.VMIN] = 0 # cc
+ new_settings[6][termios.VTIME] = 0 # cc
+ termios.tcsetattr(sys.stdin, termios.TCSADRAIN, new_settings)
+ return self
+
+ def __exit__ (self, type, value, traceback):
+ termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.old_settings)
+
+
+ def is_legend_mode (self):
+ return self.engine.get_type() == AsyncKeys.MODE_LEGEND
+
+ def is_console_mode (self):
+ return self.engine.get_type == AsyncKeys.MODE_CONSOLE
+
+ def switch (self):
+ if self.is_legend_mode():
+ self.engine = self.engine_console
+ else:
+ self.engine = self.engine_legend
+
+
+ def tick (self, pm):
+ seq = ''
+ # drain all chars
+ while True:
+ ch = os.read(sys.stdin.fileno(), 1).decode()
+ if not ch:
+ break
+ seq += ch
+
+ if not seq:
+ return self.STATUS_NONE
+
+ # ESC for switch
+ if seq == '\x1b':
+ if not self.locked:
+ self.switch()
+ return self.STATUS_REDRAW_ALL
+
+ # EOF (ctrl + D)
+ if seq == '\x04':
+ raise TUIQuit()
+
+ # pass tick to engine
+ return self.engine.tick(seq, pm)
+
+
+ def draw (self):
+ self.engine.draw()
+
+
+
+# Legend engine
+class AsyncKeysEngineLegend:
+ def __init__ (self, async):
+ self.async = async
+
+ def get_type (self):
+ return self.async.MODE_LEGEND
+
+ def tick (self, seq, pm):
+
+ if seq == 'q':
+ raise TUIQuit()
+
+ # ignore escapes
+ if len(seq) > 1:
+ return AsyncKeys.STATUS_NONE
+
+ rc = pm.handle_key(seq)
+ return AsyncKeys.STATUS_REDRAW_ALL if rc else AsyncKeys.STATUS_NONE
+
+ def draw (self):
+ pass
+
+
+# console engine
+class AsyncKeysEngineConsole:
+ def __init__ (self, async, client):
+ self.async = async
+ self.lines = deque(maxlen = 100)
+
+ self.ac = {'start' : client.start_line,
+ 'stop' : client.stop_line,
+ 'pause' : client.pause_line,
+ 'resume': client.resume_line,
+ 'update': client.update_line,
+ 'quit' : self.action_quit,
+ 'q' : self.action_quit,
+ 'exit' : self.action_quit,
+ 'help' : self.action_help,
+ '?' : self.action_help}
+
+ # fetch readline history and add relevants
+ for i in range(0, readline.get_current_history_length()):
+ cmd = readline.get_history_item(i)
+ if cmd and cmd.split()[0] in self.ac:
+ self.lines.appendleft(CmdLine(cmd))
+
+ # new line
+ self.lines.appendleft(CmdLine(''))
+ self.line_index = 0
+ self.last_status = ''
+
+ def action_quit (self, _):
+ raise TUIQuit()
+
+ def action_help (self, _):
+ return ' '.join([format_text(cmd, 'bold') for cmd in self.ac.keys()])
+
+ def get_type (self):
+ return self.async.MODE_CONSOLE
+
+
+ def handle_escape_char (self, seq):
+ # up
+ if seq == '\x1b[A':
+ self.line_index = min(self.line_index + 1, len(self.lines) - 1)
+
+ # down
+ elif seq == '\x1b[B':
+ self.line_index = max(self.line_index - 1, 0)
+
+ # left
+ elif seq == '\x1b[D':
+ self.lines[self.line_index].go_left()
+
+ # right
+ elif seq == '\x1b[C':
+ self.lines[self.line_index].go_right()
+
+ # del
+ elif seq == '\x1b[3~':
+ self.lines[self.line_index].del_key()
+
+ # home
+ elif seq == '\x1b[H':
+ self.lines[self.line_index].home_key()
+
+ # end
+ elif seq == '\x1b[F':
+ self.lines[self.line_index].end_key()
+ return True
+
+ # unknown key
+ else:
+ return AsyncKeys.STATUS_NONE
+
+ return AsyncKeys.STATUS_REDRAW_KEYS
+
+
+ def tick (self, seq, _):
+
+ # handle escape chars
+ if len(seq) > 1:
+ return self.handle_escape_char(seq)
+
+ # handle each char
+ for ch in seq:
+ return self.handle_single_key(ch)
+
+
+
+ def handle_single_key (self, ch):
+
+ # newline
+ if ch == '\n':
+ self.handle_cmd()
+
+ # backspace
+ elif ch == '\x7f':
+ self.lines[self.line_index].backspace()
+
+ # TAB
+ elif ch == '\t':
+ cur = self.lines[self.line_index].get()
+ if not cur:
+ return
+
+ matching_cmds = [x for x in self.ac if x.startswith(cur)]
+
+ common = os.path.commonprefix([x for x in self.ac if x.startswith(cur)])
+ if common:
+ if len(matching_cmds) == 1:
+ self.lines[self.line_index].set(common + ' ')
+ self.last_status = ''
+ else:
+ self.lines[self.line_index].set(common)
+ self.last_status = 'ambigious: '+ ' '.join([format_text(cmd, 'bold') for cmd in matching_cmds])
+
+
+ # simple char
+ else:
+ self.lines[self.line_index] += ch
+
+ return AsyncKeys.STATUS_REDRAW_KEYS
+
+
+ def split_cmd (self, cmd):
+ s = cmd.split(' ', 1)
+ op = s[0]
+ param = s[1] if len(s) == 2 else ''
+ return op, param
+
+
+ def handle_cmd (self):
+ cmd = self.lines[self.line_index].get().strip()
+ if not cmd:
+ return
+
+ op, param = self.split_cmd(cmd)
+
+ func = self.ac.get(op)
+ if func:
+ func_rc = func(param)
+
+ # take out the empty line
+ empty_line = self.lines.popleft()
+ assert(empty_line.ro_line == '')
+
+ if not self.lines or self.lines[0].ro_line != cmd:
+ self.lines.appendleft(CmdLine(cmd))
+
+ # back in
+ self.lines.appendleft(empty_line)
+ self.line_index = 0
+ readline.add_history(cmd)
+
+ # back to readonly
+ for line in self.lines:
+ line.invalidate()
+
+ assert(self.lines[0].modified == False)
+ if not func:
+ self.last_status = "unknown command: '{0}'".format(format_text(cmd.split()[0], 'bold'))
+ else:
+ if isinstance(func_rc, str):
+ self.last_status = func_rc
+ else:
+ self.last_status = format_text("[OK]", 'green') if func_rc else format_text(str(func_rc).replace('\n', ''), 'red')
+
+
+ def draw (self):
+ sys.stdout.write("\nPress 'ESC' for navigation panel...\n")
+ sys.stdout.write("status: {0}\n".format(self.last_status))
+ sys.stdout.write("\ntui>")
+ self.lines[self.line_index].draw()
+
+
+# a readline alike command line - can be modified during edit
+class CmdLine(object):
+ def __init__ (self, line):
+ self.ro_line = line
+ self.w_line = None
+ self.modified = False
+ self.cursor_index = len(line)
+
+ def get (self):
+ if self.modified:
+ return self.w_line
+ else:
+ return self.ro_line
+
+ def set (self, line, cursor_pos = None):
+ self.w_line = line
+ self.modified = True
+
+ if cursor_pos is None:
+ self.cursor_index = len(self.w_line)
+ else:
+ self.cursor_index = cursor_pos
+
+
+ def __add__ (self, other):
+ assert(0)
+
+
+ def __str__ (self):
+ return self.get()
+
+
+ def __iadd__ (self, other):
+
+ self.set(self.get()[:self.cursor_index] + other + self.get()[self.cursor_index:],
+ cursor_pos = self.cursor_index + len(other))
+
+ return self
+
+
+ def backspace (self):
+ if self.cursor_index == 0:
+ return
+
+ self.set(self.get()[:self.cursor_index - 1] + self.get()[self.cursor_index:],
+ self.cursor_index - 1)
+
+
+ def del_key (self):
+ if self.cursor_index == len(self.get()):
+ return
+
+ self.set(self.get()[:self.cursor_index] + self.get()[self.cursor_index + 1:],
+ self.cursor_index)
+
+ def home_key (self):
+ self.cursor_index = 0
+
+ def end_key (self):
+ self.cursor_index = len(self.get())
+
+ def invalidate (self):
+ self.modified = False
+ self.w_line = None
+ self.cursor_index = len(self.ro_line)
+
+ def go_left (self):
+ self.cursor_index = max(0, self.cursor_index - 1)
+
+ def go_right (self):
+ self.cursor_index = min(len(self.get()), self.cursor_index + 1)
+
+ def draw (self):
+ sys.stdout.write(self.get())
+ sys.stdout.write('\b' * (len(self.get()) - self.cursor_index))
+
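
A standalone sketch (not part of the patch) of the editing model CmdLine implements: each history entry keeps its read-only text, and the first edit creates a writable copy at the cursor position, so the history itself is never mutated. Names such as MiniCmdLine are illustrative only.

class MiniCmdLine(object):
    def __init__(self, line):
        self.ro_line = line        # immutable history text
        self.w_line = None         # writable copy, created lazily on first edit
        self.modified = False
        self.cursor = len(line)

    def get(self):
        return self.w_line if self.modified else self.ro_line

    def insert(self, text):
        cur = self.get()
        self.w_line = cur[:self.cursor] + text + cur[self.cursor:]
        self.modified = True
        self.cursor += len(text)

line = MiniCmdLine('stats -p')
line.insert('s')
print(line.get())      # 'stats -ps' - the edited copy
print(line.ro_line)    # 'stats -p'  - the history entry is untouched
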
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_flow_latency_stats.py b/scripts/automation/trex_control_plane/stl/examples/stl_flow_latency_stats.py
index ac0e212b..d8a99479 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_flow_latency_stats.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_flow_latency_stats.py
@@ -1,10 +1,12 @@
+# Example showing how to define a stream for latency measurement, and how to parse the latency information
+
import stl_path
from trex_stl_lib.api import *
import time
import pprint
-def rx_example (tx_port, rx_port, burst_size, bw):
+def rx_example (tx_port, rx_port, burst_size, pps):
print("\nGoing to inject {0} packets on port {1} - checking RX stats on port {2}\n".format(burst_size, tx_port, rx_port))
@@ -19,7 +21,7 @@ def rx_example (tx_port, rx_port, burst_size, bw):
packet = pkt,
flow_stats = STLFlowLatencyStats(pg_id = 5),
mode = STLTXSingleBurst(total_pkts = total_pkts,
- percentage = bw))
+ pps = pps))
# connect to server
c.connect()
@@ -32,7 +34,7 @@ def rx_example (tx_port, rx_port, burst_size, bw):
print("\nInjecting {0} packets on port {1}\n".format(total_pkts, tx_port))
- rc = rx_iteration(c, tx_port, rx_port, total_pkts, pkt.get_pkt_len(), bw)
+ rc = rx_iteration(c, tx_port, rx_port, total_pkts, pkt.get_pkt_len())
if not rc:
passed = False
@@ -44,12 +46,12 @@ def rx_example (tx_port, rx_port, burst_size, bw):
c.disconnect()
if passed:
- print("\nTest has passed :-)\n")
+ print("\nTest passed :-)\n")
else:
- print("\nTest has failed :-(\n")
+ print("\nTest failed :-(\n")
# RX one iteration
-def rx_iteration (c, tx_port, rx_port, total_pkts, pkt_len, bw):
+def rx_iteration (c, tx_port, rx_port, total_pkts, pkt_len):
c.clear_stats()
@@ -58,7 +60,8 @@ def rx_iteration (c, tx_port, rx_port, total_pkts, pkt_len, bw):
stats = c.get_stats()
flow_stats = stats['flow_stats'].get(5)
- lat_stats = stats['latency'].get(5)
+ global_lat_stats = stats['latency']
+ lat_stats = global_lat_stats.get(5)
if not flow_stats:
print("no flow stats available")
return False
@@ -74,6 +77,8 @@ def rx_iteration (c, tx_port, rx_port, total_pkts, pkt_len, bw):
dup = lat_stats['err_cntrs']['dup']
sth = lat_stats['err_cntrs']['seq_too_high']
stl = lat_stats['err_cntrs']['seq_too_low']
+ old_flow = global_lat_stats['global']['old_flow']
+ bad_hdr = global_lat_stats['global']['bad_hdr']
lat = lat_stats['latency']
jitter = lat['jitter']
avg = lat['average']
@@ -89,6 +94,10 @@ def rx_iteration (c, tx_port, rx_port, total_pkts, pkt_len, bw):
return False
print('Error counters: dropped:{0}, ooo:{1} dup:{2} seq too high:{3} seq too low:{4}'.format(drops, ooo, dup, sth, stl))
+ if old_flow:
+ print ('Packets arriving too late after flow stopped: {0}'.format(old_flow))
+ if bad_hdr:
+ print ('Latency packets with corrupted info: {0}'.format(bad_hdr))
print('Latency info:')
print(" Maximum latency(usec): {0}".format(tot_max))
print(" Minimum latency(usec): {0}".format(tot_min))
@@ -131,5 +140,5 @@ def rx_iteration (c, tx_port, rx_port, total_pkts, pkt_len, bw):
return True
# run the tests
-rx_example(tx_port = 1, rx_port = 0, burst_size = 500000, bw = 50)
+rx_example(tx_port = 0, rx_port = 1, burst_size = 1000, pps = 1000)
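
For reference, a hedged sketch of pulling the new global latency counters out of a stats dictionary shaped like the one parsed above; the dictionary literal below is a made-up sample, not real TRex output.

def summarize_latency(stats, pg_id = 5):
    lat = stats.get('latency', {})
    glob = lat.get('global', {})
    per_pg = lat.get(pg_id)
    if not per_pg:
        return 'no latency stats for pg_id {0}'.format(pg_id)
    return 'avg={0}usec old_flow={1} bad_hdr={2}'.format(
        per_pg['latency']['average'], glob.get('old_flow', 0), glob.get('bad_hdr', 0))

sample = {'latency': {'global': {'old_flow': 0, 'bad_hdr': 0},
                      5: {'latency': {'average': 12.5}}}}
print(summarize_latency(sample))
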
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py b/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py
index ed4902fa..3c630ece 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py
@@ -1,3 +1,5 @@
+# Example showing how to define a stream for collecting per flow statistics, and how to parse the received statistics
+
import stl_path
from trex_stl_lib.api import *
@@ -27,18 +29,14 @@ def rx_example (tx_port, rx_port, burst_size, bw):
# prepare our ports
c.reset(ports = [tx_port, rx_port])
- # add both streams to ports
+ # add stream to port
c.add_streams([s1], ports = [tx_port])
- print("\ninjecting {0} packets on port {1}\n".format(total_pkts, tx_port))
-
- for i in range(0, 10):
- print("\nStarting iteration: {0}:".format(i))
- rc = rx_iteration(c, tx_port, rx_port, total_pkts, pkt.get_pkt_len(), bw)
- if not rc:
- passed = False
- break
+ print("\ngoing to inject {0} packets on port {1}\n".format(total_pkts, tx_port))
+ rc = rx_iteration(c, tx_port, rx_port, total_pkts, s1.get_pkt_len())
+ if not rc:
+ passed = False
except STLError as e:
passed = False
@@ -48,19 +46,21 @@ def rx_example (tx_port, rx_port, burst_size, bw):
c.disconnect()
if passed:
- print("\nTest has passed :-)\n")
+ print("\nTest passed :-)\n")
else:
- print("\nTest has failed :-(\n")
+ print("\nTest failed :-(\n")
# RX one iteration
-def rx_iteration (c, tx_port, rx_port, total_pkts, pkt_len, bw):
-
+def rx_iteration (c, tx_port, rx_port, total_pkts, pkt_len):
+ ret = True
+
c.clear_stats()
c.start(ports = [tx_port])
c.wait_on_traffic(ports = [tx_port])
- flow_stats = c.get_stats()['flow_stats'].get(5)
+ global_flow_stats = c.get_stats()['flow_stats']
+ flow_stats = global_flow_stats.get(5)
if not flow_stats:
print("no flow stats available")
return False
@@ -78,26 +78,33 @@ def rx_iteration (c, tx_port, rx_port, total_pkts, pkt_len, bw):
if tx_pkts != total_pkts:
print("TX pkts mismatch - got: {0}, expected: {1}".format(tx_pkts, total_pkts))
pprint.pprint(flow_stats)
- return False
+ ret = False
else:
print("TX pkts match - {0}".format(tx_pkts))
if tx_bytes != (total_pkts * pkt_len):
print("TX bytes mismatch - got: {0}, expected: {1}".format(tx_bytes, (total_pkts * pkt_len)))
pprint.pprint(flow_stats)
- return False
+ ret = False
else:
print("TX bytes match - {0}".format(tx_bytes))
if rx_pkts != total_pkts:
print("RX pkts mismatch - got: {0}, expected: {1}".format(rx_pkts, total_pkts))
pprint.pprint(flow_stats)
- return False
+ ret = False
else:
print("RX pkts match - {0}".format(rx_pkts))
- return True
+
+ for field in ['rx_err', 'tx_err']:
+ for port in global_flow_stats['global'][field].keys():
+ if global_flow_stats['global'][field][port] != 0:
+                print ("\n{0} on port {1}: {2} - You should consider increasing the rx_delay_ms value in wait_on_traffic"
+ .format(field, port, global_flow_stats['global'][field][port]))
+
+ return ret
# run the tests
-rx_example(tx_port = 1, rx_port = 2, burst_size = 500000, bw = 50)
+rx_example(tx_port = 0, rx_port = 1, burst_size = 500, bw = 50)
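
The rx_err/tx_err check above can be factored into a small helper. This is an illustrative sketch only; it takes the flow_stats['global'] sub-dictionary and assumes it carries per-port 'rx_err' and 'tx_err' counters, as in the patch.

def flow_stat_filter_errors(global_section):
    problems = []
    for field in ('rx_err', 'tx_err'):
        for port, count in global_section.get(field, {}).items():
            if count:
                problems.append('{0} on port {1}: {2}'.format(field, port, count))
    return problems

# made-up sample: port 1 missed 3 packets after the RX filter was removed
print(flow_stat_filter_errors({'rx_err': {0: 0, 1: 3}, 'tx_err': {0: 0, 1: 0}}))
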
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_imix.py b/scripts/automation/trex_control_plane/stl/examples/stl_imix.py
index 46d86b2b..875186ba 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_imix.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_imix.py
@@ -5,6 +5,7 @@ import time
import json
from pprint import pprint
import argparse
+import sys
# IMIX test
# it maps the ports to sides
@@ -97,6 +98,7 @@ def imix_test (server, mult):
except STLError as e:
passed = False
print(e)
+ sys.exit(1)
finally:
c.disconnect()
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
index c1833754..7c8a5fbf 100755
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
@@ -511,8 +511,7 @@ class STLClient(object):
self.connected = False
# API classes
- self.api_vers = [ {'type': 'core', 'major': 1, 'minor':2 }
- ]
+ self.api_vers = [ {'type': 'core', 'major': 1, 'minor': 3 } ]
self.api_h = {'core': None}
# logger
@@ -555,14 +554,17 @@ class STLClient(object):
self.latency_stats = trex_stl_stats.CLatencyStats(self.ports)
+ self.util_stats = trex_stl_stats.CUtilStats(self)
+
self.stats_generator = trex_stl_stats.CTRexInfoGenerator(self.global_stats,
self.ports,
self.flow_stats,
self.latency_stats,
+ self.util_stats,
self.async_client.monitor)
-
+
############# private functions - used by the class itself ###########
@@ -1284,7 +1286,7 @@ class STLClient(object):
**total** and per port statistics contain dictionary with following format.
- Most of the bytes counters (unless specified otherwise) are in L2 layer including the FCS. e.g. minimum packet size in 64 bytes
+            Most of the byte counters (unless specified otherwise) are at the L2 layer, including the Ethernet FCS, e.g. the minimum packet size is 64 bytes
=============================== ===============
key Meaning
@@ -1303,21 +1305,34 @@ class STLClient(object):
.. _flow_stats:
- **flow_stats** contains dictionaries per packet group id (pg id). Each one with the following structure.
+ **flow_stats** contains :ref:`global dictionary <flow_stats_global>`, and dictionaries per packet group id (pg id). See structures below.
+
+            **per pg_id flow stat** dictionaries have the following structure:
================= ===============
key Meaning
================= ===============
- rx_bps Receivd bytes per second rate
- rx_bps_l1 Receivd bytes per second rate, including layer one
+ rx_bps Received bytes per second rate
+ rx_bps_l1 Received bytes per second rate, including layer one
rx_bytes Total number of received bytes
rx_pkts Total number of received packets
rx_pps Received packets per second
- tx_bps Transmitted bytes per second rate
- tx_bps_l1 Transmitted bytes per second rate, including layer one
+ tx_bps Transmit bytes per second rate
+ tx_bps_l1 Transmit bytes per second rate, including layer one
tx_bytes Total number of sent bytes
tx_pkts Total number of sent packets
- tx_pps Transmit packets per second
+ tx_pps Transmit packets per second rate
+ ================= ===============
+
+ .. _flow_stats_global:
+
+ **global flow stats** dictionary has the following structure:
+
+ ================= ===============
+ key Meaning
+ ================= ===============
+            rx_err            Number of flow statistics packets received that could not be associated with any pg_id. This can happen if the latency on the setup in use is high. See the :ref:`wait_on_traffic <wait_on_traffic>` rx_delay_ms parameter for details.
+            tx_err            Number of flow statistics packets transmitted that could not be associated with any pg_id. This is never expected; if you see a non-zero value here, please report it.
================= ===============
.. _global:
@@ -1340,7 +1355,9 @@ class STLClient(object):
.. _latency:
- **latency** contains dictionary per packet group id (pg id). Each one with the following structure.
+ **latency** contains :ref:`global dictionary <lat_stats_global>`, and dictionaries per packet group id (pg id). Each one with the following structure.
+
+            **per pg_id latency stat** dictionaries have the following structure:
=========================== ===============
key Meaning
@@ -1394,7 +1411,16 @@ class STLClient(object):
total_min Minimum latency measured over the stream lifetime (in usec).
================= ===============
+ .. _lat_stats_global:
+
+ **global latency stats** dictionary has the following structure:
+ ================= ===============
+ key Meaning
+ ================= ===============
+            old_flow          Number of latency statistics packets received that could not be associated with any pg_id. This can happen if the latency on the setup in use is high. See the :ref:`wait_on_traffic <wait_on_traffic>` rx_delay_ms parameter for details.
+            bad_hdr           Number of latency packets received with bad latency data. This can happen because of garbage packets in the network, or if the DUT corrupts packets.
+ ================= ===============
:raises:
None
@@ -1664,6 +1690,23 @@ class STLClient(object):
if not rc:
raise STLError(rc)
+ @__api_check(True)
+ def get_util_stats(self):
+ """
+        Get utilization stats:
+          * History of TRex CPU utilization per thread (list of lists)
+          * MBUF memory consumption per CPU socket
+
+ :parameters:
+ None
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+ self.logger.pre_cmd('Getting Utilization stats')
+ return self.util_stats.get_stats()
+
@__api_check(True)
def reset(self, ports = None):
@@ -1878,8 +1921,6 @@ class STLClient(object):
raise STLError(rc)
-
-
@__api_check(True)
def stop (self, ports = None, rx_delay_ms = 10):
"""
@@ -2274,6 +2315,8 @@ class STLClient(object):
@__api_check(True)
def wait_on_traffic (self, ports = None, timeout = 60, rx_delay_ms = 10):
"""
+ .. _wait_on_traffic:
+
Block until traffic on specified port(s) has ended
:parameters:
@@ -2284,12 +2327,11 @@ class STLClient(object):
timeout in seconds
rx_delay_ms : int
- time to wait until RX filters are removed
- this value should reflect the time it takes
- packets which were transmitted to arrive
+                Time to wait (in milliseconds) after the last packet was sent, before the RX filters used for
+                measuring flow statistics and latency are removed.
+                This value should reflect the time it takes transmitted packets to arrive
to the destination.
- after this time the RX filters will be removed
-
+                After this time, the RX filters are removed, and packets that arrive for the per flow statistics feature or for latency flows are counted as errors.
:raises:
+ :exc:`STLTimeoutError` - in case timeout has expired
@@ -2385,13 +2427,14 @@ class STLClient(object):
rc = f(*args)
except STLError as e:
client.logger.log("Log:\n" + format_text(e.brief() + "\n", 'bold'))
- return
+ return RC_ERR(e.brief())
# if got true - print time
if rc:
delta = time.time() - time1
client.logger.log(format_time(delta) + "\n")
+ return rc
return wrap
@@ -2544,14 +2587,14 @@ class STLClient(object):
opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True)
if opts is None:
- return
+ return RC_ERR("invalid arguments for 'start'")
active_ports = list_intersect(self.get_active_ports(), opts.ports)
if active_ports:
if not opts.force:
msg = "Port(s) {0} are active - please stop them or add '--force'\n".format(active_ports)
self.logger.log(format_text(msg, 'bold'))
- return
+ return RC_ERR(msg)
else:
self.stop(active_ports)
@@ -2569,8 +2612,10 @@ class STLClient(object):
else:
# must be exact
if len(opts.ports) != len(opts.tunables):
- self.logger.log('tunables section count must be 1 or exactly as the number of ports: got {0}'.format(len(opts.tunables)))
- return
+                msg = 'tunables section count must be 1 or match the number of ports: got {0}'.format(len(opts.tunables))
+ self.logger.log(msg)
+ return RC_ERR(msg)
+
tunables = opts.tunables
@@ -2590,9 +2635,10 @@ class STLClient(object):
self.add_streams(profile.get_streams(), ports = port)
except STLError as e:
- self.logger.log(format_text("\nError while loading profile '{0}'\n".format(opts.file[0]), 'bold'))
+ msg = format_text("\nError while loading profile '{0}'\n".format(opts.file[0]), 'bold')
+ self.logger.log(msg)
self.logger.log(e.brief() + "\n")
- return
+ return RC_ERR(msg)
if opts.dry:
@@ -2604,8 +2650,7 @@ class STLClient(object):
opts.duration,
opts.total)
- # true means print time
- return True
+ return RC_OK()
@@ -2619,23 +2664,25 @@ class STLClient(object):
opts = parser.parse_args(line.split(), default_ports = self.get_active_ports(), verify_acquired = True)
if opts is None:
- return
+ return RC_ERR("invalid arguments for 'stop'")
# find the relevant ports
ports = list_intersect(opts.ports, self.get_active_ports())
if not ports:
if not opts.ports:
- self.logger.log('stop - no active ports')
+ msg = 'stop - no active ports'
else:
- self.logger.log('stop - no active traffic on ports {0}'.format(opts.ports))
- return
+ msg = 'stop - no active traffic on ports {0}'.format(opts.ports)
+
+ self.logger.log(msg)
+ return RC_ERR(msg)
# call API
self.stop(ports)
# true means print time
- return True
+ return RC_OK()
@__console
@@ -2651,22 +2698,24 @@ class STLClient(object):
opts = parser.parse_args(line.split(), default_ports = self.get_active_ports(), verify_acquired = True)
if opts is None:
- return
+ return RC_ERR("invalid arguments for 'update'")
# find the relevant ports
ports = list_intersect(opts.ports, self.get_active_ports())
if not ports:
if not opts.ports:
- self.logger.log('update - no active ports')
+ msg = 'update - no active ports'
else:
- self.logger.log('update - no active traffic on ports {0}'.format(opts.ports))
- return
+ msg = 'update - no active traffic on ports {0}'.format(opts.ports)
+
+ self.logger.log(msg)
+ return RC_ERR(msg)
self.update(ports, opts.mult, opts.total, opts.force)
# true means print time
- return True
+ return RC_OK()
@__console
@@ -2679,26 +2728,29 @@ class STLClient(object):
opts = parser.parse_args(line.split(), default_ports = self.get_transmitting_ports(), verify_acquired = True)
if opts is None:
- return
+ return RC_ERR("invalid arguments for 'pause'")
# check for already paused case
if opts.ports and is_sub_list(opts.ports, self.get_paused_ports()):
- self.logger.log('pause - all of port(s) {0} are already paused'.format(opts.ports))
- return
+ msg = 'pause - all of port(s) {0} are already paused'.format(opts.ports)
+ self.logger.log(msg)
+ return RC_ERR(msg)
# find the relevant ports
ports = list_intersect(opts.ports, self.get_transmitting_ports())
if not ports:
if not opts.ports:
- self.logger.log('pause - no transmitting ports')
+ msg = 'pause - no transmitting ports'
else:
- self.logger.log('pause - none of ports {0} are transmitting'.format(opts.ports))
- return
+ msg = 'pause - none of ports {0} are transmitting'.format(opts.ports)
+
+ self.logger.log(msg)
+ return RC_ERR(msg)
self.pause(ports)
# true means print time
- return True
+ return RC_OK()
@__console
@@ -2711,22 +2763,24 @@ class STLClient(object):
opts = parser.parse_args(line.split(), default_ports = self.get_paused_ports(), verify_acquired = True)
if opts is None:
- return
+ return RC_ERR("invalid arguments for 'resume'")
# find the relevant ports
ports = list_intersect(opts.ports, self.get_paused_ports())
if not ports:
if not opts.ports:
- self.logger.log('resume - no paused ports')
+ msg = 'resume - no paused ports'
else:
- self.logger.log('resume - none of ports {0} are paused'.format(opts.ports))
- return
+ msg = 'resume - none of ports {0} are paused'.format(opts.ports)
+
+ self.logger.log(msg)
+ return RC_ERR(msg)
self.resume(ports)
# true means print time
- return True
+ return RC_OK()
@__console
@@ -2931,7 +2985,7 @@ class STLClient(object):
if profile_type == 'python':
self.logger.log('Type: {:^12}'.format('Python Module'))
- self.logger.log('Tunables: {:^12}'.format(['{0} = {1}'.format(k ,v) for k, v in info['tunables'].items()]))
+ self.logger.log('Tunables: {:^12}'.format(str(['{0} = {1}'.format(k ,v) for k, v in info['tunables'].items()])))
elif profile_type == 'yaml':
self.logger.log('Type: {:^12}'.format('YAML'))
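
A hedged end-to-end sketch of the client-side flow these changes document: start traffic, wait with a generous rx_delay_ms so the RX filters are not removed before the last packets land, then read the new utilization stats. It assumes a reachable TRex server and that stl_path/trex_stl_lib are importable as in the bundled examples; port numbers, addresses and rates are placeholders, and the 'cpu'/'mbuf_stats' keys follow the generator code in trex_stl_stats.py below.

import stl_path
from trex_stl_lib.api import *

c = STLClient()
try:
    c.connect()
    c.reset(ports = [0, 1])

    # a minimal burst stream; real tests would load a profile instead
    base_pkt = Ether()/IP(src = "16.0.0.1", dst = "48.0.0.1")/UDP(dport = 12, sport = 1025)/(24 * 'x')
    s = STLStream(packet = STLPktBuilder(pkt = base_pkt),
                  mode = STLTXSingleBurst(total_pkts = 1000, pps = 1000))
    c.add_streams([s], ports = [0])

    c.start(ports = [0])
    # generous rx_delay_ms: keep RX filters up until in-flight packets arrive
    c.wait_on_traffic(ports = [0], rx_delay_ms = 100)

    util = c.get_util_stats()
    print(util.get('cpu', []))         # per-thread CPU utilization history (list of lists)
    print(util.get('mbuf_stats', {}))  # MBUF consumption per CPU socket
finally:
    c.disconnect()
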
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
index 3effa1f0..af4d6f69 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
@@ -11,7 +11,6 @@ import datetime
import time
import re
import math
-import copy
import threading
import pprint
@@ -22,13 +21,16 @@ PORT_STATUS = 'ps'
STREAMS_STATS = 's'
LATENCY_STATS = 'ls'
LATENCY_HISTOGRAM = 'lh'
+CPU_STATS = 'c'
+MBUF_STATS = 'm'
-ALL_STATS_OPTS = [GLOBAL_STATS, PORT_STATS, PORT_STATUS, STREAMS_STATS, LATENCY_STATS, PORT_GRAPH, LATENCY_HISTOGRAM]
+ALL_STATS_OPTS = [GLOBAL_STATS, PORT_STATS, PORT_STATUS, STREAMS_STATS, LATENCY_STATS, PORT_GRAPH, LATENCY_HISTOGRAM, CPU_STATS, MBUF_STATS]
COMPACT = [GLOBAL_STATS, PORT_STATS]
GRAPH_PORT_COMPACT = [GLOBAL_STATS, PORT_GRAPH]
SS_COMPAT = [GLOBAL_STATS, STREAMS_STATS] # stream stats
LS_COMPAT = [GLOBAL_STATS, LATENCY_STATS] # latency stats
LH_COMPAT = [GLOBAL_STATS, LATENCY_HISTOGRAM] # latency histogram
+UT_COMPAT = [GLOBAL_STATS, CPU_STATS, MBUF_STATS] # utilization
ExportableStats = namedtuple('ExportableStats', ['raw_data', 'text_table'])
@@ -104,6 +106,13 @@ def calculate_diff_raw (samples):
return total
+get_number_of_bytes_cache = {}
+# get number of bytes: '64b'->64, '9kb'->9000 etc.
+def get_number_of_bytes(val):
+ if val not in get_number_of_bytes_cache:
+ get_number_of_bytes_cache[val] = int(val[:-1].replace('k', '000'))
+ return get_number_of_bytes_cache[val]
+
# a simple object to keep a watch over a field
class WatchedField(object):
@@ -138,11 +147,12 @@ class CTRexInfoGenerator(object):
STLClient and the ports.
"""
- def __init__(self, global_stats_ref, ports_dict_ref, rx_stats_ref, latency_stats_ref, async_monitor):
+ def __init__(self, global_stats_ref, ports_dict_ref, rx_stats_ref, latency_stats_ref, util_stats_ref, async_monitor):
self._global_stats = global_stats_ref
self._ports_dict = ports_dict_ref
self._rx_stats_ref = rx_stats_ref
self._latency_stats_ref = latency_stats_ref
+ self._util_stats_ref = util_stats_ref
self._async_monitor = async_monitor
def generate_single_statistic(self, port_id_list, statistic_type):
@@ -167,6 +177,12 @@ class CTRexInfoGenerator(object):
elif statistic_type == LATENCY_HISTOGRAM:
return self._generate_latency_histogram()
+ elif statistic_type == CPU_STATS:
+ return self._generate_cpu_util_stats()
+
+ elif statistic_type == MBUF_STATS:
+ return self._generate_mbuf_util_stats()
+
else:
# ignore by returning empty object
return {}
@@ -404,6 +420,73 @@ class CTRexInfoGenerator(object):
stats_table.header(header)
return {"latency_histogram": ExportableStats(None, stats_table)}
+ def _generate_cpu_util_stats(self):
+ util_stats = self._util_stats_ref.get_stats(use_1sec_cache = True)
+ stats_table = text_tables.TRexTextTable()
+ if util_stats:
+ if 'cpu' not in util_stats:
+                raise Exception("Expecting 'cpu' section in stats %s" % util_stats)
+ cpu_stats = util_stats['cpu']
+ hist_len = len(cpu_stats[0])
+ avg_len = min(5, hist_len)
+ show_len = min(15, hist_len)
+ stats_table.header(['Thread', 'Avg', 'Latest'] + list(range(-1, 0 - show_len, -1)))
+ stats_table.set_cols_align(['l'] + ['r'] * (show_len + 1))
+ stats_table.set_cols_width([8, 3, 6] + [3] * (show_len - 1))
+ stats_table.set_cols_dtype(['t'] * (show_len + 2))
+ for i in range(min(14, len(cpu_stats))):
+ avg = int(round(sum(cpu_stats[i][:avg_len]) / avg_len))
+ stats_table.add_row([i, avg] + cpu_stats[i][:show_len])
+ else:
+ stats_table.add_row(['No Data.'])
+ return {'cpu_util(%)': ExportableStats(None, stats_table)}
+
+ def _generate_mbuf_util_stats(self):
+ util_stats = self._util_stats_ref.get_stats(use_1sec_cache = True)
+ stats_table = text_tables.TRexTextTable()
+ if util_stats:
+ if 'mbuf_stats' not in util_stats:
+                raise Exception("Expecting 'mbuf_stats' section in stats %s" % util_stats)
+ mbuf_stats = util_stats['mbuf_stats']
+ for mbufs_per_socket in mbuf_stats.values():
+ first_socket_mbufs = mbufs_per_socket
+ break
+ if not self._util_stats_ref.mbuf_types_list:
+ mbuf_keys = list(first_socket_mbufs.keys())
+ mbuf_keys.sort(key = get_number_of_bytes)
+ self._util_stats_ref.mbuf_types_list = mbuf_keys
+ types_len = len(self._util_stats_ref.mbuf_types_list)
+ stats_table.set_cols_align(['l'] + ['r'] * (types_len + 1))
+ stats_table.set_cols_width([10] + [7] * (types_len + 1))
+ stats_table.set_cols_dtype(['t'] * (types_len + 2))
+ stats_table.header([''] + self._util_stats_ref.mbuf_types_list + ['RAM(MB)'])
+ total_list = []
+ sum_totals = 0
+ for mbuf_type in self._util_stats_ref.mbuf_types_list:
+ sum_totals += first_socket_mbufs[mbuf_type][1] * get_number_of_bytes(mbuf_type) + 64
+ total_list.append(first_socket_mbufs[mbuf_type][1])
+ sum_totals *= len(list(mbuf_stats.values()))
+ total_list.append(int(sum_totals/1e6))
+ stats_table.add_row(['Total:'] + total_list)
+ stats_table.add_row(['Used:'] + [''] * (types_len + 1))
+ for socket_name in sorted(list(mbuf_stats.keys())):
+ mbufs = mbuf_stats[socket_name]
+ socket_show_name = socket_name.replace('cpu-', '').replace('-', ' ').capitalize() + ':'
+ sum_used = 0
+ used_list = []
+ percentage_list = []
+ for mbuf_type in self._util_stats_ref.mbuf_types_list:
+ used = mbufs[mbuf_type][1] - mbufs[mbuf_type][0]
+ sum_used += used * get_number_of_bytes(mbuf_type) + 64
+ used_list.append(used)
+ percentage_list.append('%s%%' % int(100 * used / mbufs[mbuf_type][1]))
+ used_list.append(int(sum_used/1e6))
+ stats_table.add_row([socket_show_name] + used_list)
+ stats_table.add_row(['Percent:'] + percentage_list + [''])
+ else:
+ stats_table.add_row(['No Data.'])
+ return {'mbuf_util': ExportableStats(None, stats_table)}
+
@staticmethod
def _get_rational_block_char(value, range_start, interval):
# in Konsole, utf-8 is sometimes printed with artifacts, return ascii for now
@@ -1010,6 +1093,13 @@ class CLatencyStats(CTRexStats):
snapshot = {}
output = {}
+ output['global'] = {}
+ for field in ['bad_hdr', 'old_flow']:
+ if 'global' in snapshot and field in snapshot['global']:
+ output['global'][field] = snapshot['global'][field]
+ else:
+ output['global'][field] = 0
+
# we care only about the current active keys
pg_ids = list(filter(is_intable, snapshot.keys()))
@@ -1107,6 +1197,14 @@ class CRxStats(CTRexStats):
# copy timestamp field
output['ts'] = current['ts']
+ # global (not per pg_id) error counters
+ output['global'] = {}
+ for field in ['rx_err', 'tx_err']:
+ output['global'][field] = {}
+ if 'global' in current and field in current['global']:
+ for port in current['global'][field]:
+ output['global'][field][int(port)] = current['global'][field][port]
+
# we care only about the current active keys
pg_ids = list(filter(is_intable, current.keys()))
@@ -1254,6 +1352,9 @@ class CRxStats(CTRexStats):
for pg_id, value in self.latest_stats.items():
# skip non ints
if not is_intable(pg_id):
+                # 'global' stats live at the same level as the pg_ids; we do want them passed through to the user
+ if pg_id == 'global':
+ stats[pg_id] = value
continue
# bare counters
stats[int(pg_id)] = {}
@@ -1276,7 +1377,27 @@ class CRxStats(CTRexStats):
return stats
-
+class CUtilStats(CTRexStats):
+
+ def __init__(self, client):
+ super(CUtilStats, self).__init__()
+ self.client = client
+ self.history = deque(maxlen = 1)
+ self.mbuf_types_list = None
+ self.last_update_ts = 0
+
+ def get_stats(self, use_1sec_cache = False):
+ time_now = time.time()
+ if self.last_update_ts + 1 < time_now or not self.history or not use_1sec_cache:
+ if self.client.is_connected():
+ rc = self.client._transmit('get_utilization')
+ if not rc:
+ raise Exception(rc)
+ self.last_update_ts = time_now
+ self.history.append(rc.data())
+ else:
+ self.history.append({})
+ return self.history[-1]
if __name__ == "__main__":
pass
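
A quick, self-contained check (illustrative only) of what the cached get_number_of_bytes() helper above computes: it drops the trailing 'b' and expands a 'k' suffix into three zeros.

def get_number_of_bytes(val):
    # same transformation as the helper above, without the cache
    return int(val[:-1].replace('k', '000'))

assert get_number_of_bytes('64b') == 64
assert get_number_of_bytes('9kb') == 9000
assert get_number_of_bytes('2048b') == 2048
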
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py
index bb28d3af..1b417d65 100755
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py
@@ -33,12 +33,15 @@ NO_PROMISCUOUS = 20
PROMISCUOUS_SWITCH = 21
TUNABLES = 22
REMOTE_FILE = 23
+LOCKED = 24
GLOBAL_STATS = 50
PORT_STATS = 51
PORT_STATUS = 52
STREAMS_STATS = 53
STATS_MASK = 54
+CPU_STATS = 55
+MBUF_STATS = 56
STREAMS_MASK = 60
# ALL_STREAMS = 61
@@ -319,6 +322,11 @@ OPTIONS_DB = {MULTIPLIER: ArgumentPack(['-m', '--multiplier'],
'default': False,
'help': "Starts TUI in xterm window"}),
+ LOCKED: ArgumentPack(['-l', '--locked'],
+ {'action': 'store_true',
+ 'dest': 'locked',
+ 'default': False,
+ 'help': "Locks TUI on legend mode"}),
FULL_OUTPUT: ArgumentPack(['--full'],
{'action': 'store_true',
@@ -340,6 +348,14 @@ OPTIONS_DB = {MULTIPLIER: ArgumentPack(['-m', '--multiplier'],
{'action': 'store_true',
'help': "Fetch only streams stats"}),
+ CPU_STATS: ArgumentPack(['-c'],
+ {'action': 'store_true',
+ 'help': "Fetch only CPU utilization stats"}),
+
+ MBUF_STATS: ArgumentPack(['-m'],
+ {'action': 'store_true',
+ 'help': "Fetch only MBUF utilization stats"}),
+
STREAMS_MASK: ArgumentPack(['--streams'],
{"nargs": '+',
'dest':'streams',
@@ -365,7 +381,9 @@ OPTIONS_DB = {MULTIPLIER: ArgumentPack(['-m', '--multiplier'],
STATS_MASK: ArgumentGroup(MUTEX, [GLOBAL_STATS,
PORT_STATS,
PORT_STATUS,
- STREAMS_STATS],
+ STREAMS_STATS,
+ CPU_STATS,
+ MBUF_STATS],
{})
}
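
The -c and -m switches above join the existing mutually exclusive stats mask. A rough stand-alone equivalent using plain argparse is sketched below; -c and -m mirror the patch, while the other option names and the parser object itself are illustrative assumptions, not the console's actual parser.

import argparse

parser = argparse.ArgumentParser(prog = 'stats')
mask = parser.add_mutually_exclusive_group()
mask.add_argument('-g', action = 'store_true', help = 'fetch only global stats')
mask.add_argument('-p', action = 'store_true', help = 'fetch only port stats')
mask.add_argument('-c', action = 'store_true', help = 'fetch only CPU utilization stats')
mask.add_argument('-m', action = 'store_true', help = 'fetch only MBUF utilization stats')

print(parser.parse_args(['-c']))   # Namespace(c=True, g=False, m=False, p=False)
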