-rw-r--r--  scripts/automation/regression/setups/trex07/benchmark.yaml             | 370
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_general_test.py      |  71
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_rx_test.py           |   2
-rw-r--r--  scripts/automation/regression/trex.py                                   |   1
-rwxr-xr-x  scripts/automation/regression/trex_unit_test.py                         |  43
-rwxr-xr-x  scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py  |  98
-rwxr-xr-x  scripts/master_daemon.py                                                |   2
7 files changed, 362 insertions(+), 225 deletions(-)
diff --git a/scripts/automation/regression/setups/trex07/benchmark.yaml b/scripts/automation/regression/setups/trex07/benchmark.yaml
index 4778de91..38e25162 100644
--- a/scripts/automation/regression/setups/trex07/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex07/benchmark.yaml
@@ -2,169 +2,243 @@
#### TRex benchmark configuration file ####
###############################################################
-test_nbar_simple :
- multiplier : 7.5
- cores : 2
- exp_gbps : 3.5
- cpu_to_core_ratio : 20800000
- cpu2core_custom_dev: YES
- cpu2core_dev : 0.07
- exp_max_latency : 1000
-
- nbar_classification:
- rtp : 32.57
- http : 30.25
- oracle-sqlnet : 11.23
- exchange : 10.80
- citrix : 5.62
- rtsp : 2.84
- dns : 1.95
- smtp : 0.57
- pop3 : 0.36
- ssl : 0.17
- sctp : 0.13
- sip : 0.09
- unknown : 3.41
-
-test_rx_check :
- multiplier : 13
- cores : 3
- rx_sample_rate : 128
- exp_gbps : 6
- cpu_to_core_ratio : 37270000
- exp_bw : 13
- exp_latency : 1
-
-test_nat_simple : &test_nat_simple
- stat_route_dict :
+#### common templates ###
+
+stat_route_dict: &stat_route_dict
clients_start : 16.0.0.1
servers_start : 48.0.0.1
dual_port_mask : 1.0.0.0
client_destination_mask : 255.0.0.0
server_destination_mask : 255.0.0.0
- nat_dict :
+
+nat_dict: &nat_dict
clients_net_start : 16.0.0.0
client_acl_wildcard_mask : 0.0.0.255
dual_port_mask : 1.0.0.0
pool_start : 200.0.0.0
pool_netmask : 255.255.255.0
- multiplier : 12000
- cores : 1
- cpu_to_core_ratio : 37270000
- exp_bw : 1
- exp_latency : 1
- allow_timeout_dev : YES
-
-test_nat_simple_mode1 : *test_nat_simple
-test_nat_simple_mode2 : *test_nat_simple
-
-test_nat_learning :
- stat_route_dict :
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
- multiplier : 12000
- cores : 1
- nat_opened : 40000
- cpu_to_core_ratio : 270
- exp_bw : 8
- exp_latency : 1
- allow_timeout_dev : YES
-
-test_routing_imix_64 :
- multiplier : 430
- cores : 1
- cpu_to_core_ratio : 280
- exp_latency : 1
-
-test_routing_imix :
- multiplier : 10
- cores : 1
- cpu_to_core_ratio : 1800
- exp_latency : 1
-
-test_static_routing_imix :
- stat_route_dict :
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
- multiplier : 8
- cores : 1
- cpu_to_core_ratio : 1800
- exp_latency : 1
-test_static_routing_imix_asymmetric:
- stat_route_dict :
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
- multiplier : 8
- cores : 1
- cpu_to_core_ratio : 1800
- exp_latency : 1
-
-test_ipv6_simple :
- multiplier : 9
- cores : 2
- cpu_to_core_ratio : 30070000
- cpu2core_custom_dev: YES
- cpu2core_dev : 0.07
-
-
-test_rx_check_sfr:
- multiplier : 10
- cores : 2
- rx_sample_rate : 16
- # allow 0.03% errors, bad router
- error_tolerance : 0.03
-
-test_rx_check_http:
- multiplier : 15000
- cores : 1
- rx_sample_rate : 16
- # allow 0.03% errors, bad routerifconfig
- error_tolerance : 0.03
-test_rx_check_sfr_ipv6:
- multiplier : 10
- cores : 2
- rx_sample_rate : 16
- # allow 0.03% errors, bad router
- error_tolerance : 0.03
+### stateful ###
+
+test_jumbo:
+ multiplier : 17
+ cores : 1
+ bw_per_core : 543.232
+
+
+test_routing_imix:
+ multiplier : 10
+ cores : 1
+ bw_per_core : 34.128
+
+
+test_routing_imix_64:
+ multiplier : 430
+ cores : 1
+ bw_per_core : 5.893
+
+
+test_static_routing_imix: &test_static_routing_imix
+ stat_route_dict : *stat_route_dict
+ multiplier : 8
+ cores : 1
+ bw_per_core : 34.339
+
+test_static_routing_imix_asymmetric: *test_static_routing_imix
+
+
+test_ipv6_simple:
+ multiplier : 9
+ cores : 2
+ bw_per_core : 19.064
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 12000
+ cores : 1
+ nat_opened : 40000
+ allow_timeout_dev : True
+ bw_per_core : 44.445
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_learning:
+ << : *test_nat_simple
+ nat_opened : 40000
+
+
+test_nbar_simple:
+ multiplier : 7.5
+ cores : 2
+ bw_per_core : 17.174
+ nbar_classification:
+ rtp : 32.57
+ http : 30.25
+ oracle_sqlnet : 11.23
+ exchange : 10.80
+ citrix : 5.62
+ rtsp : 2.84
+ dns : 1.95
+ smtp : 0.57
+ pop3 : 0.36
+ ssl : 0.17
+ sctp : 0.13
+ sip : 0.09
+ unknown : 3.41
+
+
+test_rx_check_http: &rx_http
+ multiplier : 15000
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 39.560
test_rx_check_http_ipv6:
- multiplier : 15000
- cores : 1
- rx_sample_rate : 16
- # allow 0.03% errors, bad router
- error_tolerance : 0.03
+ << : *rx_http
+ bw_per_core : 49.237
test_rx_check_http_negative:
- multiplier : 13000
- cores : 1
- rx_sample_rate : 16
- # allow 0.03% errors, bad router
- error_tolerance : 0.03
- stat_route_dict :
- clients_start : 16.0.0.1
- servers_start : 48.0.0.1
- dual_port_mask : 1.0.0.0
- client_destination_mask : 255.0.0.0
- server_destination_mask : 255.0.0.0
- nat_dict :
- clients_net_start : 16.0.0.0
- client_acl_wildcard_mask : 0.0.0.255
- dual_port_mask : 1.0.0.0
- pool_start : 200.0.0.0
- pool_netmask : 255.255.255.0
+ << : *rx_http
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 10
+ cores : 3
+ rx_sample_rate : 16
+ bw_per_core : 16.082
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 19.198
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
-test_jumbo:
- multiplier : 17
- cores : 1
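
The reworked benchmark.yaml above leans on YAML anchors (&name), aliases (*name) and merge keys (<<:) to share the stat_route_dict and nat_dict templates between tests instead of repeating them. A minimal sketch of how these constructs resolve at parse time, assuming PyYAML is available; the document string below is a toy example, not the real benchmark file:

    # Toy example: how anchors, aliases and merge keys resolve with PyYAML.
    import yaml

    doc = """
    stat_route_dict: &stat_route_dict
        clients_start : 16.0.0.1
        servers_start : 48.0.0.1

    test_nat_simple_mode1: &test_nat_simple
        stat_route_dict : *stat_route_dict   # alias: reuse the shared block
        multiplier      : 12000
        cores           : 1

    test_nat_simple_mode2: *test_nat_simple  # alias: identical to mode1

    test_nat_learning:
        << : *test_nat_simple                # merge key: copy mode1's keys...
        nat_opened : 40000                   # ...then add or override locally
    """

    data = yaml.safe_load(doc)
    assert data['test_nat_simple_mode2'] == data['test_nat_simple_mode1']
    assert data['test_nat_learning']['multiplier'] == 12000   # inherited
    assert data['test_nat_learning']['nat_opened'] == 40000   # added locally
    print(data['test_nat_learning'])
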
diff --git a/scripts/automation/regression/stateful_tests/trex_general_test.py b/scripts/automation/regression/stateful_tests/trex_general_test.py
index 82b1d9d1..8ff4fdaf 100755
--- a/scripts/automation/regression/stateful_tests/trex_general_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_general_test.py
@@ -139,41 +139,42 @@ class CTRexGeneral_Test(unittest.TestCase):
if res[name] != float(val):
self.fail('TRex results[%s]==%f and not as expected %f ' % (name, res[name], val))
- def check_CPU_benchmark (self, trex_res, err = 25, minimal_cpu = 30, maximal_cpu = 85):
- #cpu_util = float(trex_res.get_last_value("trex-global.data.m_cpu_util"))
- cpu_util = sum(trex_res.get_value_list("trex-global.data.m_cpu_util")[-4:-1]) / 3.0 # mean of 3 values before last
-
- if '1G' in self.modes:
- minimal_cpu /= 10.0
-
- if not self.is_virt_nics:
- if cpu_util > maximal_cpu:
- self.fail("CPU is too high (%s%%), probably queue full." % cpu_util )
- #if cpu_util < minimal_cpu:
- # self.fail("CPU is too low (%s%%), can't verify performance in such low CPU%%." % cpu_util )
-
- test_norm_cpu = sum(trex_res.get_value_list("trex-global.data.m_bw_per_core")[-4:-1]) / 3.0
-
- print("TRex CPU utilization: %g%%, norm_cpu is : %g Gb/core" % (round(cpu_util, 2), round(test_norm_cpu)))
-
- expected_norm_cpu = self.get_benchmark_param('bw_per_core')
- if not expected_norm_cpu:
- expected_norm_cpu = 1
-
- calc_error_precent = abs(100.0 * test_norm_cpu / expected_norm_cpu - 100)
- print('Err percent: %s' % calc_error_precent)
- #if calc_error_precent > err and cpu_util > 10:
- # self.fail('Excepted bw_per_core ratio: %s, got: %g' % (expected_norm_cpu, round(test_norm_cpu)))
-
- # report benchmarks
- if self.GAManager:
- try:
- setup_test = '%s.%s' % (CTRexScenario.setup_name, self.get_name())
- self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core', value = int(test_norm_cpu))
- self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core_exp', value = int(expected_norm_cpu))
- self.GAManager.emptyAndReportQ()
- except Exception as e:
- print('Sending GA failed: %s' % e)
+ def check_CPU_benchmark (self, trex_res, err = 25, minimal_cpu = 10, maximal_cpu = 85):
+ cpu_util = trex_res.get_avg_steady_state_value('trex-global.data.m_cpu_util_raw')
+ trex_tx_bps = trex_res.get_avg_steady_state_value('trex-global.data.m_tx_bps')
+ expected_norm_cpu = self.get_benchmark_param('bw_per_core')
+ cores = self.get_benchmark_param('cores')
+ ports_count = trex_res.get_ports_count()
+ test_norm_cpu = trex_tx_bps / (cpu_util * ports_count * cores * 2.5e6)
+
+ if '1G' in self.modes:
+ minimal_cpu /= 10.0
+
+ if not self.is_virt_nics:
+ if cpu_util > maximal_cpu:
+ self.fail("CPU is too high (%s%%), probably queue full." % cpu_util )
+ #if cpu_util < minimal_cpu:
+ # self.fail("CPU is too low (%s%%), can't verify performance in such low CPU%%." % cpu_util )
+
+ print("TRex CPU utilization: %g%%, norm_cpu is : %g Gb/core" % (round(cpu_util, 2), round(test_norm_cpu, 2)))
+
+ if not expected_norm_cpu:
+ expected_norm_cpu = 1
+
+ calc_error_precent = abs(100.0 * test_norm_cpu / expected_norm_cpu - 100)
+ print('Err percent: %s' % calc_error_precent)
+ #if calc_error_precent > err and cpu_util > 10:
+ # self.fail('Excepted bw_per_core ratio: %s, got: %g' % (expected_norm_cpu, round(test_norm_cpu)))
+
+ # report benchmarks
+ if self.GAManager:
+ try:
+ setup_test = '%s.%s' % (CTRexScenario.setup_name, self.get_name())
+ self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core', value = int(test_norm_cpu))
+ self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core_exp', value = int(expected_norm_cpu))
+ self.GAManager.emptyAndReportQ()
+ except Exception as e:
+ print('Sending GA failed: %s' % e)
def check_results_gt (self, res, name, val):
if res is None:
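
The patched check_CPU_benchmark() above now derives bw_per_core from steady-state averages rather than the last few samples, using test_norm_cpu = trex_tx_bps / (cpu_util * ports_count * cores * 2.5e6). A small sketch of that normalization; the input values are invented purely for illustration:

    # Sketch of the normalization used by the new check_CPU_benchmark().
    # The example numbers below are made up for illustration only.
    def bw_per_core(trex_tx_bps, cpu_util, ports_count, cores):
        # Gb/s generated per core, given raw CPU utilization in percent
        return trex_tx_bps / (cpu_util * ports_count * cores * 2.5e6)

    # e.g. 8 Gb/s total TX, 45% raw CPU, 2 ports, 1 core -> ~35.6 Gb/core
    print('%.3f Gb/core' % bw_per_core(8e9, 45.0, 2, 1))
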
diff --git a/scripts/automation/regression/stateful_tests/trex_rx_test.py b/scripts/automation/regression/stateful_tests/trex_rx_test.py
index 40528d16..c08ad1ea 100755
--- a/scripts/automation/regression/stateful_tests/trex_rx_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_rx_test.py
@@ -250,7 +250,7 @@ class CTRexRx_Test(CTRexGeneral_Test):
print('Run until finish, expect errors')
old_errors = copy.deepcopy(self.fail_reasons)
- nat_dict = self.get_benchmark_param('nat_dict', test_name = 'test_nat_simple')
+ nat_dict = self.get_benchmark_param('nat_dict', test_name = 'test_nat_simple_mode1')
nat_obj = CNatConfig(nat_dict)
self.router.config_nat(nat_obj)
self.router.config_zbf()
diff --git a/scripts/automation/regression/trex.py b/scripts/automation/regression/trex.py
index 44f2faba..7286b166 100644
--- a/scripts/automation/regression/trex.py
+++ b/scripts/automation/regression/trex.py
@@ -40,6 +40,7 @@ class CTRexScenario:
no_daemon = False
router_image = None
debug_image = False
+ test = None
class CTRexRunner:
"""This is an instance for generating a CTRexRunner"""
diff --git a/scripts/automation/regression/trex_unit_test.py b/scripts/automation/regression/trex_unit_test.py
index 915cd682..4ff21b80 100755
--- a/scripts/automation/regression/trex_unit_test.py
+++ b/scripts/automation/regression/trex_unit_test.py
@@ -30,7 +30,7 @@ import outer_packages
import nose
from nose.plugins import Plugin
-import logging
+from nose.selector import Selector
import CustomLogger
import misc_methods
from rednose import RedNose
@@ -43,11 +43,30 @@ from trex_stl_lib.utils.GAObjClass import GAmanager
import trex
import socket
from pprint import pprint
-import subprocess
-import re
import time
from distutils.dir_util import mkpath
+# nose overrides
+
+# option to select wanted test by name without file, class etc.
+def new_Selector_wantMethod(self, method, orig_Selector_wantMethod = Selector.wantMethod):
+ result = orig_Selector_wantMethod(self, method)
+ if not CTRexScenario.test:
+ return result
+ else:
+ return CTRexScenario.test in getattr(method, '__name__', '')
+
+Selector.wantMethod = new_Selector_wantMethod
+
+def new_Selector_wantFunction(self, function, orig_Selector_wantFunction = Selector.wantFunction):
+ result = orig_Selector_wantFunction(self, function)
+ if not CTRexScenario.test:
+ return result
+ else:
+ return CTRexScenario.test in getattr(function, '__name__', '')
+
+Selector.wantFunction = new_Selector_wantFunction
+
# override nose's strange representation of setUpClass errors
def __suite_repr__(self):
if hasattr(self.context, '__module__'): # inside class, setUpClass etc.
@@ -59,6 +78,8 @@ def __suite_repr__(self):
nose.suite.ContextSuite.__repr__ = __suite_repr__
nose.suite.ContextSuite.__str__ = __suite_repr__
+# /nose overrides
+
def check_trex_path(trex_path):
if os.path.isfile('%s/trex_daemon_server' % trex_path):
return os.path.abspath(trex_path)
@@ -78,7 +99,7 @@ def get_trex_path():
def address_to_ip(address):
- for i in range(10):
+ for i in range(5):
try:
return socket.gethostbyname(address)
except:
@@ -149,6 +170,8 @@ class CTRexTestConfiguringPlugin(Plugin):
parser.add_option('--trex-args', action='store', default = '',
dest="trex_args",
help="Additional TRex arguments (--no-watchdog etc.).")
+ parser.add_option('-t', '--test', action='store', default = '', dest='test',
+ help='Test name to run (without file, class etc.)')
def configure(self, options, conf):
@@ -160,6 +183,7 @@ class CTRexTestConfiguringPlugin(Plugin):
self.json_verbose = options.json_verbose
self.telnet_verbose = options.telnet_verbose
self.no_daemon = options.no_daemon
+ CTRexScenario.test = options.test
if self.collect_only or self.functional:
return
if CTRexScenario.setup_dir and options.config_path:
@@ -181,7 +205,7 @@ class CTRexTestConfiguringPlugin(Plugin):
self.loggerPath = options.log_path
# initialize CTRexScenario global testing class, to be used by all tests
CTRexScenario.configuration = self.configuration
- CTRexScenario.no_daemon = self.no_daemon
+ CTRexScenario.no_daemon = options.no_daemon
CTRexScenario.benchmark = self.benchmark
CTRexScenario.modes = set(self.modes)
CTRexScenario.server_logs = self.server_logs
@@ -347,15 +371,14 @@ if __name__ == "__main__":
nose_argv += sys_args
- config_plugin = CTRexTestConfiguringPlugin()
- red_nose = RedNose()
+ addplugins = [RedNose(), CTRexTestConfiguringPlugin()]
result = True
try:
if len(CTRexScenario.test_types['functional_tests']):
additional_args = ['--func'] + CTRexScenario.test_types['functional_tests']
if xml_arg:
additional_args += ['--with-xunit', xml_arg.replace('.xml', '_functional.xml')]
- result = nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin])
+ result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins)
if len(CTRexScenario.test_types['stateful_tests']):
additional_args = ['--stf']
if '--warmup' in sys.argv:
@@ -365,14 +388,14 @@ if __name__ == "__main__":
additional_args.extend(['-a', '!client_package'])
if xml_arg:
additional_args += ['--with-xunit', xml_arg.replace('.xml', '_stateful.xml')]
- result = nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin]) and result
+ result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins) and result
if len(CTRexScenario.test_types['stateless_tests']):
additional_args = ['--stl', 'stateless_tests/stl_general_test.py:STLBasic_Test.test_connectivity'] + CTRexScenario.test_types['stateless_tests']
if not test_client_package:
additional_args.extend(['-a', '!client_package'])
if xml_arg:
additional_args += ['--with-xunit', xml_arg.replace('.xml', '_stateless.xml')]
- result = nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin]) and result
+ result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins) and result
#except Exception as e:
# result = False
# print(e)
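
The Selector overrides above, together with the new -t/--test option, let the regression runner collect tests by bare method name without spelling out module and class. A stand-alone sketch of the same idea; WANTED_TEST is a hypothetical stand-in for CTRexScenario.test:

    # Stand-alone sketch of the Selector monkey-patch used above.
    # WANTED_TEST stands in for CTRexScenario.test and is hypothetical.
    from nose.selector import Selector

    WANTED_TEST = 'test_nat_simple'   # e.g. value taken from a '-t' style option

    def new_wantMethod(self, method, orig=Selector.wantMethod):
        if not WANTED_TEST:
            return orig(self, method)                 # default nose behaviour
        return WANTED_TEST in getattr(method, '__name__', '')

    Selector.wantMethod = new_wantMethod
    # nose.run(...) now collects only methods whose name contains WANTED_TEST.
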
diff --git a/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py
index a13fe31f..c8827afe 100755
--- a/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py
@@ -535,7 +535,7 @@ class CTRexClient(object):
finally:
self.prompt_verbose_data()
- def sample_until_condition (self, condition_func, time_between_samples = 5):
+ def sample_until_condition (self, condition_func, time_between_samples = 1):
"""
Automatically sets ongoing sampling of TRex data, with sampling rate described by time_between_samples.
@@ -549,7 +549,7 @@ class CTRexClient(object):
time_between_samples : int
determines the time between each sample of the server
- default value : **5**
+ default value : **1**
:return:
the first result object (see :class:`CTRexResult` for further details) of the TRex run on which the condition has been met.
@@ -579,7 +579,7 @@ class CTRexClient(object):
# this could come from provided method 'condition_func'
raise
- def sample_to_run_finish (self, time_between_samples = 5):
+ def sample_to_run_finish (self, time_between_samples = 1):
"""
Automatically sets automatically sampling of TRex data with sampling rate described by time_between_samples until TRex run finished.
@@ -587,7 +587,7 @@ class CTRexClient(object):
time_between_samples : int
determines the time between each sample of the server
- default value : **5**
+ default value : **1**
:return:
the latest result object (see :class:`CTRexResult` for further details) with sampled data.
@@ -609,7 +609,7 @@ class CTRexClient(object):
results = self.get_result_obj()
return results
- def sample_x_seconds (self, sample_time, time_between_samples = 5):
+ def sample_x_seconds (self, sample_time, time_between_samples = 1):
"""
Automatically sets ongoing sampling of TRex data for sample_time seconds, with sampling rate described by time_between_samples.
Does not stop the TRex afterwards!
@@ -623,7 +623,7 @@ class CTRexClient(object):
time_between_samples : int
determines the time between each sample of the server
- default value : **5**
+ default value : **1**
:return:
the first result object (see :class:`CTRexResult` for further details) of the TRex run after given sample_time.
@@ -1271,7 +1271,7 @@ class CTRexResult(object):
.. tip:: | Use '.' to enter one level deeper in dictionary hierarchy.
| Use '[i]' to access the i'th indexed object of an array.
- tree_path_to_key : regex
+ regex : regex
apply a regex to filter results out from a multiple results set.
Filter applies only on keys of dictionary type.
@@ -1299,7 +1299,7 @@ class CTRexResult(object):
.. tip:: | Use '.' to enter one level deeper in dictionary hierarchy.
| Use '[i]' to access the i'th indexed object of an array.
- tree_path_to_key : regex
+ regex : regex
apply a regex to filter results out from a multiple results set.
Filter applies only on keys of dictionary type.
@@ -1352,7 +1352,7 @@ class CTRexResult(object):
if not len(self._history):
return -1
- return len(self.__get_value_by_path(self._history[-1], 'trex-global.data', 'opackets-\d+'))
+ return len(self.get_last_value('trex-global.data', 'opackets-\d+'))
def update_result_data (self, latest_dump):
@@ -1383,6 +1383,7 @@ class CTRexResult(object):
# check for up to 2% change between expected and actual
if (self._current_tx_rate['m_tx_bps'] > 0.98 * self._expected_tx_rate['m_tx_expected_bps']):
self._done_warmup = True
+ latest_dump['warmup_barrier'] = True
# handle latency data
if self.latency_checked:
@@ -1427,12 +1428,12 @@ class CTRexResult(object):
for i, p in re.findall(r'(\d+)|([\w|-]+)', tree_path):
dct = dct[p or int(i)]
if regex is not None and isinstance(dct, dict):
- res = {}
- for key,val in dct.items():
- match = re.match(regex, key)
- if match:
- res[key]=val
- return res
+ res = {}
+ for key,val in dct.items():
+ match = re.match(regex, key)
+ if match:
+ res[key]=val
+ return res
else:
return dct
except (KeyError, TypeError):
@@ -1480,24 +1481,61 @@ class CTRexResult(object):
@staticmethod
def __get_filtered_max_latency (src_dict, filtered_latency_amount = 0.001):
result = {}
- for port, data in src_dict.items():
- if not port.startswith('port-'):
- continue
- max_port = 'max-%s' % port[5:]
- res = data['hist']
- if not len(res['histogram']):
- result[max_port] = 0
- continue
- result[max_port] = 5 # if sum below will not get to filtered amount, use this value
- sum_high = 0.0
- for elem in reversed(res['histogram']):
- sum_high += elem['val']
- if sum_high >= filtered_latency_amount * res['cnt']:
- result[max_port] = elem['key'] + int('5' + repr(elem['key'])[2:])
- break
+ if src_dict:
+ for port, data in src_dict.items():
+ if not port.startswith('port-'):
+ continue
+ max_port = 'max-%s' % port[5:]
+ res = data['hist']
+ if not len(res['histogram']):
+ result[max_port] = 0
+ continue
+ result[max_port] = 5 # if sum below will not get to filtered amount, use this value
+ sum_high = 0.0
+ for elem in reversed(res['histogram']):
+ sum_high += elem['val']
+ if sum_high >= filtered_latency_amount * res['cnt']:
+ result[max_port] = elem['key'] + int('5' + repr(elem['key'])[2:])
+ break
return result
+ # history iterator after warmup period
+ def _get_steady_state_history_iterator(self):
+ if not self.is_done_warmup():
+ raise Exception('Warm-up period not finished')
+ for index, res in enumerate(self._history):
+ if 'warmup_barrier' in res:
+ for steady_state_index in range(index, max(index, len(self._history) - 1)):
+ yield self._history[steady_state_index]
+ return
+ for index in range(len(self._history) - 1):
+ yield self._history[index]
+
+
+ def get_avg_steady_state_value(self, tree_path_to_key):
+ '''
+ Gets average value after warmup period.
+ For example: <result object>.get_avg_steady_state_value('trex-global.data.m_tx_bps')
+ Usually more accurate than latest history value.
+
+ :parameters:
+ tree_path_to_key : str
+ defines a path to desired data.
+
+ :return:
+ average value at steady state
+
+ :raises:
+ Exception in case steady state period was not reached or tree_path_to_key was not found in result.
+ '''
+ values_arr = [self.__get_value_by_path(res, tree_path_to_key) for res in self._get_steady_state_history_iterator()]
+ values_arr = list(filter(lambda x: x is not None, values_arr))
+ if not values_arr:
+ raise Exception('All the keys are None, probably wrong tree_path_to_key: %s' % tree_path_to_key)
+ return sum(values_arr) / float(len(values_arr))
+
+
if __name__ == "__main__":
c = CTRexClient('127.0.0.1')
print('restarting daemon')
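
The new CTRexResult.get_avg_steady_state_value() averages a counter over the samples collected after the warm-up barrier, which is what the reworked check_CPU_benchmark consumes for m_cpu_util_raw and m_tx_bps. A hedged usage sketch; the server address, profile and run parameters are placeholders:

    # Usage sketch for the steady-state averaging API added above.
    # Address, profile and run parameters are placeholder values.
    from trex_stf_lib.trex_client import CTRexClient

    client = CTRexClient('127.0.0.1')
    client.start_trex(f='cap2/dns.yaml', d=60, m=10, c=1)
    result = client.sample_to_run_finish(time_between_samples=1)

    # average TX rate over the post-warm-up samples, not just the last one
    avg_tx_bps = result.get_avg_steady_state_value('trex-global.data.m_tx_bps')
    print('steady-state TX: %.2f Gb/s' % (avg_tx_bps / 1e9))
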
diff --git a/scripts/master_daemon.py b/scripts/master_daemon.py
index aa49f207..a44f55a8 100755
--- a/scripts/master_daemon.py
+++ b/scripts/master_daemon.py
@@ -181,7 +181,7 @@ parser.add_argument('--type', '--daemon-type', '--daemon_type', choices = daemon
action = 'store', help = 'Specify daemon type to start/stop etc.\nDefault is master_daemon.')
args = parser.parse_args()
-args.trex_dir = os.path.normpath(args.trex_dir)
+args.trex_dir = os.path.abspath(args.trex_dir)
args.daemon_type = args.daemon_type or 'master_daemon'
stl_rpc_proxy_dir = os.path.join(args.trex_dir, 'automation', 'trex_control_plane', 'stl', 'examples')
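
The last hunk swaps os.path.normpath for os.path.abspath on --trex-dir, so a relative path is anchored to the daemon's working directory instead of being left relative. A quick illustration on a POSIX system; the paths are arbitrary:

    # normpath only cleans the string; abspath also anchors it to the CWD.
    import os

    os.chdir('/tmp')                       # arbitrary working directory
    rel = './trex/../trex_dir'
    print(os.path.normpath(rel))           # 'trex_dir'  (still relative)
    print(os.path.abspath(rel))            # '/tmp/trex_dir'
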