diff options
50 files changed, 3267 insertions, 1326 deletions
diff --git a/scripts/automation/regression/CPlatform.py b/scripts/automation/regression/CPlatform.py index 314aae63..de1c22ce 100755 --- a/scripts/automation/regression/CPlatform.py +++ b/scripts/automation/regression/CPlatform.py @@ -73,7 +73,8 @@ class CPlatform(object): if i < 4: continue raise Exception('Could not load clean config, response: %s' % res) - return + if i > 0: # were errors, better to wait + time.sleep(2) def config_pbr (self, mode = 'config'): idx = 1 @@ -174,7 +175,8 @@ class CPlatform(object): # finish handling pre-config cache pre_commit_set = list(pre_commit_set) -# pre_commit_set.append('exit') + if len(pre_commit_set): + pre_commit_set.append('exit') pre_commit_cache.add('CONF', pre_commit_set ) # deploy the configs (order is important!) self.cmd_link.run_command( [pre_commit_cache, cache] ) @@ -227,7 +229,7 @@ class CPlatform(object): # define the relevant VRF name pre_commit_set.add('{mode}ip vrf {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) ) - + # assign VRF to interfaces, config interfaces with relevant route-map client_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) ) server_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) ) @@ -290,7 +292,8 @@ class CPlatform(object): # finish handling pre-config cache pre_commit_set = list(pre_commit_set) -# pre_commit_set.append('exit') + if len(pre_commit_set): + pre_commit_set.append('exit') pre_commit_cache.add('CONF', pre_commit_set ) # assign generated config list to cache cache.add('CONF', conf_t_command_set) @@ -618,8 +621,8 @@ class CPlatform(object): """ pre_commit_cache = CCommandCache() - pre_commit_cache.add('EXEC', ['clear counters','\r'] ) - self.cmd_link.run_single_command( pre_commit_cache ) + pre_commit_cache.add('EXEC', ['clear counters', '\r'] ) + self.cmd_link.run_single_command( pre_commit_cache , read_until = ['#', 
'\[confirm\]']) def clear_nbar_stats(self): """ clear_nbar_stats(self) -> None @@ -725,7 +728,7 @@ class CPlatform(object): progress_thread = CProgressDisp.ProgressThread(notifyMessage = "Copying image via tftp, this may take a while...\n") progress_thread.start() - response = self.cmd_link.run_single_command(cache, timeout = 900, read_until = ['\?', '\#']) + response = self.cmd_link.run_single_command(cache, timeout = 900, read_until = ['\?', '#']) print("RESPONSE:") print(response) progress_thread.join() diff --git a/scripts/automation/regression/aggregate_results.py b/scripts/automation/regression/aggregate_results.py index 35ec80d0..eb0632ec 100755 --- a/scripts/automation/regression/aggregate_results.py +++ b/scripts/automation/regression/aggregate_results.py @@ -457,7 +457,7 @@ if __name__ == '__main__': if len(error_tests): html_output += '\n<button onclick=tgl_cat("cat_tglr_{error}")>{error}</button>'.format(error = ERROR_CATEGORY) # Setups buttons - for category in setups.keys(): + for category in sorted(setups.keys()): category_arr.append(category) html_output += '\n<button onclick=tgl_cat("cat_tglr_%s")>%s</button>' % (category_arr[-1], category) # Functional buttons diff --git a/scripts/automation/regression/functional_tests/filters_test.py b/scripts/automation/regression/functional_tests/filters_test.py new file mode 100644 index 00000000..fbb8a126 --- /dev/null +++ b/scripts/automation/regression/functional_tests/filters_test.py @@ -0,0 +1,100 @@ +#!/router/bin/python + +import functional_general_test +from trex_stl_lib.utils import filters +from nose.tools import assert_equal +from nose.tools import assert_not_equal +from nose.tools import assert_raises +from nose.tools import assert_true, assert_false +from nose.tools import raises + + +class ToggleFilter_Test(functional_general_test.CGeneralFunctional_Test): + + def setUp(self): + self.list_db = [1, 2, 3, 4, 5] + self.set_db = {1, 2, 3, 4, 5} + self.tuple_db = (1, 2, 3, 4, 5) + self.dict_db = 
{str(x): x**2 + for x in range(5)} + + def test_init_with_dict(self): + toggle_filter = filters.ToggleFilter(self.dict_db) + assert_equal(toggle_filter._toggle_db, set(self.dict_db.keys())) + assert_equal(toggle_filter.filter_items(), self.dict_db) + + + def test_init_with_list(self): + toggle_filter = filters.ToggleFilter(self.list_db) + assert_equal(toggle_filter._toggle_db, set(self.list_db)) + assert_equal(toggle_filter.filter_items(), self.list_db) + + def test_init_with_set(self): + toggle_filter = filters.ToggleFilter(self.set_db) + assert_equal(toggle_filter._toggle_db, self.set_db) + assert_equal(toggle_filter.filter_items(), self.set_db) + + def test_init_with_tuple(self): + toggle_filter = filters.ToggleFilter(self.tuple_db) + assert_equal(toggle_filter._toggle_db, set(self.tuple_db)) + assert_equal(toggle_filter.filter_items(), self.tuple_db) + + @raises(TypeError) + def test_init_with_non_iterable(self): + toggle_filter = filters.ToggleFilter(15) + + def test_dict_toggeling(self): + toggle_filter = filters.ToggleFilter(self.dict_db) + assert_false(toggle_filter.toggle_item("3")) + assert_equal(toggle_filter._toggle_db, {'0', '1', '2', '4'}) + assert_true(toggle_filter.toggle_item("3")) + assert_equal(toggle_filter._toggle_db, {'0', '1', '2', '3', '4'}) + assert_false(toggle_filter.toggle_item("2")) + assert_false(toggle_filter.toggle_item("4")) + self.dict_db.update({'5': 25, '6': 36}) + assert_true(toggle_filter.toggle_item("6")) + + assert_equal(toggle_filter.filter_items(), {'0': 0, '1': 1, '3': 9, '6': 36}) + + del self.dict_db['1'] + assert_equal(toggle_filter.filter_items(), {'0': 0, '3': 9, '6': 36}) + + def test_dict_toggeling_negative(self): + toggle_filter = filters.ToggleFilter(self.dict_db) + assert_raises(KeyError, toggle_filter.toggle_item, "100") + + def test_list_toggeling(self): + toggle_filter = filters.ToggleFilter(self.list_db) + assert_false(toggle_filter.toggle_item(3)) + assert_equal(toggle_filter._toggle_db, {1, 2, 4, 5}) + 
assert_true(toggle_filter.toggle_item(3)) + assert_equal(toggle_filter._toggle_db, {1, 2, 3, 4, 5}) + assert_false(toggle_filter.toggle_item(2)) + assert_false(toggle_filter.toggle_item(4)) + self.list_db.extend([6 ,7]) + assert_true(toggle_filter.toggle_item(6)) + + assert_equal(toggle_filter.filter_items(), [1, 3 , 5, 6]) + + self.list_db.remove(1) + assert_equal(toggle_filter.filter_items(), [3, 5, 6]) + + def test_list_toggling_negative(self): + toggle_filter = filters.ToggleFilter(self.list_db) + assert_raises(KeyError, toggle_filter.toggle_item, 10) + + def test_toggle_multiple_items(self): + toggle_filter = filters.ToggleFilter(self.list_db) + assert_false(toggle_filter.toggle_items(1, 3, 5)) + assert_equal(toggle_filter._toggle_db, {2, 4}) + assert_true(toggle_filter.toggle_items(1, 5)) + assert_equal(toggle_filter._toggle_db, {1, 2, 4, 5}) + + def test_dont_show_after_init(self): + toggle_filter = filters.ToggleFilter(self.list_db, show_by_default = False) + assert_equal(toggle_filter._toggle_db, set()) + assert_equal(toggle_filter.filter_items(), []) + + + def tearDown(self): + pass diff --git a/scripts/automation/regression/platform_cmd_link.py b/scripts/automation/regression/platform_cmd_link.py index d2143a5d..d034fac3 100755 --- a/scripts/automation/regression/platform_cmd_link.py +++ b/scripts/automation/regression/platform_cmd_link.py @@ -95,11 +95,9 @@ class CCommandLink(object): def __transmit (self, cmd_list, **kwargs): self.history.extend(cmd_list) - if not self.silent_mode: - print('\n'.join(cmd_list)) # prompting the pushed platform commands if not self.virtual_mode: # transmit the command to platform. 
- return self.telnet_con.write_ios_cmd(cmd_list, **kwargs) + return self.telnet_con.write_ios_cmd(cmd_list, verbose = not self.silent_mode, **kwargs) def run_command (self, cmd_list, **kwargs): response = '' @@ -420,7 +418,7 @@ class CIosTelnet(telnetlib.Telnet): except Exception as inst: raise - def write_ios_cmd (self, cmd_list, result_from = 0, timeout = 3, **kwargs): + def write_ios_cmd (self, cmd_list, result_from = 0, timeout = 60, **kwargs): assert (isinstance (cmd_list, list) == True) self.read_until(self.pr, timeout = 1) @@ -431,19 +429,22 @@ class CIosTelnet(telnetlib.Telnet): wf = self.pr for idx, cmd in enumerate(cmd_list): + start_time = time.time() self.write(cmd+'\r\n') - if idx < result_from: - # don't care for return string - if type(wf) is list: - self.expect(wf, timeout)[2] - else: - self.read_until(wf, timeout) + if kwargs.get('verbose'): + print('-->\n%s' % cmd) + if type(wf) is list: + output = self.expect(wf, timeout)[2] else: - # care for return string - if type(wf) is list: - res += self.expect(wf, timeout)[2] - else: - res += self.read_until(wf, timeout) + output = self.read_until(wf, timeout) + if idx >= result_from: + res += output + if kwargs.get('verbose'): + print('<-- (%ss)\n%s' % (round(time.time() - start_time, 2), output)) + if time.time() - start_time > timeout - 1: + raise Exception('Timeout while performing telnet command: %s' % cmd) + if 'Invalid' in res: + print('Warning: telnet command probably failed.\nCommand: %s\nResponse: %s' % (cmd_list, res)) # return res.split('\r\n') return res # return the received response as a string, each line is seperated by '\r\n'. 
diff --git a/scripts/automation/regression/setups/kiwi02/benchmark.yaml b/scripts/automation/regression/setups/kiwi02/benchmark.yaml index 60febc8f..343d4120 100644 --- a/scripts/automation/regression/setups/kiwi02/benchmark.yaml +++ b/scripts/automation/regression/setups/kiwi02/benchmark.yaml @@ -1,153 +1,246 @@ -################################################################ -#### T-Rex benchmark configuration file #### -################################################################ - -test_nbar_simple : - multiplier : 20 - cores : 2 - exp_gbps : 4.5 - cpu_to_core_ratio : 37270000 - cpu2core_custom_dev: YES - cpu2core_dev : 0.07 - exp_max_latency : 1000 - - nbar_classification: - http : 30.41 - rtp_audio : 21.22 - rtp : 11.4 - oracle_sqlnet : 11.3 - exchange : 10.95 - citrix : 5.65 - rtsp : 2.67 - dns : 1.95 - smtp : 0.57 - pop3 : 0.36 - sctp : 0.09 - sip : 0.09 - ssl : 0.06 - unknown : 3.2 - -test_rx_check : - multiplier : 25 - cores : 4 - rx_sample_rate : 128 - exp_gbps : 0.5 - cpu_to_core_ratio : 37270000 - exp_bw : 1 - exp_latency : 1 - -test_nat_simple : &test_nat_simple - stat_route_dict : +############################################################### +#### TRex benchmark configuration file #### +############################################################### + +#### common templates ### + +stat_route_dict: &stat_route_dict clients_start : 16.0.0.1 servers_start : 48.0.0.1 dual_port_mask : 1.0.0.0 client_destination_mask : 255.0.0.0 server_destination_mask : 255.0.0.0 - nat_dict : + +nat_dict: &nat_dict clients_net_start : 16.0.0.0 client_acl_wildcard_mask : 0.0.0.255 dual_port_mask : 1.0.0.0 pool_start : 200.0.0.0 pool_netmask : 255.255.255.0 - multiplier : 10000 - cpu_to_core_ratio : 37270000 - cores : 1 - exp_bw : 1 - exp_latency : 1 - allow_timeout_dev : YES - -test_nat_simple_mode1 : *test_nat_simple -test_nat_simple_mode2 : *test_nat_simple - -test_nat_learning : - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - 
dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 10000 - cores : 1 - nat_opened : 100000 - cpu_to_core_ratio : 37270000 - exp_bw : 1 - exp_latency : 1 - allow_timeout_dev : YES - -test_routing_imix_64 : - multiplier : 2500 - cores : 4 - cpu_to_core_ratio : 8900 - exp_latency : 1 - -test_routing_imix : - multiplier : 32 - cores : 2 - cpu_to_core_ratio : 8900 - exp_latency : 1 - -test_static_routing_imix : - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 32 - cores : 2 - cpu_to_core_ratio : 3766666 - exp_latency : 1 -test_static_routing_imix_asymmetric: - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 16 - cores : 1 - cpu_to_core_ratio : 3766666 - exp_latency : 1 - -test_ipv6_simple : - multiplier : 32 - cores : 4 - cpu_to_core_ratio : 30070000 - cpu2core_custom_dev: YES - cpu2core_dev : 0.07 - - -test_rx_check_sfr: - multiplier : 25 - cores : 4 - rx_sample_rate : 32 - error_tolerance : 0.01 - -test_rx_check_http: - multiplier : 40000 - cores : 2 - rx_sample_rate : 32 - error_tolerance : 0.01 -test_rx_check_sfr_ipv6: - multiplier : 25 - cores : 4 - rx_sample_rate : 32 - error_tolerance : 0.01 +### stateful ### + +test_jumbo: + multiplier : 55 + cores : 1 + bw_per_core : 647.305 + + +test_routing_imix: + multiplier : 32 + cores : 2 + bw_per_core : 39.131 + + +test_routing_imix_64: + multiplier : 2500 + cores : 4 + bw_per_core : 7.427 + + +test_static_routing_imix: + stat_route_dict : *stat_route_dict + multiplier : 32 + cores : 2 + bw_per_core : 39.039 + + +test_static_routing_imix_asymmetric: + stat_route_dict : *stat_route_dict + multiplier : 16 + cores : 1 + bw_per_core : 38.796 + + +test_ipv6_simple: + multiplier : 32 
+ cores : 4 + bw_per_core : 19.283 + + +test_nat_simple_mode1: &test_nat_simple + stat_route_dict : *stat_route_dict + nat_dict : *nat_dict + multiplier : 10000 + cores : 1 + allow_timeout_dev : True + bw_per_core : 45.304 + +test_nat_simple_mode2: *test_nat_simple + +test_nat_learning: + << : *test_nat_simple + nat_opened : 100000 + + +test_nbar_simple: + multiplier : 20 + cores : 2 + bw_per_core : 18.243 + nbar_classification: + http : 30.41 + rtp_audio : 21.22 + rtp : 11.4 + oracle_sqlnet : 11.3 + exchange : 10.95 + citrix : 5.65 + rtsp : 2.67 + dns : 1.95 + smtp : 0.57 + pop3 : 0.36 + sctp : 0.09 + sip : 0.09 + ssl : 0.06 + unknown : 3.2 + + +test_rx_check_http: &rx_http + multiplier : 40000 + cores : 2 + rx_sample_rate : 32 + error_tolerance : 0.01 + bw_per_core : 38.071 test_rx_check_http_ipv6: - multiplier : 40000 - cores : 2 - rx_sample_rate : 32 - error_tolerance : 0.01 + << : *rx_http + bw_per_core : 46.733 + + +test_rx_check_sfr: &rx_sfr + multiplier : 25 + cores : 4 + rx_sample_rate : 32 + error_tolerance : 0.01 + bw_per_core : 16.915 + +test_rx_check_sfr_ipv6: + << : *rx_sfr + bw_per_core : 20.323 + + + +### stateless ### + +test_CPU_benchmark: + profiles: + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# causes queue full +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 64, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : 
{packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000, packet_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000, packet_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# not enough memory + queue full if memory increase +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 9000, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/imix.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/pcap.py + kwargs : {ipg_usec: 2, loop_count: 0} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/hlt/hlt_udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 -test_rx_check_http_negative: - multiplier : 40000 - cores : 2 - rx_sample_rate : 32 - error_tolerance : 0.01 -test_jumbo: - multiplier : 55 - cores : 1 diff --git a/scripts/automation/regression/setups/trex-dan/benchmark.yaml b/scripts/automation/regression/setups/trex-dan/benchmark.yaml index 33e5a771..c8b046d4 100644 --- 
a/scripts/automation/regression/setups/trex-dan/benchmark.yaml +++ b/scripts/automation/regression/setups/trex-dan/benchmark.yaml @@ -2,160 +2,250 @@ #### TRex benchmark configuration file #### ############################################################### -test_nbar_simple : - multiplier : 1.5 - cores : 2 - exp_gbps : 0.5 - cpu_to_core_ratio : 20800000 - cpu2core_custom_dev: YES - cpu2core_dev : 0.07 - exp_max_latency : 1000 - - nbar_classification: - http : 30.3 - rtp_audio : 21.06 - oracle_sqlnet : 11.25 - rtp : 11.1 - exchange : 10.16 - citrix : 5.6 - rtsp : 2.84 - sctp : 0.65 - ssl : 0.8 - sip : 0.09 - dns : 1.95 - smtp : 0.57 - pop3 : 0.36 - unknown : 3.19 - -test_rx_check : - multiplier : 0.8 - cores : 1 - rx_sample_rate : 128 - exp_gbps : 0.5 - cpu_to_core_ratio : 37270000 - exp_bw : 1 - exp_latency : 1 - -test_nat_simple : &test_nat_simple - stat_route_dict : +#### common templates ### + +stat_route_dict: &stat_route_dict clients_start : 16.0.0.1 servers_start : 48.0.0.1 dual_port_mask : 1.0.0.0 client_destination_mask : 255.0.0.0 server_destination_mask : 255.0.0.0 - nat_dict : + +nat_dict: &nat_dict clients_net_start : 16.0.0.0 client_acl_wildcard_mask : 0.0.0.255 dual_port_mask : 1.0.0.0 pool_start : 200.0.0.0 pool_netmask : 255.255.255.0 - multiplier : 550 - cores : 1 - cpu_to_core_ratio : 37270000 - exp_bw : 1 - exp_latency : 1 - allow_timeout_dev : YES - -test_nat_simple_mode1 : *test_nat_simple -test_nat_simple_mode2 : *test_nat_simple - -test_nat_learning : - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 550 - cores : 1 - nat_opened : 40000 - cpu_to_core_ratio : 270 - exp_bw : 1 - exp_latency : 1 - allow_timeout_dev : YES - -test_routing_imix_64 : - multiplier : 150 - cores : 4 - cpu_to_core_ratio : 280 - exp_latency : 1 - -test_routing_imix : - multiplier : 1 - cores : 1 - cpu_to_core_ratio : 1800 - 
exp_latency : 1 - -test_static_routing_imix : - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 0.7 - cores : 1 - cpu_to_core_ratio : 1800 - exp_latency : 1 -test_static_routing_imix_asymmetric: - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 0.8 - cores : 1 - cpu_to_core_ratio : 1800 - exp_latency : 1 -test_ipv6_simple : - multiplier : 1.5 - cores : 2 - cpu_to_core_ratio : 30070000 - cpu2core_custom_dev: YES - cpu2core_dev : 0.07 +### stateful ### +test_jumbo: + multiplier : 2.8 + cores : 1 + bw_per_core : 67.030 -test_rx_check_sfr: - multiplier : 1.7 - cores : 2 - rx_sample_rate : 16 -test_rx_check_http: - multiplier : 2200 - cores : 1 - rx_sample_rate : 16 +test_routing_imix: + multiplier : 1 + cores : 1 + bw_per_core : 3.979 + + +test_routing_imix_64: + multiplier : 150 + cores : 4 + bw_per_core : .681 + + +test_static_routing_imix: + stat_route_dict : *stat_route_dict + multiplier : 0.7 + cores : 1 + bw_per_core : 3.837 -test_rx_check_sfr_ipv6: - multiplier : 1.7 - cores : 2 - rx_sample_rate : 16 + +test_static_routing_imix_asymmetric: + stat_route_dict : *stat_route_dict + multiplier : 0.8 + cores : 1 + bw_per_core : 3.939 + + +test_ipv6_simple: + multiplier : 1.5 + cores : 2 + bw_per_core : 4.719 + + +test_nat_simple_mode1: &test_nat_simple + stat_route_dict : *stat_route_dict + nat_dict : *nat_dict + multiplier : 550 + cores : 1 + allow_timeout_dev : True + bw_per_core : 7.465 + +test_nat_simple_mode2: *test_nat_simple + +test_nat_learning: + << : *test_nat_simple + bw_per_core : 7.377 + nat_opened : 40000 + + +test_nbar_simple: + multiplier : 1.5 + cores : 2 + bw_per_core : 4.465 + nbar_classification: + http : 30.3 + rtp_audio : 21.06 + oracle_sqlnet : 11.25 + rtp : 11.1 + 
exchange : 10.16 + citrix : 5.6 + rtsp : 2.84 + sctp : 0.65 + ssl : 0.8 + sip : 0.09 + dns : 1.95 + smtp : 0.57 + pop3 : 0.36 + unknown : 3.19 + + +test_rx_check_http: &rx_http + multiplier : 2200 + cores : 1 + rx_sample_rate : 16 + bw_per_core : 8.142 test_rx_check_http_ipv6: - multiplier : 2200 - cores : 1 - rx_sample_rate : 16 + << : *rx_http + bw_per_core : 8.591 test_rx_check_http_negative: - multiplier : 2200 - cores : 1 - rx_sample_rate : 16 - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - nat_dict : - clients_net_start : 16.0.0.0 - client_acl_wildcard_mask : 0.0.0.255 - dual_port_mask : 1.0.0.0 - pool_start : 200.0.0.0 - pool_netmask : 255.255.255.0 + << : *rx_http + stat_route_dict : *stat_route_dict + nat_dict : *nat_dict + bw_per_core : 8.037 + + +test_rx_check_sfr: &rx_sfr + multiplier : 1.7 + cores : 2 + rx_sample_rate : 16 + bw_per_core : 4.473 + +test_rx_check_sfr_ipv6: + << : *rx_sfr + bw_per_core : 4.773 + + + +### stateless ### + +test_CPU_benchmark: + profiles: + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# causes queue full +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 64, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : 
stl/udp_1pkt_simple.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000, packet_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000, packet_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# not enough memory + queue full if memory increase +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 9000, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/imix.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/pcap.py + kwargs : {ipg_usec: 2, loop_count: 0} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/hlt/hlt_udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 -test_jumbo: - multiplier : 2.8 - cores : 1 diff --git a/scripts/automation/regression/setups/trex04/benchmark.yaml b/scripts/automation/regression/setups/trex04/benchmark.yaml index e5459dce..d2b1c4f2 100644 --- a/scripts/automation/regression/setups/trex04/benchmark.yaml +++ b/scripts/automation/regression/setups/trex04/benchmark.yaml @@ -2,61 +2,154 @@ #### T-Rex benchmark configuration 
file #### ################################################################ +### stateful ### + +test_jumbo: + multiplier : 2.8 + cores : 1 + bw_per_core : 106.652 + + +test_routing_imix: + multiplier : 0.5 + cores : 1 + bw_per_core : 11.577 + + +test_routing_imix_64: + multiplier : 28 + cores : 1 + bw_per_core : 2.030 -test_rx_check : - multiplier : 0.8 - cores : 1 - rx_sample_rate : 128 - exp_gbps : 0.5 - cpu_to_core_ratio : 37270000 - exp_bw : 1 - exp_latency : 1 - - -test_routing_imix_64 : - multiplier : 28 - cores : 1 - cpu_to_core_ratio : 280 - exp_latency : 1 - -test_routing_imix : - multiplier : 0.5 - cores : 1 - cpu_to_core_ratio : 1800 - exp_latency : 1 - -test_static_routing_imix : - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 0.8 - cores : 1 - cpu_to_core_ratio : 1800 - exp_latency : 1 test_static_routing_imix_asymmetric: - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 0.8 - cores : 1 - cpu_to_core_ratio : 1800 - exp_latency : 1 - -test_ipv6_simple : - multiplier : 0.5 - cores : 1 - cpu_to_core_ratio : 30070000 - cpu2core_custom_dev: YES - cpu2core_dev : 0.07 + multiplier : 0.8 + cores : 1 + bw_per_core : 13.742 + + + +### stateless ### + +test_CPU_benchmark: + profiles: + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# causes queue full +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 64, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + 
kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000, packet_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000, packet_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# not enough memory + queue full if memory increase +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 9000, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/imix.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/pcap.py + kwargs : {ipg_usec: 4, loop_count: 0} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/hlt/hlt_udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 -test_jumbo: - 
multiplier : 2.8 - cores : 1
\ No newline at end of file diff --git a/scripts/automation/regression/setups/trex07/benchmark.yaml b/scripts/automation/regression/setups/trex07/benchmark.yaml new file mode 100644 index 00000000..4778de91 --- /dev/null +++ b/scripts/automation/regression/setups/trex07/benchmark.yaml @@ -0,0 +1,170 @@ +############################################################### +#### TRex benchmark configuration file #### +############################################################### + +test_nbar_simple : + multiplier : 7.5 + cores : 2 + exp_gbps : 3.5 + cpu_to_core_ratio : 20800000 + cpu2core_custom_dev: YES + cpu2core_dev : 0.07 + exp_max_latency : 1000 + + nbar_classification: + rtp : 32.57 + http : 30.25 + oracle-sqlnet : 11.23 + exchange : 10.80 + citrix : 5.62 + rtsp : 2.84 + dns : 1.95 + smtp : 0.57 + pop3 : 0.36 + ssl : 0.17 + sctp : 0.13 + sip : 0.09 + unknown : 3.41 + +test_rx_check : + multiplier : 13 + cores : 3 + rx_sample_rate : 128 + exp_gbps : 6 + cpu_to_core_ratio : 37270000 + exp_bw : 13 + exp_latency : 1 + +test_nat_simple : &test_nat_simple + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + nat_dict : + clients_net_start : 16.0.0.0 + client_acl_wildcard_mask : 0.0.0.255 + dual_port_mask : 1.0.0.0 + pool_start : 200.0.0.0 + pool_netmask : 255.255.255.0 + multiplier : 12000 + cores : 1 + cpu_to_core_ratio : 37270000 + exp_bw : 1 + exp_latency : 1 + allow_timeout_dev : YES + +test_nat_simple_mode1 : *test_nat_simple +test_nat_simple_mode2 : *test_nat_simple + +test_nat_learning : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 12000 + cores : 1 + nat_opened : 40000 + cpu_to_core_ratio : 270 + exp_bw : 8 + exp_latency : 1 + allow_timeout_dev : YES + +test_routing_imix_64 : + multiplier : 430 
+ cores : 1 + cpu_to_core_ratio : 280 + exp_latency : 1 + +test_routing_imix : + multiplier : 10 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_static_routing_imix : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 8 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_static_routing_imix_asymmetric: + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 8 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_ipv6_simple : + multiplier : 9 + cores : 2 + cpu_to_core_ratio : 30070000 + cpu2core_custom_dev: YES + cpu2core_dev : 0.07 + + +test_rx_check_sfr: + multiplier : 10 + cores : 2 + rx_sample_rate : 16 + # allow 0.03% errors, bad router + error_tolerance : 0.03 + +test_rx_check_http: + multiplier : 15000 + cores : 1 + rx_sample_rate : 16 + # allow 0.03% errors, bad routerifconfig + error_tolerance : 0.03 + +test_rx_check_sfr_ipv6: + multiplier : 10 + cores : 2 + rx_sample_rate : 16 + # allow 0.03% errors, bad router + error_tolerance : 0.03 + +test_rx_check_http_ipv6: + multiplier : 15000 + cores : 1 + rx_sample_rate : 16 + # allow 0.03% errors, bad router + error_tolerance : 0.03 + +test_rx_check_http_negative: + multiplier : 13000 + cores : 1 + rx_sample_rate : 16 + # allow 0.03% errors, bad router + error_tolerance : 0.03 + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + nat_dict : + clients_net_start : 16.0.0.0 + client_acl_wildcard_mask : 0.0.0.255 + dual_port_mask : 1.0.0.0 + pool_start : 200.0.0.0 + pool_netmask : 255.255.255.0 + + +test_jumbo: + multiplier : 17 + cores : 1 diff --git 
a/scripts/automation/regression/setups/trex07/config.yaml b/scripts/automation/regression/setups/trex07/config.yaml new file mode 100644 index 00000000..beb73435 --- /dev/null +++ b/scripts/automation/regression/setups/trex07/config.yaml @@ -0,0 +1,66 @@ +################################################################ +#### T-Rex nightly test configuration file #### +################################################################ + + +### T-Rex configuration: +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# password - root password for TRex machine +# is_dual - should the TRex inject with -p ? +# version_path - path to the t-rex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the TRex +# modes - list of modes (tagging) of this setup (loopback etc.) +# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. +# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes) +# * virt_nics - NICs are virtual (VMXNET3 etc.) + +### Router configuration: +# hostname - the router hostname as apears in ______# cli prefix +# ip_address - the router's ip that can be used to communicate with +# image - the desired imaged wished to be loaded as the router's running config +# line_password - router password when access via Telent +# en_password - router password when changing to "enable" mode +# interfaces - an array of client-server pairs, representing the interfaces configurations of the router +# configurations - an array of configurations that could possibly loaded into the router during the test. 
+# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench + +### TFTP configuration: +# hostname - the tftp hostname +# ip_address - the tftp's ip address +# images_path - the tftp's relative path in which the router's images are located + +### Test_misc configuration: +# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test + +trex: + hostname : csi-trex-07 + cores : 4 + +router: + model : ASR1001x + hostname : csi-asr-01 + ip_address : 10.56.216.120 + image : asr1001x-universalk9.03.13.02.S.154-3.S2-ext.SPA.bin + line_password : cisco + en_password : cisco + mgmt_interface : GigabitEthernet0 + clean_config : clean_config.cfg + intf_masking : 255.255.255.0 + ipv6_mask : 64 + interfaces : + - client : + name : Te0/0/0 + src_mac_addr : 0000.0001.0002 + dest_mac_addr : 0000.0001.0001 + server : + name : Te0/0/1 + src_mac_addr : 0000.0002.0002 + dest_mac_addr : 0000.0002.0001 + vrf_name : null + +tftp: + hostname : ats-asr-srv-1 + ip_address : 10.56.217.7 + root_dir : /scratch/tftp/ + images_path : /asr1001x/ diff --git a/scripts/automation/regression/setups/trex08/benchmark.yaml b/scripts/automation/regression/setups/trex08/benchmark.yaml index d4bb8283..c5c2fde7 100644 --- a/scripts/automation/regression/setups/trex08/benchmark.yaml +++ b/scripts/automation/regression/setups/trex08/benchmark.yaml @@ -2,58 +2,174 @@ #### TRex benchmark configuration file #### ############################################################### -test_routing_imix_64 : - multiplier : 8000 - cores : 7 - cpu_to_core_ratio : 280 - exp_latency : 1 - -test_routing_imix : - multiplier : 80 - cores : 4 - cpu_to_core_ratio : 1800 - exp_latency : 1 +### stateful ### + +test_jumbo: + multiplier : 150 + cores : 2 + bw_per_core : 962.464 + + +test_routing_imix: + multiplier : 80 + cores : 4 + bw_per_core : 55.130 + + +test_routing_imix_64: + multiplier : 8000 + cores : 7 + bw_per_core : 11.699 + 
test_static_routing_imix_asymmetric: - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 70 - cores : 3 - cpu_to_core_ratio : 1800 - exp_latency : 1 - -test_ipv6_simple : - multiplier : 80 - cores : 7 - cpu_to_core_ratio : 30070000 - cpu2core_custom_dev: YES - cpu2core_dev : 0.07 + multiplier : 70 + cores : 3 + bw_per_core : 50.561 -test_rx_check_sfr: - multiplier : 80 - cores : 7 - rx_sample_rate : 128 +test_ipv6_simple: + multiplier : 80 + cores : 7 + bw_per_core : 25.948 -test_rx_check_sfr_ipv6_disabled: - multiplier : 80 - cores : 7 - rx_sample_rate : 128 test_rx_check_http: - multiplier : 99000 - cores : 3 - rx_sample_rate : 128 + multiplier : 99000 + cores : 3 + rx_sample_rate : 128 + bw_per_core : 49.464 + + +test_rx_check_sfr: + multiplier : 80 + cores : 7 + rx_sample_rate : 128 + bw_per_core : 20.871 + + + +### stateless ### + +test_CPU_benchmark: + profiles: + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# causes queue full +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 64, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : 
stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000, packet_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000, packet_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# not enough memory + queue full if memory increase +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 9000, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/imix.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/pcap.py + kwargs : {ipg_usec: 2, loop_count: 0} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/hlt/hlt_udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 -test_rx_check_http_ipv6_disabled: - multiplier : 99000 - cores : 3 - rx_sample_rate : 128 -test_jumbo: - multiplier : 150 - cores : 2
\ No newline at end of file diff --git a/scripts/automation/regression/setups/trex09/benchmark.yaml b/scripts/automation/regression/setups/trex09/benchmark.yaml index 3f7b9a95..1f8fe47a 100644 --- a/scripts/automation/regression/setups/trex09/benchmark.yaml +++ b/scripts/automation/regression/setups/trex09/benchmark.yaml @@ -1,118 +1,175 @@ -############################################################### -#### TRex benchmark configuration file #### -############################################################### - -test_nbar_simple : - multiplier : 1.5 - cores : 1 - exp_gbps : 0.5 - cpu_to_core_ratio : 20800000 - cpu2core_custom_dev: YES - cpu2core_dev : 0.07 - exp_max_latency : 1000 - - nbar_classification: - http : 30.3 - rtp_audio : 21.06 - oracle_sqlnet : 11.25 - rtp : 11.1 - exchange : 10.16 - citrix : 5.6 - rtsp : 2.84 - sctp : 0.65 - ssl : 0.8 - sip : 0.09 - dns : 1.95 - smtp : 0.57 - pop3 : 0.36 - unknown : 3.19 - -test_rx_check : - multiplier : 0.8 - cores : 1 - rx_sample_rate : 128 - exp_gbps : 0.5 - cpu_to_core_ratio : 37270000 - exp_bw : 1 - exp_latency : 1 - -test_nat_simple : - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - nat_dict : - clients_net_start : 16.0.0.0 - client_acl_wildcard_mask : 0.0.0.255 - dual_port_mask : 1.0.0.0 - pool_start : 200.0.0.0 - pool_netmask : 255.255.255.0 - multiplier : 150 - cores : 1 - cpu_to_core_ratio : 37270000 - exp_bw : 1 - exp_latency : 1 - allow_timeout_dev : YES - -test_nat_learning : - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 150 - cores : 1 - nat_opened : 40000 - cpu_to_core_ratio : 270 - exp_bw : 1 - exp_latency : 1 - allow_timeout_dev : YES - -test_routing_imix_64 : - multiplier : 28 - cores : 1 - cpu_to_core_ratio : 280 - exp_latency 
: 1 - -test_routing_imix : - multiplier : 1 - cores : 1 - cpu_to_core_ratio : 1800 - exp_latency : 1 - -test_static_routing_imix : - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 0.7 - cores : 1 - cpu_to_core_ratio : 1800 - exp_latency : 1 +################################################################ +#### T-Rex benchmark configuration file #### +################################################################ + +### stateful ### + +test_jumbo: + multiplier : 110 + cores : 1 + bw_per_core : 767.198 + + +test_routing_imix: + multiplier : 64 + cores : 2 + bw_per_core : 35.889 + + +test_routing_imix_64: + multiplier : 5000 + cores : 2 + bw_per_core : 10.672 + test_static_routing_imix_asymmetric: - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 0.8 - cores : 1 - cpu_to_core_ratio : 1800 - exp_latency : 1 - -test_ipv6_simple : - multiplier : 1.5 - cores : 1 - cpu_to_core_ratio : 30070000 - cpu2core_custom_dev: YES - cpu2core_dev : 0.07 + multiplier : 32 + cores : 1 + bw_per_core : 52.738 + + +test_ipv6_simple: + multiplier : 64 + cores : 4 + bw_per_core : 22.808 + + +test_rx_check_http: + multiplier : 90000 + cores : 2 + rx_sample_rate : 32 + bw_per_core : 46.075 + + +test_rx_check_sfr: + multiplier : 50 + cores : 3 + rx_sample_rate : 32 + bw_per_core : 20.469 + + + +### stateless ### + +test_CPU_benchmark: + profiles: + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# causes queue full +# - name : 
stl/udp_1pkt_simple.py +# kwargs : {packet_len: 64, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000, packet_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000, packet_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# not enough memory + queue full if memory increase +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 9000, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/imix.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/pcap.py + kwargs : {ipg_usec: 2, loop_count: 0} + cpu_util : 1 + bw_per_core : 1 + + - name : 
stl/udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + - name : stl/hlt/hlt_udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 diff --git a/scripts/automation/regression/setups/trex11/config.yaml b/scripts/automation/regression/setups/trex11/config.yaml index 876a1afd..7bb5b6d4 100644 --- a/scripts/automation/regression/setups/trex11/config.yaml +++ b/scripts/automation/regression/setups/trex11/config.yaml @@ -34,36 +34,5 @@ trex: hostname : csi-trex-11 - cores : 2 + cores : 1 modes : ['loopback', 'virtual'] - -router: - model : 1RU - hostname : ASR1001_T-Rex -# ip_address : 10.56.199.247 - ip_address : 10.56.199.247123123123 - image : asr1001-universalk9.BLD_V155_1_S_XE314_THROTTLE_LATEST_20141112_090734-std.bin - #image : asr1001-universalk9.BLD_V155_2_S_XE315_THROTTLE_LATEST_20150121_110036-std.bin - #image : asr1001-universalk9.BLD_V155_2_S_XE315_THROTTLE_LATEST_20150324_100047-std.bin - line_password : lab - en_password : lab - mgmt_interface : GigabitEthernet0/0/0 - clean_config : /Configurations/danklei/asr1001_TRex_clean_config.cfg - intf_masking : 255.255.255.0 - ipv6_mask : 64 - interfaces : - - client : - name : GigabitEthernet0/0/1 - src_mac_addr : 0000.0001.0000 - dest_mac_addr : 0000.0001.0000 - server : - name : GigabitEthernet0/0/2 - src_mac_addr : 0000.0001.0000 - dest_mac_addr : 0000.0001.0000 - vrf_name : null - -tftp: - hostname : ats-asr-srv-1 - ip_address : 10.56.128.23 - root_dir : /auto/avc-devtest/ - images_path : /images/1RU/ diff --git a/scripts/automation/regression/setups/trex12/benchmark.yaml b/scripts/automation/regression/setups/trex12/benchmark.yaml index 7985f15e..0ebc2bcb 100644 --- a/scripts/automation/regression/setups/trex12/benchmark.yaml +++ b/scripts/automation/regression/setups/trex12/benchmark.yaml @@ -2,165 +2,181 @@ #### TRex benchmark configuration file #### ############################################################### -test_nbar_simple : - multiplier : 7.5 - cores : 2 - exp_gbps : 3.5 - cpu_to_core_ratio : 
20800000 - cpu2core_custom_dev: YES - cpu2core_dev : 0.07 - exp_max_latency : 1000 - - nbar_classification: - http : 30.18 - rtp-audio : 21.27 - rtp : 11.26 - oracle_sqlnet : 11.2 - exchange : 10.78 - citrix : 5.61 - rtsp : 2.82 - dns : 1.94 - smtp : 0.57 - pop3 : 0.36 - ssl : 0.16 - sctp : 0.13 - sip : 0.09 - unknown : 3.54 - -test_rx_check : - multiplier : 13 - cores : 4 - rx_sample_rate : 128 - exp_gbps : 6 - cpu_to_core_ratio : 37270000 - exp_bw : 13 - exp_latency : 1 - -test_nat_simple : - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - nat_dict : - clients_net_start : 16.0.0.0 - client_acl_wildcard_mask : 0.0.0.255 - dual_port_mask : 1.0.0.0 - pool_start : 200.0.0.0 - pool_netmask : 255.255.255.0 - multiplier : 12000 - cores : 1 - cpu_to_core_ratio : 37270000 - exp_bw : 1 - exp_latency : 1 - allow_timeout_dev : YES - -test_nat_learning : - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 12000 - cores : 1 - nat_opened : 40000 - cpu_to_core_ratio : 270 - exp_bw : 8 - exp_latency : 1 - allow_timeout_dev : YES - -test_routing_imix_64 : - multiplier : 430 - cores : 1 - cpu_to_core_ratio : 280 - exp_latency : 1 - -test_routing_imix : - multiplier : 10 - cores : 1 - cpu_to_core_ratio : 1800 - exp_latency : 1 - -test_static_routing_imix : - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 10 - cores : 1 - cpu_to_core_ratio : 1800 - exp_latency : 1 +### stateful ### + +test_jumbo: + multiplier : 14 + cores : 1 + bw_per_core : 689.664 + + +test_routing_imix: + multiplier : 8 + cores : 1 + bw_per_core : 45.422 + + +test_routing_imix_64: + multiplier : 2200 + cores : 
1 + bw_per_core : 11.655 + test_static_routing_imix_asymmetric: - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 8 - cores : 1 - cpu_to_core_ratio : 1800 - exp_latency : 1 - -test_ipv6_simple : - multiplier : 18 - cores : 4 - cpu_to_core_ratio : 30070000 - cpu2core_custom_dev: YES - cpu2core_dev : 0.07 - - -test_rx_check_sfr: - multiplier : 15 - cores : 3 - rx_sample_rate : 16 - # allow 0.03% errors, bad router - error_tolerance : 0.03 - -test_rx_check_http: - multiplier : 15000 - cores : 1 - rx_sample_rate : 16 - # allow 0.03% errors, bad router - error_tolerance : 0.03 + multiplier : 4 + cores : 1 + bw_per_core : 45.294 -test_rx_check_sfr_ipv6: - multiplier : 15 - cores : 3 - rx_sample_rate : 16 - # allow 0.03% errors, bad router - error_tolerance : 0.03 + +test_ipv6_simple: + multiplier : 8 + cores : 1 + bw_per_core : 29.332 + + +test_rx_check_http: &rx_http + multiplier : 11000 + cores : 1 + rx_sample_rate : 16 + bw_per_core : 47.813 test_rx_check_http_ipv6: - multiplier : 15000 - cores : 1 - rx_sample_rate : 16 - # allow 0.03% errors, bad router - error_tolerance : 0.03 - -test_rx_check_http_negative: - multiplier : 13000 - cores : 1 - rx_sample_rate : 16 - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - nat_dict : - clients_net_start : 16.0.0.0 - client_acl_wildcard_mask : 0.0.0.255 - dual_port_mask : 1.0.0.0 - pool_start : 200.0.0.0 - pool_netmask : 255.255.255.0 + << : *rx_http + bw_per_core : 55.607 + + +test_rx_check_sfr: &rx_sfr + multiplier : 8 + cores : 1 + rx_sample_rate : 16 + bw_per_core : 24.203 + +test_rx_check_sfr_ipv6: + << : *rx_sfr + bw_per_core : 28.867 + + +### stateless ### + +test_CPU_benchmark: + profiles: + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64} + 
cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# causes queue full +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 64, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000, packet_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000, packet_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# not enough memory + queue full if memory increase +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 9000, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/imix.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : 
stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/pcap.py + kwargs : {ipg_usec: 2, loop_count: 0} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/hlt/hlt_udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 -test_jumbo: - multiplier : 28 - cores : 1 diff --git a/scripts/automation/regression/setups/trex12/config.yaml b/scripts/automation/regression/setups/trex12/config.yaml index af17db45..56471ac7 100644 --- a/scripts/automation/regression/setups/trex12/config.yaml +++ b/scripts/automation/regression/setups/trex12/config.yaml @@ -35,34 +35,6 @@ trex: hostname : csi-trex-12 -# version_path : /auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.57/ #/auto/srg-sce-swinfra-usr/emb/users/danklei/Work/asr1k/emb/private/bpsim/main/scripts cores : 1 - modes : [VM] + modes : ['loopback', '1G'] -router: - model : ASR1001x - hostname : csi-asr-01 - ip_address : 10.56.216.103 - image : asr1001x-universalk9_npe.BLD_V155_2_S_XE315_THROTTLE_LATEST_20151121_110441-std_2.SSA.bin - line_password : cisco - en_password : cisco - mgmt_interface : GigabitEthernet0 - clean_config : /Configurations/danklei/asr1001_TRex_clean_config.cfg - intf_masking : 255.255.255.0 - ipv6_mask : 64 - interfaces : - - client : - name : Te0/0/0 - src_mac_addr : 0000.0001.0000 - dest_mac_addr : 0000.0001.0000 - server : - name : Te0/0/1 - src_mac_addr : 0000.0001.0000 - dest_mac_addr : 0000.0001.0000 - vrf_name : null - -tftp: - hostname : ats-asr-srv-1 - ip_address : 10.56.128.23 - root_dir : /auto/avc-devtest/ - images_path : /images/RP2/ diff --git a/scripts/automation/regression/setups/trex14/benchmark.yaml b/scripts/automation/regression/setups/trex14/benchmark.yaml index e602ad1a..afa27a82 100644 --- a/scripts/automation/regression/setups/trex14/benchmark.yaml +++ 
b/scripts/automation/regression/setups/trex14/benchmark.yaml @@ -2,170 +2,244 @@ #### TRex benchmark configuration file #### ############################################################### -test_nbar_simple : - multiplier : 7.5 - cores : 2 - exp_gbps : 3.5 - cpu_to_core_ratio : 20800000 - cpu2core_custom_dev: YES - cpu2core_dev : 0.07 - exp_max_latency : 1000 - - nbar_classification: - http : 32.58 - rtp-audio : 21.21 - oracle_sqlnet : 11.41 - exchange : 11.22 - rtp : 11.2 - citrix : 5.65 - rtsp : 2.87 - dns : 1.96 - smtp : 0.57 - pop3 : 0.37 - ssl : 0.28 - sctp : 0.13 - sip : 0.09 - unknown : 0.45 - -test_rx_check : - multiplier : 13 - cores : 4 - rx_sample_rate : 128 - exp_gbps : 6 - cpu_to_core_ratio : 37270000 - exp_bw : 13 - exp_latency : 1 - -test_nat_simple : &test_nat_simple - stat_route_dict : +#### common templates ### + +stat_route_dict: &stat_route_dict clients_start : 16.0.0.1 servers_start : 48.0.0.1 dual_port_mask : 1.0.0.0 client_destination_mask : 255.0.0.0 server_destination_mask : 255.0.0.0 - nat_dict : + +nat_dict: &nat_dict clients_net_start : 16.0.0.0 client_acl_wildcard_mask : 0.0.0.255 dual_port_mask : 1.0.0.0 pool_start : 200.0.0.0 pool_netmask : 255.255.255.0 - multiplier : 12000 - cores : 1 - cpu_to_core_ratio : 37270000 - exp_bw : 1 - exp_latency : 1 - allow_timeout_dev : YES - -test_nat_simple_mode1 : *test_nat_simple -test_nat_simple_mode2 : *test_nat_simple - -test_nat_learning : - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 12000 - cores : 1 - nat_opened : 40000 - cpu_to_core_ratio : 270 - exp_bw : 8 - exp_latency : 1 - allow_timeout_dev : YES - -test_routing_imix_64 : - multiplier : 430 - cores : 1 - cpu_to_core_ratio : 280 - exp_latency : 1 - -test_routing_imix : - multiplier : 10 - cores : 1 - cpu_to_core_ratio : 1800 - exp_latency : 1 - -test_static_routing_imix : - stat_route_dict : - 
clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 8 - cores : 1 - cpu_to_core_ratio : 1800 - exp_latency : 1 -test_static_routing_imix_asymmetric: - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 8 - cores : 1 - cpu_to_core_ratio : 1800 - exp_latency : 1 - -test_ipv6_simple : - multiplier : 9 - cores : 2 - cpu_to_core_ratio : 30070000 - cpu2core_custom_dev: YES - cpu2core_dev : 0.07 - - -test_rx_check_sfr: - multiplier : 10 - cores : 3 - rx_sample_rate : 16 - # allow 0.03% errors, bad router - error_tolerance : 0.03 - -test_rx_check_http: - multiplier : 15000 - cores : 1 - rx_sample_rate : 16 - # allow 0.03% errors, bad routerifconfig - error_tolerance : 0.03 -test_rx_check_sfr_ipv6: - multiplier : 10 - cores : 3 - rx_sample_rate : 16 - # allow 0.03% errors, bad router - error_tolerance : 0.03 +### stateful ### + +test_jumbo: + multiplier : 17 + cores : 1 + bw_per_core : 543.232 + + +test_routing_imix: + multiplier : 10 + cores : 1 + bw_per_core : 34.128 + + +test_routing_imix_64: + multiplier : 430 + cores : 1 + bw_per_core : 5.893 + + +test_static_routing_imix: &test_static_routing_imix + stat_route_dict : *stat_route_dict + multiplier : 8 + cores : 1 + bw_per_core : 34.339 + +test_static_routing_imix_asymmetric: *test_static_routing_imix + + +test_ipv6_simple: + multiplier : 9 + cores : 2 + bw_per_core : 19.064 + + +test_nat_simple_mode1: &test_nat_simple + stat_route_dict : *stat_route_dict + nat_dict : *nat_dict + multiplier : 12000 + cores : 1 + nat_opened : 40000 + allow_timeout_dev : True + bw_per_core : 44.445 + +test_nat_simple_mode2: *test_nat_simple + +test_nat_learning: + << : *test_nat_simple + nat_opened : 40000 + + +test_nbar_simple: + multiplier : 7.5 + cores : 2 + bw_per_core : 17.174 + 
nbar_classification: + http : 32.58 + rtp-audio : 21.21 + oracle_sqlnet : 11.41 + exchange : 11.22 + rtp : 11.2 + citrix : 5.65 + rtsp : 2.87 + dns : 1.96 + smtp : 0.57 + pop3 : 0.37 + ssl : 0.28 + sctp : 0.13 + sip : 0.09 + unknown : 0.45 + + +test_rx_check_http: &rx_http + multiplier : 15000 + cores : 1 + rx_sample_rate : 16 + bw_per_core : 39.560 test_rx_check_http_ipv6: - multiplier : 15000 - cores : 1 - rx_sample_rate : 16 - # allow 0.03% errors, bad router - error_tolerance : 0.03 + << : *rx_http + bw_per_core : 49.237 test_rx_check_http_negative: - multiplier : 13000 - cores : 1 - rx_sample_rate : 16 - # allow 0.03% errors, bad router - error_tolerance : 0.03 - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - nat_dict : - clients_net_start : 16.0.0.0 - client_acl_wildcard_mask : 0.0.0.255 - dual_port_mask : 1.0.0.0 - pool_start : 200.0.0.0 - pool_netmask : 255.255.255.0 + << : *rx_http + stat_route_dict : *stat_route_dict + nat_dict : *nat_dict + + +test_rx_check_sfr: &rx_sfr + multiplier : 10 + cores : 3 + rx_sample_rate : 16 + bw_per_core : 16.082 + +test_rx_check_sfr_ipv6: + << : *rx_sfr + bw_per_core : 19.198 + + + +### stateless ### + +test_CPU_benchmark: + profiles: + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# causes queue full +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 64, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - 
name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000, packet_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000, packet_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# not enough memory + queue full if memory increase +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 9000, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/imix.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/pcap.py + kwargs : {ipg_usec: 2, loop_count: 0} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/hlt/hlt_udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 -test_jumbo: - multiplier : 17 - cores : 1 diff --git a/scripts/automation/regression/setups/trex14/config.yaml b/scripts/automation/regression/setups/trex14/config.yaml index 
10938ff3..1a528a9b 100644 --- a/scripts/automation/regression/setups/trex14/config.yaml +++ b/scripts/automation/regression/setups/trex14/config.yaml @@ -35,7 +35,8 @@ trex: hostname : csi-trex-14 - cores : 1 + cores : 4 + modes : [] router: model : ASR1001x diff --git a/scripts/automation/regression/setups/trex17/benchmark.yaml b/scripts/automation/regression/setups/trex17/benchmark.yaml index e5459dce..d9191a42 100644 --- a/scripts/automation/regression/setups/trex17/benchmark.yaml +++ b/scripts/automation/regression/setups/trex17/benchmark.yaml @@ -2,61 +2,154 @@ #### T-Rex benchmark configuration file #### ################################################################ +### stateful ### + +test_jumbo: + multiplier : 2.8 + cores : 1 + bw_per_core : 66.489 + + +test_routing_imix: + multiplier : 0.5 + cores : 1 + bw_per_core : 5.530 + + +test_routing_imix_64: + multiplier : 28 + cores : 1 + bw_per_core : 0.859 -test_rx_check : - multiplier : 0.8 - cores : 1 - rx_sample_rate : 128 - exp_gbps : 0.5 - cpu_to_core_ratio : 37270000 - exp_bw : 1 - exp_latency : 1 - - -test_routing_imix_64 : - multiplier : 28 - cores : 1 - cpu_to_core_ratio : 280 - exp_latency : 1 - -test_routing_imix : - multiplier : 0.5 - cores : 1 - cpu_to_core_ratio : 1800 - exp_latency : 1 - -test_static_routing_imix : - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 0.8 - cores : 1 - cpu_to_core_ratio : 1800 - exp_latency : 1 test_static_routing_imix_asymmetric: - stat_route_dict : - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - multiplier : 0.8 - cores : 1 - cpu_to_core_ratio : 1800 - exp_latency : 1 - -test_ipv6_simple : - multiplier : 0.5 - cores : 1 - cpu_to_core_ratio : 30070000 - cpu2core_custom_dev: YES - cpu2core_dev : 0.07 + multiplier : 
0.8 + cores : 1 + bw_per_core : 9.635 + + + +### stateless ### + +test_CPU_benchmark: + profiles: + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# causes queue full +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 64, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000, packet_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000, packet_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# not enough memory + queue full if memory increase +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 9000, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/imix.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 512} + 
cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/pcap.py + kwargs : {ipg_usec: 2, loop_count: 0} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/hlt/hlt_udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 -test_jumbo: - multiplier : 2.8 - cores : 1
\ No newline at end of file diff --git a/scripts/automation/regression/setups/trex25/benchmark.yaml b/scripts/automation/regression/setups/trex25/benchmark.yaml new file mode 100644 index 00000000..f87759f9 --- /dev/null +++ b/scripts/automation/regression/setups/trex25/benchmark.yaml @@ -0,0 +1,252 @@ +############################################################### +#### TRex benchmark configuration file #### +############################################################### + +#### common templates ### + +stat_route_dict: &stat_route_dict + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + +nat_dict: &nat_dict + clients_net_start : 16.0.0.0 + client_acl_wildcard_mask : 0.0.0.255 + dual_port_mask : 1.0.0.0 + pool_start : 200.0.0.0 + pool_netmask : 255.255.255.0 + + +### stateful ### + +test_jumbo: + multiplier : 6 + cores : 1 + bw_per_core : 443.970 + + +test_routing_imix: + multiplier : 4 + cores : 1 + bw_per_core : 26.509 + + +test_routing_imix_64: + multiplier : 600 + cores : 1 + bw_per_core : 6.391 + + +test_static_routing_imix: + stat_route_dict : *stat_route_dict + multiplier : 2.8 + cores : 1 + bw_per_core : 24.510 + + + +test_static_routing_imix_asymmetric: + stat_route_dict : *stat_route_dict + multiplier : 3.2 + cores : 1 + bw_per_core : 28.229 + + +test_ipv6_simple: + multiplier : 6 + cores : 1 + bw_per_core : 19.185 + + +test_nat_simple_mode1: &test_nat_simple + stat_route_dict : *stat_route_dict + nat_dict : *nat_dict + multiplier : 2200 + cores : 1 + allow_timeout_dev : True + bw_per_core : 32.171 + +test_nat_simple_mode2: *test_nat_simple + +test_nat_learning: + << : *test_nat_simple + nat_opened : 40000 + + +test_nbar_simple: + multiplier : 6 + cores : 1 + bw_per_core : 16.645 + nbar_classification: + http : 24.55 + rtp : 19.15 + sqlnet : 10.38 + secure-http : 5.11 + citrix : 4.68 + mapi : 4.04 + dns : 1.56 + sctp : 0.66 + smtp : 0.48 + pop3 : 
0.30 + novadigm : 0.09 + sip : 0.08 + h323 : 0.05 + rtsp : 0.04 + unknown : 28.52 + + +test_rx_check_http: &rx_http + multiplier : 8800 + cores : 1 + rx_sample_rate : 16 + bw_per_core : 31.389 + +test_rx_check_http_ipv6: + << : *rx_http + bw_per_core : 37.114 + +test_rx_check_http_negative: + << : *rx_http + stat_route_dict : *stat_route_dict + nat_dict : *nat_dict + + +test_rx_check_sfr: &rx_sfr + multiplier : 6.8 + cores : 1 + rx_sample_rate : 16 + bw_per_core : 16.063 + +test_rx_check_sfr_ipv6: + << : *rx_sfr + bw_per_core : 19.663 + + +### stateless ### + +test_CPU_benchmark: + profiles: + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 64, packet_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# causes queue full +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 64, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_simple.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + +# problem stabilizing CPU utilization at this setup +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 9000, packet_count: 10} +# cpu_util : 1 +# bw_per_core : 1 + +# problem stabilizing CPU utilization at this setup +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 9000, packet_count: 100} +# cpu_util : 1 +# 
bw_per_core : 1 + +# not enough memory + queue full if memory increase +# - name : stl/udp_1pkt_simple.py +# kwargs : {packet_len: 9000, packet_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/imix.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/pcap.py + kwargs : {ipg_usec: 2, loop_count: 0} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/hlt/hlt_udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + diff --git a/scripts/automation/regression/setups/trex25/config.yaml b/scripts/automation/regression/setups/trex25/config.yaml new file mode 100644 index 00000000..821208a5 --- /dev/null +++ b/scripts/automation/regression/setups/trex25/config.yaml @@ -0,0 +1,93 @@ +################################################################ +#### T-Rex nightly test configuration file #### +################################################################ + + +### T-Rex configuration: +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# is_dual - should the TRex inject with -p ? 
+# version_path - path to the t-rex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the TRex +# modes - list of modes (tagging) of this setup (loopback, virtual etc.) +# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. +# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes) +# * virt_nics - NICs are virtual (VMXNET3 etc.) + +### Router configuration: +# hostname - the router hostname as apears in ______# cli prefix +# ip_address - the router's ip that can be used to communicate with +# image - the desired imaged wished to be loaded as the router's running config +# line_password - router password when access via Telent +# en_password - router password when changing to "enable" mode +# interfaces - an array of client-server pairs, representing the interfaces configurations of the router +# configurations - an array of configurations that could possibly loaded into the router during the test. 
+# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench + +### TFTP configuration: +# hostname - the tftp hostname +# ip_address - the tftp's ip address +# images_path - the tftp's relative path in which the router's images are located + +### Test_misc configuration: +# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test + +trex: + hostname : csi-trex-25 + cores : 2 + modes : ['1G'] + +router: + model : ASR1004(RP2) + hostname : csi-mcp-asr1k-4ru-12 + ip_address : 10.56.217.181 + image : asr1000rp2-adventerprisek9.BLD_V151_1_S_XE32_THROTTLE_LATEST_20100926_034325_2.bin + line_password : cisco + en_password : cisco + mgmt_interface : GigabitEthernet0/0/0 + clean_config : clean_config.cfg + intf_masking : 255.255.255.0 + ipv6_mask : 64 + interfaces : + - client : + name : GigabitEthernet0/1/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : GigabitEthernet0/1/1 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : + - client : + name : GigabitEthernet0/1/2 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : GigabitEthernet0/1/4 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : + - client : + name : GigabitEthernet0/1/5 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : GigabitEthernet0/1/3 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : + - client : + name : GigabitEthernet0/1/6 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : GigabitEthernet0/1/7 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : + +tftp: + hostname : ats-asr-srv-1 + ip_address : 10.56.128.23 + root_dir : /auto/avc-devtest/ + images_path : /images/1RU/ diff --git a/scripts/automation/regression/stateful_tests/trex_general_test.py 
b/scripts/automation/regression/stateful_tests/trex_general_test.py index 1a44970a..42720f70 100755 --- a/scripts/automation/regression/stateful_tests/trex_general_test.py +++ b/scripts/automation/regression/stateful_tests/trex_general_test.py @@ -48,6 +48,7 @@ def tearDownModule(module): class CTRexGeneral_Test(unittest.TestCase): """This class defines the general stateful testcase of the T-Rex traffic generator""" def __init__ (self, *args, **kwargs): + sys.stdout.flush() unittest.TestCase.__init__(self, *args, **kwargs) if CTRexScenario.is_test_list: return @@ -57,6 +58,7 @@ class CTRexGeneral_Test(unittest.TestCase): self.trex = CTRexScenario.trex self.trex_crashed = CTRexScenario.trex_crashed self.modes = CTRexScenario.modes + self.GAManager = CTRexScenario.GAManager self.skipping = False self.fail_reasons = [] if not hasattr(self, 'unsupported_modes'): @@ -135,15 +137,12 @@ class CTRexGeneral_Test(unittest.TestCase): if res[name] != float(val): self.fail('TRex results[%s]==%f and not as expected %f ' % (name, res[name], val)) - def check_CPU_benchmark (self, trex_res, err = 10, minimal_cpu = None, maximal_cpu = 85): + def check_CPU_benchmark (self, trex_res, err = 25, minimal_cpu = 30, maximal_cpu = 85): #cpu_util = float(trex_res.get_last_value("trex-global.data.m_cpu_util")) - cpu_util = sum([float(x) for x in trex_res.get_value_list("trex-global.data.m_cpu_util")[-4:-1]]) / 3 # mean of 3 values before last + cpu_util = sum(trex_res.get_value_list("trex-global.data.m_cpu_util")[-4:-1]) / 3.0 # mean of 3 values before last - if minimal_cpu is None: - if '1G' in self.modes: - minimal_cpu = 1 - else: - minimal_cpu = 30 + if '1G' in self.modes: + minimal_cpu /= 10.0 if not self.is_virt_nics: if cpu_util > maximal_cpu: @@ -151,23 +150,25 @@ class CTRexGeneral_Test(unittest.TestCase): if cpu_util < minimal_cpu: self.fail("CPU is too low (%s%%), can't verify performance in such low CPU%%." 
% cpu_util ) - cores = self.get_benchmark_param('cores') - trex_tx_bps = trex_res.get_last_value("trex-global.data.m_total_tx_bytes") - test_norm_cpu = 100.0*(trex_tx_bps/(cores*cpu_util))/1e6 + test_norm_cpu = sum(trex_res.get_value_list("trex-global.data.m_bw_per_core")[-4:-1]) / 3.0 - print("TRex CPU utilization: %g%%, norm_cpu is : %d Mb/core" % (round(cpu_util), int(test_norm_cpu))) + print("TRex CPU utilization: %g%%, norm_cpu is : %g Gb/core" % (round(cpu_util, 2), round(test_norm_cpu))) - #expected_norm_cpu = self.get_benchmark_param('cpu_to_core_ratio') + expected_norm_cpu = self.get_benchmark_param('bw_per_core') + if not expected_norm_cpu: + expected_norm_cpu = 1 - #calc_error_precent = abs(100.0*(test_norm_cpu/expected_norm_cpu)-100.0) - -# if calc_error_precent > err: -# msg ='Normalized bandwidth to CPU utilization ratio is %2.0f Mb/core expected %2.0f Mb/core more than %2.0f %% - ERROR' % (test_norm_cpu, expected_norm_cpu, err) -# raise AbnormalResultError(msg) -# else: -# msg ='Normalized bandwidth to CPU utilization ratio is %2.0f Mb/core expected %2.0f Mb/core less than %2.0f %% - OK' % (test_norm_cpu, expected_norm_cpu, err) -# print msg + calc_error_precent = abs(100.0 * test_norm_cpu / expected_norm_cpu - 100) + print('Err percent: %s' % calc_error_precent) + if calc_error_precent > err and cpu_util > 10: + self.fail('Excepted bw_per_core ratio: %s, got: %g' % (expected_norm_cpu, round(test_norm_cpu))) + # report benchmarks + if self.GAManager: + setup_test = '%s.%s' % (CTRexScenario.setup_name, self.get_name()) + self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core', value = int(test_norm_cpu)) + self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core_exp', value = int(expected_norm_cpu)) + self.GAManager.emptyAndReportQ() def check_results_gt (self, res, name, val): if res is None: @@ -341,7 +342,9 @@ class CTRexGeneral_Test(unittest.TestCase): except Exception as 
e: print("Can't get TRex log:", e) if len(self.fail_reasons): + sys.stdout.flush() raise Exception('The test is failed, reasons:\n%s' % '\n'.join(self.fail_reasons)) + sys.stdout.flush() def check_for_trex_crash(self): pass diff --git a/scripts/automation/regression/stateful_tests/trex_imix_test.py b/scripts/automation/regression/stateful_tests/trex_imix_test.py index 95a5471d..cafa1c55 100755 --- a/scripts/automation/regression/stateful_tests/trex_imix_test.py +++ b/scripts/automation/regression/stateful_tests/trex_imix_test.py @@ -161,7 +161,7 @@ class CTRexIMIX_Test(CTRexGeneral_Test): self.check_general_scenario_results(trex_res) - self.check_CPU_benchmark(trex_res) + self.check_CPU_benchmark(trex_res, minimal_cpu = 25) def test_jumbo(self, duration = 100, **kwargs): diff --git a/scripts/automation/regression/stateful_tests/trex_nat_test.py b/scripts/automation/regression/stateful_tests/trex_nat_test.py index 512ad4e4..b2df684d 100755 --- a/scripts/automation/regression/stateful_tests/trex_nat_test.py +++ b/scripts/automation/regression/stateful_tests/trex_nat_test.py @@ -54,9 +54,9 @@ class CTRexNoNat_Test(CTRexGeneral_Test):#(unittest.TestCase): learning_stats = trex_res.get_last_value("trex-global.data", ".*nat.*") # extract all nat data if self.get_benchmark_param('allow_timeout_dev'): - nat_timeout_ratio = learning_stats['m_total_nat_time_out']/learning_stats['m_total_nat_open'] + nat_timeout_ratio = float(learning_stats['m_total_nat_time_out']) / learning_stats['m_total_nat_open'] if nat_timeout_ratio > 0.005: - self.fail('TRex nat_timeout ratio %f > 0.005 (0.5%) and not as expected to be less than 0.5%' %(nat_timeout_ratio)) + self.fail('TRex nat_timeout ratio %f > 0.5%%' % nat_timeout_ratio) else: self.check_results_eq (learning_stats, 'm_total_nat_time_out', 0.0) self.check_results_eq (learning_stats, 'm_total_nat_no_fid', 0.0) @@ -128,7 +128,7 @@ class CTRexNat_Test(CTRexGeneral_Test):#(unittest.TestCase): trex_nat_stats = 
trex_res.get_last_value("trex-global.data", ".*nat.*") # extract all nat data if self.get_benchmark_param('allow_timeout_dev'): - nat_timeout_ratio = trex_nat_stats['m_total_nat_time_out']/trex_nat_stats['m_total_nat_open'] + nat_timeout_ratio = float(trex_nat_stats['m_total_nat_time_out']) / trex_nat_stats['m_total_nat_open'] if nat_timeout_ratio > 0.005: self.fail('TRex nat_timeout ratio %f > 0.5%%' % nat_timeout_ratio) else: diff --git a/scripts/automation/regression/stateful_tests/trex_nbar_test.py b/scripts/automation/regression/stateful_tests/trex_nbar_test.py index 69c3f605..fa3f3485 100755 --- a/scripts/automation/regression/stateful_tests/trex_nbar_test.py +++ b/scripts/automation/regression/stateful_tests/trex_nbar_test.py @@ -82,70 +82,10 @@ class CTRexNbar_Test(CTRexGeneral_Test): print("\nLATEST DUMP:") print(trex_res.get_latest_dump()) - self.check_general_scenario_results(trex_res, check_latency = False) # NBAR can cause latency - # test_norm_cpu = 2*(trex_res.result['total-tx']/(core*trex_res.result['cpu_utilization'])) - trex_tx_pckt = trex_res.get_last_value("trex-global.data.m_total_tx_pkts") - cpu_util = trex_res.get_last_value("trex-global.data.m_cpu_util") - cpu_util_hist = trex_res.get_value_list("trex-global.data.m_cpu_util") - print("cpu util is:", cpu_util) - print(cpu_util_hist) - test_norm_cpu = 2 * trex_tx_pckt / (core * cpu_util) - print("test_norm_cpu is:", test_norm_cpu) - - - if self.get_benchmark_param('cpu2core_custom_dev'): - # check this test by custom deviation - deviation_compare_value = self.get_benchmark_param('cpu2core_dev') - print("Comparing test with custom deviation value- {dev_val}%".format( dev_val = int(deviation_compare_value*100) )) - - # need to be fixed ! 
- #if ( abs((test_norm_cpu/self.get_benchmark_param('cpu_to_core_ratio')) - 1) > deviation_compare_value): - # raise AbnormalResultError('Normalized bandwidth to CPU utilization ratio exceeds benchmark boundaries') - + self.check_CPU_benchmark(trex_res) self.match_classification() - assert True - - @nottest - def test_rx_check (self): - # test initializtion - self.router.configure_basic_interfaces() - - self.router.config_pbr(mode = "config") - self.router.config_nbar_pd() - - mult = self.get_benchmark_param('multiplier') - core = self.get_benchmark_param('cores') - sample_rate = self.get_benchmark_param('rx_sample_rate') - - ret = self.trex.start_trex( - c = core, - m = mult, - p = True, - nc = True, - rx_check = sample_rate, - d = 100, - f = 'cap2/sfr.yaml', - l = 1000) - - trex_res = self.trex.sample_to_run_finish() - - # trex_res is a CTRexResult instance- and contains the summary of the test results - # you may see all the results keys by simply calling here for 'print trex_res.result' - print("\nLATEST RESULT OBJECT:") - print(trex_res) - print("\nLATEST DUMP:") - print(trex_res.get_latest_dump()) - - self.check_general_scenario_results(trex_res) - - self.check_CPU_benchmark(trex_res, 10) - -# if trex_res.result['rx_check_tx']==trex_res.result['rx_check_rx']: # rx_check verification shoud pass -# assert trex_res.result['rx_check_verification'] == "OK" -# else: -# assert trex_res.result['rx_check_verification'] == "FAIL" # the name intentionally not matches nose default pattern, including the test should be specified explicitly def NBarLong(self): diff --git a/scripts/automation/regression/stateful_tests/trex_rx_test.py b/scripts/automation/regression/stateful_tests/trex_rx_test.py index 2f0a24f4..c097b180 100755 --- a/scripts/automation/regression/stateful_tests/trex_rx_test.py +++ b/scripts/automation/regression/stateful_tests/trex_rx_test.py @@ -52,9 +52,10 @@ class CTRexRx_Test(CTRexGeneral_Test): path = 'rx-check.data.stats.m_total_rx' total_rx = 
trex_res.get_last_value(path) - if not total_rx: + if total_rx is None: raise AbnormalResultError('No TRex results by path: %s' % path) - + elif not total_rx: + raise AbnormalResultError('Total rx_check (%s) packets is zero.' % path) print('Total packets checked: %s' % total_rx) print('Latency counters: %s' % latency_counters_display) @@ -69,7 +70,7 @@ class CTRexRx_Test(CTRexGeneral_Test): error_tolerance = self.get_benchmark_param('error_tolerance') if not error_tolerance or not allow_error_tolerance: error_tolerance = 0 - error_percentage = float(total_errors) * 100 / total_rx + error_percentage = total_errors * 100.0 / total_rx if total_errors > 0: if self.is_loopback or error_percentage > error_tolerance: @@ -255,7 +256,8 @@ class CTRexRx_Test(CTRexGeneral_Test): self.router.config_zbf() trex_res = self.trex.sample_to_run_finish() self.router.config_no_zbf() - self.router.clear_nat_translations() + self.router.config_no_nat(nat_obj) + #self.router.clear_nat_translations() print("\nLATEST RESULT OBJECT:") print(trex_res) self.check_rx_errors(trex_res, allow_error_tolerance = False) diff --git a/scripts/automation/regression/stateless_tests/stl_benchmark_test.py b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py new file mode 100755 index 00000000..ef4c435f --- /dev/null +++ b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py @@ -0,0 +1,69 @@ +#!/router/bin/python +from .stl_general_test import CStlGeneral_Test, CTRexScenario +from trex_stl_lib.api import * +import os, sys +from collections import deque +from time import time, sleep + +class STLBenchmark_Test(CStlGeneral_Test): + """Benchark stateless performance""" + + def test_CPU_benchmark(self): + timeout = 60 # max time to wait for stabilization + stabilize = 5 # ensure stabilization over this period + print('') + + for profile_bench in self.get_benchmark_param('profiles'): + cpu_utils = deque([0] * stabilize, maxlen = stabilize) + bws_per_core = deque([0] * stabilize, 
maxlen = stabilize) + kwargs = profile_bench.get('kwargs', {}) + print('Testing profile %s, kwargs: %s' % (profile_bench['name'], kwargs)) + profile = STLProfile.load(os.path.join(CTRexScenario.scripts_path, profile_bench['name']), **kwargs) + + self.stl_trex.reset() + self.stl_trex.clear_stats() + sleep(1) + self.stl_trex.add_streams(profile) + self.stl_trex.start(mult = '10%') + start_time = time() + + for i in range(timeout + 1): + stats = self.stl_trex.get_stats() + cpu_utils.append(stats['global']['cpu_util']) + bws_per_core.append(stats['global']['bw_per_core']) + if i > stabilize and min(cpu_utils) > max(cpu_utils) * 0.95: + break + sleep(0.5) + + agv_cpu_util = sum(cpu_utils) / stabilize + agv_bw_per_core = sum(bws_per_core) / stabilize + + if i == timeout and agv_cpu_util > 10: + raise Exception('Timeout on waiting for stabilization, last CPU util values: %s' % list(cpu_utils)) + if stats[0]['opackets'] < 1000 or stats[1]['opackets'] < 1000: + raise Exception('Too few opackets, port0: %s, port1: %s' % (stats[0]['opackets'], stats[1]['opackets'])) + if stats['global']['queue_full'] > 100000: + raise Exception('Too much queue_full: %s' % stats['global']['queue_full']) + if not cpu_utils[-1]: + raise Exception('CPU util is zero, last values: %s' % list(cpu_utils)) + print('Done (%ss), CPU util: %4g, bw_per_core: %6sGb/core' % (int(time() - start_time), agv_cpu_util, round(agv_bw_per_core, 2))) + # TODO: add check of benchmark based on results from regression + + # report benchmarks + if self.GAManager: + profile_repr = '%s.%s %s' % (CTRexScenario.setup_name, + os.path.basename(profile_bench['name']), + repr(kwargs).replace("'", '')) + self.GAManager.gaAddAction(Event = 'stateless_test', action = profile_repr, + label = 'bw_per_core', value = int(agv_bw_per_core)) + # TODO: report expected once acquired + #self.GAManager.gaAddAction(Event = 'stateless_test', action = profile_repr, + # label = 'bw_per_core_exp', value = int(expected_norm_cpu)) + 
self.GAManager.emptyAndReportQ() + + def tearDown(self): + self.stl_trex.reset() + self.stl_trex.clear_stats() + sleep(1) + CStlGeneral_Test.tearDown(self) + diff --git a/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py b/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py index 64d5000e..fe666ac3 100755 --- a/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py +++ b/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py @@ -12,7 +12,7 @@ class CTRexClientPKG_Test(CStlGeneral_Test): CStlGeneral_Test.setUp(self) self.unzip_client_package() - def run_client_package_stf_example(self, python_version): + def run_client_package_stl_example(self, python_version): commands = [ 'cd %s' % CTRexScenario.scripts_path, 'source find_python.sh --%s' % python_version, @@ -25,7 +25,7 @@ class CTRexClientPKG_Test(CStlGeneral_Test): self.fail('Error in running stf_example using %s: %s' % (python_version, stderr)) def test_client_python2(self): - self.run_client_package_stf_example(python_version = 'python2') + self.run_client_package_stl_example(python_version = 'python2') def test_client_python3(self): - self.run_client_package_stf_example(python_version = 'python3') + self.run_client_package_stl_example(python_version = 'python3') diff --git a/scripts/automation/regression/trex.py b/scripts/automation/regression/trex.py index 9459e7c6..7440d76d 100644 --- a/scripts/automation/regression/trex.py +++ b/scripts/automation/regression/trex.py @@ -36,6 +36,7 @@ class CTRexScenario: # logger = None test_types = {'functional_tests': [], 'stateful_tests': [], 'stateless_tests': []} is_copied = False + GAManager = None class CTRexRunner: """This is an instance for generating a CTRexRunner""" diff --git a/scripts/automation/regression/trex_unit_test.py b/scripts/automation/regression/trex_unit_test.py index 5d29ff31..b9f0c05c 100755 --- a/scripts/automation/regression/trex_unit_test.py +++ 
b/scripts/automation/regression/trex_unit_test.py @@ -39,6 +39,7 @@ from trex import CTRexScenario from trex_stf_lib.trex_client import * from trex_stf_lib.trex_exceptions import * from trex_stl_lib.api import * +from trex_stl_lib.utils.GAObjClass import GAmanager import trex import socket from pprint import pprint @@ -117,9 +118,12 @@ class CTRexTestConfiguringPlugin(Plugin): parser.add_option('--log-path', '--log_path', action='store', dest='log_path', help='Specify path for the tests` log to be saved at. Once applied, logs capturing by nose will be disabled.') # Default is CURRENT/WORKING/PATH/trex_log/trex_log.log') - parser.add_option('--verbose-mode', '--verbose_mode', action="store_true", default = False, - dest="verbose_mode", - help="Print RPC command and router commands.") + parser.add_option('--json-verbose', '--json_verbose', action="store_true", default = False, + dest="json_verbose", + help="Print JSON-RPC commands.") + parser.add_option('--telnet-verbose', '--telnet_verbose', action="store_true", default = False, + dest="telnet_verbose", + help="Print telnet commands and responces.") parser.add_option('--server-logs', '--server_logs', action="store_true", default = False, dest="server_logs", help="Print server side (TRex and trex_daemon) logs per test.") @@ -150,17 +154,21 @@ class CTRexTestConfiguringPlugin(Plugin): parser.add_option('--test-client-package', '--test_client_package', action="store_true", default = False, dest="test_client_package", help="Includes tests of client package.") + parser.add_option('--long', action="store_true", default = False, + dest="long", + help="Flag of long tests (stability).") def configure(self, options, conf): self.collect_only = options.collect_only if self.collect_only: return - self.functional = options.functional - self.stateless = options.stateless - self.stateful = options.stateful - self.pkg = options.pkg - self.no_ssh = options.no_ssh - self.verbose_mode = options.verbose_mode + self.functional = 
options.functional + self.stateless = options.stateless + self.stateful = options.stateful + self.pkg = options.pkg + self.no_ssh = options.no_ssh + self.json_verbose = options.json_verbose + self.telnet_verbose = options.telnet_verbose if self.functional and (not self.pkg or self.no_ssh): return if CTRexScenario.setup_dir and options.config_path: @@ -215,7 +223,7 @@ class CTRexTestConfiguringPlugin(Plugin): if self.stateful: if not self.no_ssh: trex_remote_command(self.configuration.trex, STATEFUL_RUN_COMMAND) - CTRexScenario.trex = CTRexClient(trex_host = self.configuration.trex['trex_name'], verbose = self.verbose_mode) + CTRexScenario.trex = CTRexClient(trex_host = self.configuration.trex['trex_name'], verbose = self.json_verbose) elif self.stateless: if not self.no_ssh: cores = self.configuration.trex.get('trex_cores', 1) @@ -224,11 +232,11 @@ class CTRexTestConfiguringPlugin(Plugin): trex_remote_command(self.configuration.trex, './t-rex-64 -i -c %s' % cores, background = True) CTRexScenario.stl_trex = STLClient(username = 'TRexRegression', server = self.configuration.trex['trex_name'], - verbose_level = self.verbose_mode) + verbose_level = self.json_verbose) if 'loopback' not in self.modes: CTRexScenario.router_cfg = dict(config_dict = self.configuration.router, forceImageReload = self.load_image, - silent_mode = not self.verbose_mode, + silent_mode = not self.telnet_verbose, forceCleanConfig = self.clean_config, tftp_config_dict = self.configuration.tftp) try: @@ -297,6 +305,7 @@ if __name__ == "__main__": xml_name = 'unit_test.xml' if CTRexScenario.setup_dir: CTRexScenario.setup_name = os.path.basename(CTRexScenario.setup_dir) + CTRexScenario.GAManager = GAmanager(GoogleID='UA-75220362-4', UserID=CTRexScenario.setup_name, QueueSize=100, Timeout=5, UserPermission=1, BlockingMode=1, appName='TRex', appVer='1.11.232') #timeout in seconds xml_name = 'report_%s.xml' % CTRexScenario.setup_name xml_arg= '--xunit-file=%s/%s' % (CTRexScenario.report_dir, xml_name) 
set_report_dir(CTRexScenario.report_dir) diff --git a/scripts/automation/trex_control_plane/doc_stl/api/client_code.rst b/scripts/automation/trex_control_plane/doc_stl/api/client_code.rst index 4ae2b9fd..d3e48dab 100755 --- a/scripts/automation/trex_control_plane/doc_stl/api/client_code.rst +++ b/scripts/automation/trex_control_plane/doc_stl/api/client_code.rst @@ -82,6 +82,11 @@ STLClient snippet # block until done c.wait_on_traffic(ports = [0, 1]) + # check for any warnings + if c.get_warnings(): + # handle warnings here + pass + finally: c.disconnect() diff --git a/scripts/automation/trex_control_plane/server/trex_server.py b/scripts/automation/trex_control_plane/server/trex_server.py index 3f8bc374..e32fc9d1 100755 --- a/scripts/automation/trex_control_plane/server/trex_server.py +++ b/scripts/automation/trex_control_plane/server/trex_server.py @@ -84,10 +84,10 @@ class CTRexServer(object): def push_file (self, filename, bin_data): logger.info("Processing push_file() command.") try: - filepath = os.path.abspath(os.path.join(self.trex_files_path, filename)) + filepath = os.path.join(self.trex_files_path, os.path.basename(filename)) with open(filepath, 'wb') as f: f.write(binascii.a2b_base64(bin_data)) - logger.info("push_file() command finished. `{name}` was saved at {fpath}".format( name = filename, fpath = self.trex_files_path)) + logger.info("push_file() command finished. File is saved as %s" % filepath) return True except IOError as inst: logger.error("push_file method failed. 
" + str(inst)) @@ -125,28 +125,32 @@ class CTRexServer(object): # set further functionality and peripherals to server instance try: self.server.register_function(self.add) - self.server.register_function(self.get_trex_log) - self.server.register_function(self.get_trex_daemon_log) - self.server.register_function(self.get_trex_version) + self.server.register_function(self.cancel_reservation) self.server.register_function(self.connectivity_check) - self.server.register_function(self.start_trex) - self.server.register_function(self.stop_trex) - self.server.register_function(self.wait_until_kickoff_finish) - self.server.register_function(self.get_running_status) - self.server.register_function(self.is_running) + self.server.register_function(self.force_trex_kill) + self.server.register_function(self.get_file) + self.server.register_function(self.get_files_list) + self.server.register_function(self.get_files_path) self.server.register_function(self.get_running_info) + self.server.register_function(self.get_running_status) + self.server.register_function(self.get_trex_daemon_log) + self.server.register_function(self.get_trex_log) + self.server.register_function(self.get_trex_version) self.server.register_function(self.is_reserved) - self.server.register_function(self.get_files_path) + self.server.register_function(self.is_running) self.server.register_function(self.push_file) self.server.register_function(self.reserve_trex) - self.server.register_function(self.cancel_reservation) - self.server.register_function(self.force_trex_kill) + self.server.register_function(self.start_trex) + self.server.register_function(self.stop_trex) + self.server.register_function(self.wait_until_kickoff_finish) signal.signal(signal.SIGTSTP, self.stop_handler) signal.signal(signal.SIGTERM, self.stop_handler) self.zmq_monitor.start() self.server.serve_forever() except KeyboardInterrupt: logger.info("Daemon shutdown request detected." 
) + except Exception as e: + logger.error(e) finally: self.zmq_monitor.join() # close ZMQ monitor thread resources self.server.shutdown() @@ -160,8 +164,40 @@ class CTRexServer(object): file_content = f.read() return binascii.b2a_base64(file_content) except Exception as e: - err_str = "Can't get requested file: {0}, possibly due to TRex that did not run".format(filepath) - logger.error('{0}, error: {1}'.format(err_str, e)) + err_str = "Can't get requested file %s: %s" % (filepath, e) + logger.error(err_str) + return Fault(-33, err_str) + + # returns True if given path is under TRex package or under /tmp/trex_files + def _check_path_under_TRex_or_temp(self, path): + if not os.path.relpath(path, self.trex_files_path).startswith(os.pardir): + return True + if not os.path.relpath(path, self.TREX_PATH).startswith(os.pardir): + return True + return False + + # gets the file content encoded base64 either from /tmp/trex_files or TRex server dir + def get_file(self, filepath): + try: + logger.info("Processing get_file() command.") + if not self._check_path_under_TRex_or_temp(filepath): + raise Exception('Given path should be under current TRex package or /tmp/trex_files') + return self._pull_file(filepath) + except Exception as e: + err_str = "Can't get requested file %s: %s" % (filepath, e) + logger.error(err_str) + return Fault(-33, err_str) + + # get tuple (dirs, files) with directories and files lists from given path (limited under TRex package or /tmp/trex_files) + def get_files_list(self, path): + try: + logger.info("Processing get_files_list() command, given path: %s" % path) + if not self._check_path_under_TRex_or_temp(path): + raise Exception('Given path should be under current TRex package or /tmp/trex_files') + return os.walk(path).next()[1:3] + except Exception as e: + err_str = "Error processing get_files_list(): %s" % e + logger.error(err_str) return Fault(-33, err_str) # get Trex log /tmp/trex.txt diff --git 
a/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py index 4fd1e4c7..fd409b16 100755 --- a/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py +++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py @@ -725,7 +725,59 @@ class CTRexClient(object): raise finally: self.prompt_verbose_data() - + + def get_files_list (self, path): + """ + Gets a list of dirs and files either from /tmp/trex_files or path relative to TRex server. + + :parameters: + path : str + a path to directory to read. + + :return: + Tuple: list of dirs and list of files in given path + + :raises: + + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex is reserved for another user than the one trying to cancel the reservation. + + ProtocolError, in case of error in JSON-RPC protocol. + + """ + + try: + return self.server.get_files_list(path) + except AppError as err: + self._handle_AppError_exception(err.args[0]) + except ProtocolError: + raise + finally: + self.prompt_verbose_data() + + def get_file(self, filepath): + """ + Gets content of file as bytes string from /tmp/trex_files or TRex server directory. + + :parameters: + filepath : str + a path to a file at server. + it can be either relative to TRex server or absolute path starting with /tmp/trex_files + + :return: + Content of the file + + :raises: + + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex is reserved for another user than the one trying to cancel the reservation. + + ProtocolError, in case of error in JSON-RPC protocol. + """ + + try: + return binascii.a2b_base64(self.server.get_file(filepath)) + except AppError as err: + self._handle_AppError_exception(err.args[0]) + except ProtocolError: + raise + finally: + self.prompt_verbose_data() + def push_files (self, filepaths): """ Pushes a file (or a list of files) to store locally on server. 
@@ -761,7 +813,7 @@ class CTRexClient(object): filename = os.path.basename(filepath) with open(filepath, 'rb') as f: file_content = f.read() - self.server.push_file(filename, binascii.b2a_base64(file_content)) + self.server.push_file(filename, binascii.b2a_base64(file_content).decode()) finally: self.prompt_verbose_data() return True @@ -1042,7 +1094,7 @@ class CTRexResult(object): if not self.is_valid_hist(): return None else: - return CTRexResult.__get_value_by_path(self._history[len(self._history)-1], tree_path_to_key, regex) + return CTRexResult.__get_value_by_path(self._history[-1], tree_path_to_key, regex) def get_value_list (self, tree_path_to_key, regex = None, filter_none = True): """ @@ -1093,11 +1145,23 @@ class CTRexResult(object): + an empty dictionary if history is empty. """ - history_size = len(self._history) - if history_size != 0: - return self._history[len(self._history) - 1] - else: - return {} + if len(self._history): + return self._history[-1] + return {} + + def get_ports_count(self): + """ + Returns number of ports based on TRex result + + :return: + + number of ports in TRex result + + -1 if history is empty. 
+ """ + + if not len(self._history): + return -1 + return len(self.__get_value_by_path(self._history[-1], 'trex-global.data', 'opackets-\d+')) + def update_result_data (self, latest_dump): """ @@ -1225,26 +1289,25 @@ class CTRexResult(object): def __get_filtered_max_latency (src_dict, filtered_latency_amount = 0.001): result = {} for port, data in src_dict.items(): - if port.startswith('port-'): - max_port = 'max-%s' % port[5:] - res = data['hist'] - if not len(res['histogram']): - result[max_port] = 0 - continue - hist_last_keys = deque([res['histogram'][-1]['key']], maxlen = 2) - sum_high = 0.0 - for elem in reversed(res['histogram']): - sum_high += elem['val'] - hist_last_keys.append(elem['key']) - if sum_high / res['cnt'] >= filtered_latency_amount: - break - result[max_port] = (hist_last_keys[0] + hist_last_keys[-1]) / 2 - else: - return {} + if not port.startswith('port-'): + continue + max_port = 'max-%s' % port[5:] + res = data['hist'] + if not len(res['histogram']): + result[max_port] = 0 + continue + result[max_port] = 5 # if sum below will not get to filtered amount, use this value + sum_high = 0.0 + for elem in reversed(res['histogram']): + sum_high += elem['val'] + if sum_high >= filtered_latency_amount * res['cnt']: + result[max_port] = elem['key'] + int('5' + repr(elem['key'])[2:]) + break return result + if __name__ == "__main__": pass diff --git a/scripts/automation/trex_control_plane/stl/console/trex_console.py b/scripts/automation/trex_control_plane/stl/console/trex_console.py index 2b53b7ec..f8161dcb 100755 --- a/scripts/automation/trex_control_plane/stl/console/trex_console.py +++ b/scripts/automation/trex_control_plane/stl/console/trex_console.py @@ -289,7 +289,7 @@ class TRexConsole(TRexGeneralCmd): @verify_connected def do_ping (self, line): '''Ping the server\n''' - self.stateless_client.ping() + self.stateless_client.ping_line(line) # set verbose on / off @@ -421,6 +421,9 @@ class TRexConsole(TRexGeneralCmd): '''Release ports\n''' 
self.stateless_client.release_line(line) + def do_reacquire (self, line): + '''reacquire all the ports under your logged user name''' + self.stateless_client.reacquire_line(line) def help_acquire (self): self.do_acquire("-h") @@ -428,6 +431,9 @@ class TRexConsole(TRexGeneralCmd): def help_release (self): self.do_release("-h") + def help_reacquire (self): + self.do_reacquire("-h") + ############### start def complete_start(self, text, line, begidx, endidx): @@ -571,7 +577,7 @@ class TRexConsole(TRexGeneralCmd): info = self.stateless_client.get_connection_info() exe = './trex-console --top -t -q -s {0} -p {1} --async_port {2}'.format(info['server'], info['sync_port'], info['async_port']) - cmd = ['/usr/bin/xterm', '-geometry', '111x48', '-sl', '0', '-title', 'trex_tui', '-e', exe] + cmd = ['/usr/bin/xterm', '-geometry', '111x49', '-sl', '0', '-title', 'trex_tui', '-e', exe] # detach child self.terminal = subprocess.Popen(cmd, preexec_fn = os.setpgrp) @@ -774,7 +780,29 @@ def setParserOptions(): return parser - +# a simple info printed on log on +def show_intro (logger, c): + x = c.get_server_system_info() + ver = c.get_server_version().get('version', 'N/A') + + # find out which NICs the server has + port_types = {} + for port in x['ports']: + key = (port['speed'], port['driver']) + if not key in port_types: + port_types[key] = 0 + port_types[key] += 1 + + port_line = '' + for k, v in port_types.items(): + port_line += "{0} x {1}Gbps @ {2}".format(v, k[0], k[1]) + + logger.log(format_text("\nServer Info:\n", 'underline')) + logger.log("Server version: {:>}".format(format_text(ver, 'bold'))) + logger.log("Server CPU: {:>}".format(format_text("{:>} x {:>}".format(x.get('dp_core_count'), x.get('core_type')), 'bold'))) + logger.log("Ports count: {:>}".format(format_text(port_line, 'bold'))) + + def main(): parser = setParserOptions() options = parser.parse_args() @@ -824,6 +852,9 @@ def main(): if options.readonly: logger.log(format_text("\nRead only mode - only few 
commands will be available", 'bold')) + show_intro(logger, stateless_client) + + # a script mode if options.batch: cont = run_script_file(options.batch[0], stateless_client) diff --git a/scripts/automation/trex_control_plane/stl/console/trex_tui.py b/scripts/automation/trex_control_plane/stl/console/trex_tui.py index effcf55e..0c3ea8d6 100644 --- a/scripts/automation/trex_control_plane/stl/console/trex_tui.py +++ b/scripts/automation/trex_control_plane/stl/console/trex_tui.py @@ -13,6 +13,7 @@ else: from trex_stl_lib.utils.text_opts import * from trex_stl_lib.utils import text_tables from trex_stl_lib import trex_stl_stats +from trex_stl_lib.utils.filters import ToggleFilter # for STL exceptions from trex_stl_lib.api import * @@ -62,31 +63,37 @@ class TrexTUIDashBoard(TrexTUIPanel): def __init__ (self, mng): super(TrexTUIDashBoard, self).__init__(mng, "dashboard") + self.ports = self.stateless_client.get_all_ports() + self.key_actions = OrderedDict() self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True} - self.key_actions['p'] = {'action': self.action_pause, 'legend': 'pause', 'show': True} - self.key_actions['r'] = {'action': self.action_resume, 'legend': 'resume', 'show': True} - self.key_actions['+'] = {'action': self.action_raise, 'legend': 'up 5%', 'show': True} - self.key_actions['-'] = {'action': self.action_lower, 'legend': 'low 5%', 'show': True} + self.key_actions['p'] = {'action': self.action_pause, 'legend': 'pause', 'show': True, 'color': 'red'} + self.key_actions['r'] = {'action': self.action_resume, 'legend': 'resume', 'show': True, 'color': 'blue'} self.key_actions['o'] = {'action': self.action_show_owned, 'legend': 'owned ports', 'show': True} + self.key_actions['n'] = {'action': self.action_reset_view, 'legend': 'reset view', 'show': True} self.key_actions['a'] = {'action': self.action_show_all, 'legend': 'all ports', 'show': True} - self.ports_filter = self.FILTER_ALL + # register all the ports to the toggle 
action + for port_id in self.ports: + self.key_actions[str(port_id)] = {'action': self.action_toggle_port(port_id), 'legend': 'port {0}'.format(port_id), 'show': False} + + + self.toggle_filter = ToggleFilter(self.ports) + if self.stateless_client.get_acquired_ports(): + self.action_show_owned() + else: + self.action_show_all() - def get_ports (self): - if self.ports_filter == self.FILTER_ACQUIRED: - return self.stateless_client.get_acquired_ports() - elif self.ports_filter == self.FILTER_ALL: - return self.stateless_client.get_all_ports() + def get_showed_ports (self): + return self.toggle_filter.filter_items() - assert(0) def show (self): - stats = self.stateless_client._get_formatted_stats(self.get_ports()) + stats = self.stateless_client._get_formatted_stats(self.get_showed_ports()) # print stats to screen for stat_type, stat_data in stats.items(): text_tables.print_table_with_header(stat_data.text_table, stat_type) @@ -95,29 +102,37 @@ class TrexTUIDashBoard(TrexTUIPanel): def get_key_actions (self): allowed = OrderedDict() - allowed['c'] = self.key_actions['c'] + + allowed['n'] = self.key_actions['n'] allowed['o'] = self.key_actions['o'] allowed['a'] = self.key_actions['a'] + for i in self.ports: + allowed[str(i)] = self.key_actions[str(i)] - if self.ports_filter == self.FILTER_ALL: - return allowed - if len(self.stateless_client.get_transmitting_ports()) > 0: - allowed['p'] = self.key_actions['p'] - allowed['+'] = self.key_actions['+'] - allowed['-'] = self.key_actions['-'] + if self.get_showed_ports(): + allowed['c'] = self.key_actions['c'] + # if not all ports are acquired - no operations + if not (set(self.get_showed_ports()) <= set(self.stateless_client.get_acquired_ports())): + return allowed - if len(self.stateless_client.get_paused_ports()) > 0: + # if any/some ports can be resumed + if set(self.get_showed_ports()) & set(self.stateless_client.get_paused_ports()): allowed['r'] = self.key_actions['r'] + # if any/some ports are transmitting - support 
those actions + if set(self.get_showed_ports()) & set(self.stateless_client.get_transmitting_ports()): + allowed['p'] = self.key_actions['p'] + + return allowed ######### actions def action_pause (self): try: - rc = self.stateless_client.pause(ports = self.mng.ports) + rc = self.stateless_client.pause(ports = self.get_showed_ports()) except STLError: pass @@ -127,142 +142,38 @@ class TrexTUIDashBoard(TrexTUIPanel): def action_resume (self): try: - self.stateless_client.resume(ports = self.mng.ports) - except STLError: - pass - - return "" - - - def action_raise (self): - try: - self.stateless_client.update(mult = "5%+", ports = self.mng.ports) + self.stateless_client.resume(ports = self.get_showed_ports()) except STLError: pass return "" - def action_lower (self): - try: - self.stateless_client.update(mult = "5%-", ports = self.mng.ports) - except STLError: - pass - + def action_reset_view (self): + self.toggle_filter.reset() return "" - def action_show_owned (self): - self.ports_filter = self.FILTER_ACQUIRED + self.toggle_filter.reset() + self.toggle_filter.toggle_items(*self.stateless_client.get_acquired_ports()) return "" def action_show_all (self): - self.ports_filter = self.FILTER_ALL + self.toggle_filter.reset() + self.toggle_filter.toggle_items(*self.stateless_client.get_all_ports()) return "" def action_clear (self): - self.stateless_client.clear_stats(self.mng.ports) + self.stateless_client.clear_stats(self.toggle_filter.filter_items()) return "cleared all stats" -# port panel -class TrexTUIPort(TrexTUIPanel): - def __init__ (self, mng, port_id): - super(TrexTUIPort, self).__init__(mng, "port {0}".format(port_id)) - - self.port_id = port_id - self.port = self.mng.stateless_client.get_port(port_id) - - self.key_actions = OrderedDict() - - self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True} - self.key_actions['p'] = {'action': self.action_pause, 'legend': 'pause', 'show': True} - self.key_actions['r'] = {'action': 
self.action_resume, 'legend': 'resume', 'show': True} - self.key_actions['+'] = {'action': self.action_raise, 'legend': 'up 5%', 'show': True} - self.key_actions['-'] = {'action': self.action_lower, 'legend': 'low 5%', 'show': True} - self.key_actions['t'] = {'action': self.action_toggle_graph, 'legend': 'toggle graph', 'show': True} - - - def show (self): - if self.mng.tui.is_graph is False: - stats = self.stateless_client._get_formatted_stats([self.port_id]) - # print stats to screen - for stat_type, stat_data in stats.items(): - text_tables.print_table_with_header(stat_data.text_table, stat_type) - else: - stats = self.stateless_client._get_formatted_stats([self.port_id], stats_mask = trex_stl_stats.GRAPH_PORT_COMPACT) - for stat_type, stat_data in stats.items(): - text_tables.print_table_with_header(stat_data.text_table, stat_type) - - def get_key_actions (self): - - allowed = OrderedDict() - - allowed['c'] = self.key_actions['c'] - allowed['t'] = self.key_actions['t'] - - if self.stateless_client.is_all_ports_acquired(): - return allowed - - if self.port.state == self.port.STATE_TX: - allowed['p'] = self.key_actions['p'] - allowed['+'] = self.key_actions['+'] - allowed['-'] = self.key_actions['-'] - - elif self.port.state == self.port.STATE_PAUSE: - allowed['r'] = self.key_actions['r'] - - - return allowed - - def action_toggle_graph(self): - try: - self.mng.tui.is_graph = not self.mng.tui.is_graph - except Exception: - pass - - return "" - - def action_pause (self): - try: - self.stateless_client.pause(ports = [self.port_id]) - except STLError: - pass - - return "" - - def action_resume (self): - try: - self.stateless_client.resume(ports = [self.port_id]) - except STLError: - pass - - return "" - - - def action_raise (self): - mult = {'type': 'percentage', 'value': 5, 'op': 'add'} - - try: - self.stateless_client.update(mult = mult, ports = [self.port_id]) - except STLError: - pass - - return "" - - def action_lower (self): - mult = {'type': 'percentage', 
'value': 5, 'op': 'sub'} - - try: - self.stateless_client.update(mult = mult, ports = [self.port_id]) - except STLError: - pass + def action_toggle_port(self, port_id): + def action_toggle_port_x(): + self.toggle_filter.toggle_item(port_id) + return "" - return "" - - def action_clear (self): - self.stateless_client.clear_stats([self.port_id]) - return "port {0}: cleared stats".format(self.port_id) + return action_toggle_port_x @@ -330,10 +241,6 @@ class TrexTUIPanelManager(): self.key_actions['g'] = {'action': self.action_show_dash, 'legend': 'dashboard', 'show': True} self.key_actions['s'] = {'action': self.action_show_sstats, 'legend': 'streams stats', 'show': True} - for port_id in self.ports: - self.key_actions[str(port_id)] = {'action': self.action_show_port(port_id), 'legend': 'port {0}'.format(port_id), 'show': False} - self.panels['port {0}'.format(port_id)] = TrexTUIPort(self, port_id) - # start with dashboard self.main_panel = self.panels['dashboard'] @@ -346,23 +253,31 @@ class TrexTUIPanelManager(): self.dis_bar = SimpleBar('status: ', ['X', ' ']) self.show_log = False + def generate_legend (self): + self.legend = "\n{:<12}".format("browse:") for k, v in self.key_actions.items(): if v['show']: x = "'{0}' - {1}, ".format(k, v['legend']) - self.legend += "{:}".format(x) - - self.legend += "'0-{0}' - port display".format(len(self.ports) - 1) + if v.get('color'): + self.legend += "{:}".format(format_text(x, v.get('color'))) + else: + self.legend += "{:}".format(x) self.legend += "\n{:<12}".format(self.main_panel.get_name() + ":") + for k, v in self.main_panel.get_key_actions().items(): if v['show']: x = "'{0}' - {1}, ".format(k, v['legend']) - self.legend += "{:}".format(x) + + if v.get('color'): + self.legend += "{:}".format(format_text(x, v.get('color'))) + else: + self.legend += "{:}".format(x) def print_connection_status (self): @@ -430,6 +345,7 @@ class TrexTUIPanelManager(): return action_show_port_x + def action_show_sstats (self): self.main_panel = 
self.panels['sstats'] self.init(self.show_log) @@ -535,7 +451,9 @@ class TrexTUI(): sys.stdout = old_stdout self.clear_screen() + print(mystdout.getvalue()) + sys.stdout.flush() self.draw_policer = 0 diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py index aa95f037..862a9979 100755 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py @@ -264,22 +264,55 @@ class EventsHandler(object): self.__async_event_port_job_done(port_id) show_event = True - # port was stolen... + # port was acquired - maybe stolen... elif (type == 5): session_id = data['session_id'] - # false alarm, its us + port_id = int(data['port_id']) + who = data['who'] + force = data['force'] + + # if we hold the port and it was not taken by this session - show it + if port_id in self.client.get_acquired_ports() and session_id != self.client.session_id: + show_event = True + + # format the thief/us... 
if session_id == self.client.session_id: - return + user = 'you' + elif who == self.client.username: + user = 'another session of you' + else: + user = "'{0}'".format(who) - port_id = int(data['port_id']) - who = data['who'] + if force: + ev = "Port {0} was forcely taken by {1}".format(port_id, user) + else: + ev = "Port {0} was taken by {1}".format(port_id, user) - ev = "Port {0} was forcely taken by '{1}'".format(port_id, who) + # call the handler in case its not this session + if session_id != self.client.session_id: + self.__async_event_port_acquired(port_id, who) + + + # port was released + elif (type == 6): + port_id = int(data['port_id']) + who = data['who'] + session_id = data['session_id'] + + if session_id == self.client.session_id: + user = 'you' + elif who == self.client.username: + user = 'another session of you' + else: + user = "'{0}'".format(who) + + ev = "Port {0} was released by {1}".format(port_id, user) + + # call the handler in case its not this session + if session_id != self.client.session_id: + self.__async_event_port_released(port_id) - # call the handler - self.__async_event_port_forced_acquired(port_id, who) - show_event = True # server stopped elif (type == 100): @@ -317,9 +350,11 @@ class EventsHandler(object): self.client.ports[port_id].async_event_port_resumed() - def __async_event_port_forced_acquired (self, port_id, who): - self.client.ports[port_id].async_event_forced_acquired(who) + def __async_event_port_acquired (self, port_id, who): + self.client.ports[port_id].async_event_acquired(who) + def __async_event_port_released (self, port_id): + self.client.ports[port_id].async_event_released() def __async_event_server_stopped (self): self.client.connected = False @@ -462,6 +497,11 @@ class STLClient(object): self.session_id = random.getrandbits(32) self.connected = False + # API classes + self.api_vers = [ {'type': 'core', 'major': 1, 'minor':2 } + ] + self.api_h = {'core': None} + # logger self.logger = DefaultLogger() if not logger 
else logger @@ -505,10 +545,7 @@ class STLClient(object): self.flow_stats) - # API classes - self.api_vers = [ {'type': 'core', 'major': 1, 'minor':1 } - ] - self.api_h = {'core': None} + ############# private functions - used by the class itself ########### @@ -545,13 +582,13 @@ class STLClient(object): return rc # acquire ports, if port_list is none - get all - def __acquire (self, port_id_list = None, force = False): + def __acquire (self, port_id_list = None, force = False, sync_streams = True): port_id_list = self.__ports(port_id_list) rc = RC() for port_id in port_id_list: - rc.add(self.ports[port_id].acquire(force)) + rc.add(self.ports[port_id].acquire(force, sync_streams)) return rc @@ -1335,16 +1372,20 @@ class STLClient(object): @__api_check(True) - def acquire (self, ports = None, force = False): + def acquire (self, ports = None, force = False, sync_streams = True): """ Acquires ports for executing commands :parameters: ports : list Ports on which to execute the command + force : bool Force acquire the ports. 
+ sync_streams: bool + sync with the server about the configured streams + :raises: + :exc:`STLError` @@ -1359,7 +1400,7 @@ class STLClient(object): else: self.logger.pre_cmd("Acquiring ports {0}:".format(ports)) - rc = self.__acquire(ports, force) + rc = self.__acquire(ports, force, sync_streams) self.logger.post_cmd(rc) @@ -1459,7 +1500,8 @@ class STLClient(object): ports = ports if ports is not None else self.get_all_ports() ports = self._validate_port_list(ports) - self.acquire(ports, force = True) + # force take the port and ignore any streams on it + self.acquire(ports, force = True, sync_streams = False) self.stop(ports, rx_delay_ms = 0) self.remove_all_streams(ports) self.clear_stats(ports) @@ -2038,6 +2080,11 @@ class STLClient(object): return wrap + @__console + def ping_line (self, line): + '''pings the server''' + self.ping() + return True @__console def connect_line (self, line): @@ -2117,6 +2164,28 @@ class STLClient(object): @__console + def reacquire_line (self, line): + '''reacquire all the ports under your username which are not acquired by your session''' + + parser = parsing_opts.gen_parser(self, + "reacquire", + self.reacquire_line.__doc__) + + opts = parser.parse_args(line.split()) + if opts is None: + return + + # find all the on-owned ports under your name + my_unowned_ports = list_difference([k for k, v in self.ports.items() if v.get_owner() == self.username], self.get_acquired_ports()) + if not my_unowned_ports: + self.logger.log("reacquire - no unowned ports under '{0}'".format(self.username)) + return + + self.acquire(ports = my_unowned_ports, force = True) + return True + + + @__console def disconnect_line (self, line): self.disconnect() @@ -2491,15 +2560,20 @@ class STLClient(object): '''Sets port attributes ''' parser = parsing_opts.gen_parser(self, - "port", + "port_attr", self.set_port_attr_line.__doc__, parsing_opts.PORT_LIST_WITH_ALL, parsing_opts.PROMISCUOUS_SWITCH) - opts = parser.parse_args(line.split()) + opts = 
parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True) if opts is None: return + # if no attributes - fall back to printing the status + if opts.prom is None: + self.show_stats_line("--ps --port {0}".format(' '.join(str(port) for port in opts.ports))) + return + self.set_port_attr(opts.ports, opts.prom) @@ -2592,5 +2666,4 @@ class STLClient(object): if opts.clear: self.clear_events() - self.logger.log(format_text("\nEvent log was cleared\n")) - +
\ No newline at end of file diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py index bd5ba8e7..fa04b9f6 100644 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py @@ -47,7 +47,7 @@ class JsonRpcClient(object): MSG_COMPRESS_HEADER_MAGIC = 0xABE85CEA def __init__ (self, default_server, default_port, client): - self.client = client + self.client_api = client.api_h self.logger = client.logger self.connected = False @@ -104,7 +104,7 @@ class JsonRpcClient(object): # if this RPC has an API class - add it's handler if api_class: - msg["params"]["api_h"] = self.client.api_h[api_class] + msg["params"]["api_h"] = self.client_api[api_class] if encode: diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py index 6f6f50b1..e8f89b27 100644 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py @@ -76,29 +76,55 @@ class Port(object): def get_speed_bps (self): return (self.info['speed'] * 1000 * 1000 * 1000) + def get_formatted_speed (self): + return "{0} Gbps".format(self.info['speed']) + # take the port - def acquire(self, force = False): + def acquire(self, force = False, sync_streams = True): params = {"port_id": self.port_id, "user": self.user, "session_id": self.session_id, "force": force} rc = self.transmit("acquire", params) - if rc.good(): - self.handler = rc.data() - return self.ok() + if not rc: + return self.err(rc.err()) + + self.handler = rc.data() + + if sync_streams: + return self.sync_streams() else: + return self.ok() + + + # sync all the streams with the server + def sync_streams (self): + params = {"port_id": 
self.port_id} + + rc = self.transmit("get_all_streams", params) + if rc.bad(): return self.err(rc.err()) + for k, v in rc.data()['streams'].items(): + self.streams[k] = {'next_id': v['next_stream_id'], + 'pkt' : base64.b64decode(v['packet']['binary']), + 'mode' : v['mode']['type'], + 'rate' : STLStream.get_rate_from_field(v['mode']['rate'])} + return self.ok() + # release the port def release(self): params = {"port_id": self.port_id, "handler": self.handler} rc = self.transmit("release", params) - self.handler = None - + if rc.good(): + + self.handler = None + self.owner = '' + return self.ok() else: return self.err(rc.err()) @@ -151,19 +177,7 @@ class Port(object): # attributes self.attr = rc.data()['attr'] - # sync the streams - params = {"port_id": self.port_id} - - rc = self.transmit("get_all_streams", params) - if rc.bad(): - return self.err(rc.err()) - - for k, v in rc.data()['streams'].items(): - self.streams[k] = {'next_id': v['next_stream_id'], - 'pkt' : base64.b64decode(v['packet']['binary']), - 'mode' : v['mode']['type'], - 'rate' : STLStream.get_rate_from_field(v['mode']['rate'])} - + return self.ok() @@ -679,7 +693,10 @@ class Port(object): if not self.is_acquired(): self.state = self.STATE_TX - def async_event_forced_acquired (self, who): + def async_event_acquired (self, who): self.handler = None self.owner = who + def async_event_released (self): + self.owner = '' + diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py index 6b1185ef..c7513144 100644 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py @@ -201,6 +201,10 @@ class CTRexInfoGenerator(object): def _get_rational_block_char(value, range_start, interval): # in Konsole, utf-8 is sometimes printed with artifacts, return ascii for now #return 'X' if value >= range_start + 
float(interval) / 2 else ' ' + + if sys.__stdout__.encoding != 'UTF-8': + return 'X' if value >= range_start + float(interval) / 2 else ' ' + value -= range_start ratio = float(value) / interval if ratio <= 0.0625: @@ -208,7 +212,7 @@ class CTRexInfoGenerator(object): if ratio <= 0.1875: return u'\u2581' # 1/8 if ratio <= 0.3125: - return u'\u2582' # 2/4 + return u'\u2582' # 2/8 if ratio <= 0.4375: return u'\u2583' # 3/8 if ratio <= 0.5625: @@ -247,6 +251,7 @@ class CTRexInfoGenerator(object): return_stats_data = {} per_field_stats = OrderedDict([("owner", []), ("state", []), + ("speed", []), ("--", []), ("Tx bps L2", []), ("Tx bps L1", []), @@ -532,7 +537,12 @@ class CTRexStats(object): v = self.get_trend(field, use_raw) value = abs(v) - arrow = u'\u25b2' if v > 0 else u'\u25bc' + + # use arrows if utf-8 is supported + if sys.__stdout__.encoding == 'UTF-8': + arrow = u'\u25b2' if v > 0 else u'\u25bc' + else: + arrow = '' if sys.version_info < (3,0): arrow = arrow.encode('utf-8') @@ -584,6 +594,7 @@ class CGlobalStats(CTRexStats): # absolute stats['cpu_util'] = self.get("m_cpu_util") stats['rx_cpu_util'] = self.get("m_rx_cpu_util") + stats['bw_per_core'] = self.get("m_bw_per_core") stats['tx_bps'] = self.get("m_tx_bps") stats['tx_pps'] = self.get("m_tx_pps") @@ -688,12 +699,13 @@ class CPortStats(CTRexStats): else: self.__merge_dicts(self.reference_stats, x.reference_stats) - # history - if not self.history: - self.history = copy.deepcopy(x.history) - else: - for h1, h2 in zip(self.history, x.history): - self.__merge_dicts(h1, h2) + # history - should be traverse with a lock + with self.lock, x.lock: + if not self.history: + self.history = copy.deepcopy(x.history) + else: + for h1, h2 in zip(self.history, x.history): + self.__merge_dicts(h1, h2) return self @@ -758,9 +770,17 @@ class CPortStats(CTRexStats): else: state = format_text(state, 'bold') + # mark owned ports by color + if self._port_obj: + owner = self._port_obj.get_owner() + if 
self._port_obj.is_acquired(): + owner = format_text(owner, 'green') + else: + owner = '' - return {"owner": self._port_obj.get_owner() if self._port_obj else "", + return {"owner": owner, "state": "{0}".format(state), + "speed": self._port_obj.get_formatted_speed() if self._port_obj else '', "--": " ", "---": " ", diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py new file mode 100755 index 00000000..164aae7a --- /dev/null +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py @@ -0,0 +1,295 @@ +#import requests # need external lib for that
+try: # Python2
+ import Queue
+ from urllib2 import *
+except: # Python3
+ import queue as Queue
+ from urllib.request import *
+ from urllib.error import *
+import threading
+import sys
+from time import sleep
+
+"""
+GAObjClass is a class designed to send Google Analytics information.
+
+cid - unique number per user.
+command - the Event Category rubric appears on site. type: TEXT
+action - the Event Action rubric appears on site - type: TEXT
+label - the Event Label rubric - type: TEXT
+value - the event value metric - type: INTEGER
+
+QUOTAS:
+1 single payload - up to 8192Bytes
+batched:
+A maximum of 20 hits can be specified per request.
+The total size of all hit payloads cannot be greater than 16K bytes.
+No single hit payload can be greater than 8K bytes.
+"""
+
+url_single = 'http://www.google-analytics.com/collect' #sending single event
+url_batched = 'http://www.google-analytics.com/batch' #sending batched events
+url_debug = 'http://www.google-analytics.com/debug/collect' #verifying hit is valid
+url_conn = 'http://172.217.2.196' # testing internet connection to this address (google-analytics server)
+
+
+#..................................................................class GA_EVENT_ObjClass................................................................
+class GA_EVENT_ObjClass:
+ def __init__(self,cid,trackerID,command,action,label,value,appName,appVer):
+ self.cid = cid
+ self.trackerID = trackerID
+ self.command = command
+ self.action = action
+ self.label = label
+ self.value = value
+ self.appName = appName
+ self.appVer = appVer
+ self.generate_payload()
+ self.size = sys.getsizeof(self.payload)
+
+ def generate_payload(self):
+ self.payload ='v=1&t=event&tid='+str(self.trackerID)
+ self.payload+='&cid='+str(self.cid)
+ self.payload+='&ec='+str(self.command)
+ self.payload+='&ea='+str(self.action)
+ self.payload+='&el='+str(self.label)
+ self.payload+='&ev='+str(self.value)
+ self.payload+='&an='+str(self.appName)
+ self.payload+='&av='+str(self.appVer)
+
+#..................................................................class GA_EXCEPTION_ObjClass................................................................
+#ExceptionFatal - BOOLEAN
+class GA_EXCEPTION_ObjClass:
+ def __init__(self,cid,trackerID,ExceptionName,ExceptionFatal,appName,appVer):
+ self.cid = cid
+ self.trackerID = trackerID
+ self.ExceptionName = ExceptionName
+ self.ExceptionFatal = ExceptionFatal
+ self.appName = appName
+ self.appVer = appVer
+ self.generate_payload()
+
+ def generate_payload(self):
+ self.payload ='v=1&t=exception&tid='+str(self.trackerID)
+ self.payload+='&cid='+str(self.cid)
+ self.payload+='&exd='+str(self.ExceptionName)
+ self.payload+='&exf='+str(self.ExceptionFatal)
+ self.payload+='&an='+str(self.appName)
+ self.payload+='&av='+str(self.appVer)
+
+#.....................................................................class ga_Thread.................................................................
+"""
+
+Google analytics thread manager:
+
+will report and empty the queue of Google Analytics items to the GA server every Timeout seconds (parameter given on initialization)
+will perform connectivity check every timeout*10 seconds
+
+
+"""
+
+class ga_Thread (threading.Thread):
+ def __init__(self,threadID,gManager):
+ threading.Thread.__init__(self)
+ self.threadID = threadID
+ self.gManager = gManager
+
+ def run(self):
+ keepAliveCounter=0
+ #sys.stdout.write('thread started \n')
+ #sys.stdout.flush()
+ while True:
+ if (keepAliveCounter==10):
+ keepAliveCounter=0
+ if (self.gManager.internet_on()==True):
+ self.gManager.connectedToInternet=1
+ else:
+ self.gManager.connectedToInternet=0
+ sleep(self.gManager.Timeout)
+ keepAliveCounter+=1
+ if not self.gManager.GA_q.empty():
+ self.gManager.threadLock.acquire(1)
+# sys.stdout.write('lock acquired: reporting to GA \n')
+# sys.stdout.flush()
+ if (self.gManager.connectedToInternet==1):
+ self.gManager.emptyAndReportQ()
+ self.gManager.threadLock.release()
+# sys.stdout.write('finished \n')
+# sys.stdout.flush()
+
+
+
+#.....................................................................class GAmanager.................................................................
+"""
+
+Google ID - specify tracker property, example: UA-75220362-2 (when the suffix '2' specifies the analytics property profile)
+
+UserID - unique userID, this will differ between users on GA
+
+appName - a string to determine app name
+
+appVer - a string to determine app version
+
+QueueSize - the size of the queue that holds reported items. once the Queue is full:
+ on blocking mode:
+ will block program until next submission to GA server, which will make new space
+ on non-blocking mode:
+ will drop new requests
+
+Timeout - the timeout the queue uses between data transmissions. Timeout should be shorter than the time it takes to generate 20 events. MIN VALUE = 11 seconds
+
+User Permission - the user must accept data transmission; use this as a 1/0 flag, where UserPermission=1 allows data collection
+
+BlockingMode - set to 1 if you wish every Google Analytic Object will be submitted and processed, with no drops allowed.
+ this will block the running of the program until every item is processed
+
+*** Restriction - Google's restriction on the number of hits sent per session is: 1 event per second, per session. Session length is 30min ***
+"""
+class GAmanager:
+ def __init__(self,GoogleID,UserID,appName,appVer,QueueSize,Timeout,UserPermission,BlockingMode):
+ self.UserID = UserID
+ self.GoogleID = GoogleID
+ self.QueueSize = QueueSize
+ self.Timeout = Timeout
+ self.appName = appName
+ self.appVer = appVer
+ self.UserPermission = UserPermission
+ self.GA_q = Queue.Queue(QueueSize)
+ self.thread = ga_Thread(UserID,self)
+ self.threadLock = threading.Lock()
+ self.BlockingMode = BlockingMode
+ self.connectedToInternet =0
+ if (self.internet_on()==True):
+# sys.stdout.write('internet connection active \n')
+# sys.stdout.flush()
+ self.connectedToInternet=1
+ else:
+ self.connectedToInternet=0
+
+ def gaAddAction(self,Event,action,label,value):
+ self.gaAddObject(GA_EVENT_ObjClass(self.UserID,self.GoogleID,Event,action,label,value,self.appName,self.appVer))
+
+ def gaAddException(self,ExceptionName,ExceptionFatal):
+ self.gaAddObject(GA_EXCEPTION_ObjClass(self.UserID,self.GoogleID,ExceptionName,ExceptionFatal,self.appName,self.appVer))
+
+ def gaAddObject(self,Object):
+ if self.BlockingMode==1:
+ while self.GA_q.full():
+ sleep(self.Timeout)
+# sys.stdout.write('blocking mode=1 \n queue full - sleeping for timeout \n') # within Timout, the thread will empty part of the queue
+# sys.stdout.flush()
+ lockState = self.threadLock.acquire(self.BlockingMode)
+ if lockState==1:
+# sys.stdout.write('got lock, adding item \n')
+# sys.stdout.flush()
+ try:
+ self.GA_q.put_nowait(Object)
+# sys.stdout.write('got lock, item added \n')
+# sys.stdout.flush()
+ except Queue.Full:
+# sys.stdout.write('Queue full \n')
+# sys.stdout.flush()
+ pass
+ self.threadLock.release()
+
+ def emptyQueueToList(self,obj_list):
+ items=0
+ while ((not self.GA_q.empty()) and (items<20)):
+ obj_list.append(self.GA_q.get_nowait().payload)
+ items+=1
+# print items
+
+ def reportBatched(self,batched):
+ req = Request(url_batched, data=batched.encode('ascii'))
+ urlopen(req)
+ #requests.post(url_batched,data=batched)
+
+ def emptyAndReportQ(self):
+ obj_list = []
+ self.emptyQueueToList(obj_list)
+ if not len(obj_list):
+ return
+ batched = '\n'.join(obj_list)
+# print batched # - for debug
+ self.reportBatched(batched)
+
+ def printSelf(self):
+ print('remaining in queue:')
+ while not self.GA_q.empty():
+ obj = self.GA_q.get_nowait()
+ print(obj.payload)
+
+ def internet_on(self):
+ try:
+ urlopen(url_conn,timeout=10)
+ return True
+ except URLError as err: pass
+ return False
+
+ def activate(self):
+ if (self.UserPermission==1):
+ self.thread.start()
+
+
+
+#***************************************------TEST--------------**************************************
+
+if __name__ == '__main__':
+ g = GAmanager(GoogleID='UA-75220362-4',UserID="Foo",QueueSize=100,Timeout=5,UserPermission=1,BlockingMode=1,appName='TRex',appVer='1.11.232') #timeout in seconds
+#for i in range(0,35,1):
+#i = 42
+ g.gaAddAction(Event='stl',action='stl/udp_1pkt_simple.py {packet_count:1000,packet_len:9000}',label='Boo',value=20)
+ #g.gaAddAction(Event='test',action='start',label='Boo1',value=20)
+
+#g.gaAddException('MEMFAULT',1)
+#g.gaAddException('MEMFAULT',1)
+#g.gaAddException('MEMFAULT',1)
+#g.gaAddException('MEMFAULT',1)
+#g.gaAddException('MEMFAULT',1)
+#g.gaAddException('MEMFAULT',1)
+ g.emptyAndReportQ()
+# g.printSelf()
+#print g.payload
+#print g.size
+
+
+
+
+#g.activate()
+#g.gaAddAction(Event='test',action='start',label='1',value='1')
+#sys.stdout.write('element added \n')
+#sys.stdout.flush()
+#g.gaAddAction(Event='test',action='start',label='2',value='1')
+#sys.stdout.write('element added \n')
+#sys.stdout.flush()
+#g.gaAddAction(Event='test',action='start',label='3',value='1')
+#sys.stdout.write('element added \n')
+#sys.stdout.flush()
+
+#testdata = "v=1&t=event&tid=UA-75220362-4&cid=2&ec=test&ea=testing&el=testpacket&ev=2"
+#r = requests.post(url_debug,data=testdata)
+#print r
+
+#thread1 = ga_Thread(1,g)
+#thread1.start()
+#thread1.join()
+#for i in range(1,10,1):
+# sys.stdout.write('yesh %d'% (i))
+# sys.stdout.flush()
+
+
+# add timing mechanism - DONE
+# add exception mechanism - DONE
+# add version mechanism - DONE
+# ask Itay for unique ID generation per user
+
+
+
+
+
+
+
+
+
+
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/filters.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/filters.py new file mode 100644 index 00000000..714f7807 --- /dev/null +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/filters.py @@ -0,0 +1,144 @@ + +def shallow_copy(x): + return type(x)(x) + + +class ToggleFilter(object): + """ + This class provides a "sticky" filter, that works by "toggling" items of the original database on and off. + """ + def __init__(self, db_ref, show_by_default=True): + """ + Instantiate a ToggleFilter object + + :parameters: + db_ref : iterable + an iterable object (i.e. list, set etc) that would serve as the reference db of the instance. + Changes in that object will affect the output of ToggleFilter instance. + + show_by_default: bool + decide if by default all the items are "on", i.e. these items will be presented if no other + toggling occurred. + + default value : **True** + + """ + self._data = db_ref + self._toggle_db = set() + self._filter_method = filter + self.__set_initial_state(show_by_default) + + def reset (self): + """ + Toggles off all the items + """ + self._toggle_db = set() + + + def toggle_item(self, item_key): + """ + Toggle a single item in/out. + + :parameters: + item_key : + an item the by its value the filter can decide to toggle or not. + Example: int, str and so on. + + :return: + + **True** if item toggled **into** the filtered items + + **False** if item toggled **out from** the filtered items + + :raises: + + KeyError, in case if item key is not part of the toggled list and not part of the referenced db. 
+ + """ + if item_key in self._toggle_db: + self._toggle_db.remove(item_key) + return False + elif item_key in self._data: + self._toggle_db.add(item_key) + return True + else: + raise KeyError("Provided item key isn't a key of the referenced data structure.") + + def toggle_items(self, *args): + """ + Toggle multiple items in/out with a single call. Each item will be ha. + + :parameters: + args : iterable + an iterable object containing all item keys to be toggled in/out + + :return: + + **True** if all toggled items were toggled **into** the filtered items + + **False** if at least one of the items was toggled **out from** the filtered items + + :raises: + + KeyError, in case if ont of the item keys was not part of the toggled list and not part of the referenced db. + + """ + # in python 3, 'map' returns an iterator, so wrapping with 'list' call creates same effect for both python 2 and 3 + return all(list(map(self.toggle_item, args))) + + def filter_items(self): + """ + Filters the pointed database by showing only the items mapped at toggle_db set. + + :returns: + Filtered data of the original object. 
+ + """ + return self._filter_method(self.__toggle_filter, self._data) + + # private methods + + def __set_initial_state(self, show_by_default): + try: + _ = (x for x in self._data) + if isinstance(self._data, dict): + self._filter_method = ToggleFilter.dict_filter + if show_by_default: + self._toggle_db = set(self._data.keys()) + return + elif isinstance(self._data, list): + self._filter_method = ToggleFilter.list_filter + elif isinstance(self._data, set): + self._filter_method = ToggleFilter.set_filter + elif isinstance(self._data, tuple): + self._filter_method = ToggleFilter.tuple_filter + if show_by_default: + self._toggle_db = set(shallow_copy(self._data)) # assuming all relevant data with unique identifier + return + except TypeError: + raise TypeError("provided data object is not iterable") + + def __toggle_filter(self, x): + return (x in self._toggle_db) + + # static utility methods + + @staticmethod + def dict_filter(function, iterable): + assert isinstance(iterable, dict) + return {k: v + for k,v in iterable.items() + if function(k)} + + @staticmethod + def list_filter(function, iterable): + # in python 3, filter returns an iterator, so wrapping with list creates same effect for both python 2 and 3 + return list(filter(function, iterable)) + + @staticmethod + def set_filter(function, iterable): + return {x + for x in iterable + if function(x)} + + @staticmethod + def tuple_filter(function, iterable): + return tuple(filter(function, iterable)) + + +if __name__ == "__main__": + pass diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py index bc2d44f4..5c0dfb14 100644 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py @@ -124,16 +124,9 @@ def underline(text): def text_attribute(text, attribute): - if isinstance(text, str): - return 
"{start}{txt}{stop}".format(start=TEXT_CODES[attribute]['start'], - txt=text, - stop=TEXT_CODES[attribute]['end']) - elif isinstance(text, unicode): - return u"{start}{txt}{stop}".format(start=TEXT_CODES[attribute]['start'], - txt=text, - stop=TEXT_CODES[attribute]['end']) - else: - raise Exception("not a string") + return "{start}{txt}{stop}".format(start=TEXT_CODES[attribute]['start'], + txt=text, + stop=TEXT_CODES[attribute]['end']) FUNC_DICT = {'blue': blue, diff --git a/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/SimpleJSONRPCServer.py b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/SimpleJSONRPCServer.py index e9fe4e68..d55ce0fb 100644 --- a/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/SimpleJSONRPCServer.py +++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/SimpleJSONRPCServer.py @@ -397,6 +397,11 @@ class SimpleJSONRPCRequestHandler(xmlrpcserver.SimpleXMLRPCRequestHandler): The server that receives the requests must have a json_config member, containing a JSONRPClib Config instance """ + + # disable logging + def log_message(*args, **kwargs): + pass + def do_POST(self): """ Handles POST requests diff --git a/scripts/stl/hlt/hlt_udp_rand_len_9k.py b/scripts/stl/hlt/hlt_udp_rand_len_9k.py new file mode 100755 index 00000000..1966823c --- /dev/null +++ b/scripts/stl/hlt/hlt_udp_rand_len_9k.py @@ -0,0 +1,31 @@ +from trex_stl_lib.trex_stl_hltapi import STLHltStream + + +class STLS1(object): + ''' + Create Eth/IP/UDP steam with random packet size (L3 size from 50 to 9*1024) + ''' + + def get_streams (self, direction = 0, **kwargs): + min_size = 50 + max_size = 9*1024 + return [STLHltStream(length_mode = 'random', + l3_length_min = min_size, + l3_length_max = max_size, + l3_protocol = 'ipv4', + ip_src_addr = '16.0.0.1', + ip_dst_addr = '48.0.0.1', + l4_protocol = 'udp', + udp_src_port = 1025, + udp_dst_port = 12, + rate_pps = 1000, + ignore_macs = True, + ) + ] + +# dynamic load - used for trex console or simulator +def 
register(): + return STLS1() + + + diff --git a/scripts/stl/udp_1pkt_simple.py b/scripts/stl/udp_1pkt_simple.py index 0407f9c9..b84626a1 100644 --- a/scripts/stl/udp_1pkt_simple.py +++ b/scripts/stl/udp_1pkt_simple.py @@ -2,18 +2,21 @@ from trex_stl_lib.api import * class STLS1(object): - def create_stream (self): - return STLStream( - packet = - STLPktBuilder( - pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/ - UDP(dport=12,sport=1025)/(10*'x') - ), - mode = STLTXCont()) - - def get_streams (self, direction = 0, **kwargs): + def create_stream (self, packet_len, packet_count): + base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025) + base_pkt_len = len(base_pkt) + base_pkt /= 'x' * max(0, packet_len - base_pkt_len) + packets = [] + for i in range(packet_count): + packets.append(STLStream( + packet = STLPktBuilder(pkt = base_pkt), + mode = STLTXCont() + )) + return packets + + def get_streams (self, direction = 0, packet_len = 64, packet_count = 1, **kwargs): # create 1 stream - return [ self.create_stream() ] + return self.create_stream(packet_len - 4, packet_count) # dynamic load - used for trex console or simulator diff --git a/scripts/stl/udp_1pkt_tuple_gen.py b/scripts/stl/udp_1pkt_tuple_gen.py index be8620c8..4e9ab12d 100644 --- a/scripts/stl/udp_1pkt_tuple_gen.py +++ b/scripts/stl/udp_1pkt_tuple_gen.py @@ -2,16 +2,12 @@ from trex_stl_lib.api import * class STLS1(object): - def __init__ (self): - self.fsize =64; - - def create_stream (self): + def create_stream (self, packet_len): # create a base packet and pad it to size - size = self.fsize - 4; # no FCS base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025) - pad = max(0, size - len(base_pkt)) * 'x' + pad = max(0, packet_len - len(base_pkt)) * 'x' vm = STLScVmRaw( [ STLVmTupleGen ( ip_min="16.0.0.1", ip_max="16.0.0.2", port_min=1025, port_max=65535, @@ -31,9 +27,9 @@ class STLS1(object): - def get_streams (self, direction = 0, **kwargs): + def get_streams 
(self, direction = 0, packet_len = 64, **kwargs): # create 1 stream - return [ self.create_stream() ] + return [ self.create_stream(packet_len - 4) ] # dynamic load - used for trex console or simulator diff --git a/src/bp_sim.cpp b/src/bp_sim.cpp index 2c122e72..94f8a2ba 100755 --- a/src/bp_sim.cpp +++ b/src/bp_sim.cpp @@ -2258,11 +2258,11 @@ enum CCapFileFlowInfo::load_cap_file_err CCapFileFlowInfo::load_cap_file(std::st } }else{ - fprintf(stderr, "ERROR packet %d is not supported, should be IP(0x0800)/TCP/UDP format try to convert it using Wireshark !\n",cnt); + fprintf(stderr, "ERROR packet %d is not supported, should be Ethernet/IP(0x0800)/(TCP|UDP) format try to convert it using Wireshark !\n",cnt); return kPktNotSupp; } }else{ - fprintf(stderr, "ERROR packet %d is not supported, should be IP(0x0800)/TCP/UDP format try to convert it using Wireshark !\n",cnt); + fprintf(stderr, "ERROR packet %d is not supported, should be Ethernet/IP(0x0800)/(TCP|UDP) format try to convert it using Wireshark !\n",cnt); return kPktProcessFail; } } diff --git a/src/main_dpdk.cpp b/src/main_dpdk.cpp index 440cf820..6dec3dec 100644 --- a/src/main_dpdk.cpp +++ b/src/main_dpdk.cpp @@ -2317,6 +2317,7 @@ public: float m_open_flows; float m_cpu_util; float m_rx_cpu_util; + float m_bw_per_core; uint8_t m_threads; uint32_t m_num_of_ports; @@ -2335,7 +2336,10 @@ private: std::string CGlobalStats::get_field(std::string name,float &f){ char buff[200]; - snprintf(buff, sizeof(buff), "\"%s\":%.1f,",name.c_str(),f); + if(f <= -10.0 or f >= 10.0) + snprintf(buff, sizeof(buff), "\"%s\":%.1f,",name.c_str(),f); + else + snprintf(buff, sizeof(buff), "\"%s\":%.3e,",name.c_str(),f); return (std::string(buff)); } @@ -2347,7 +2351,10 @@ std::string CGlobalStats::get_field(std::string name,uint64_t &f){ std::string CGlobalStats::get_field_port(int port,std::string name,float &f){ char buff[200]; - snprintf(buff, sizeof(buff), "\"%s-%d\":%.1f,",name.c_str(),port,f); + if(f <= -10.0 or f >= 10.0) + 
snprintf(buff, sizeof(buff), "\"%s-%d\":%.1f,",name.c_str(),port,f); + else + snprintf(buff, sizeof(buff), "\"%s-%d\":%.3e,",name.c_str(),port,f); return (std::string(buff)); } @@ -2376,6 +2383,7 @@ void CGlobalStats::dump_json(std::string & json, bool baseline,uint32_t stats_ti #define GET_FIELD_PORT(p,f) get_field_port(p,std::string(#f),lp->f) json+=GET_FIELD(m_cpu_util); + json+=GET_FIELD(m_bw_per_core); json+=GET_FIELD(m_rx_cpu_util); json+=GET_FIELD(m_platform_factor); json+=GET_FIELD(m_tx_bps); @@ -2441,7 +2449,7 @@ void CGlobalStats::DumpAllPorts(FILE *fd){ - fprintf (fd," Cpu Utilization : %2.1f %% %2.1f Gb/core \n",m_cpu_util,(2*(m_tx_bps/1e9)*100.0/(m_cpu_util*m_threads))); + fprintf (fd," Cpu Utilization : %2.1f %% %2.1f Gb/core \n",m_cpu_util,m_bw_per_core); fprintf (fd," Platform_factor : %2.1f \n",m_platform_factor); fprintf (fd," Total-Tx : %s ",double_to_human_str(m_tx_bps,"bps",KBYE_1000).c_str()); if ( CGlobalInfo::is_learn_mode() ) { @@ -3554,6 +3562,10 @@ void CGlobalTRex::get_stats(CGlobalStats & stats){ stats.m_tx_pps = total_tx_pps*pf; stats.m_rx_pps = total_rx_pps*pf; stats.m_tx_cps = m_last_total_cps*pf; + if(stats.m_cpu_util < 0.0001) + stats.m_bw_per_core = 0; + else + stats.m_bw_per_core = 2*(stats.m_tx_bps/1e9)*100.0/(stats.m_cpu_util*stats.m_threads); stats.m_tx_expected_cps = m_expected_cps*pf; stats.m_tx_expected_pps = m_expected_pps*pf; diff --git a/src/publisher/trex_publisher.h b/src/publisher/trex_publisher.h index f086babb..f8843758 100644 --- a/src/publisher/trex_publisher.h +++ b/src/publisher/trex_publisher.h @@ -46,7 +46,8 @@ public: EVENT_PORT_PAUSED = 2, EVENT_PORT_RESUMED = 3, EVENT_PORT_FINISHED_TX = 4, - EVENT_PORT_FORCE_ACQUIRED = 5, + EVENT_PORT_ACQUIRED = 5, + EVENT_PORT_RELEASED = 6, EVENT_SERVER_STOPPED = 100, diff --git a/src/stateless/cp/trex_stateless.cpp b/src/stateless/cp/trex_stateless.cpp index c86c5f65..5bbe9faf 100644 --- a/src/stateless/cp/trex_stateless.cpp +++ b/src/stateless/cp/trex_stateless.cpp @@ 
-54,7 +54,7 @@ TrexStateless::TrexStateless(const TrexStatelessCfg &cfg) { m_publisher = cfg.m_publisher; /* API core version */ - m_api_classes[APIClass::API_CLASS_TYPE_CORE].init(APIClass::API_CLASS_TYPE_CORE, 1, 1); + m_api_classes[APIClass::API_CLASS_TYPE_CORE].init(APIClass::API_CLASS_TYPE_CORE, 1, 2); } /** diff --git a/src/stateless/cp/trex_stateless_port.cpp b/src/stateless/cp/trex_stateless_port.cpp index 605995ae..90142d9b 100644 --- a/src/stateless/cp/trex_stateless_port.cpp +++ b/src/stateless/cp/trex_stateless_port.cpp @@ -119,23 +119,10 @@ TrexStatelessPort::~TrexStatelessPort() { void TrexStatelessPort::acquire(const std::string &user, uint32_t session_id, bool force) { - /* if port is free - just take it */ - if (get_owner().is_free()) { - get_owner().own(user); - return; - } - - if (force) { - get_owner().own(user); - - /* inform the other client of the steal... */ - Json::Value data; + bool used_force = !get_owner().is_free() && force; - data["port_id"] = m_port_id; - data["who"] = user; - data["session_id"] = session_id; - - get_stateless_obj()->get_publisher()->publish_event(TrexPublisher::EVENT_PORT_FORCE_ACQUIRED, data); + if (get_owner().is_free() || force) { + get_owner().own(user, session_id); } else { /* not same user or session id and not force - report error */ @@ -146,11 +133,30 @@ TrexStatelessPort::acquire(const std::string &user, uint32_t session_id, bool fo } } + Json::Value data; + + data["port_id"] = m_port_id; + data["who"] = user; + data["session_id"] = session_id; + data["force"] = used_force; + + get_stateless_obj()->get_publisher()->publish_event(TrexPublisher::EVENT_PORT_ACQUIRED, data); + } void TrexStatelessPort::release(void) { + + + Json::Value data; + + data["port_id"] = m_port_id; + data["who"] = get_owner().get_name(); + data["session_id"] = get_owner().get_session_id(); + get_owner().release(); + + get_stateless_obj()->get_publisher()->publish_event(TrexPublisher::EVENT_PORT_RELEASED, data); } /** @@ -776,6 +782,7 @@ 
TrexStatelessPort::remove_and_delete_all_streams() { TrexPortOwner::TrexPortOwner() { m_is_free = true; + m_session_id = 0; /* for handlers random generation */ m_seed = time(NULL); diff --git a/src/stateless/cp/trex_stateless_port.h b/src/stateless/cp/trex_stateless_port.h index 2167e735..520940d8 100644 --- a/src/stateless/cp/trex_stateless_port.h +++ b/src/stateless/cp/trex_stateless_port.h @@ -54,16 +54,18 @@ public: m_is_free = true; m_owner_name = ""; m_handler = ""; + m_session_id = 0; } bool is_owned_by(const std::string &user) { return ( !m_is_free && (m_owner_name == user) ); } - void own(const std::string &owner_name) { + void own(const std::string &owner_name, uint32_t session_id) { /* save user data */ m_owner_name = owner_name; + m_session_id = session_id; /* internal data */ m_handler = utl_generate_random_str(m_seed, 8); @@ -82,6 +84,9 @@ public: return (!m_is_free ? m_handler : g_unowned_handler); } + const uint32_t get_session_id() { + return m_session_id; + } private: @@ -91,6 +96,9 @@ private: /* user provided info */ std::string m_owner_name; + /* which session of the user holds this port*/ + uint32_t m_session_id; + /* handler genereated internally */ std::string m_handler; |