commit    904eacd9be1230efb7ae0ab7997ec131b588ec8a (patch)
author    2017-01-22 16:20:45 +0200
committer 2017-01-22 16:20:45 +0200
tree      8e4bcd1b1a5f683efdb8f3eeb962acefc3201961 /scripts/automation/regression
parent    d2f1c8451e2e8ffc47b208f68f9b16697d706d60 (diff)
parent    b81cdb6c2d6d118c1c346e7c8dae6a5e747d867d (diff)

Merge branch 'master' into capture

Signed-off-by: imarom <imarom@cisco.com>

Conflicts:
	scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
	scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
	scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py
	src/main_dpdk.cpp

Diffstat (limited to 'scripts/automation/regression'):
 24 files changed, 1322 insertions(+), 237 deletions(-)
diff --git a/scripts/automation/regression/CPlatform.py b/scripts/automation/regression/CPlatform.py
index 606235a6..332f700a 100755
--- a/scripts/automation/regression/CPlatform.py
+++ b/scripts/automation/regression/CPlatform.py
@@ -19,45 +19,82 @@ class CPlatform(object):
         self.needed_image_path = None
         self.tftp_cfg       = None
         self.config_history = { 'basic_if_config' : False, 'tftp_server_config' : False }
+        self.client_vlan = "100"
+        self.server_vlan = "200"
 
-    def configure_basic_interfaces(self, mtu = 9050):
+    def configure_basic_interfaces(self, mtu = 9050, vlan=False):
 
         cache = CCommandCache()
         for dual_if in self.if_mngr.get_dual_if_list():
             client_if_command_set   = []
             server_if_command_set   = []
+            client_if_command_set_vlan   = []
+            server_if_command_set_vlan   = []
+
+            client_if_name = dual_if.client_if.get_name()
+            server_if_name = dual_if.server_if.get_name()
+
+            if vlan:
+                client_if_name_vlan = client_if_name + "." + self.client_vlan
+                server_if_name_vlan = server_if_name + "." + self.server_vlan
+                client_if_command_set_vlan.append('encapsulation dot1Q {vlan}'.format(vlan = self.client_vlan));
+                server_if_command_set_vlan.append('encapsulation dot1Q {vlan}'.format(vlan = self.server_vlan));
 
             client_if_command_set.append ('mac-address {mac}'.format( mac = dual_if.client_if.get_src_mac_addr()) )
             client_if_command_set.append ('mtu %s' % mtu)
-            client_if_command_set.append ('ip address {ip} 255.255.255.0'.format( ip = dual_if.client_if.get_ipv4_addr() ))
-            client_if_command_set.append ('ipv6 address {ip}/64'.format( ip = dual_if.client_if.get_ipv6_addr() ))
-            cache.add('IF', client_if_command_set, dual_if.client_if.get_name())
+            client_ip_command = 'ip address {ip} 255.255.255.0'.format( ip = dual_if.client_if.get_ipv4_addr() )
+            client_ipv6_command = 'ipv6 address {ip}/64'.format( ip = dual_if.client_if.get_ipv6_addr() )
+            if vlan:
+                client_if_command_set_vlan.append (client_ip_command)
+                client_if_command_set_vlan.append (client_ipv6_command)
+            else:
+                client_if_command_set.append (client_ip_command)
+                client_if_command_set.append (client_ipv6_command)
+
+            cache.add('IF', client_if_command_set, client_if_name)
+            if vlan:
+                cache.add('IF', client_if_command_set_vlan, client_if_name_vlan)
 
             server_if_command_set.append ('mac-address {mac}'.format( mac = dual_if.server_if.get_src_mac_addr()) )
             server_if_command_set.append ('mtu %s' % mtu)
-            server_if_command_set.append ('ip address {ip} 255.255.255.0'.format( ip = dual_if.server_if.get_ipv4_addr() ))
-            server_if_command_set.append ('ipv6 address {ip}/64'.format( ip = dual_if.server_if.get_ipv6_addr() ))
-            cache.add('IF', server_if_command_set, dual_if.server_if.get_name())
+            server_ip_command = 'ip address {ip} 255.255.255.0'.format( ip = dual_if.server_if.get_ipv4_addr() )
+            server_ipv6_command = 'ipv6 address {ip}/64'.format( ip = dual_if.server_if.get_ipv6_addr() )
+            if vlan:
+                server_if_command_set_vlan.append (server_ip_command)
+                server_if_command_set_vlan.append (server_ipv6_command)
+            else:
+                server_if_command_set.append (server_ip_command)
+                server_if_command_set.append (server_ipv6_command)
+
+            cache.add('IF', server_if_command_set, server_if_name)
+            if vlan:
+                cache.add('IF', server_if_command_set_vlan, server_if_name_vlan)
 
         self.cmd_link.run_single_command(cache)
         self.config_history['basic_if_config'] = True
 
-
-
-    def configure_basic_filtered_interfaces(self, intf_list, mtu = 9050):
+    def configure_basic_filtered_interfaces(self, intf_list, mtu = 9050, vlan = False):
 
         cache = CCommandCache()
         for intf in intf_list:
             if_command_set   = []
+            if_command_set_vlan = []
 
             if_command_set.append ('mac-address {mac}'.format( mac = intf.get_src_mac_addr()) )
             if_command_set.append ('mtu %s' % mtu)
-            if_command_set.append ('ip address {ip} 255.255.255.0'.format( ip = intf.get_ipv4_addr() ))
-            if_command_set.append ('ipv6 address {ip}/64'.format( ip = intf.get_ipv6_addr() ))
+            ip_commands = ['ip address {ip} 255.255.255.0'.format( ip = intf.get_ipv4_addr() ),
+                           'ipv6 address {ip}/64'.format( ip = intf.get_ipv6_addr() )]
+            if vlan:
+                if_command_set_vlan.extend(ip_commands)
+            else:
+                if_command_set.extend(ip_commands)
 
             cache.add('IF', if_command_set, intf.get_name())
+            if vlan:
+                if_name = intf.get_name() + '.' + (self.client_vlan if intf.is_client() else self.server_vlan)
+                cache.add('IF', if_command_set_vlan, if_name)
 
         self.cmd_link.run_single_command(cache)
@@ -74,8 +111,9 @@ class CPlatform(object):
                 if i < 4:
                     continue
                 raise Exception('Could not load clean config, response: %s' % res)
+            break
 
-    def config_pbr (self, mode = 'config'):
+    def config_pbr (self, mode = 'config', vlan = False):
         idx = 1
         unconfig_str = '' if mode=='config' else 'no '
@@ -93,30 +131,30 @@ class CPlatform(object):
             if dual_if.is_duplicated():
                 # define the relevant VRF name
                 pre_commit_set.add('{mode}ip vrf {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )
-
+
                 # assign VRF to interfaces, config interfaces with relevant route-map
                 client_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )
-                client_if_command_set.append ('{mode}ip policy route-map {dup}_{p1}_to_{p2}'.format(
+                client_if_command_set.append ('{mode}ip policy route-map {dup}_{p1}_to_{p2}'.format(
                     mode = unconfig_str,
-                    dup = dual_if.get_vrf_name(),
+                    dup = dual_if.get_vrf_name(),
                     p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
                 server_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )
-                server_if_command_set.append ('{mode}ip policy route-map {dup}_{p2}_to_{p1}'.format(
+                server_if_command_set.append ('{mode}ip policy route-map {dup}_{p2}_to_{p1}'.format(
                     mode = unconfig_str,
-                    dup = dual_if.get_vrf_name(),
+                    dup = dual_if.get_vrf_name(),
                     p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
 
                 # config route-map routing
                 conf_t_command_set.append('{mode}route-map {dup}_{p1}_to_{p2} permit 10'.format(
                     mode = unconfig_str,
-                    dup = dual_if.get_vrf_name(),
+                    dup = dual_if.get_vrf_name(),
                     p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
                 if mode == 'config':
                     conf_t_command_set.append('set ip next-hop {next_hop}'.format( next_hop = client_net_next_hop) )
                 conf_t_command_set.append('{mode}route-map {dup}_{p2}_to_{p1} permit 10'.format(
                     mode = unconfig_str,
-                    dup = dual_if.get_vrf_name(),
+                    dup = dual_if.get_vrf_name(),
                     p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
                 if mode == 'config':
                     conf_t_command_set.append('set ip next-hop {next_hop}'.format(
@@ -127,21 +165,21 @@ class CPlatform(object):
                 if dual_if.client_if.get_dest_mac():
                     conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format(
                         mode = unconfig_str,
-                        dup = dual_if.get_vrf_name(),
-                        next_hop = server_net_next_hop,
+                        dup = dual_if.get_vrf_name(),
+                        next_hop = server_net_next_hop,
                         dest_mac = dual_if.client_if.get_dest_mac()))
                 if dual_if.server_if.get_dest_mac():
                     conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format(
-                        mode = unconfig_str,
-                        dup = dual_if.get_vrf_name(),
-                        next_hop = client_net_next_hop,
+                        mode = unconfig_str,
+                        dup = dual_if.get_vrf_name(),
+                        next_hop = client_net_next_hop,
                         dest_mac = dual_if.server_if.get_dest_mac()))
             else:
                 # config interfaces with relevant route-map
-                client_if_command_set.append ('{mode}ip policy route-map {p1}_to_{p2}'.format(
+                client_if_command_set.append ('{mode}ip policy route-map {p1}_to_{p2}'.format(
                     mode = unconfig_str,
                     p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
-                server_if_command_set.append ('{mode}ip policy route-map {p2}_to_{p1}'.format(
+                server_if_command_set.append ('{mode}ip policy route-map {p2}_to_{p1}'.format(
                     mode = unconfig_str,
                     p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
@@ -164,17 +202,22 @@ class CPlatform(object):
                 if dual_if.client_if.get_dest_mac():
                     conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format(
                         mode = unconfig_str,
-                        next_hop = server_net_next_hop,
+                        next_hop = server_net_next_hop,
                         dest_mac = dual_if.client_if.get_dest_mac()))
                 if dual_if.server_if.get_dest_mac():
                     conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format(
                         mode = unconfig_str,
-                        next_hop = client_net_next_hop,
+                        next_hop = client_net_next_hop,
                         dest_mac = dual_if.server_if.get_dest_mac()))
 
             # assign generated config list to cache
-            cache.add('IF', server_if_command_set, dual_if.server_if.get_name())
-            cache.add('IF', client_if_command_set, dual_if.client_if.get_name())
+            client_if_name = dual_if.client_if.get_name()
+            server_if_name = dual_if.server_if.get_name()
+            if vlan:
+                client_if_name += "." + self.client_vlan
+                server_if_name += "." + self.server_vlan
+            cache.add('IF', server_if_command_set, server_if_name)
+            cache.add('IF', client_if_command_set, client_if_name)
             cache.add('CONF', conf_t_command_set)
             idx += 2
@@ -186,12 +229,12 @@ class CPlatform(object):
         # deploy the configs (order is important!)
         self.cmd_link.run_command( [pre_commit_cache, cache] )
         if self.config_history['basic_if_config']:
-            # in this case, duplicated interfaces will lose its ip address.
+            # in this case, duplicated interfaces will lose its ip address.
             # re-config IPv4 addresses
-            self.configure_basic_filtered_interfaces(self.if_mngr.get_duplicated_if() )
+            self.configure_basic_filtered_interfaces(self.if_mngr.get_duplicated_if(), vlan = vlan)
 
-    def config_no_pbr (self):
-        self.config_pbr(mode = 'unconfig')
+    def config_no_pbr (self, vlan = False):
+        self.config_pbr(mode = 'unconfig', vlan = vlan)
 
     def config_static_routing (self, stat_route_obj, mode = 'config'):
@@ -241,13 +284,13 @@ class CPlatform(object):
             conf_t_command_set.append( "{mode}ip route vrf {dup} {next_net} {dest_mask} {next_hop}".format(
                 mode = unconfig_str,
-                dup = dual_if.get_vrf_name(),
+                dup = dual_if.get_vrf_name(),
                 next_net = client_net,
                 dest_mask = stat_route_obj.client_mask,
                 next_hop = client_net_next_hop))
             conf_t_command_set.append( "{mode}ip route vrf {dup} {next_net} {dest_mask} {next_hop}".format(
                 mode = unconfig_str,
-                dup = dual_if.get_vrf_name(),
+                dup = dual_if.get_vrf_name(),
                 next_net = server_net,
                 dest_mask = stat_route_obj.server_mask,
                 next_hop = server_net_next_hop))
@@ -256,14 +299,14 @@ class CPlatform(object):
             if dual_if.client_if.get_dest_mac():
                 conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format(
                     mode = unconfig_str,
-                    dup = dual_if.get_vrf_name(),
-                    next_hop = server_net_next_hop,
+                    dup = dual_if.get_vrf_name(),
+                    next_hop = server_net_next_hop,
                     dest_mac = dual_if.client_if.get_dest_mac()))
             if dual_if.server_if.get_dest_mac():
                 conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format(
-                    mode = unconfig_str,
-                    dup = dual_if.get_vrf_name(),
-                    next_hop = client_net_next_hop,
+                    mode = unconfig_str,
+                    dup = dual_if.get_vrf_name(),
+                    next_hop = client_net_next_hop,
                     dest_mac = dual_if.server_if.get_dest_mac()))
 
         # assign generated interfaces config list to cache
@@ -286,12 +329,12 @@ class CPlatform(object):
             if dual_if.client_if.get_dest_mac():
                 conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format(
                     mode = unconfig_str,
-                    next_hop = server_net_next_hop,
+                    next_hop = server_net_next_hop,
                     dest_mac = dual_if.client_if.get_dest_mac()))
             if dual_if.server_if.get_dest_mac():
                 conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format(
                     mode = unconfig_str,
-                    next_hop = client_net_next_hop,
+                    next_hop = client_net_next_hop,
                     dest_mac = dual_if.server_if.get_dest_mac()))
 
             # bump up to the next client network address
@@ -309,9 +352,9 @@ class CPlatform(object):
         # deploy the configs (order is important!)
         self.cmd_link.run_command( [pre_commit_cache, cache] )
         if self.config_history['basic_if_config']:
-            # in this case, duplicated interfaces will lose its ip address.
+            # in this case, duplicated interfaces will lose its ip address.
             # re-config IPv4 addresses
-            self.configure_basic_filtered_interfaces(self.if_mngr.get_duplicated_if() )
+            self.configure_basic_filtered_interfaces(self.if_mngr.get_duplicated_if())
 
     def config_no_static_routing (self, stat_route_obj = None):
@@ -424,7 +467,7 @@ class CPlatform(object):
     def config_zbf (self, mode = 'config'):
         cache = CCommandCache()
         pre_commit_cache = CCommandCache()
-        conf_t_command_set = []
+        conf_t_command_set = []
 
         # toggle all duplicate interfaces down
         self.toggle_duplicated_intf(action = 'down')
@@ -460,7 +503,7 @@ class CPlatform(object):
     def config_no_zbf (self):
         cache = CCommandCache()
-        conf_t_command_set = []
+        conf_t_command_set = []
 
         # define security zones and security service policy to be applied on the interfaces
         conf_t_command_set.append('no zone-pair security in2out source z_in destination z_out')
@@ -485,7 +528,7 @@ class CPlatform(object):
 #        self.__toggle_interfaces(dup_ifs)
 
-    def config_ipv6_pbr (self, mode = 'config'):
+    def config_ipv6_pbr (self, mode = 'config', vlan=False):
         idx = 1
         unconfig_str = '' if mode=='config' else 'no '
         cache = CCommandCache()
@@ -496,7 +539,7 @@ class CPlatform(object):
         for dual_if in self.if_mngr.get_dual_if_list():
             client_if_command_set = []
             server_if_command_set = []
-
+
             client_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.server_if.get_ipv6_addr(), {'7':1}, ip_type = 'ipv6' )
             server_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.client_if.get_ipv6_addr(), {'7':1}, ip_type = 'ipv6' )
             client_net_next_hop_v4 = misc_methods.get_single_net_client_addr(dual_if.server_if.get_ipv4_addr() )
@@ -510,22 +553,22 @@ class CPlatform(object):
                 prefix = 'ipv6_' + dual_if.get_vrf_name()
             else:
                 prefix = 'ipv6'
-
+
             # config interfaces with relevant route-map
-            client_if_command_set.append ('{mode}ipv6 policy route-map {pre}_{p1}_to_{p2}'.format(
+            client_if_command_set.append ('{mode}ipv6 policy route-map {pre}_{p1}_to_{p2}'.format(
                 mode = unconfig_str,
-                pre = prefix,
+                pre = prefix,
                 p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
-            server_if_command_set.append ('{mode}ipv6 policy route-map {pre}_{p2}_to_{p1}'.format(
+            server_if_command_set.append ('{mode}ipv6 policy route-map {pre}_{p2}_to_{p1}'.format(
                 mode = unconfig_str,
-                pre = prefix,
+                pre = prefix,
                 p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
 
             # config global arp to interfaces net address and vrf
             if dual_if.client_if.get_ipv6_dest_mac():
                 conf_t_command_set.append('{mode}ipv6 neighbor {next_hop} {intf} {dest_mac}'.format(
                     mode = unconfig_str,
-                    next_hop = server_net_next_hop,
+                    next_hop = server_net_next_hop,
                     intf = dual_if.client_if.get_name(),
                     dest_mac = dual_if.client_if.get_ipv6_dest_mac()))
                 # For latency packets (which are IPv4), we need to configure also static ARP
@@ -561,17 +604,24 @@ class CPlatform(object):
             conf_t_command_set.append('exit')
 
             # assign generated config list to cache
-            cache.add('IF', server_if_command_set, dual_if.server_if.get_name())
-            cache.add('IF', client_if_command_set, dual_if.client_if.get_name())
+            client_if_name = dual_if.client_if.get_name()
+            server_if_name = dual_if.server_if.get_name()
+            if vlan:
+                client_if_name += "." + self.client_vlan
+                server_if_name += "." + self.server_vlan
+
+            cache.add('IF', server_if_command_set, server_if_name)
+            cache.add('IF', client_if_command_set, client_if_name)
+
             idx += 2
 
         cache.add('CONF', conf_t_command_set)
-
+
         # deploy the configs (order is important!)
         self.cmd_link.run_command( [cache] )
 
-    def config_no_ipv6_pbr (self):
-        self.config_ipv6_pbr(mode = 'unconfig')
+    def config_no_ipv6_pbr (self, vlan = False):
+        self.config_ipv6_pbr(mode = 'unconfig', vlan = vlan)
 
     # show methods
     def get_cpu_util (self):
@@ -679,7 +729,7 @@ class CPlatform(object):
         parsed_info = CShowParser.parse_show_image_version(response)
         self.running_image = parsed_info
         return parsed_info
-
+
     def check_image_existence (self, img_name):
         """ check_image_existence(self, img_name) -> boolean
@@ -716,7 +766,7 @@ class CPlatform(object):
 #       tmp_tftp_config = external_tftp_config if external_tftp_config is not None else self.tftp_server_config
         self.tftp_cfg = device_cfg_obj.get_tftp_info()
         cache = CCommandCache()
-
+
         command = "ip tftp source-interface {intf}".format( intf = device_cfg_obj.get_mgmt_interface() )
         cache.add('CONF', command )
         self.cmd_link.run_single_command(cache)
@@ -737,12 +787,12 @@ class CPlatform(object):
         """
         if not self.check_image_existence(img_filename):   # check if this image isn't already saved in platform
             #tmp_tftp_config = external_tftp_config if external_tftp_config is not None else self.tftp_cfg
-
+
             if self.config_history['tftp_server_config']:  # make sure a TFTP configuration has been loaded
                 cache = CCommandCache()
                 if self.running_image is None:
                     self.get_running_image_details()
-
+
                 command = "copy tftp://{tftp_ip}/{img_path}/{image} bootflash:".format(
                     tftp_ip  = self.tftp_cfg['ip_address'],
                     img_path = self.tftp_cfg['images_path'],
@@ -795,7 +845,7 @@ class CPlatform(object):
                An image file to compare router running image
 
         Compares image name to router running image, returns match result.
-
+
         """
         if self.running_image is None:
             self.get_running_image_details()
@@ -839,7 +889,7 @@ class CPlatform(object):
         i = 0
         sleep_time = 30 # seconds
 
-        try:
+        try:
             cache = CCommandCache()
 
             cache.add('EXEC', ['reload','n\r','\r'] )
@@ -861,7 +911,7 @@ class CPlatform(object):
                     raise TimeoutError('Platform failed to reload after reboot for over {minutes} minutes!'.format(minutes = round(1 + i * sleep_time / 60)))
                 else:
                     i += 1
-
+
                 time.sleep(30)
                 self.reload_connection(device_cfg_obj)
         progress_thread.join()
diff --git a/scripts/automation/regression/cfg/client_cfg_vlan.yaml b/scripts/automation/regression/cfg/client_cfg_vlan.yaml
new file mode 100644
index 00000000..db70e4e1
--- /dev/null
+++ b/scripts/automation/regression/cfg/client_cfg_vlan.yaml
@@ -0,0 +1,31 @@
+vlan: true
+#vlan: false
+
+groups:
+
+-   ip_start  : 16.0.0.1
+    ip_end    : 16.0.1.255
+    initiator :
+        next_hop: 1.1.1.1
+        src_ip  : 1.1.1.2
+        vlan    : 100
+    responder :
+        next_hop: 1.1.2.1
+        src_ip  : 1.1.2.2
+        vlan    : 200
+
+    count     : 1
+
+-   ip_start  : 17.0.0.1
+    ip_end    : 17.0.1.255
+    initiator :
+        next_hop: 1.1.3.1
+        src_ip  : 1.1.3.2
+        vlan    : 100
+    responder :
+        next_hop: 1.1.4.1
+        src_ip  : 1.1.4.2
+        vlan    : 200
+
+    count     : 1
+
diff --git a/scripts/automation/regression/cfg/client_cfg_vlan_mac.yaml b/scripts/automation/regression/cfg/client_cfg_vlan_mac.yaml
new file mode 100644
index 00000000..d6d24dbb
--- /dev/null
+++ b/scripts/automation/regression/cfg/client_cfg_vlan_mac.yaml
@@ -0,0 +1,26 @@
+vlan: true
+
+groups:
+
+-   ip_start  : 16.0.0.1
+    ip_end    : 16.0.0.4
+    initiator :
+        dst_mac : "00:00:00:01:00:00"
+        vlan    : 100
+    responder :
+        dst_mac : "00:00:00:02:00:00"
+        vlan    : 200
+
+    count     : 1
+
+-   ip_start  : 16.0.0.5
+    ip_end    : 16.0.1.255
+    initiator :
+        dst_mac : "00:00:00:03:00:00"
+        vlan    : 300
+    responder :
+        dst_mac : "00:00:00:04:00:00"
+        vlan    : 400
+
+    count     : 1
+
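For reference, the sub-interface pattern the vlan=True path builds above (name the sub-interface "<if>.<vlan>", put 'encapsulation dot1Q' plus the IPv4/IPv6 addresses on it) can be sketched standalone. This is a minimal illustration with hypothetical names, not the real CPlatform code:

# Minimal sketch of the VLAN sub-interface command pattern used above.
# Names are illustrative; the real code builds these lists per dual-interface
# pair and pushes them to the router through CCommandCache.
def build_vlan_if_commands(if_name, vlan, ip, ipv6):
    sub_if = '%s.%s' % (if_name, vlan)          # e.g. GigabitEthernet0/1.100
    commands = [
        'encapsulation dot1Q %s' % vlan,        # tag the sub-interface
        'ip address %s 255.255.255.0' % ip,     # IPv4 moves onto the sub-interface
        'ipv6 address %s/64' % ipv6,            # so does IPv6
    ]
    return sub_if, commands

if __name__ == '__main__':
    print(build_vlan_if_commands('GigabitEthernet0/1', '100', '1.1.1.2', '2001:db8::2'))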
diff --git a/scripts/automation/regression/functional_tests/cpp_gtests_test.py b/scripts/automation/regression/functional_tests/cpp_gtests_test.py
index 6535da84..a60b715a 100644
--- a/scripts/automation/regression/functional_tests/cpp_gtests_test.py
+++ b/scripts/automation/regression/functional_tests/cpp_gtests_test.py
@@ -4,6 +4,7 @@ import functional_general_test
 from trex import CTRexScenario
 import os, sys
 from subprocess import Popen, STDOUT
+from stl_basic_tests import compare_caps
 import shlex
 import time
 import errno
@@ -44,3 +45,15 @@ class CPP_Test(functional_general_test.CGeneralFunctional_Test):
         print('Output:\n%s' % out)
         if ret:
             raise Exception('Non zero return status of Valgrind gtests (%s)' % ret)
+
+    def test_bp_sim_client_cfg(self):
+        print('')
+        cmd = './bp-sim-64 --pcap -f cap2/dns.yaml --client_cfg automation/regression/cfg/client_cfg_vlan_mac.yaml -o generated/bp_sim_dns_vlans_gen.pcap'
+        ret, out = run_command(os.path.join(CTRexScenario.scripts_path, cmd), cwd = CTRexScenario.scripts_path)
+        print('Output:\n%s' % out)
+        if ret:
+            raise Exception('Non zero return status of Valgrind gtests (%s)' % ret)
+
+        compare_caps(output = os.path.join(CTRexScenario.scripts_path, 'generated/bp_sim_dns_vlans_gen.pcap'),
+                     golden = 'functional_tests/golden/bp_sim_dns_vlans.pcap')
+
diff --git a/scripts/automation/regression/functional_tests/golden/bp_sim_dns_vlans.pcap b/scripts/automation/regression/functional_tests/golden/bp_sim_dns_vlans.pcap
new file mode 100644
index 00000000..3dd4890c
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/golden/bp_sim_dns_vlans.pcap
Binary files differ
diff --git a/scripts/automation/regression/functional_tests/stl_basic_tests.py b/scripts/automation/regression/functional_tests/stl_basic_tests.py
index bc5bc4d5..aecbf6d1 100644
--- a/scripts/automation/regression/functional_tests/stl_basic_tests.py
+++ b/scripts/automation/regression/functional_tests/stl_basic_tests.py
@@ -25,6 +25,71 @@ import shlex
 from threading import Thread
 from collections import defaultdict
 
+
+def scapy_pkt_show_to_str (scapy_pkt):
+    capture = StringIO()
+    save_stdout = sys.stdout
+    sys.stdout = capture
+    scapy_pkt.show()
+    sys.stdout = save_stdout
+    return capture.getvalue()
+
+
+def compare_caps (output, golden, max_diff_sec = 0.000005):
+    pkts1 = []
+    pkts2 = []
+    pkts_ts_buckets = defaultdict(list)
+
+    for pkt in RawPcapReader(output):
+        ts = pkt[1][0] * 1e6 + pkt[1][1]
+        pkts_ts_buckets[ts].append(pkt)
+    # don't take last ts bucket, it can be cut in middle and packets inside bucket might be different
+    #for ts in sorted(pkts_ts_buckets.keys())[:-1]:
+    for ts in sorted(pkts_ts_buckets.keys()):
+        pkts1.extend(sorted(pkts_ts_buckets[ts]))
+    pkts_ts_buckets.clear()
+
+    for pkt in RawPcapReader(golden):
+        ts = pkt[1][0] * 1e6 + pkt[1][1]
+        pkts_ts_buckets[ts].append(pkt)
+    # don't take last ts bucket, it can be cut in middle and packets inside bucket might be different
+    #for ts in sorted(pkts_ts_buckets.keys())[:-1]:
+    for ts in sorted(pkts_ts_buckets.keys()):
+        pkts2.extend(sorted(pkts_ts_buckets[ts]))
+
+    assert_equal(len(pkts1), len(pkts2), 'Lengths of generated pcap (%s) and golden (%s) are different' % (output, golden))
+
+    for pkt1, pkt2, i in zip(pkts1, pkts2, range(1, len(pkts1))):
+        ts1 = float(pkt1[1][0]) + (float(pkt1[1][1]) / 1e6)
+        ts2 = float(pkt2[1][0]) + (float(pkt2[1][1]) / 1e6)
+
+        if abs(ts1-ts2) > max_diff_sec: # 5 nsec
+            raise AssertionError("TS error: cap files '{0}', '{1}' differ in cap #{2} - '{3}' vs. '{4}'".format(output, golden, i, ts1, ts2))
+
+        if pkt1[0] != pkt2[0]:
+            errmsg = "RAW error: output file '{0}', differs from golden '{1}' in cap #{2}".format(output, golden, i)
+            print(errmsg)
+
+            print(format_text("\ndifferent fields for packet #{0}:".format(i), 'underline'))
+
+            scapy_pkt1_info = scapy_pkt_show_to_str(Ether(pkt1[0])).split('\n')
+            scapy_pkt2_info = scapy_pkt_show_to_str(Ether(pkt2[0])).split('\n')
+
+            print(format_text("\nGot:\n", 'bold', 'underline'))
+            for line, ref in zip(scapy_pkt1_info, scapy_pkt2_info):
+                if line != ref:
+                    print(format_text(line, 'bold'))
+
+            print(format_text("\nExpected:\n", 'bold', 'underline'))
+            for line, ref in zip(scapy_pkt2_info, scapy_pkt1_info):
+                if line != ref:
+                    print(format_text(line, 'bold'))
+
+            print("\n")
+            raise AssertionError(errmsg)
+
+
+
 @attr('run_on_trex')
 class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
     def setUp (self):
@@ -73,69 +138,6 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
             raise Exception("cannot find '{0}'".format(name))
 
 
-    def scapy_pkt_show_to_str (self, scapy_pkt):
-        capture = StringIO()
-        save_stdout = sys.stdout
-        sys.stdout = capture
-        scapy_pkt.show()
-        sys.stdout = save_stdout
-        return capture.getvalue()
-
-
-    def compare_caps (self, output, golden, max_diff_sec = 0.01):
-        pkts1 = []
-        pkts2 = []
-        pkts_ts_buckets = defaultdict(list)
-
-        for pkt in RawPcapReader(output):
-            ts = pkt[1][0] * 1e6 + pkt[1][1]
-            pkts_ts_buckets[ts].append(pkt)
-        # don't take last ts bucket, it can be cut in middle and packets inside bucket might be different
-        #for ts in sorted(pkts_ts_buckets.keys())[:-1]:
-        for ts in sorted(pkts_ts_buckets.keys()):
-            pkts1.extend(sorted(pkts_ts_buckets[ts]))
-        pkts_ts_buckets.clear()
-
-        for pkt in RawPcapReader(golden):
-            ts = pkt[1][0] * 1e6 + pkt[1][1]
-            pkts_ts_buckets[ts].append(pkt)
-        # don't take last ts bucket, it can be cut in middle and packets inside bucket might be different
-        #for ts in sorted(pkts_ts_buckets.keys())[:-1]:
-        for ts in sorted(pkts_ts_buckets.keys()):
-            pkts2.extend(sorted(pkts_ts_buckets[ts]))
-
-        assert_equal(len(pkts1), len(pkts2), 'Lengths of generated pcap (%s) and golden (%s) are different' % (output, golden))
-
-        for pkt1, pkt2, i in zip(pkts1, pkts2, range(1, len(pkts1))):
-            ts1 = float(pkt1[1][0]) + (float(pkt1[1][1]) / 1e6)
-            ts2 = float(pkt2[1][0]) + (float(pkt2[1][1]) / 1e6)
-
-            if abs(ts1-ts2) > 0.000005: # 5 nsec
-                raise AssertionError("TS error: cap files '{0}', '{1}' differ in cap #{2} - '{3}' vs. '{4}'".format(output, golden, i, ts1, ts2))
-
-            if pkt1[0] != pkt2[0]:
-                errmsg = "RAW error: output file '{0}', differs from golden '{1}' in cap #{2}".format(output, golden, i)
-                print(errmsg)
-
-                print(format_text("\ndifferent fields for packet #{0}:".format(i), 'underline'))
-
-                scapy_pkt1_info = self.scapy_pkt_show_to_str(Ether(pkt1[0])).split('\n')
-                scapy_pkt2_info = self.scapy_pkt_show_to_str(Ether(pkt2[0])).split('\n')
-
-                print(format_text("\nGot:\n", 'bold', 'underline'))
-                for line, ref in zip(scapy_pkt1_info, scapy_pkt2_info):
-                    if line != ref:
-                        print(format_text(line, 'bold'))
-
-                print(format_text("\nExpected:\n", 'bold', 'underline'))
-                for line, ref in zip(scapy_pkt2_info, scapy_pkt1_info):
-                    if line != ref:
-                        print(format_text(line, 'bold'))
-
-                print("\n")
-                raise AssertionError(errmsg)
-
-
     def run_sim (self, yaml, output, options = "", silent = False, obj = None, tunables = None):
         if output:
             user_cmd = "-f {0} -o {1} {2} -p {3}".format(yaml, output, options, self.scripts_path)
@@ -169,7 +171,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
                              tunables = None):
         print('Testing profile: %s' % profile)
 
-        output_cap = "a.pcap"
+        output_cap = "generated/a.pcap"
         input_file = os.path.join('stl/', profile)
         golden_file = os.path.join('exp',os.path.basename(profile).split('.')[0]+'.pcap');
         if os.path.exists(output_cap):
@@ -186,7 +188,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
             #os.system(s)
 
             if compare:
-                self.compare_caps(output_cap, golden_file)
+                compare_caps(output_cap, golden_file)
         finally:
             if not do_no_remove:
                 os.unlink(output_cap)
@@ -208,7 +210,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
             assert_equal(rc, True, 'Simulation on profile %s (generated) failed.' % profile)
 
             if compare:
-                self.compare_caps(output_cap, golden_file)
+                compare_caps(output_cap, golden_file)
 
         finally:
diff --git a/scripts/automation/regression/interactive_platform.py b/scripts/automation/regression/interactive_platform.py
index 10e89910..7a15bb0c 100755
--- a/scripts/automation/regression/interactive_platform.py
+++ b/scripts/automation/regression/interactive_platform.py
@@ -90,16 +90,31 @@ class InteractivePlatform(cmd.Cmd):
         self.platform.configure_basic_interfaces()
         print(termstyle.green("Basic interfaces configuration applied successfully."))
 
+    def do_basic_if_config_vlan(self, line):
+        """Apply basic interfaces configuration with vlan to all platform interfaces"""
+        self.platform.configure_basic_interfaces(vlan = True)
+        print(termstyle.green("Basic VLAN interfaces configuration applied successfully."))
+
     def do_pbr(self, line):
         """Apply IPv4 PBR configuration on all interfaces"""
         self.platform.config_pbr()
         print(termstyle.green("IPv4 PBR configuration applied successfully."))
 
+    def do_pbr_vlan(self, line):
+        """Apply IPv4 PBR configuration on all VLAN interfaces"""
+        self.platform.config_pbr(vlan = True)
+        print(termstyle.green("IPv4 VLAN PBR configuration applied successfully."))
+
     def do_no_pbr(self, line):
         """Removes IPv4 PBR configuration from all interfaces"""
         self.platform.config_no_pbr()
         print(termstyle.green("IPv4 PBR configuration removed successfully."))
 
+    def do_no_pbr_vlan(self, line):
+        """Removes IPv4 PBR configuration from all VLAN interfaces"""
+        self.platform.config_no_pbr(vlan = True)
+        print(termstyle.green("IPv4 PBR VLAN configuration removed successfully."))
+
     def do_nbar(self, line):
         """Apply NBAR PD configuration on all interfaces"""
         self.platform.config_nbar_pd()
@@ -180,11 +195,21 @@ class InteractivePlatform(cmd.Cmd):
         self.platform.config_ipv6_pbr()
         print(termstyle.green("IPv6 PBR configuration applied successfully."))
 
+    def do_ipv6_pbr_vlan(self, line):
+        """Apply IPv6 PBR configuration on all vlan interfaces"""
+        self.platform.config_ipv6_pbr(vlan = True)
+        print(termstyle.green("IPv6 VLAN PBR configuration applied successfully."))
+
     def do_no_ipv6_pbr(self, line):
         """Removes IPv6 PBR configuration from all interfaces"""
         self.platform.config_no_ipv6_pbr()
         print(termstyle.green("IPv6 PBR configuration removed successfully."))
 
+    def do_no_ipv6_pbr_vlan(self, line):
+        """Removes IPv6 PBR configuration from all VLAN interfaces"""
+        self.platform.config_no_ipv6_pbr(vlan = True)
+        print(termstyle.green("IPv6 VLAN PBR configuration removed successfully."))
+
     def do_zbf(self, line):
         """Apply Zone-Based policy Firewall configuration on all interfaces"""
         self.platform.config_zbf()
@@ -318,6 +343,14 @@ class InteractivePlatform(cmd.Cmd):
         self.do_pbr('')
         self.do_ipv6_pbr('')
 
+    def do_all_vlan(self, arg):
+        """Configures bundle of commands to set PBR routing on VLAN interfaces"""
+        self.do_load_clean('')
+        self.do_set_tftp_server('')
+        self.do_basic_if_config_vlan('')
+        self.do_pbr_vlan('')
+        self.do_ipv6_pbr_vlan('')
+
 
 if __name__ == "__main__":
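The new do_*_vlan entries follow Python's cmd.Cmd convention: every do_<name> method becomes an interactive shell command, and "bundle" commands simply call the other do_* methods, as do_all_vlan does above. A minimal standalone sketch of that pattern (hypothetical MiniShell, not the real InteractivePlatform):

import cmd

class MiniShell(cmd.Cmd):
    prompt = '> '

    def do_pbr_vlan(self, line):
        """Apply IPv4 PBR configuration on all VLAN interfaces"""
        print('pbr (vlan) applied')

    def do_all_vlan(self, line):
        """Bundle command: chains other do_* commands, like do_all_vlan above"""
        self.do_pbr_vlan('')

    def do_quit(self, line):
        return True  # returning True exits cmdloop()

# MiniShell().cmdloop() would start the interactive loop.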
diff --git a/scripts/automation/regression/outer_packages.py b/scripts/automation/regression/outer_packages.py
index 61ddc5cd..b2839dee 100755
--- a/scripts/automation/regression/outer_packages.py
+++ b/scripts/automation/regression/outer_packages.py
@@ -1,7 +1,7 @@
 #!/router/bin/python
-
 import sys, site
 import platform, os
+import pprint
 
 CURRENT_PATH        = os.path.dirname(os.path.realpath(__file__))   # alternate use with: os.getcwd()
 TREX_PATH           = os.getenv('TREX_UNDER_TEST')     # path to <trex-core>/scripts directory, env. variable TREX_UNDER_TEST should override it.
@@ -18,8 +18,11 @@ NIGHTLY_MODULES = [ {'name': 'ansi2html'},
                     {'name': 'rednose-0.4.1'},
                     {'name': 'progressbar-2.2'},
                     {'name': 'termstyle'},
+                    {'name': 'urllib3'},
+                    {'name': 'elasticsearch'},
+                    {'name': 'requests'},
                     {'name': 'pyyaml-3.11', 'py-dep': True},
-                    {'name': 'nose-1.3.4', 'py-dep': True}
+                    {'name': 'nose-1.3.4', 'py-dep': True},
                   ]
@@ -62,6 +65,7 @@ def import_nightly_modules ():
     sys.path.append(PATH_STL_API)
     sys.path.append(PATH_STF_API)
     import_module_list(NIGHTLY_MODULES)
+    #pprint.pprint(sys.path)
 
 import_nightly_modules()
diff --git a/scripts/automation/regression/setups/kiwi02/benchmark.yaml b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
index 4e091d53..1eefccaf 100644
--- a/scripts/automation/regression/setups/kiwi02/benchmark.yaml
+++ b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
@@ -4,6 +4,24 @@
 
 #### common templates ###
 
+test_short_flow:
+    multiplier    : 20000
+    cores         : 4
+    bw_per_core   : 1000
+
+test_short_flow_high_active:
+    multiplier    : 20000
+    cores         : 4
+    bw_per_core   : 1000
+    active_flows  : 4000000
+
+test_short_flow_high_active2:
+    multiplier    : 15000
+    cores         : 4
+    bw_per_core   : 1000
+    active_flows  : 4000000
+
+
 stat_route_dict: &stat_route_dict
     clients_start : 16.0.0.1
     servers_start : 48.0.0.1
@@ -254,7 +272,7 @@ test_performance_vm_single_cpu:
     cfg:
         mult                 : "90%"
         mpps_per_core_golden :
-            min: 11.5
+            min: 11.2
             max: 13.1
@@ -262,7 +280,7 @@ test_performance_vm_single_cpu_cached:
     cfg:
         mult                 : "90%"
         mpps_per_core_golden :
-            min: 22.0
+            min: 20.5
             max: 25.0
@@ -271,7 +289,7 @@ test_performance_syn_attack_single_cpu:
     cfg:
         mult                 : "90%"
         mpps_per_core_golden :
-            min: 9.5
+            min: 9.3
            max: 11.5
 
 test_performance_vm_multi_cpus:
@@ -296,6 +314,6 @@ test_performance_syn_attack_multi_cpus:
        core_count           : 4
        mult                 : "90%"
        mpps_per_core_golden :
-            min: 8.5
+            min: 8.4
            max: 10.5
diff --git a/scripts/automation/regression/setups/trex07/benchmark.yaml b/scripts/automation/regression/setups/trex07/benchmark.yaml
index 26588ba7..7abc2e4d 100644
--- a/scripts/automation/regression/setups/trex07/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex07/benchmark.yaml
@@ -4,6 +4,24 @@
 
 #### common templates ###
 
+test_short_flow:
+    multiplier    : 25000
+    cores         : 7
+    bw_per_core   : 1000
+
+test_short_flow_high_active:
+    multiplier    : 20000
+    cores         : 7
+    bw_per_core   : 1000
+    active_flows  : 4000000
+
+test_short_flow_high_active2:
+    multiplier    : 10000
+    cores         : 7
+    bw_per_core   : 1000
+    active_flows  : 4000000
+
+
 test_jumbo:
     multiplier    : 120
     cores         : 4
@@ -190,7 +208,7 @@ test_performance_vm_single_cpu_cached:
     cfg:
         mult                 : "10%"
         mpps_per_core_golden :
-            min: 16.0
+            min: 20.0
             max: 25.0
@@ -199,7 +217,7 @@ test_performance_syn_attack_single_cpu:
     cfg:
         mult                 : "90%"
         mpps_per_core_golden :
-            min: 9.0
+            min: 8.4
            max: 14.0
 
 test_performance_vm_multi_cpus:
@@ -216,8 +234,8 @@ test_performance_vm_multi_cpus_cached:
         core_count           : 7
         mult                 : "35%"
         mpps_per_core_golden :
-            min: 9.0
-            max: 15.0
+            min: 24.5
+            max: 27.0
 
 test_performance_syn_attack_multi_cpus:
     cfg:
@@ -225,9 +243,10 @@ test_performance_syn_attack_multi_cpus:
         mult                 : "90%"
         mpps_per_core_golden :
             min: 8.0
-            max: 16.0
+            max: 11.0
 
 test_all_profiles :
     mult          : "5%"
+    skip          : ['udp_rand_len_9k.py', 'udp_inc_len_9k.py']
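These benchmark.yaml edits only retune the mpps_per_core_golden min/max windows per setup. A sketch of how such a window is typically evaluated, loosely mirroring PerformanceReport.check_golden that appears further down (hypothetical helper, assumed semantics):

def check_golden(measured_mpps_per_core, golden):
    # 'golden' is the {'min': ..., 'max': ...} mapping from benchmark.yaml
    if measured_mpps_per_core < golden['min']:
        return 'below-golden'        # regression: the test fails
    if measured_mpps_per_core > golden['max']:
        return 'better-than-golden'  # faster than expected: goldens need a bump
    return 'normal'

assert check_golden(12.0, {'min': 11.2, 'max': 13.1}) == 'normal'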
diff --git a/scripts/automation/regression/setups/trex08/benchmark.yaml b/scripts/automation/regression/setups/trex08/benchmark.yaml
index 70995b21..f409edb8 100644
--- a/scripts/automation/regression/setups/trex08/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex08/benchmark.yaml
@@ -4,6 +4,25 @@
 
 ### stateful ###
 
+test_short_flow:
+    multiplier    : 50000
+    cores         : 7
+    bw_per_core   : 1000
+
+test_short_flow_high_active:
+    multiplier    : 40000
+    cores         : 7
+    bw_per_core   : 1000
+    active_flows  : 4000000
+
+test_short_flow_high_active2:
+    multiplier    : 30000
+    cores         : 7
+    bw_per_core   : 1000
+    active_flows  : 4000000
+
+
+
 test_jumbo:
     multiplier    : 150
     cores         : 2
@@ -183,7 +202,7 @@ test_performance_vm_single_cpu:
     cfg:
         mult                 : "90%"
         mpps_per_core_golden :
-            min: 15.1
+            min: 15.5
             max: 20.3
@@ -191,7 +210,7 @@ test_performance_vm_single_cpu_cached:
     cfg:
         mult                 : "10%"
         mpps_per_core_golden :
-            min: 29.1
+            min: 28.0
             max: 32.0
diff --git a/scripts/automation/regression/setups/trex09/benchmark.yaml b/scripts/automation/regression/setups/trex09/benchmark.yaml
index d8623811..79dab3e8 100644
--- a/scripts/automation/regression/setups/trex09/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex09/benchmark.yaml
@@ -195,7 +195,7 @@ test_performance_vm_single_cpu_cached:
    cfg:
        mult                 : "90%"
        mpps_per_core_golden :
-           min: 28.5
+           min: 28.3
            max: 31.2
@@ -204,7 +204,7 @@ test_performance_syn_attack_single_cpu:
    cfg:
        mult                 : "90%"
        mpps_per_core_golden :
-           min: 12.9
+           min: 12.5
            max: 14.5
 
 test_performance_vm_multi_cpus:
@@ -212,7 +212,7 @@ test_performance_vm_multi_cpus:
        core_count           : 2
        mult                 : "90%"
        mpps_per_core_golden :
-           min: 15.2
+           min: 14.4
            max: 16.3
@@ -221,14 +221,14 @@ test_performance_vm_multi_cpus_cached:
        core_count           : 2
        mult                 : "90%"
        mpps_per_core_golden :
-           min: 26.8
-           max: 29.5
+           min: 29.5
+           max: 31.5
 
 test_performance_syn_attack_multi_cpus:
    cfg:
        core_count           : 2
        mult                 : "90%"
        mpps_per_core_golden :
-           min: 13.0
+           min: 12.5
            max: 13.8
diff --git a/scripts/automation/regression/setups/trex11/benchmark.yaml b/scripts/automation/regression/setups/trex11/benchmark.yaml
index 5ebcdd55..87654a35 100644
--- a/scripts/automation/regression/setups/trex11/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex11/benchmark.yaml
@@ -183,4 +183,54 @@ test_CPU_benchmark:
 
 test_all_profiles :
     mult          : "5%"
     skip          : ['udp_rand_len_9k.py','udp_inc_len_9k.py'] # due to VIC 9K defect trex-282
-
+
+
+test_performance_vm_single_cpu:
+    cfg:
+        mult                 : "5%"
+        mpps_per_core_golden :
+            min: 9.5
+            max: 11.5
+
+
+test_performance_vm_single_cpu_cached:
+    cfg:
+        mult                 : "5%"
+        mpps_per_core_golden :
+            min: 26.5
+            max: 29.0
+
+
+
+test_performance_syn_attack_single_cpu:
+    cfg:
+        mult                 : "5%"
+        mpps_per_core_golden :
+            min: 8.5
+            max: 10.0
+
+test_performance_vm_multi_cpus:
+    cfg:
+        core_count           : 1
+        mult                 : "5%"
+        mpps_per_core_golden :
+            min: 9.0
+            max: 11.5
+
+
+test_performance_vm_multi_cpus_cached:
+    cfg:
+        core_count           : 1
+        mult                 : "5%"
+        mpps_per_core_golden :
+            min: 26.5
+            max: 29.0
+
+test_performance_syn_attack_multi_cpus:
+    cfg:
+        core_count           : 1
+        mult                 : "5%"
+        mpps_per_core_golden :
+            min: 8.0
+            max: 10.0
+
diff --git a/scripts/automation/regression/setups/trex25/benchmark.yaml b/scripts/automation/regression/setups/trex25/benchmark.yaml
index f8fd0bee..2c677b81 100644
--- a/scripts/automation/regression/setups/trex25/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex25/benchmark.yaml
@@ -79,7 +79,7 @@ test_nat_learning:
         nat_opened  : 40000
 
 
-test_nbar_simple:
+test_nbar_simple: &nbar_simple
     multiplier    : 6
     cores         : 1
     bw_per_core   : 16.645
@@ -100,6 +100,12 @@ test_nbar_simple:
             rtsp          : 0.04
             unknown       : 28.52
 
+test_client_cfg_nbar: &client_cfg_nbar
+    << : *nbar_simple
+
+test_client_cfg_vlan:
+    cores         : 1
+    multiplier    : 10
 
 test_rx_check_http: &rx_http
     multiplier    : 8800
@@ -118,7 +124,7 @@ test_rx_check_http_negative:
 
 
 test_rx_check_sfr: &rx_sfr
-    multiplier    : 6.8
+    multiplier    : 3.2
     cores         : 1
     rx_sample_rate : 16
     bw_per_core   : 16.063
diff --git a/scripts/automation/regression/stateful_tests/trex_client_cfg_test.py b/scripts/automation/regression/stateful_tests/trex_client_cfg_test.py
index 852e745d..158f59b9 100644
--- a/scripts/automation/regression/stateful_tests/trex_client_cfg_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_client_cfg_test.py
@@ -1,5 +1,6 @@
 #!/router/bin/python
 from .trex_general_test import CTRexGeneral_Test, CTRexScenario
+from .trex_nbar_test import CTRexNbarBase
 from CPlatform import CStaticRouteConfig
 from .tests_exceptions import *
 #import sys
@@ -8,44 +9,71 @@ from nose.tools import nottest
 
 # Testing client cfg ARP resolve. Actually, just need to check that TRex run finished with no errors.
 # If resolve will fail, TRex will exit with exit code != 0
-class CTRexClientCfg_Test(CTRexGeneral_Test):
+class CTRexClientCfg_Test(CTRexNbarBase):
     """This class defines the IMIX testcase of the TRex traffic generator"""
     def __init__(self, *args, **kwargs):
-        # super(CTRexClientCfg_Test, self).__init__()
-        CTRexGeneral_Test.__init__(self, *args, **kwargs)
+        CTRexNbarBase.__init__(self, *args, **kwargs)
 
     def setUp(self):
         if CTRexScenario.setup_name == 'kiwi02':
             self.skip("Can't run currently on kiwi02")
+        super(CTRexClientCfg_Test, self).setUp() # launch super test class setUp process
 
-        pass
-
-    def test_client_cfg(self):
-        # test initializtion
+    def test_client_cfg_nbar(self):
         if self.is_loopback:
-            return
-        else:
-            self.router.configure_basic_interfaces()
-            self.router.config_pbr(mode = "config")
-
-        ret = self.trex.start_trex(
-            c = 1,
-            m = 1,
-            d = 10,
-            f = 'cap2/dns.yaml',
-            v = 3,
+            self.skip('No NBAR on loopback')
+
+        self.router.configure_basic_interfaces()
+        self.router.config_pbr(mode = "config")
+        self.router.config_nbar_pd()
+
+        mult = self.get_benchmark_param('multiplier')
+        core = self.get_benchmark_param('cores')
+
+        ret = self.trex.start_trex (
+            c = core,
+            m = mult,
+            nc = True,
+            p = True,
+            d = 100,
+            f = 'avl/sfr_delay_10_1g.yaml',
             client_cfg = 'automation/regression/cfg/client_cfg.yaml',
             l = 1000)
 
         trex_res = self.trex.sample_to_run_finish()
-
         print("\nLATEST RESULT OBJECT:")
         print(trex_res)
+        self.check_general_scenario_results(trex_res, check_latency = False) # no latency with client config
+        self.match_classification()
+
+    def test_client_cfg_vlan(self):
+        if self.is_loopback:
+            self.skip('Not relevant on loopback')
+
+        self.router.configure_basic_interfaces(vlan = True)
+        self.router.config_pbr(mode = "config", vlan = True)
+
+        mult = self.get_benchmark_param('multiplier')
+        core = self.get_benchmark_param('cores')
+
+        ret = self.trex.start_trex (
+            c = core,
+            m = mult,
+            nc = True,
+            p = True,
+            d = 60,
+            f = 'cap2/dns.yaml',
+            limit_ports = 4,
+            client_cfg = 'automation/regression/cfg/client_cfg_vlan.yaml')
 
-        self.check_general_scenario_results(trex_res)
+        trex_res = self.trex.sample_to_run_finish()
+        print("\nLATEST RESULT OBJECT:")
+        print(trex_res)
+        self.check_general_scenario_results(trex_res, check_latency = False) # no latency with client config
 
     def tearDown(self):
-        CTRexGeneral_Test.tearDown(self)
+        CTRexNbarBase.tearDown(self)
         pass
 
 if __name__ == "__main__":
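The trex25 change relies on YAML anchors: test_nbar_simple is anchored as &nbar_simple, and test_client_cfg_nbar inherits all of its fields through the merge key << : *nbar_simple. A quick demonstration of that merge-key behavior, assuming PyYAML (which the regression already bundles as pyyaml-3.11):

import yaml

doc = """
test_nbar_simple: &nbar_simple
  multiplier : 6
  cores      : 1

test_client_cfg_nbar:
  << : *nbar_simple
"""

cfg = yaml.safe_load(doc)
# The merge key copies every field of the anchored mapping:
assert cfg['test_client_cfg_nbar']['multiplier'] == 6
assert cfg['test_client_cfg_nbar']['cores'] == 1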
diff --git a/scripts/automation/regression/stateful_tests/trex_general_test.py b/scripts/automation/regression/stateful_tests/trex_general_test.py
index f6d2b917..4453fd94 100755
--- a/scripts/automation/regression/stateful_tests/trex_general_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_general_test.py
@@ -28,6 +28,7 @@ from nose.plugins.skip import SkipTest
 import trex
 from trex import CTRexScenario
 import misc_methods
+import pprint
 import sys
 import os
 # from CPlatformUnderTest import *
@@ -60,6 +61,7 @@ class CTRexGeneral_Test(unittest.TestCase):
         self.trex_crashed = CTRexScenario.trex_crashed
         self.modes        = CTRexScenario.modes
         self.GAManager    = CTRexScenario.GAManager
+        self.elk          = CTRexScenario.elk
         self.no_daemon    = CTRexScenario.no_daemon
         self.skipping     = False
         self.fail_reasons = []
@@ -70,6 +72,21 @@ class CTRexGeneral_Test(unittest.TestCase):
         self.is_VM        = True if 'VM' in self.modes else False
 
         if not CTRexScenario.is_init:
+            #update elk const object
+            if self.elk:
+                setup = CTRexScenario.elk_info['info']['setup']
+
+                if self.is_loopback :
+                    setup['dut'] = 'loopback'
+                else:
+                    setup['dut'] = 'router'
+
+                if self.is_VM:
+                    setup['baremetal'] = False
+                    setup['hypervisor'] = 'ESXi'       #TBD
+                else:
+                    setup['baremetal'] = True
+
             if self.trex and not self.no_daemon: # stateful
                 CTRexScenario.trex_version = self.trex.get_trex_version()
             if not self.is_loopback:
@@ -81,8 +98,12 @@ class CTRexGeneral_Test(unittest.TestCase):
                     CTRexScenario.router.load_platform_data_from_file(device_cfg)
                     CTRexScenario.router.launch_connection(device_cfg)
                     if CTRexScenario.router_cfg['forceImageReload']:
-                        running_image = CTRexScenario.router.get_running_image_details()['image']
+                        image_d = CTRexScenario.router.get_running_image_details();
+                        running_image = image_d['image']
                         print('Current router image: %s' % running_image)
+                        if self.elk:
+                            setup['dut'] = image_d.get('model','router');
+                            print('Current router model : %s' % setup['dut'])
                         needed_image = device_cfg.get_image_name()
                         if not CTRexScenario.router.is_image_matches(needed_image):
                             print('Setting router image: %s' % needed_image)
@@ -107,7 +128,9 @@ class CTRexGeneral_Test(unittest.TestCase):
         #    raise RuntimeError('CTRexScenario class is not initialized!')
         self.router = CTRexScenario.router
 
-
+    def get_elk_obj (self):
+        obj=trex.copy_elk_info ()
+        return (obj);
 
 #   def assert_dict_eq (self, dict, key, val, error=''):
 #       v1 = int(dict[key]))
@@ -142,9 +165,11 @@ class CTRexGeneral_Test(unittest.TestCase):
     def check_CPU_benchmark (self, trex_res, err = 25, minimal_cpu = 10, maximal_cpu = 85):
         cpu_util = trex_res.get_avg_steady_state_value('trex-global.data.m_cpu_util_raw')
         trex_tx_bps = trex_res.get_avg_steady_state_value('trex-global.data.m_tx_bps')
+        trex_tx_pps = trex_res.get_avg_steady_state_value('trex-global.data.m_tx_pps')
         expected_norm_cpu = self.get_benchmark_param('bw_per_core')
         cores = self.get_benchmark_param('cores')
         ports_count = trex_res.get_ports_count()
+        total_dp_cores = cores * (ports_count/2);
         if not (cpu_util and ports_count and cores):
             print("Can't calculate CPU benchmark, need to divide by zero: cpu util: %s, ports: %s, cores: %s" % (cpu_util, ports_count, cores))
             test_norm_cpu = -1
@@ -172,16 +197,42 @@ class CTRexGeneral_Test(unittest.TestCase):
         #if calc_error_precent > err and cpu_util > 10:
         #    self.fail('Excepted bw_per_core ratio: %s, got: %g' % (expected_norm_cpu, round(test_norm_cpu)))
 
+        trex_tx_gbps = trex_tx_bps/1e9
+        trex_tx_mpps = trex_tx_pps/1e6
+
+        trex_tx_gbps_pc = trex_tx_gbps*100.0/(cpu_util*total_dp_cores);
+        trex_tx_mpps_pc = trex_tx_mpps*100.0/(cpu_util*total_dp_cores)
+
+        trex_tx_pckt      = trex_res.get_last_value("trex-global.data.m_total_tx_pkts")
+        trex_drops        = trex_res.get_total_drops()
+        trex_drop_precent = trex_drops *100.0/trex_tx_pckt;
+
         # report benchmarks
-        if self.GAManager:
-            try:
-                pass
-                #setup_test = '%s.%s' % (CTRexScenario.setup_name, self.get_name())
-                #self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core', value = int(test_norm_cpu))
-                #self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core_exp', value = int(expected_norm_cpu))
-                #self.GAManager.emptyAndReportQ()
-            except Exception as e:
-                print('Sending GA failed: %s' % e)
+        if self.elk:
+            elk_obj = self.get_elk_obj()
+            print("Reporting to elk")
+            elk_obj['test']={ "name" : self.get_name(),
+                        "type"  : "stateful",
+                        "cores" : total_dp_cores,
+                        "cpu%"  : cpu_util,
+                        "mpps" : (trex_tx_mpps),
+                        "streams_count" : 1,
+                        "mpps_pc" : (trex_tx_mpps_pc),
+                        "gbps_pc" : (trex_tx_gbps_pc),
+                        "gbps" : (trex_tx_gbps),
+                        "kcps"  : (trex_res.get_last_value("trex-global.data.m_tx_cps")/1000.0),
+                        "avg-pktsize" : round((1000.0*trex_tx_gbps/(8.0*trex_tx_mpps))),
+                        "latecny" : { "min" : min(trex_res.get_min_latency().values()),
+                                      "max" : max(trex_res.get_max_latency().values()),
+                                      "avr" : max(trex_res.get_avg_latency().values()),
+                                      "jitter" : max(trex_res.get_jitter_latency().values()),
+                                      "max-win" : max(trex_res.get_avg_window_latency ().values()),
+                                      "drop-rate" : trex_drop_precent
+                                    }
+                        };
+            pprint.pprint(elk_obj['test']);
+            self.elk.perf.push_data(elk_obj)
 
     def check_results_gt (self, res, name, val):
         if res is None:
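The per-core normalization used for the ELK record above divides throughput by the CPU utilization and by the number of data-plane cores (cores per port pair times the number of port pairs). A worked sketch of that arithmetic in isolation:

def normalize_per_core(tx_bps, tx_pps, cpu_util, cores, ports_count):
    # total data-plane cores = cores per dual-port times number of port pairs
    total_dp_cores = cores * (ports_count / 2)
    gbps = tx_bps / 1e9
    mpps = tx_pps / 1e6
    # scale to what one fully utilized core would do (cpu_util is 0..100)
    gbps_pc = gbps * 100.0 / (cpu_util * total_dp_cores)
    mpps_pc = mpps * 100.0 / (cpu_util * total_dp_cores)
    return gbps_pc, mpps_pc

# e.g. 40 Gb/s at 80% CPU on 2 cores x 2 port pairs -> 12.5 Gb/s per core
print(normalize_per_core(40e9, 30e6, 80.0, 2, 4))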
diff --git a/scripts/automation/regression/stateful_tests/trex_imix_test.py b/scripts/automation/regression/stateful_tests/trex_imix_test.py
index f8fe0ed1..5f52fab7 100755
--- a/scripts/automation/regression/stateful_tests/trex_imix_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_imix_test.py
@@ -18,6 +18,99 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
 #       self.router.clear_counters()
         pass
 
+    def test_short_flow(self):
+        """ short UDP flow with 64B packets, this test with small number of active flows """
+        # test initialization
+        if not self.is_loopback:
+            self.router.configure_basic_interfaces()
+            self.router.config_pbr(mode = "config")
+
+        mult = self.get_benchmark_param('multiplier')
+        core = self.get_benchmark_param('cores')
+
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            p  = True,
+            nc = True,
+            d = 30,
+            f = 'cap2/cur_flow.yaml',
+            l = 1000)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        # trex_res is a CTRexResult instance- and contains the summary of the test results
+        # you may see all the results keys by simply calling here for 'print trex_res.result'
+        print("\nLATEST RESULT OBJECT:")
+        print(trex_res)
+
+        self.check_general_scenario_results(trex_res)
+        self.check_CPU_benchmark(trex_res)
+
+    def test_short_flow_high_active(self):
+        """ short UDP flow with 64B packets, this test with 8M active flows """
+        # test initialization
+        if not self.is_loopback:
+            self.router.configure_basic_interfaces()
+            self.router.config_pbr(mode = "config")
+
+        mult = self.get_benchmark_param('multiplier')
+        core = self.get_benchmark_param('cores')
+        active_flows =self.get_benchmark_param('active_flows')
+
+
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            p  = True,
+            nc = True,
+            d = 60,
+            active_flows = active_flows,
+            f = 'cap2/cur_flow.yaml',
+            l = 1000)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        # trex_res is a CTRexResult instance- and contains the summary of the test results
+        # you may see all the results keys by simply calling here for 'print trex_res.result'
+        print("\nLATEST RESULT OBJECT:")
+        print(trex_res)
+
+        self.check_general_scenario_results(trex_res)
+        self.check_CPU_benchmark(trex_res)
+
+    def test_short_flow_high_active2(self):
+        """ short UDP flow with 64B packets, this test with 8M active flows """
+        # test initialization
+        if not self.is_loopback:
+            self.router.configure_basic_interfaces()
+            self.router.config_pbr(mode = "config")
+
+        mult = self.get_benchmark_param('multiplier')
+        core = self.get_benchmark_param('cores')
+        active_flows =self.get_benchmark_param('active_flows')
+
+
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            p  = True,
+            nc = True,
+            d = 60,
+            active_flows = active_flows,
+            f = 'cap2/cur_flow_single.yaml',
+            l = 1000)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        # trex_res is a CTRexResult instance- and contains the summary of the test results
+        # you may see all the results keys by simply calling here for 'print trex_res.result'
+        print("\nLATEST RESULT OBJECT:")
+        print(trex_res)
+
+        self.check_general_scenario_results(trex_res)
+        self.check_CPU_benchmark(trex_res)
+
     def test_routing_imix_64(self):
         # test initializtion
         if not self.is_loopback:
@@ -112,7 +205,7 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
         ret = self.trex.start_trex(
             c = core,
             m = mult,
-            p  = True,
+            e  = True,
             nc = True,
             d = 60,
             f = 'cap2/imix_fast_1g.yaml',
diff --git a/scripts/automation/regression/stateful_tests/trex_nbar_test.py b/scripts/automation/regression/stateful_tests/trex_nbar_test.py
index 6611ac96..a98da9ac 100755
--- a/scripts/automation/regression/stateful_tests/trex_nbar_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_nbar_test.py
@@ -5,18 +5,7 @@ from interfaces_e import IFType
 from nose.tools import nottest
 from misc_methods import print_r
 
-class CTRexNbar_Test(CTRexGeneral_Test):
-    """This class defines the NBAR testcase of the TRex traffic generator"""
-    def __init__(self, *args, **kwargs):
-        super(CTRexNbar_Test, self).__init__(*args, **kwargs)
-        self.unsupported_modes = ['loopback'] # obviously no NBar in loopback
-
-    def setUp(self):
-        super(CTRexNbar_Test, self).setUp() # launch super test class setUp process
-#       self.router.kill_nbar_flows()
-        self.router.clear_cft_counters()
-        self.router.clear_nbar_stats()
-
+class CTRexNbarBase(CTRexGeneral_Test):
     def match_classification (self):
         nbar_benchmark = self.get_benchmark_param("nbar_classification")
         test_classification = self.router.get_nbar_stats()
@@ -52,6 +41,17 @@ class CTRexNbar_Test(CTRexNbarBase):
         if missmatchFlag:
             self.fail(missmatchMsg)
 
+class CTRexNbar_Test(CTRexNbarBase):
+    """This class defines the NBAR testcase of the TRex traffic generator"""
+    def __init__(self, *args, **kwargs):
+        super(CTRexNbar_Test, self).__init__(*args, **kwargs)
+        self.unsupported_modes = ['loopback'] # obviously no NBar in loopback
+
+    def setUp(self):
+        super(CTRexNbar_Test, self).setUp() # launch super test class setUp process
+#       self.router.kill_nbar_flows()
+        self.router.clear_cft_counters()
+        self.router.clear_nbar_stats()
 
     def test_nbar_simple(self):
         # test initializtion
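The nbar change is a pure refactor: the shared helper moves from CTRexNbar_Test into a new CTRexNbarBase so that CTRexClientCfg_Test (earlier in this diff) can inherit match_classification without inheriting the NBAR-only setUp. The shape of that refactor in miniature, with hypothetical class names:

class GeneralTest(object):
    pass

class NbarBase(GeneralTest):
    # shared helper, usable by any test that compares NBAR stats
    def match_classification(self):
        print('comparing NBAR stats against benchmark')

class NbarTest(NbarBase):
    # the NBAR-only fixture stays in the concrete test, not the base
    def setUp(self):
        print('clear cft counters / nbar stats')

class ClientCfgTest(NbarBase):
    # reuses match_classification with its own, lighter setUp
    def setUp(self):
        print('client-cfg setup')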
diff --git a/scripts/automation/regression/stateless_tests/stl_benchmark_test.py b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
index 6940efd3..fbc58765 100755
--- a/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
@@ -4,6 +4,7 @@ from trex_stl_lib.api import *
 import os, sys
 from collections import deque
 from time import time, sleep
+import pprint
 
 class STLBenchmark_Test(CStlGeneral_Test):
     """Benchark stateless performance"""
@@ -14,9 +15,21 @@ class STLBenchmark_Test(CStlGeneral_Test):
         stabilize = 5           # ensure stabilization over this period
         print('')
 
-        for profile_bench in self.get_benchmark_param('profiles'):
+        #self.get_benchmark_param('profiles')
+        #profiles=[{'bw_per_core': 1,
+        #           'cpu_util': 1,
+        #           'kwargs': {'packet_len': 64},
+        #           'name': 'stl/udp_for_benchmarks.py'}]
+
+        profiles = self.get_benchmark_param('profiles')
+        dp_cores = self.stl_trex.system_info.get('dp_core_count', 0)
+
+        for profile_bench in profiles:
+
             cpu_utils = deque([0] * stabilize, maxlen = stabilize)
-            bws_per_core = deque([0] * stabilize, maxlen = stabilize)
+            bps = deque([0] * stabilize, maxlen = stabilize)
+            pps = deque([0] * stabilize, maxlen = stabilize)
+
             kwargs = profile_bench.get('kwargs', {})
             print('Testing profile %s, kwargs: %s' % (profile_bench['name'], kwargs))
             profile = STLProfile.load(os.path.join(CTRexScenario.scripts_path, profile_bench['name']), **kwargs)
@@ -32,13 +45,30 @@ class STLBenchmark_Test(CStlGeneral_Test):
             for i in range(timeout + 1):
                 stats = self.stl_trex.get_stats()
                 cpu_utils.append(stats['global']['cpu_util'])
-                bws_per_core.append(stats['global']['bw_per_core'])
+                bps.append(stats['global']['tx_bps'])
+                pps.append(stats['global']['tx_pps'])
+
                 if i > stabilize and min(cpu_utils) > max(cpu_utils) * 0.95:
                     break
                 sleep(0.5)
 
             agv_cpu_util = sum(cpu_utils) / stabilize
-            agv_bw_per_core = sum(bws_per_core) / stabilize
+            agv_pps = sum(pps) / stabilize
+            agv_bps = sum(bps) / stabilize
+
+            if agv_cpu_util == 0.0:
+                agv_cpu_util=1.0;
+
+            agv_mpps = (agv_pps/1e6);
+            agv_gbps = (agv_bps/1e9)
+
+
+            agv_gbps_norm = agv_gbps * 100.0/agv_cpu_util;
+            agv_mpps_norm = agv_mpps * 100.0/agv_cpu_util;
+
+            agv_gbps_norm_pc = agv_gbps_norm/dp_cores;
+            agv_mpps_norm_pc = agv_mpps_norm/dp_cores;
+
             if critical_test and i == timeout and agv_cpu_util > 10:
                 raise Exception('Timeout on waiting for stabilization, last CPU util values: %s' % list(cpu_utils))
@@ -48,24 +78,32 @@ class STLBenchmark_Test(CStlGeneral_Test):
                 raise Exception('Too much queue_full: %s' % stats['global']['queue_full'])
             if not cpu_utils[-1]:
                 raise Exception('CPU util is zero, last values: %s' % list(cpu_utils))
-            print('Done (%ss), CPU util: %4g, bw_per_core: %6sGb/core' % (int(time() - start_time), agv_cpu_util, round(agv_bw_per_core, 2)))
-            # TODO: add check of benchmark based on results from regression
-
-            # report benchmarks
-            if self.GAManager:
-                try:
-                    pass
-                    #profile_repr = '%s.%s %s' % (CTRexScenario.setup_name,
-                    #                             os.path.basename(profile_bench['name']),
-                    #                             repr(kwargs).replace("'", ''))
-                    #self.GAManager.gaAddAction(Event = 'stateless_test', action = profile_repr,
-                    #                           label = 'bw_per_core', value = int(agv_bw_per_core))
-                    # TODO: report expected once acquired
-                    #self.GAManager.gaAddAction(Event = 'stateless_test', action = profile_repr,
-                    #                           label = 'bw_per_core_exp', value = int(expected_norm_cpu))
-                    #self.GAManager.emptyAndReportQ()
-                except Exception as e:
-                    print('Sending GA failed: %s' % e)
+            print('Done (%ss), CPU util: %4g, norm_pps_per_core:%6smpps norm_bw_per_core: %6sGb/core' % (int(time() - start_time), agv_cpu_util, round(agv_mpps_norm_pc,2), round(agv_gbps_norm_pc, 2)))
+
+            # report benchmarks to elk
+            if self.elk:
+                streams=kwargs.get('stream_count',1)
+                elk_obj = self.get_elk_obj()
+                print("\n* Reporting to elk *\n")
+                name=profile_bench['name']
+                elk_obj['test']={ "name" : name,
+                            "type"  : "stateless-range",
+                            "cores" : dp_cores,
+                            "cpu%"  : agv_cpu_util,
+                            "mpps" : (agv_mpps),
+                            "streams_count" : streams,
+                            "mpps_pc" : (agv_mpps_norm_pc),
+                            "gbps_pc" : (agv_gbps_norm_pc),
+                            "gbps" : (agv_gbps),
+                            "avg-pktsize" : round((1000.0*agv_gbps/(8.0*agv_mpps))),
+                            "latecny" : { "min" : -1.0,
+                                          "max" : -1.0,
+                                          "avr" : -1.0
+                                        }
+                            };
+                #pprint.pprint(elk_obj);
+                self.elk.perf.push_data(elk_obj)
+
 
     def tearDown(self):
         self.stl_trex.reset()
diff --git a/scripts/automation/regression/stateless_tests/stl_rx_test.py b/scripts/automation/regression/stateless_tests/stl_rx_test.py
index 4dad712f..8812ac48 100644
--- a/scripts/automation/regression/stateless_tests/stl_rx_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_rx_test.py
@@ -52,7 +52,7 @@ class STLRX_Test(CStlGeneral_Test):
             'allow_packets_drop_num': 1, # allow 1 pkt drop
         },
 
-        'librte_pmd_mlx5': {
+        'net_mlx5': {
             'rate_percent': 80,
             'total_pkts': 1000,
             'rate_latency': 1,
diff --git a/scripts/automation/regression/trex.py b/scripts/automation/regression/trex.py
index 416a6e3b..4f5eba60 100644
--- a/scripts/automation/regression/trex.py
+++ b/scripts/automation/regression/trex.py
@@ -10,6 +10,7 @@ import time
 from CProgressDisp import TimedProgressBar
 from stateful_tests.tests_exceptions import TRexInUseError
 import datetime
+import copy
 
 class CTRexScenario:
     modes = set() # list of modes of this setup: loopback, virtual etc.
@@ -41,6 +42,20 @@ class CTRexScenario:
     debug_image = False
     test = None
     json_verbose = False
+    elk = None
+    elk_info = None
+
+def copy_elk_info ():
+    # return a fresh deep copy of the per-run ELK skeleton, stamped with a new timestamp
+    assert(CTRexScenario.elk_info)
+    d = copy.deepcopy(CTRexScenario.elk_info);
+
+    timestamp = datetime.datetime.now() - datetime.timedelta(hours=2); # Jerusalem time zone; Kibana has no option to change the time zone
+    d['timestamp'] = timestamp.strftime("%Y-%m-%d %H:%M:%S")
+    return(d)
+
+
 class CTRexRunner:
     """This is an instance for generating a CTRexRunner"""
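Note that the fixed hours=2 offset in copy_elk_info() turns the Jerusalem wall clock into an approximation of UTC, but it drifts by an hour whenever daylight saving (IDT, UTC+3) is in effect. A drift-free sketch, assuming the Kibana index can simply be fed UTC timestamps (illustration only, not part of the patch):

    import copy
    import datetime

    def copy_elk_info():
        assert CTRexScenario.elk_info
        d = copy.deepcopy(CTRexScenario.elk_info)
        # use UTC directly instead of local time minus a hard-coded offset
        d['timestamp'] = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
        return d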
diff --git a/scripts/automation/regression/trex_elk.py b/scripts/automation/regression/trex_elk.py
new file mode 100644
index 00000000..a5ef7a88
--- /dev/null
+++ b/scripts/automation/regression/trex_elk.py
@@ -0,0 +1,322 @@
+import os
+import outer_packages
+import json
+import pprint
+from elasticsearch import Elasticsearch
+from pprint import pprint
+from elasticsearch import helpers
+import random
+import datetime
+
+# build one simulated example object for the perf index
+def create_one_object (build_id):
+    d = {};
+
+    sim_date = datetime.datetime.now() - datetime.timedelta(hours=random.randint(0, 24*30));
+    info = {};
+
+    img = {}
+    img['sha'] = random.choice(["v2.11","v2.10","v2.12","v2.13","v2.14"])
+    img['build_time'] = sim_date.strftime("%Y-%m-%d %H:%M:%S")
+    img['version'] = random.choice(["v2.11","v2.10","v2.12","v2.13","v2.14"])
+    img['formal'] = False
+
+    setup = {}
+
+    setup['distro'] = 'Ubuntu14.04'
+    setup['kernel'] = '2.6.12'
+    setup['baremetal'] = True
+    setup['hypervisor'] = 'None'
+    setup['name'] = 'trex07'
+    setup['cpu-sockets'] = 2
+    setup['cores'] = 16
+    setup['cpu-speed'] = 3.5
+
+    setup['dut'] = 'loopback'
+    setup['drv-name'] = 'mlx5'
+    setup['nic-ports'] = 2
+    setup['total-nic-ports'] = 2
+    setup['nic-speed'] = "40GbE"
+
+    info['image'] = img
+    info['setup'] = setup
+
+    d['info'] = info;
+
+    d['timestamp'] = sim_date.strftime("%Y-%m-%d %H:%M:%S")
+    d['build_id'] = str("build-%d" % (build_id))
+    d['test'] = { "name" : "test1",
+                  "type" : "stateless",
+                  "cores" : random.randint(1, 10),
+                  "cpu%" : random.randint(60, 99),
+                  "mpps" : random.randint(9, 32),
+                  "mpps_pc" : random.randint(9, 32),
+                  "gbps_pc" : random.randint(9, 32),
+                  "gbps" : random.randint(9, 32),
+                  "avg-pktsize" : random.randint(60, 1500),
+                  "latency" : { "min" : random.randint(1, 10),
+                                "max" : random.randint(100, 120),
+                                "avr" : random.randint(1, 60)
+                              }
+                };
+
+    return(d)
+
+
+class EsHelper(object):
+
+    def __init__ (self, es,
+                        alias,
+                        index_name,
+                        mapping):
+        self.es = es
+        self.alias = alias
+        self.index_name = index_name
+        self.mapping = mapping
+        self.setting = { "index.mapper.dynamic" : "false" };
+
+    def delete (self):
+        es = self.es;
+        es.indices.delete(index=self.alias, ignore=[400, 404]);
+
+    def is_exists (self):
+        es = self.es;
+        return es.indices.exists(index=self.alias, ignore=[400, 404])
+
+    def create_first_time (self):
+        es = self.es;
+        index_name = self.index_name
+        es.indices.create(index=index_name, ignore=[], body = {
+                          "aliases": { self.alias : {} },
+                          "mappings" : { "data": self.mapping },
+                          "settings" : self.setting
+                          });
+
+    def update(self):
+        es = self.es;
+        es.indices.put_mapping(index=self.alias, doc_type="data", body=self.mapping);
+        es.indices.rollover(alias=self.alias, body={
+                            "conditions": {
+                                "max_age": "30d",
+                                "max_docs": 100000
+                            },
+                            "mappings" : { "data": self.mapping },
+                            "settings" : self.setting
+                            }
+                            );
+
+    def open(self):
+        if not self.is_exists():
+            self.create_first_time ()
+        else:
+            self.update()
+
+    def close(self):
+        pass;
+
+    def push_data(self, data):
+        es = self.es;
+        es.index(index=self.alias, doc_type="data", body=data);
+
+
+# build one simulated example object for the regression index
+def create_reg_object (build_id):
+    d = {};
+
+    sim_date = datetime.datetime.now() - datetime.timedelta(hours=random.randint(0, 24*30));
+    info = {};
+
+    img = {}
+    img['sha'] = random.choice(["v2.11","v2.10","v2.12","v2.13","v2.14"])
+    img['build_time'] = sim_date.strftime("%Y-%m-%d %H:%M:%S")
+    img['version'] = random.choice(["v2.11","v2.10","v2.12","v2.13","v2.14"])
+    img['formal'] = False
+
+    setup = {}
+
+    setup['distro'] = 'Ubuntu14.04'
+    setup['kernel'] = '2.6.12'
+    setup['baremetal'] = True
+    setup['hypervisor'] = 'None'
+    setup['name'] = 'trex07'
+    setup['cpu-sockets'] = 2
+    setup['cores'] = 16
+    setup['cpu-speed'] = 3.5
+
+    setup['dut'] = 'loopback'
+    setup['drv-name'] = 'mlx5'
+    setup['nic-ports'] = 2
+    setup['total-nic-ports'] = 2
+    setup['nic-speed'] = "40GbE"
+
+    info['image'] = img
+    info['setup'] = setup
+
+    d['info'] = info;
+
+    d['timestamp'] = sim_date.strftime("%Y-%m-%d %H:%M:%S")
+    d['build_id'] = str("build-%d" % (build_id))
+    d['test'] = { "name" : "stateful_tests.trex_imix_test.CTRexIMIX_Test.test_routing_imix",
+                  "type" : "stateless",
+                  "duration_sec" : random.uniform(1, 10),
+                  "result" : random.choice(["PASS","SKIP","FAIL"]),
+                  "stdout" : """
+    LATEST RESULT OBJECT:
+    Total ARP received : 16 pkts
+    maximum-latency : 300 usec
+    average-latency : 277 usec
+    latency-any-error : ERROR
+    """
+                };
+
+    return(d)
+
+
+# How to add a new keyword: you can add a new field, but you can't remove an old one.
+class TRexEs(object):
+
+    def __init__ (self, host,
+                        port,
+                 ):
+        self.es = Elasticsearch([{"host": host, "port": port}])
+        es = self.es;
+        res = es.info()
+        es_version = res["version"]["number"];
+        l = es_version.split('.');
+        if not (len(l) == 3 and int(l[0]) >= 5):
+            print("Invalid ES version, should be at least 5.0.x", es_version);
+            raise RuntimeError("ES server version should be at least 5.0.x")
+
+        setup_info = { # constant per setup
+            "properties": {
+
+                "image" : {
+                    "properties": {
+                        "sha"        : { "type": "keyword" },   # git sha
+                        "build_time" : { "type": "date",        # build time
+                                         "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"},
+                        "version"    : { "type": "keyword" },   # version name, like 'v2.12'
+                        "formal"     : { "type": "boolean" },   # true for a formal release
+                    }
+                },
+
+                "setup" : {
+                    "properties": {
+                        "distro"     : { "type": "keyword" },   # e.g. 'ubuntu'
+                        "kernel"     : { "type": "keyword" },   # e.g. 2.3.19
+                        "baremetal"  : { "type": "boolean" },   # true for bare metal, false for a VM
+                        "hypervisor" : { "type": "keyword" },   # kvm, esxi, none
+                        "name"       : { "type": "keyword" },   # setup name, e.g. kiwi02
+                        "cpu-sockets"     : { "type": "long" },    # number of sockets
+                        "cores"           : { "type": "long" },    # total cores
+                        "cpu-speed"       : { "type": "double" },  # e.g. 3.5 GHz
+                        "dut"             : { "type": "keyword" }, # asr1k, loopback
+                        "drv-name"        : { "type": "keyword" }, # vic, mlx5, 599, xl710, x710
+                        "nic-ports"       : { "type": "long" },    # 2, 1, 4
+                        "total-nic-ports" : { "type": "long" },    # 8
+                        "nic-speed"       : { "type": "keyword" }, # 40GbE
+                    }
+                }
+            }
+        }
+
+        perf_mapping = {
+            "dynamic": "strict",
+            "properties": {
+
+                "scenario"  : { "type": "keyword" },
+                "build_id"  : { "type": "keyword" },
+                "timestamp" : { "type": "date",
+                                "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"},
+
+                "info" : setup_info,
+
+                "test" : {
+                    "properties": {
+                        "name"          : { "type": "keyword" }, # name of the test
+                        "type"          : { "type": "keyword" }, # stateless, stateful, other
+                        "cores"         : { "type": "long" },
+                        "cpu%"          : { "type": "double" },
+                        "mpps"          : { "type": "double" },
+                        "streams_count" : { "type": "long" },
+                        "mpps_pc"       : { "type": "double" },
+                        "gbps_pc"       : { "type": "double" },
+                        "gbps"          : { "type": "double" },
+                        "avg-pktsize"   : { "type": "long" },
+                        "kcps"          : { "type": "double" },
+                        "latency" : {
+                            "properties": {
+                                "min"       : { "type": "double" },
+                                "max"       : { "type": "double" },
+                                "avr"       : { "type": "double" },
+                                "max-win"   : { "type": "double" },
+                                "drop-rate" : { "type": "double" },
+                                "jitter"    : { "type": "double" },
+                            }
+                        }
+
+                    }
+                }
+            }
+        }
+
+        self.perf = EsHelper(es=es,
+                             alias="perf",
+                             index_name='trex_perf-000001',
+                             mapping=perf_mapping)
+
+        reg_mapping = {
+            "dynamic": "strict",
+            "properties": {
+
+                "scenario"  : { "type": "keyword" },
+                "build_id"  : { "type": "keyword" },
+                "timestamp" : { "type": "date",
+                                "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"},
+
+                "info" : setup_info,
+
+                "test" : {
+                    "properties": {
+                        "name"         : { "type" : "text" },    # name of the test
+                        "name_key"     : { "type" : "keyword" }, # name of the test, as a keyword
+                        "name_full"    : { "type" : "keyword" }, # full name of the test
+                        "type"         : { "type" : "keyword" }, # stateless, stateful, other
+                        "duration_sec" : { "type": "double" },   # sec
+                        "result"       : { "type" : "keyword" }, # PASS, FAIL, SKIP
+                        "stdout"       : { "type" : "text" },    # output in case of failure
+                    }
+                }
+            }
+        }
+
+        self.reg = EsHelper(es=es,
+                            alias="reg",
+                            index_name='trex_reg-000001',
+                            mapping=reg_mapping)
+
+        self.perf.open();
+        self.reg.open();
+
+
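Taken together, TRexEs opens (or rolls over) the "perf" and "reg" aliases on construction, and each EsHelper pushes one strict-mapped document per result. A minimal usage sketch; the host is the same value hard-coded later in this patch and the build id is hypothetical (illustration only):

    import trex_elk

    es = trex_elk.TRexEs('sceasr-b20', 9200)            # connects, validates ES >= 5.0.x, opens both aliases
    es.perf.push_data(trex_elk.create_one_object(17))   # one simulated performance record
    es.reg.push_data(trex_elk.create_reg_object(17))    # one simulated regression record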
diff --git a/scripts/automation/regression/trex_unit_test.py b/scripts/automation/regression/trex_unit_test.py
index 34d2c430..39984c7d 100755
--- a/scripts/automation/regression/trex_unit_test.py
+++ b/scripts/automation/regression/trex_unit_test.py
@@ -27,10 +27,13 @@ Description:
 import os
 import sys
 import outer_packages
-
+import datetime
 import nose
 from nose.plugins import Plugin
+from nose.plugins.xunit import escape_cdata
 from nose.selector import Selector
+from nose.exc import SkipTest
+from nose.pyversion import force_unicode, format_exception
 import CustomLogger
 import misc_methods
 from rednose import RedNose
@@ -40,11 +43,27 @@ from trex_stf_lib.trex_client import *
 from trex_stf_lib.trex_exceptions import *
 from trex_stl_lib.api import *
 from trex_stl_lib.utils.GAObjClass import GAmanager_Regression
+import trex_elk
 import trex
 import socket
 from pprint import pprint
 import time
 from distutils.dir_util import mkpath
+import re
+from io import StringIO
+
+
+TEST_ID = re.compile(r'^(.*?)(\(.*\))$')
+
+def id_split(idval):
+    # split a nose test id such as 'pkg.module.Class.test(args)'
+    # into ['pkg.module.Class', 'test(args)']
+    m = TEST_ID.match(idval)
+    if m:
+        name, fargs = m.groups()
+        head, tail = name.rsplit(".", 1)
+        return [head, tail + fargs]
+    else:
+        return idval.rsplit(".", 1)
 
 # nose overrides
@@ -105,7 +124,167 @@ def address_to_ip(address):
         return socket.gethostbyname(address)
 
+class TRexTee(object):
+    # duplicate writes to several streams (live console plus a capture buffer)
+    def __init__ (self, encoding, *args):
+        self._encoding = encoding
+        self._streams = args
+
+    def write(self, data):
+        data = force_unicode(data, self._encoding)
+        for s in self._streams:
+            s.write(data)
+
+    def writelines(self, lines):
+        for line in lines:
+            self.write(line)
+
+    def flush(self):
+        for s in self._streams:
+            s.flush()
+
+    def isatty(self):
+        return False
+
+
 class CTRexTestConfiguringPlugin(Plugin):
+    encoding = 'UTF-8'
+
+    def __init__(self):
+        super(CTRexTestConfiguringPlugin, self).__init__()
+        self._capture_stack = []
+        self._currentStdout = None
+        self._currentStderr = None
+
+    def _timeTaken(self):
+        if hasattr(self, '_timer'):
+            taken = time.time() - self._timer
+        else:
+            # test died before it ran (probably an error in setup()),
+            # or success/failure was added before the test started, probably
+            # due to custom TestResult munging
+            taken = 0.0
+        return taken
+
+    def _startCapture(self):
+        self._capture_stack.append((sys.stdout, sys.stderr))
+        self._currentStdout = StringIO()
+        self._currentStderr = StringIO()
+        sys.stdout = TRexTee(self.encoding, self._currentStdout, sys.stdout)
+        sys.stderr = TRexTee(self.encoding, self._currentStderr, sys.stderr)
+
+    def startContext(self, context):
+        self._startCapture()
+
+    def stopContext(self, context):
+        self._endCapture()
+
+    def beforeTest(self, test):
+        self._timer = time.time()
+        self._startCapture()
+
+    def _endCapture(self):
+        if self._capture_stack:
+            sys.stdout, sys.stderr = self._capture_stack.pop()
+
+    def afterTest(self, test):
+        self._endCapture()
+        self._currentStdout = None
+        self._currentStderr = None
+
+    def _getCapturedStdout(self):
+        if self._currentStdout:
+            value = self._currentStdout.getvalue()
+            if value:
+                return '<system-out><![CDATA[%s]]></system-out>' % escape_cdata(value)
+        return ''
+
+    def _getCapturedStderr(self):
+        if self._currentStderr:
+            value = self._currentStderr.getvalue()
+            if value:
+                return '<system-err><![CDATA[%s]]></system-err>' % escape_cdata(value)
+        return ''
+
+    def addError(self, test, err, capt=None):
+        elk = CTRexScenario.elk
+        if elk:
+            taken = self._timeTaken()
+            id = test.id()
+            err_msg = self._getCapturedStdout() + self._getCapturedStderr();
+            name = id_split(id)[-1]
+
+            elk_obj = trex.copy_elk_info ()
+            elk_obj['test'] = {
+                "name" : name,
+                "name_key" : name,
+                "name_full" : id,
+                "type" : self.get_operation_mode (),
+                "duration_sec" : taken,
+                "result" : "ERROR",
+                "stdout" : err_msg,
+            };
+            #pprint(elk_obj['test']);
+            elk.reg.push_data(elk_obj)
+
+    def addFailure(self, test, err, capt=None, tb_info=None):
+        elk = CTRexScenario.elk
+        if elk:
+            taken = self._timeTaken()
+            tb = format_exception(err, self.encoding)
+            id = test.id()
+            err_msg = self._getCapturedStdout() + self._getCapturedStderr() + tb; # include the traceback in the report
+            name = id_split(id)[-1]
+
+            elk_obj = trex.copy_elk_info ()
+            elk_obj['test'] = {
+                "name" : name,
+                "name_key" : name,
+                "name_full" : id,
+                "type" : self.get_operation_mode (),
+                "duration_sec" : taken,
+                "result" : "FAILURE",
+                "stdout" : err_msg,
+            };
+            #pprint(elk_obj['test']);
+            elk.reg.push_data(elk_obj)
+
+    def addSuccess(self, test, capt=None):
+        elk = CTRexScenario.elk
+        if elk:
+            taken = self._timeTaken()
+            id = test.id()
+            name = id_split(id)[-1]
+            elk_obj = trex.copy_elk_info ()
+            elk_obj['test'] = {
+                "name" : name,
+                "name_key" : name,
+                "name_full" : id,
+                "type" : self.get_operation_mode (),
+                "duration_sec" : taken,
+                "result" : "PASS",
+                "stdout" : "",
+            };
+            #pprint(elk_obj['test']);
+            elk.reg.push_data(elk_obj)
"duration_sec" : taken, + "result" : "PASS", + "stdout" : "", + }; + #pprint(elk_obj['test']); + elk.reg.push_data(elk_obj) + + + + def get_operation_mode (self): + if self.stateful: + return('stateful'); + return('stateless'); + + + + +##### option/configure + def options(self, parser, env = os.environ): super(CTRexTestConfiguringPlugin, self).options(parser, env) parser.add_option('--cfg', '--trex-scenario-config', action='store', @@ -229,6 +408,52 @@ class CTRexTestConfiguringPlugin(Plugin): appName = 'TRex', appVer = CTRexScenario.trex_version) + CTRexScenario.elk = trex_elk.TRexEs('sceasr-b20',9200); + self.set_cont_elk_info () + + def set_cont_elk_info (self): + elk_info={} + timestamp = datetime.datetime.now() - datetime.timedelta(hours=2); # need to update this + info = {}; + + + img={} + img['sha'] = "v2.14" #TBD + img['build_time'] = timestamp.strftime("%Y-%m-%d %H:%M:%S") + img['version'] = "v2.14" #TBD need to fix + img['formal'] = False + + setup={} + + setup['distro']='None' #TBD 'Ubunto14.03' + setup['kernel']='None' #TBD '2.6.12' + setup['baremetal']=True #TBD + setup['hypervisor']='None' #TBD + setup['name']=CTRexScenario.setup_name + + setup['cpu-sockets']=0 #TBD 2 + setup['cores']=0 #TBD 16 + setup['cpu-speed']=-1 #TBD 3.5 + + setup['dut'] ='None' #TBD 'loopback' + setup['drv-name']='None' #TBD 'mlx5' + setup['nic-ports']=0 #TBD 2 + setup['total-nic-ports']=0 #TBD 2 + setup['nic-speed'] = "None" #"40GbE" TBD + + + + info['image'] = img + info['setup'] = setup + + elk_info['info'] =info; + + elk_info['timestamp']=timestamp.strftime("%Y-%m-%d %H:%M:%S") # need to update it + elk_info['build_id']=os.environ.get('BUILD_ID') + elk_info['scenario']=os.environ.get('SCENARIO') + + CTRexScenario.elk_info = elk_info + def begin (self): client = CTRexScenario.trex @@ -274,6 +499,9 @@ class CTRexTestConfiguringPlugin(Plugin): CustomLogger.setup_custom_logger('TRexLogger') def finalize(self, result): + while self._capture_stack: + self._endCapture() + if self.functional or self.collect_only: return #CTRexScenario.is_init = False |